def episodios(item): logger.info("pelisalacarta.channels.seriesflv episodios") itemlist = [] # Descarga la pagina headers = DEFAULT_HEADERS[:] data = scrapertools.cache_page(item.url,headers=headers) #logger.info("data="+data) # Extrae los episodios ''' <tr> <td class="sape"><i class="glyphicon glyphicon-film"></i> <a href="http://www.seriesflv.net/ver/game-of-thrones-1x9.html" class="color4">Game of Thrones (Juego de tronos) 1x09</a></td> <td> <a href="javascript:void(0);" class="loginSF" title="Marcar Visto"><span class="no visto"></span></a> </td> <td><div class="star_rating"> <ul class="star"> <li class="curr" style="width: 99.6%;"></li> </ul> </div></td> <td> <img src="http://www.seriesflv.net/images/lang/es.png" width="20" /> <img src="http://www.seriesflv.net/images/lang/la.png" width="20" /> <img src="http://www.seriesflv.net/images/lang/sub.png" width="20" /> </td> <td>40,583</td> </tr> ''' patron = '<tr[^<]+<td class="sape"><i class="glyphicon glyphicon-film"></i[^<]+' patron += '<a href="([^"]+)"[^>]+>([^<]+)</a>.*?<img(.*?)</td' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle,bloqueidiomas in matches: title = scrapedtitle+" (" patronidiomas="lang/([a-z]+).png" matchesidiomas = re.compile(patronidiomas,re.DOTALL).findall(bloqueidiomas) for idioma in matchesidiomas: title=title+get_nombre_idioma(idioma)+", " title=title[:-2]+")" thumbnail = "" plot = "" url = scrapedurl ## Sólo nos interesa el título de la serie show = re.sub(" \([^\)]+\)$","",item.show) ## Se a añadido el parámetro show itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=show)) if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]") ## Opción "Añadir esta serie a la biblioteca de XBMC" if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0: itemlist.append( Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=show) ) return itemlist
def youtube_videos(item): logger.info("disneyjunior.youtube_videos ") itemlist=[] # Fetch video list from YouTube feed data = scrapertools.cache_page( item.url ) logger.info("data="+data) # Extract items from feed pattern = "<entry(.*?)</entry>" matches = re.compile(pattern,re.DOTALL).findall(data) for entry in matches: logger.info("entry="+entry) # Not the better way to parse XML, but clean and easy title = scrapertools.find_single_match(entry,"<titl[^>]+>([^<]+)</title>") title = title.replace("Disney Junior España | ","") plot = scrapertools.find_single_match(entry,"<summa[^>]+>([^<]+)</summa") thumbnail = scrapertools.find_single_match(entry,"<media\:thumbnail url='([^']+)'") video_id = scrapertools.find_single_match(entry,"http\://www.youtube.com/watch\?v\=([0-9A-Za-z_-]{11})") url = video_id # Appends a new item to the xbmc item list itemlist.append( Item(channel=CHANNELNAME, title=title , action="play" , server="youtube", url=url, thumbnail=thumbnail, plot=plot , folder=False) ) if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0: itemlist.append( Item(channel=item.channel, title=">> Opciones para esta serie", url=item.url, action="serie_options##youtube_videos", thumbnail=item.thumbnail, show=item.title, folder=False)) return itemlist
def mainlist(item): logger.info("channelselector.mainlist") # Obtiene el idioma, y el literal idioma = config.get_setting("languagefilter") logger.info("channelselector.mainlist idioma=%s" % idioma) langlistv = [config.get_localized_string(30025),config.get_localized_string(30026),config.get_localized_string(30027),config.get_localized_string(30028),config.get_localized_string(30029)] try: idiomav = langlistv[int(idioma)] except: idiomav = langlistv[0] # Añade los canales que forman el menú principal itemlist = [] itemlist.append( Item(title=config.get_localized_string(30118)+" ("+idiomav+")" , channel="channelselector" , action="listchannels", category='*', thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"channelselector.png") ) ) # Canales itemlist.append( Item(title=config.get_localized_string(30119)+" ("+idiomav+")" , channel="channelselector" , action="channeltypes", thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"channelselector.png") ) ) # Seleccione una categoria itemlist.append( Item(title=config.get_localized_string(30103) , channel="buscador" , action="mainlist" , thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"buscador.png")) ) itemlist.append( Item(title=config.get_localized_string(30128) , channel="trailertools" , action="mainlist" , thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"trailertools.png")) ) itemlist.append( Item(title=config.get_localized_string(30102) , channel="favoritos" , action="mainlist" , thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"favoritos.png")) ) if config.get_platform() in ("wiimc","rss") :itemlist.append( Item(title="Wiideoteca (Beta)" , channel="wiideoteca" , action="mainlist", thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"wiideoteca.png")) ) if config.get_platform()=="rss":itemlist.append( Item(title="pyLOAD (Beta)" , channel="pyload" , action="mainlist" , thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"pyload.png")) ) itemlist.append( Item(title=config.get_localized_string(30101) , channel="descargas" , action="mainlist", thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"descargas.png")) ) if "xbmceden" in config.get_platform(): itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"configuracion.png"), folder=False) ) else: itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"configuracion.png")) ) if config.get_platform()!="rss": itemlist.append( Item(title=config.get_localized_string(30104) , channel="ayuda" , action="mainlist", thumbnail = urlparse.urljoin(config.get_thumbnail_path(),"ayuda.png")) ) return itemlist
def findvideos(item): logger.info("pelisalacarta.channels.hdfull findvideos") itemlist=[] ## Carga estados status = jsontools.load_json(scrapertools.cache_page(host+'/a/status/all')) url_targets = item.url ## Vídeos if "###" in item.url: id = item.url.split("###")[1].split(";")[0] type = item.url.split("###")[1].split(";")[1] item.url = item.url.split("###")[0] if type == "2" and account and item.category != "Cine": title = bbcode_kodi2html(" ( [COLOR orange][B]Agregar a Favoritos[/B][/COLOR] )") if "Favorito" in item.title: title = bbcode_kodi2html(" ( [COLOR red][B]Quitar de Favoritos[/B][/COLOR] )") if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")): title_label = bbcode_kodi2html(" ( [COLOR gray][B]" + item.show + "[/B][/COLOR] )") itemlist.append( Item( channel=__channel__, action="findvideos", title=title_label, fulltitle=title_label, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=False ) ) itemlist.append( Item( channel=__channel__, action="set_status", title=title, fulltitle=title, url=url_targets, thumbnail=item.thumbnail, show=item.show, folder=True ) ) data = agrupa_datos( scrapertools.cache_page(item.url) ) patron = '<div class="embed-selector"[^<]+' patron += '<h5 class="left"[^<]+' patron += '<span[^<]+<b class="key">\s*Idioma.\s*</b>([^<]+)</span[^<]+' patron += '<span[^<]+<b class="key">\s*Servidor.\s*</b><b[^>]+>([^<]+)</b[^<]+</span[^<]+' patron += '<span[^<]+<b class="key">\s*Calidad.\s*</b>([^<]+)</span[^<]+</h5.*?' patron += '<a href="(http[^"]+)".*?' patron += '</i>([^<]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) for idioma,servername,calidad,url,opcion in matches: opcion = opcion.strip() if opcion != "Descargar": opcion = "Ver" title = opcion+": "+servername.strip()+" ("+calidad.strip()+")"+" ("+idioma.strip()+")" title = scrapertools.htmlclean(title) thumbnail = item.thumbnail plot = item.title+"\n\n"+scrapertools.find_single_match(data,'<meta property="og:description" content="([^"]+)"') plot = scrapertools.htmlclean(plot) fanart = scrapertools.find_single_match(data,'<div style="background-image.url. ([^\s]+)') url+= "###" + id + ";" + type itemlist.append( Item( channel=__channel__, action="play", title=title, fulltitle=title, url=url, thumbnail=thumbnail, plot=plot, fanart=fanart, show=item.show, folder=True ) ) ## 2 = película if type == "2" and item.category != "Cine": ## STRM para todos los enlaces de servidores disponibles ## Si no existe el archivo STRM de la peícula muestra el item ">> Añadir a la biblioteca..." try: itemlist.extend( file_cine_library(item,url_targets) ) except: pass return itemlist
def episodios(item): logger.info("[shurweb.py] episodios") url = item.url # Descarga la página data = scrapertools.cachePage(url) item = detalle_programa(item,data) # Extrae las entradas ''' <li> <div class="video"> <a class="video_title" href="http://www.shurweb.es/videos/alcatraz-1x10/">Alcatraz 1x10</a> </div> </li> ''' patron = '<li>[^<]+' patron += '<div class="video">[^<]+' patron += '<a class="video_title" href="([^"]+)">([^<]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) if DEBUG: scrapertools.printMatches(matches) itemlist = [] for url,title in matches: scrapedtitle = title fulltitle = scrapedtitle scrapedplot = item.plot scrapedurl = url scrapedthumbnail = item.thumbnail if DEBUG: logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") itemlist.append( Item(channel=__channel__, action='findvideos', title=scrapedtitle , fulltitle=fulltitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=scrapedtitle , show=item.show, context="4|5",fanart="http://pelisalacarta.mimediacenter.info/fanart/shurweb.jpg", viewmode="movie_with_plot") ) if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"): itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) itemlist.append( Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show)) return itemlist
def play_(item): logger.info("pelisalacarta.channels.documaniatv play_") itemlist = [] try: import xbmc if not xbmc.getCondVisibility('System.HasAddon(script.cnubis)'): from platformcode import platformtools platformtools.dialog_ok("Addon no encontrado", "Para ver vídeos alojados en cnubis necesitas tener su instalado su add-on", line3="Descárgalo en http://cnubis.com/kodi-pelisalacarta.html" ) return itemlist except: pass # Descarga la pagina data = scrapertools.cachePage(item.url, headers=headers) # Busca enlace directo video_url = scrapertools.find_single_match(data, 'class="embedded-video"[^<]+<iframe.*?src="([^"]+)"') if config.get_platform() == "plex" or config.get_platform() == "mediaserver": code = scrapertools.find_single_match(video_url, 'u=([A-z0-9]+)') url = "http://cnubis.com/plugins/mediaplayer/embeder/_embedkodi.php?u=%s" % code data = scrapertools.downloadpage(url, headers=headers) video_url = scrapertools.find_single_match(data, 'file\s*:\s*"([^"]+)"') itemlist.append(item.clone(action="play", url=video_url, server="directo")) return itemlist cnubis_script = xbmc.translatePath("special://home/addons/script.cnubis/default.py") xbmc.executebuiltin("XBMC.RunScript(%s, url=%s&referer=%s&title=%s)" % (cnubis_script, urllib.quote_plus(video_url), urllib.quote_plus(item.url), item.fulltitle)) return itemlist
def findvideos(item): logger.info("pelisalacarta.channels.documaniatv findvideos") itemlist = [] # Se comprueba si el vídeo está ya en favoritos/ver más tarde url = "http://www.documaniatv.com/ajax.php?p=playlists&do=video-watch-load-my-playlists&video-id=%s" % item.id data = scrapertools.cachePage(url, headers=headers) data = jsontools.load_json(data) data = re.sub(r"\n|\r|\t", '', data['html']) itemlist.append( Item(channel=item.channel, action="play_" , title=">> Reproducir vídeo", url=item.url, thumbnail=item.thumbnail, fulltitle=item.fulltitle, folder=False)) if "kodi" in config.get_platform(): folder = False else: folder = True patron = '<li data-playlist-id="([^"]+)".*?onclick="playlist_(\w+)_item' \ '.*?<span class="pm-playlists-name">(.*?)</span>.*?' \ '<span class="pm-playlists-video-count">(.*?)</span>' matches = scrapertools.find_multiple_matches(data, patron) for playlist_id, playlist_action, playlist_title, video_count in matches: scrapedtitle = playlist_action.replace('remove','Eliminar de ').replace('add','Añadir a ') scrapedtitle += playlist_title + " ("+video_count+")" itemlist.append( Item(channel=item.channel, action="acciones_playlist" , title=scrapedtitle, id=item.id, list_id=playlist_id, url="http://www.documaniatv.com/ajax.php", folder=folder)) if "kodi" in config.get_platform(): itemlist.append( Item(channel=item.channel, action="acciones_playlist" , title="Crear una nueva playlist y añadir el documental", id=item.id, url="http://www.documaniatv.com/ajax.php", folder=folder)) itemlist.append( Item(channel=item.channel, action="acciones_playlist" , title="Me gusta", id=item.id, url="http://www.documaniatv.com/ajax.php", folder=folder)) return itemlist
def episodios(item): logger.info("pelisalacarta.channels.playmax episodios") itemlist = [] # Descarga la página data = scrapertools.cache_page(item.url) ## Agrupa los datos data = re.sub(r'\n|\r|\t| |<br>','',data) data = re.sub(r'\s+',' ',data) data = re.sub(r'<!--.*?-->','',data) #function load_links(value){var url = './c_enlaces.php?ficha=128&id=' + value + '&key=ZHB6YXE='; #^_______API+Número de la ficha:______^______Lo que usaremos________^______No nos interesa_____^ patron = "var url = '([^']+)'" enlace = scrapertools.get_match(data,patron) #onclick="load_links_dos('5126', 'Viendo The Walking Dead 1x01 - Días pasados', 'Días pasados', '1X01', '5125', '5127')" #_API+Número de episodio_^_id_^_________^_________scrapedtitle_______________^__^___________No nos interesa___________^ patron = "load_links_dos.'([^']+)', 'Viendo ([^']+)'" all_episodes = re.compile(patron,re.DOTALL).findall(data) for id, scrapedtitle in all_episodes: url = enlace + id + "&key=ZHp6ZG0=" itemlist.append( Item(channel=__channel__, title=scrapedtitle, url=urlparse.urljoin(host,url), action="findvideos", thumbnail=item.thumbnail, show=item.show) ) ## Opción "Añadir esta serie a la biblioteca de XBMC" if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0: itemlist.append( Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) return itemlist
def episodios(item): logger.info("pelisalacarta.seriesblanco episodios") itemlist = [] # Descarga la página data = scrapertools.cache_page(item.url) data = re.sub(r"\n|\r|\t|\s{2}| |<Br>|<BR>|<br>|<br/>|<br />|-\s","",data) data = re.sub(r"<!--.*?-->","",data) data = re.sub(r"a></td><td> <img src=/banderas/","a><idioma/",data) data = re.sub(r" <img src=/banderas/","|",data) data = re.sub(r"\.png border='\d+' height='\d+' width='\d+' /><","/idioma><",data) data = re.sub(r"\.png border='\d+' height='\d+' width='\d+' />","",data) #<a href='/serie/534/temporada-1/capitulo-00/the-big-bang-theory.html'>1x00 - Capitulo 00 </a></td><td> <img src=/banderas/vo.png border='0' height='15' width='25' /> <img src=/banderas/vos.png border='0' height='15' width='25' /></td></tr> patron = "<a href='([^']+)'>([^<]+)</a><idioma/([^/]+)/idioma>" matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl, scrapedtitle, scrapedidioma in matches: idioma = "" for i in scrapedidioma.split("|"): idioma+= " [" + idiomas[i] + "]" title = item.title + " - " + scrapedtitle + idioma itemlist.append( Item(channel=__channel__, title =title , url=urlparse.urljoin(host,scrapedurl), action="findvideos", show=item.show) ) if len(itemlist) == 0 and "<title>404 Not Found</title>" in data: itemlist.append( Item(channel=__channel__, title ="la url '"++"' parece no estar disponible en la web. Iténtalo más tarde." , url=item.url, action="series") ) ## Opción "Añadir esta serie a la biblioteca de XBMC" if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0: itemlist.append( Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) return itemlist
def episodios(item): logger.info("pelisalacarta.channels.reyanime episodios") itemlist = [] # Descarga la pagina data = scrapertools.cache_page(item.url) data = scrapertools.find_single_match(data,'<div id="lcmain"(.*?)</ul>') patron = '<li[^<]+<a href="([^"]+)[^>]+>([^<]+)<' matches = re.compile(patron,re.DOTALL).findall(data) for scrapedurl,scrapedtitle in matches: title = scrapedtitle.strip() try: episodio = scrapertools.get_match(scrapedtitle,"Capitulo\s+(\d+)") if len(episodio)==1: title = "1x0"+episodio else: title = "1x"+episodio except: pass url = urlparse.urljoin(item.url,scrapedurl) thumbnail = item.thumbnail plot = item.plot if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]") itemlist.append( Item(channel=__channel__, action="findvideos", title=title , url=url , thumbnail=thumbnail , plot=plot , show=item.show, fulltitle=item.show+" "+title, fanart=thumbnail, viewmode="movies_with_plot", folder=True) ) if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"): itemlist.append( Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) itemlist.append( Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show) ) return itemlist
def episodios(item): logger.info("tvalacarta.channels.clantv episodios") itemlist = [] # Descarga la página url = item.url+"/videos.json" data = scrapertools.cache_page(url) json_object = jsontools.load_json(data) #logger.info("json_object="+json_object) json_items = json_object["page"]["items"] for json_item in json_items: title = json_item["longTitle"] url = json_item["uri"] thumbnail = item.thumbnail if json_item["description"] is not None: plot = json_item["description"] else: plot = "" fanart = item.fanart page = url if (DEBUG): logger.info(" title=["+repr(title)+"], url=["+repr(url)+"], thumbnail=["+repr(thumbnail)+"] plot=["+repr(plot)+"]") itemlist.append( Item(channel="rtve", title=title , action="play" , server="rtve", page=page, url=url, thumbnail=thumbnail, fanart=thumbnail, show=item.show , plot=plot , viewmode="movie_with_plot", folder=False) ) from core import config if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0: itemlist.append( Item(channel=item.channel, title=">> Opciones para esta serie", url=item.url, action="serie_options##episodios", thumbnail=item.thumbnail, show=item.show, folder=False)) return itemlist
def getmainlist(preferred_thumb=""): logger.info("channelselector.getmainlist") itemlist = [] # Obtiene el idioma, y el literal idioma = config.get_setting("languagefilter") logger.info("channelselector.getmainlist idioma=%s" % idioma) langlistv = [config.get_localized_string(30025),config.get_localized_string(30026),config.get_localized_string(30027),config.get_localized_string(30028),config.get_localized_string(30029)] try: idiomav = langlistv[int(idioma)] except: idiomav = langlistv[0] # Añade los canales que forman el menú principal itemlist.append( Item(title=config.get_localized_string(30130) , channel="novedades" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_novedades.png") ) ) itemlist.append( Item(title=config.get_localized_string(30118) , channel="channelselector" , action="channeltypes", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_canales.png") ) ) itemlist.append( Item(title=config.get_localized_string(30103) , channel="buscador" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_buscar.png")) ) #if config.is_xbmc(): itemlist.append( Item(title=config.get_localized_string(30128) , channel="trailertools" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_trailers.png")) ) itemlist.append( Item(title=config.get_localized_string(30102) , channel="favoritos" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_favoritos.png")) ) itemlist.append( Item(title=config.get_localized_string(30131) , channel="wiideoteca" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_biblioteca.png")) ) if config.get_platform()=="rss":itemlist.append( Item(title="pyLOAD (Beta)" , channel="pyload" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"pyload.png")) ) itemlist.append( Item(title=config.get_localized_string(30101) , channel="descargas" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_descargas.png")) ) if "xbmceden" in config.get_platform(): itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_configuracion.png"), folder=False) ) else: itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_configuracion.png")) ) #if config.get_setting("fileniumpremium")=="true": # itemlist.append( Item(title="Torrents (Filenium)" , channel="descargasfilenium" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"torrents.png")) ) #if config.get_library_support(): if config.get_platform()!="rss": itemlist.append( Item(title=config.get_localized_string(30104) , channel="ayuda" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_ayuda.png")) ) return itemlist
def listchannels(params,url,category): logger.info("channelselector.listchannels") lista = filterchannels(category) for channel in lista: if config.is_xbmc() and (channel.type=="xbmc" or channel.type=="generic"): addfolder(channel.title , channel.channel , "mainlist" , channel.channel) elif config.get_platform()=="boxee" and channel.extra!="rtmp": addfolder(channel.title , channel.channel , "mainlist" , channel.channel) if config.get_platform()=="kodi-krypton": import plugintools plugintools.set_view( plugintools.TV_SHOWS ) # Label (top-right)... import xbmcplugin xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category ) xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE ) xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True ) if config.get_setting("forceview")=="true": # Confluence - Thumbnail import xbmc xbmc.executebuiltin("Container.SetViewMode(500)")
def episodios(item): logger.info("tvalacarta.channels.clantv episodios") itemlist = [] # Descarga la página data = scrapertools.cache_page(item.url) # Extrae los capítulos patron = '<div class="contenido-serie">(.*?)</div>' matches = re.compile(patron,re.DOTALL).findall(data) logger.info("tvalacarta.channels.clantv encontrados %d episodios" % len(matches) ) if len(matches)==0: return itemlist data2 = matches[0] itemlist = videos(item,data2) # Añade el resto de páginas patron = '<li class="siguiente"><a rel="next" title="Ir a la página siguiente" href="([^"]+)">Siguiente</a></li>' matches = re.compile(patron,re.DOTALL).findall(data) if DEBUG: scrapertools.printMatches(matches) if len(matches)>0: match = matches[0] item.url = urlparse.urljoin(item.url,match) itemlist.extend(episodios(item)) from core import config #if config.get_platform().startswith("xbmc"): # itemlist.append( Item(channel=item.channel, title=">> Añadir la serie completa a la lista de descarga", url=item.url, action="download_all_episodes##episodios", show=item.show) ) if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0: itemlist.append( Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show, folder=False)) return itemlist
def episodios(item): logger.info("pelisalacarta.channels.shurweb episodios") itemlist=[] data = scrapertools.cachePage(item.url) item.plot = scrapertools.find_single_match(data,'<div class="col-sm-10">(.*?)<script') item.plot = scrapertools.htmlclean(item.plot) patron = '<div class="video"[^<]+<a class="video_title" href="([^"]+)">([^<]+)</a>' matches = re.compile(patron,re.DOTALL).findall(data) if DEBUG: scrapertools.printMatches(matches) for scrapedurl,scrapedtitle in matches: title = scrapedtitle url = scrapedurl plot = item.plot thumbnail = "" if DEBUG: logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]") itemlist.append( Item(channel=__channel__, action='findvideos', title=title , show=item.show , url=url , thumbnail=thumbnail , plot=plot , extra=scrapedtitle ,fanart="http://pelisalacarta.mimediacenter.info/fanart/shurweb.jpg", viewmode="movie_with_plot") ) if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"): itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) itemlist.append( Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show)) return itemlist
def mainlist(params,url,category): logger.info("channelselector.mainlist") # Verifica actualizaciones solo en el primer nivel if config.get_platform()!="boxee": try: from core import updater except ImportError: logger.info("channelselector.mainlist No disponible modulo actualizaciones") else: if config.get_setting("updatecheck2") == "true": logger.info("channelselector.mainlist Verificar actualizaciones activado") updater.checkforupdates() else: logger.info("channelselector.mainlist Verificar actualizaciones desactivado") itemlist = getmainlist("squares") for elemento in itemlist: logger.info("channelselector item="+elemento.tostring()) addfolder(elemento.title , elemento.channel , elemento.action , thumbnailname=elemento.thumbnail, folder=elemento.folder) if config.get_platform()=="kodi-krypton": import plugintools plugintools.set_view( plugintools.TV_SHOWS ) # Label (top-right)... import xbmcplugin #xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category="" ) #xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE ) xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True ) if config.get_setting("forceview")=="true": # Confluence - Thumbnail import xbmc xbmc.executebuiltin("Container.SetViewMode(500)")
def episodios(item): logger.info("[seriespepito.py] list") # Descarga la página data = scrapertools.cache_page(item.url) # Completa plot y thumbnail item = detalle_programa(item,data) data = scrapertools.get_match(data,'<div class="accordion"(.*?)<div class="subtitulo">') logger.info(data) # Extrae los capítulos ''' <tbody> <tr> <td> <a class="asinenlaces" title=" 0x01 - Battlestar Galactica 2003 - Capitulo 1" href="http://battlestar-galactica-2003.seriespepito.com/temporada-0/capitulo-1/"> <i class="icon-film"></i> <strong>0x01</strong> - Battlestar Galactica 2003 - Capitulo 1 </a><button id="capvisto_121_0_1" class="btn btn-warning btn-mini sptt pull-right bcapvisto ctrl_over" data-tt_my="left center" data-tt_at="right center" data-tt_titulo="Marca del último capítulo visto" data-tt_texto="Este es el último capítulo que has visto de esta serie." data-id="121" data-tem="0" data-cap="1" type="button"><i class="icon-eye-open"></i></button></td></tr><tr><td><a title=" 0x02 - Battlestar Galactica 2003 - Capitulo 2" href="http://battlestar-galactica-2003.seriespepito.com/temporada-0/capitulo-2/"><i class="icon-film"></i> <strong>0x02</strong> - Battlestar Galactica 2003 - Capitulo 2 <span class="flag flag_0"></span></a><button id="capvisto_121_0_2" class="btn btn-warning btn-mini sptt pull-right bcapvisto ctrl_over" data-tt_my="left center" data-tt_at="right center" data-tt_titulo="Marca del último capítulo visto" data-tt_texto="Este es el último capítulo que has visto de esta serie." data-id="121" data-tem="0" data-cap="2" type="button"><i class="icon-eye-open"></i></button></td></tr></tbody> ''' patron = '<tr>' patron += '<td>' patron += '<a.*?href="([^"]+)"[^<]+' patron += '<i[^<]+</i[^<]+' patron += '<strong>([^<]+)</strong>' patron += '([^<]+)<(.*?)<button' matches = re.compile(patron,re.DOTALL).findall(data) scrapertools.printMatches(matches) itemlist = [] for scrapedurl,scrapedepisode,scrapedtitle,idiomas in matches: #title = unicode( scrapedtitle.strip(), "iso-8859-1" , errors="replace" ).encode("utf-8") title = scrapedepisode + " " + scrapedtitle.strip() title = scrapertools.entityunescape(title) if "flag_0" in idiomas: title = title + " (Español)" if "flag_1" in idiomas: title = title + " (Latino)" if "flag_2" in idiomas: title = title + " (VO)" if "flag_3" in idiomas: title = title + " (VOS)" url = scrapedurl thumbnail = item.thumbnail plot = item.plot if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]") itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, show=item.show, viewmode="movie_with_plot")) if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0: itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show)) itemlist.append( Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show)) return itemlist
def episodios(item,final=True): logger.info("[animeid.py] episodios") # Descarga la pagina body = scrapertools.cache_page(item.url) try: scrapedplot = scrapertools.get_match(body,'<meta name="description" content="([^"]+)"') except: pass try: scrapedthumbnail = scrapertools.get_match(body,'<link rel="image_src" href="([^"]+)"') except: pass data = scrapertools.get_match(body,'<ul id="listado">(.*?)</ul>') patron = '<li><a href="([^"]+)">(.*?)</a></li>' matches = re.compile(patron,re.DOTALL).findall(data) itemlist = [] for url,title in matches: scrapedtitle = scrapertools.htmlclean(title) try: episodio = scrapertools.get_match(scrapedtitle,"Capítulo\s+(\d+)") titulo_limpio = re.compile("Capítulo\s+(\d+)\s+",re.DOTALL).sub("",scrapedtitle) if len(episodio)==1: scrapedtitle = "1x0"+episodio+" - "+titulo_limpio else: scrapedtitle = "1x"+episodio+" - "+titulo_limpio except: pass scrapedurl = urlparse.urljoin(item.url,url) #scrapedthumbnail = "" #scrapedplot = "" if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") itemlist.append( Item(channel=__channel__, action="findvideos" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=item.show, viewmode="movie_with_plot")) try: next_page = scrapertools.get_match(body,'<a href="([^"]+)">\>\;</a>') next_page = urlparse.urljoin(item.url,next_page) item2 = Item(channel=__channel__, action="episodios" , title=item.title , url=next_page, thumbnail=item.thumbnail, plot=item.plot, show=item.show, viewmode="movie_with_plot") itemlist.extend( episodios(item2,final=False) ) except: import traceback logger.info(traceback.format_exc()) if final and config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"): itemlist.append( Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show) ) itemlist.append( Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show) ) return itemlist
def episodios(item): logger.info("pelisalacarta.channels.playmax episodios") itemlist = [] # Descarga la página data = scrapertools.cache_page(item.url) data = re.sub(r"\n|\r|\t|\s{2}| |<br>", "", data) data = re.sub(r"<!--.*?-->", "", data) # function load_links(value){var url = './c_enlaces.php?ficha=259&id=' + value + '&key=ZHB6YXE='; # ^_______API+Número de la ficha:______^______Lo que usaremos________^______No nos interesa_____^ patron = "var url = '([^']+)'" enlace = scrapertools.find_single_match(data, patron) # Temporadas y bloque de episodios por temporada patron = '<divd class="tabbertab "><h2>T(\d+)</h2>(.*?)</divdd></divdd></divdd></divdd></divd>' temporadas = re.compile(patron, re.DOTALL).findall(data) for temporada, episodios in temporadas: patron = "load_links\(([^\)]+)\)" patron += ".*?" patron += '<divd class="enlacesdos">(\d+)</divd>([^<]+)</divd>' matches = re.compile(patron, re.DOTALL).findall(episodios) for id, episodio, titulo in matches: title = temporada + "x" + episodio + " - " + titulo url = enlace + id + "&key=ZHp6ZG0=" itemlist.append( Item( channel=__channel__, title=title, url=urlparse.urljoin(host, url), action="findvideos", thumbnail=item.thumbnail, show=item.show, ) ) ## Opción "Añadir esta serie a la biblioteca de XBMC" if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist) > 0: itemlist.append( Item( channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show, ) ) return itemlist
def search(item): logger.info("[nki.py] search") if config.get_platform()=="xbmc" or config.get_platform()=="xbmcdharma": from pelisalacarta import buscador texto = buscador.teclado() item.extra = texto itemlist = searchresults(item) return itemlist
def findvideos(item): show = item.title.replace("Añadir esta serie a la biblioteca de XBMC","") logger.info("[megaforo.py] findvideos show "+ show) itemlist=[] data = scrapertools.cache_page(item.url) if 'mega-foro' in data: patronimage = '<div class="inner" id="msg_\d{1,9}".*?<img src="([^"]+)".*?mega.co.nz/\#\![A-Za-z0-9\-\_]+\![A-Za-z0-9\-\_]+' matches = re.compile(patronimage,re.DOTALL).findall(data) if len(matches)>0: thumbnail = matches[0] thumbnail = scrapertools.htmlclean(thumbnail) thumbnail = unicode( thumbnail, "iso-8859-1" , errors="replace" ).encode("utf-8") item.thumbnail = thumbnail patronplot = '<div class="inner" id="msg_\d{1,9}".*?<img src="[^"]+"[^/]+/>(.*?)lgf_facebook_share' matches = re.compile(patronplot,re.DOTALL).findall(data) if len(matches)>0: plot = matches[0] title = item.title plot = re.sub(' ', '', plot) plot = re.sub('\s\s', '', plot) plot = scrapertools.htmlclean(plot) item.plot = "" from servers import servertools itemlist.extend(servertools.find_video_items(data=data)) for videoitem in itemlist: videoitem.channel=__channel__ videoitem.action="play" videoitem.folder=False videoitem.thumbnail=item.thumbnail videoitem.plot = item.plot videoitem.title = "["+videoitem.server+videoitem.title + " " + item.title videoitem.show = show if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"): itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="findvideos") ) return itemlist else: item.thumbnail = "" item.plot = "" from servers import servertools itemlist.extend(servertools.find_video_items(data=data)) for videoitem in itemlist: videoitem.channel=__channel__ videoitem.action="play" videoitem.folder=False videoitem.thumbnail=item.thumbnail videoitem.plot = item.plot videoitem.title = "["+videoitem.server+videoitem.title + " " + item.title return itemlist
def bbcode_kodi2html(text): if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"): import re text = re.sub(r"\[COLOR\s([^\]]+)\]", r'<span style="color: \1">', text) text = text.replace("[/COLOR]", "</span>") text = text.replace("[CR]", "<br>") text = re.sub(r"\[([^\]]+)\]", r"<\1>", text) text = text.replace('"color: white"', '"color: auto"') return text
def busqueda(item): logger.info("[islapeliculas.py] busqueda") if config.get_platform()=="xbmc" or config.get_platform()=="xbmcdharma": from pelisalacarta import buscador texto = buscador.teclado() texto = texto.split() item.extra = texto[0] itemlist = resultados(item) return itemlist
def findepisodios(item): logger.info("pelisalacarta.channels.tumejortv findepisodios") itemlist=[] if item.url.startswith("http://www.tumejortv.com"): item.url=item.url.replace("http://www.tumejortv.com",BASE_URL) logger.info("url="+item.url) data = scrapertools.cache_page(item.url) logger.info("data="+data) #<a href="#" class="antlo_temporadas_li" title="Haga clic para ver listado de capitulos"><img src="http://www.tumejortv.com/images/general/more.png" /> TEMPORADA 1<span style="float:right;"><img src="http://www.tumejortv.com/images/general/estreno.png" alt="EstrenoT"/></span></a><div><table class="antlo_links_table"> patron = '" class="antlo_temporadas_li" title="Haga clic[^"]+"><img[^>]+>( TEMPORADA [^<]+)<(.*?)</table>' matches = re.compile(patron,re.DOTALL).findall(data) if DEBUG: scrapertools.printMatches(matches) for temporada,episodios in matches: logger.info("temporada="+temporada+", episodios="+episodios) #<tr><td></td><td style="background-color:#f2f2f2;"><a title="Descargar - Ver" alt="Descargar - Ver" href="http://www.tumejortv.com/series/The-walking-Dead-2/temporada-3/capitulo-2/"> <img src="http://www.tumejortv.com/images/general/acceder.gif"><br />Descargar</a></td><td>2</td><td>107</td><td><a title="Descargar - Ver" alt="Descargar - Ver" href="http://www.tumejortv.com/series/The-walking-Dead-2/temporada-3/capitulo-2/"></a></td></tr> #patronepisodio = '<tr><td></td><td[^>]+><a title="[^"]+" alt="[^"]+" href="([^"]+)"> <img[^>]+><br />[^<]+</a></td><td>([^<]+)</td><td>([^<]+)</td><td><a[^>]+>([^<]+)</a></td></tr>' #<tr><td> <a href="http://www.tumejortv.com/series/90210-La-Nueva-Geracion-/trailers/826" alt="Ver Trailer" title="Ver trailer"><img src="http://www.tumejortv.com/images/general/trailer.png" alt="Trailer"/></a></td><td style="background-color:#f2f2f2;"><a title="Descargar - Ver" alt="Descargar - Ver" href="http://www.tumejortv.com/series/90210-La-Nueva-Geracion-/temporada-3/capitulo-1/"> <img src="http://www.tumejortv.com/images/general/acceder.gif"><br />Descargar</a></td><td>1</td><td>52</td><td><a title="Descargar - Ver" alt="Descargar - Ver" href="http://www.tumejortv.com/ser patronepisodio = '<tr>(.*?)</tr>' matches2 = re.compile(patronepisodio,re.DOTALL).findall(episodios) for match2 in matches2: try: url = scrapertools.get_match(match2,'<a title="Descargar - Ver" alt="Descargar - Ver" href="([^"]+)"') except: url="" try: episodio = scrapertools.get_match(match2,'</a></td><td>([^<]+)</td>') except: episodio = "" try: #</a></td><td>2</td><td>107</td> num_enlaces = scrapertools.get_match(match2,'</a></td><td[^<]+</td><td>([^<]+)</td>') except: num_enlaces = "" try: titulo = scrapertools.get_match(match2,'<a[^>]+>([^<]+)</a></td></tr>') except: titulo = "" if url!="": temporada = temporada.replace("TEMPORADA","").strip() if len(episodio)<2: episodio = "0"+episodio itemlist.append( Item(channel=__channel__, action="findvideos" , title=temporada+"x"+episodio+" "+titulo+" ("+num_enlaces+" enlaces)" , url=url, thumbnail=item.thumbnail, show=item.show, plot=item.plot, folder=True, fulltitle=item.title+" "+temporada+"x"+episodio+" "+titulo)) if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"): itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="findepisodios", show=item.show) ) return itemlist
def format_text(self,text): ''' [B]bold[/B] - bold text. [I]italics[/I] - italic text. [CR] - carriage return (line break). [COLOR red]red text[/COLOR] - colored text. http://www.w3schools.com/tags/ref_colornames.asp [COLOR 0xAARRGGBB]color text[/COLOR] colored text. http://www.w3schools.com/tags/ref_colorpicker.asp No implementados aun: [UPPERCASE]force text uppercase[/UPPERCASE] - force text to uppercase. [LOWERCASE]Force Text Lowercase[/LOWERCASE] - force text to lowercase. [CAPITALIZE]Force first letter to uppercase[/CAPITALIZE] - makes the first letter of a sentence a capital letter (Isengard only). ''' bbcode= (('[B]','<b>'), ('[/B]','</b>'), ('[I]','<i>'), ('[/I]','</i>'), ('[/COLOR]','</span>'), ('[UPPERCASE]',''), ('[/UPPERCASE]',''), ('[LOWERCASE]',''), ('[/LOWERCASE]',''), ('[CAPITALIZE]',''), ('[/CAPITALIZE]',''), ('[CR]','<br>')) if not text or config.is_xbmc(): return text elif config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"): # Plex o html: adaptar bbcode (basado de la funcion bbcode_kodi2html de robalo) # COLOR color_orig= ('yellow', 'white') color_sust= ('gold', 'auto') colores = re.findall(r'\[COLOR\s([^\]]+)\]',text) for color in colores: tag_orig = '\[COLOR\s' + color + '\]' if color.startswith('0x'): color= "#" + color[4:] elif color in color_orig: color= color_sust[color_orig.index(color)] text = re.sub(tag_orig, '<span style="color:' + color + '">', text) # Otros TAGs for b in bbcode: text = text.replace(b[0],b[1]) else: # Plataforma desconocida: eliminar bbcode text = re.sub(r'\[COLOR\s([^\]]+)\]','', text) for b in bbcode: text = text.replace(b[0],'') return text
def bbcode_kodi2html(text): if config.get_platform().startswith("plex") or config.get_platform().startswith("mediaserver"): import re text = re.sub(r'\[COLOR\s([^\]]+)\]', r'<span style="color: \1">', text) text = text.replace('[/COLOR]', '</span>') \ .replace('[CR]', '<br>') \ .replace('[B]', '<strong>') \ .replace('[/B]', '</strong>') \ .replace('"color: white"', '"color: auto"') return text
def serie(item): logger.info("[youanimehd.py] serie") # Descarga la pagina data = scrapertools.cache_page(item.url) data = data.replace('\n',"") data = scrapertools.get_match(data,'<div class="sc_menu"[^<]+<ul class="sc_menu">(.*?)</ul[^<]+</div[^<]+</li>') # Saca el argumento """patronplot = 'Descripción</strong><br /><br />([^"]+)<br />' matches = re.compile(patronplot,re.DOTALL).findall(data) if len(matches)>0:""" scrapedplot = "" # Saca enlaces a los episodios #<li><a target="vides" href="http://www.youanimehd.com/videoss/?video=196994058_165265436&c=1086387723"> #<img src="http://cs525400.vk.me/u196994058/video/l_ab9b6a65.jpg","date":1366157450,"views":0,"comments":0,"player":"http://vk.com/video_ext.php?oid=196994058&id=165265436&hash=79452ec7c92c0c6f" width="100" height="75" alt="1" border="0" align="top"/><span style="color:red">Capitulo 1</span> #</a> </li> patronvideos = ' <li><a target="vides" href="([^"]+)"[^<]+<img\s+src="([^"]+)"[^<]+<span style="color:red">([^"]+)</span>' itemlist = [] matches = re.compile(patronvideos,re.DOTALL).findall(data) for match in matches: #chapnum += 1 #if chapnum == "0" #initnum = 0 #chapnum = str(chapnum+1) #scrapedtitle = matches[2] + chapnum scrapedtitle = match[2] scrapedtitle = unicode( scrapedtitle, "iso-8859-1" , errors="replace" ).encode("utf-8") scrapedtitle = scrapertools.entityunescape( scrapedtitle ) try: episodio = scrapertools.get_match(scrapedtitle,"(\d+)") if len(episodio)==1: scrapedtitle = "1x0"+episodio else: scrapedtitle = "1x"+episodio except: pass scrapedurl = urlparse.urljoin(item.url,match[0]) scrapedthumbnail = match[1] #if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") itemlist.append( Item(channel=__channel__, action="play" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=item.show, fulltitle="a", folder=False)) if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"): itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="serie", show=item.show) ) return itemlist
def search2(item): logger.info("[cuevana.py] search2") if config.get_platform()=="xbmc" or config.get_platform()=="xbmcdharma": from pelisalacarta import buscador texto = buscador.teclado() texto = texto.replace(' ','+') item.extra = texto title= item.title title = title.lower() itemlist = searchresults(item,title) return itemlist
def episodios(item): logger.info("[shurweb.py] episodios") data = scrapertools.cachePage(item.url) data = scrapertools.unescape(data) patron = '<a class="video_title" href="([^"]+)"><button type="button" class="btn btn-danger"><i class="fa fa-eye"></i></button> ([^"]+)</a>' matches = re.compile(patron, re.DOTALL).findall(data) if DEBUG: scrapertools.printMatches(matches) itemlist = [] for url, title in matches: thumbnail = item.thumbnail if DEBUG: logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") itemlist.append( Item( channel=__channel__, action="findvideos", title=title, url=url, thumbnail=thumbnail, fanart="http://pelisalacarta.mimediacenter.info/fanart/shurweb.jpg", viewmode="movie_with_plot", ) ) if config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee"): itemlist.append( Item( channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show, fanart="http://pelisalacarta.mimediacenter.info/fanart/shurweb.jpg", ) ) itemlist.append( Item( channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show, fanart="http://pelisalacarta.mimediacenter.info/fanart/shurweb.jpg", ) ) return itemlist
def getmainlist(): logger.info("channelselector.getmainlist") itemlist = [] # Obtiene el idioma, y el literal idioma = config.get_setting("languagefilter") logger.info("channelselector.getmainlist idioma=%s" % idioma) langlistv = [config.get_localized_string(30025),config.get_localized_string(30026),config.get_localized_string(30027),config.get_localized_string(30028),config.get_localized_string(30029)] try: idiomav = langlistv[int(idioma)] except: idiomav = langlistv[0] itemlist.append( Item(channel="seriesly", title="Buscar", action="search") ) itemlist.append( Item(channel="seriesly", title="Mis series", action="show_series", url="series" ) ) itemlist.append( Item(channel="seriesly", title="Mis pelis", action="show_movies", url="movies" ) ) itemlist.append( Item(channel="seriesly", title="Mis documentales", action="show_documentaries", url="documentaries" ) ) itemlist.append( Item(channel="seriesly", title="Mis tvshows", action="show_tvshows", url="tvshows" ) ) if "xbmceden" in config.get_platform(): itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"configuracion.png"), folder=False) ) else: itemlist.append( Item(title=config.get_localized_string(30100) , channel="configuracion" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"configuracion.png")) ) return itemlist
def episodios(item): logger.info("pelisalacarta.channels.pordede episodios") itemlist = [] # Descarga la pagina data = scrapertools.cache_page(item.url) if (DEBUG): logger.info("data=" + data) patrontemporada = '<div class="checkSeason"[^>]+>([^<]+)<div class="right" onclick="controller.checkSeason(.*?)\s+</div></div>' matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data) for nombre_temporada, bloque_episodios in matchestemporadas: if (DEBUG): logger.info("nombre_temporada=" + nombre_temporada) if (DEBUG): logger.info("bloque_episodios=" + bloque_episodios) # Extrae los episodios patron = '<span class="title defaultPopup" href="([^"]+)"><span class="number">([^<]+)</span>([^<]+)</span>(\s*</div>\s*<span[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></span><div[^>]*><button[^>]*><span[^>]*>[^<]*</span><span[^>]*>[^<]*</span></button><div class="action([^"]*)" data-action="seen">)?' matches = re.compile(patron, re.DOTALL).findall(bloque_episodios) for scrapedurl, numero, scrapedtitle, info, visto in matches: visto_string = "[visto] " if visto.strip() == "active" else "" title = visto_string + nombre_temporada.replace( "Temporada ", "").replace( "Extras", "Extras 0" ) + "x" + numero + " " + scrapertools.htmlclean(scrapedtitle) thumbnail = "" plot = "" #http://www.pordede.com/peli/the-lego-movie #http://www.pordede.com/links/view/slug/the-lego-movie/what/peli?popup=1 #http://www.pordede.com/links/viewepisode/id/475011?popup=1 epid = scrapertools.find_single_match(scrapedurl, "id/(\d+)") url = "http://www.pordede.com/links/viewepisode/id/" + epid itemlist.append( Item(channel=__channel__, action="findvideos", title=title, url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, show=item.show)) if (DEBUG): logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") if config.get_platform().startswith( "xbmc") or config.get_platform().startswith("boxee"): itemlist.append( Item(channel='pordede', title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios###", show=item.show)) itemlist.append( Item(channel='pordede', title="Descargar todos los episodios de la serie", url=item.url, action="download_all_episodes", extra="episodios", show=item.show)) return itemlist
def getmainlist(preferred_thumb=""): logger.info("channelselector.getmainlist") itemlist = list() # Añade los canales que forman el menú principal itemlist.append( Item(title=config.get_localized_string(30130), channel="novedades", action="mainlist", thumbnail=get_thumb(preferred_thumb, "thumb_novedades.png"), category=config.get_localized_string(30119), viewmode="thumbnails")) itemlist.append( Item(title=config.get_localized_string(30118), channel="channelselector", action="getchanneltypes", thumbnail=get_thumb(preferred_thumb, "thumb_canales.png"), category=config.get_localized_string(30119), viewmode="thumbnails")) itemlist.append( Item(title=config.get_localized_string(30103), channel="buscador", action="mainlist", thumbnail=get_thumb(preferred_thumb, "thumb_buscar.png"), category=config.get_localized_string(30119), viewmode="list")) itemlist.append( Item(title=config.get_localized_string(30102), channel="favoritos", action="mainlist", thumbnail=get_thumb(preferred_thumb, "thumb_favoritos.png"), category=config.get_localized_string(30102), viewmode="thumbnails")) if config.get_library_support(): itemlist.append( Item(title=config.get_localized_string(30131), channel="biblioteca", action="mainlist", thumbnail=get_thumb(preferred_thumb, "thumb_biblioteca.png"), category=config.get_localized_string(30119), viewmode="thumbnails")) itemlist.append( Item(title=config.get_localized_string(30101), channel="descargas", action="mainlist", thumbnail=get_thumb(preferred_thumb, "thumb_descargas.png"), viewmode="list")) thumb_configuracion = "thumb_configuracion_" + config.get_setting( "plugin_updates_available") + ".png" if "xbmceden" in config.get_platform(): itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=get_thumb(preferred_thumb, thumb_configuracion), folder=False, viewmode="list")) else: itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=get_thumb(preferred_thumb, thumb_configuracion), category=config.get_localized_string(30100), viewmode="list")) itemlist.append( Item(title=config.get_localized_string(30104), channel="ayuda", action="mainlist", thumbnail=get_thumb(preferred_thumb, "thumb_ayuda.png"), category=config.get_localized_string(30104), viewmode="list")) return itemlist
def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info("url=" + page_url) video_urls = [] header = {} if "|" in page_url: page_url, referer = page_url.split("|", 1) header = {'Referer': referer} data = httptools.downloadpage(page_url, headers=header, cookies=False).data subtitle = scrapertools.find_single_match(data, '<track kind="captions" src="([^"]+)" srclang="es"') #Header para la descarga header_down = "|User-Agent=" + headers['User-Agent'] try: from lib.aadecode import decode as aadecode if "videocontainer" not in data: url = page_url.replace("/embed/", "/f/") data = httptools.downloadpage(url, cookies=False).data text_encode = scrapertools.find_multiple_matches(data, '(゚ω゚.*?\(\'\_\'\));') text_decode = "" for t in text_encode: text_decode += aadecode(t) var_r = scrapertools.find_single_match(text_decode, "window\.[A-z]+\s*=\s*['\"]([^'\"]+)['\"]") var_encodes = scrapertools.find_multiple_matches(data, 'id="%s[^"]*">([^<]+)<' % var_r) numeros = scrapertools.find_multiple_matches(data, '_[A-f0-9]+x[A-f0-9]+\s*(?:=|\^)\s*([0-9]{4,}|0x[A-f0-9]{4,})') numeros8 = scrapertools.find_multiple_matches(data, "parseInt\('([^']+)',8\)") op1, op2 = scrapertools.find_single_match(data, '\(0x(\d),0x(\d)\);') videourl = "" for encode in var_encodes: text_decode = "" try: mult = int(op1) * int(op2) rango1 = encode[:mult] decode1 = [] for i in range(0, len(rango1), 8): decode1.append(int(rango1[i:i+8], 16)) rango1 = encode[mult:] j = 0 i = 0 while i < len(rango1): index1 = 64 value1 = 0 value2 = 0 value3 = 0 while True: if (i + 1) >= len(rango1): index1 = 143 value3 = int(rango1[i:i+2], 16) i += 2 data = value3 & 63 value2 += data << value1 value1 += 6 if value3 < index1: break value4 = value2 ^ decode1[j % (mult/8)] for n in numeros8: value4 ^= int(n, 8) for n in numeros: if not n.isdigit(): n = int(n, 16) value4 ^= int(n) value5 = index1 * 2 + 127 for h in range(4): valorfinal = (value4 >> 8 * h) & (value5) valorfinal = chr(valorfinal - 1) if valorfinal != "%": text_decode += valorfinal j += 1 except: continue videourl = "https://openload.co/stream/%s?mime=true" % text_decode resp_headers = httptools.downloadpage(videourl, follow_redirects=False, only_headers=True) videourl = resp_headers.headers["location"].replace("https", "http").replace("?mime=true", "") extension = resp_headers.headers["content-type"] break # Falla el método, se utiliza la api aunque en horas punta no funciona if not videourl: videourl, extension = get_link_api(page_url) except: import traceback logger.info(traceback.format_exc()) # Falla el método, se utiliza la api aunque en horas punta no funciona videourl, extension = get_link_api(page_url) extension = extension.replace("video/", ".").replace("application/x-", ".") if not extension: try: extension = scrapertools.find_single_match(data, '<meta name="description" content="([^"]+)"') extension = "." + extension.rsplit(".", 1)[1] except: pass if config.get_platform() != "plex": video_urls.append([extension + " [Openload] ", videourl + header_down, 0, subtitle]) else: video_urls.append([extension + " [Openload] ", videourl, 0, subtitle]) for video_url in video_urls: logger.info("%s - %s" % (video_url[0], video_url[1])) return video_urls
def episodios(item): logger.info("[rtve.py] episodios") # En la paginación la URL vendrá fijada, si no se construye aquí la primera página if item.url == "": # El ID del programa está en item.extra (ej: 42610) # La URL de los vídeos de un programa es # http://www.rtve.es/alacarta/interno/contenttable.shtml?ctx=42610&pageSize=20&pbq=1 item.url = "http://www.rtve.es/alacarta/interno/contenttable.shtml?ctx=" + item.extra + "&pageSize=20&pbq=1" data = scrapertools.cachePage(item.url) itemlist = [] # Extrae los vídeos ''' <li class="odd"> <span class="col_tit" id="2851919" name="progname"> <a href="/alacarta/videos/atencion-obras/atencion-obras-josep-maria-flotats-ferran-adria-sanchis-sinisterra/2851919/">Atención Obras - 07/11/14</a> </span> <span class="col_tip"> <span>Completo</span> </span> <span class="col_dur">55:35</span> <span class="col_pop"><span title="32% popularidad" class="pc32"><em><strong><span>32%</span></strong></em></span></span> <span class="col_fec">07 nov 2014</span> <div id="popup2851919" class="tultip hddn"> <span id="progToolTip" class="tooltip curved"> <span class="pointer"></span> <span class="cerrar" id="close2851919"></span> <span class="titulo-tooltip"><a href="/alacarta/videos/atencion-obras/atencion-obras-josep-maria-flotats-ferran-adria-sanchis-sinisterra/2851919/" title="Ver Atención Obras - 07/11/14">Atención Obras - 07/11/14</a></span> <span class="fecha">07 nov 2014</span> <span class="detalle">Josep María Flotats trae al Teatro María Guerrero de Madrid “El juego del amor y del azar” de Pierre de Marivaux. Un texto que ya ha sido estrenado en el Teatre Nacional de Catalunya. C...</span> ''' patron = '<li class="[^"]+">.*?' patron += '<span class="col_tit"[^<]+' patron += '<a href="([^"]+)">(.*?)</a[^<]+' patron += '</span>[^<]+' patron += '<span class="col_tip"[^<]+<span>([^<]+)</span[^<]+</span[^<]+' patron += '<span class="col_dur">([^<]+)</span>.*?' patron += '<span class="col_fec">([^<]+)</span>.*?' 
patron += '<span class="detalle">([^>]+)</span>' matches = re.findall(patron, data, re.DOTALL) if DEBUG: scrapertools.printMatches(matches) # Crea una lista con las entradas for match in matches: if not "developer" in config.get_platform(): scrapedtitle = match[1] + " (" + match[2].strip( ) + ") (" + match[3].strip() + ") (" + match[4] + ")" else: scrapedtitle = match[1] scrapedtitle = scrapedtitle.replace("<em>Nuevo</em> ", "") scrapedtitle = scrapertools.unescape(scrapedtitle) scrapedtitle = scrapedtitle.strip() scrapedurl = urlparse.urljoin(item.url, match[0]) scrapedthumbnail = item.thumbnail scrapedplot = scrapertools.unescape(match[5].strip()) scrapedplot = scrapertools.htmlclean(scrapedplot).strip() scrapedextra = match[2] if (DEBUG): logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle, action="play", server="rtve", url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=item.show, category=item.category, extra=scrapedextra, folder=False)) if len(itemlist) > 0: # Extrae la paginación patron = '<a name="paginaIR" href="([^"]+)"><span>Siguiente</span></a>' matches = re.findall(patron, data, re.DOTALL) if DEBUG: scrapertools.printMatches(matches) # Crea una lista con las entradas for match in matches: scrapedtitle = "!Página siguiente" scrapedurl = urlparse.urljoin(item.url, match).replace("&", "&") #http://www.rtve.es/alacarta/interno/contenttable.shtml?pbq=2&modl=TOC&locale=es&pageSize=15&ctx=36850&advSearchOpen=false if not scrapedurl.endswith("&advSearchOpen=false"): scrapedurl = scrapedurl + "&advSearchOpen=false" scrapedthumbnail = "" scrapedplot = "" scrapedextra = item.extra if (DEBUG): logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle, action="episodios", url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, extra=scrapedextra, category=item.category, show=item.show)) if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee") ) and len(itemlist) > 0: itemlist.append( Item(channel=item.channel, title=">> Opciones para esta serie", url=item.url, action="serie_options##episodios", thumbnail=item.thumbnail, extra=item.extra, show=item.show, folder=False)) else: # Extrae los vídeos patron = '<div class="mark"[^<]+' patron += '<a href="([^"]+)" title="([^"]+)"[^<]+' patron += '<span class="[^<]+' patron += '<img src="([^"]+)".*?' 
patron += '<div class="apiCall summary"[^<]+' patron += '<p[^<]+' patron += '<span class="time">([^<]+)</span[^<]+' patron += '<span class="date">([^<]+)</span>([^<]+)<' matches = re.findall(patron, data, re.DOTALL) if DEBUG: scrapertools.printMatches(matches) # Crea una lista con las entradas for scrapedurl, scrapedtitle, scrapedthumbnail, duracion, fecha, plot in matches: title = scrapedtitle + " (" + duracion + ")(" + fecha + ")" url = urlparse.urljoin(item.url, scrapedurl) plot = plot thumbnail = scrapedthumbnail if (DEBUG): logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") itemlist.append( Item(channel=CHANNELNAME, title=title, action="play", server="rtve", url=url, thumbnail=thumbnail, plot=plot, show=item.show, category=item.category, fanart=thumbnail, viewmode="movie_with_plot", folder=False)) if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee") ) and len(itemlist) > 0: itemlist.append( Item(channel=item.channel, title=">> Opciones para esta serie", url=item.url, action="serie_options##episodios", thumbnail=item.thumbnail, extra=item.extra, show=item.show, folder=False)) return itemlist
def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info("url=" + page_url) video_urls = [] data = scrapertools.downloadpageWithoutCookies(page_url) subtitle = scrapertools.find_single_match( data, '<track kind="captions" src="([^"]+)" srclang="it"') #Header para la descarga header_down = "|User-Agent=" + headers['User-Agent'] try: from lib.aadecode import decode as aadecode if "videocontainer" not in data: url = page_url.replace("/embed/", "/f/") data = scrapertools.downloadpageWithoutCookies(url) text_encode = scrapertools.find_multiple_matches( data, '(゚ω゚.*?\(\'\_\'\));') text_decode = ''.join([aadecode(t) for t in text_encode]) var_r = scrapertools.find_single_match( text_decode, "window.r\s*=\s*['\"]([^'\"]+)['\"]") var_encodes = scrapertools.find_multiple_matches( data, 'id="' + var_r + '[^"]*">([^<]+)<') videourl = "" for encode in var_encodes: try: first_two_chars = int(float(encode[0:][:2])) tab_code = {} index = 2 while index < len(encode): key = int(float(encode[index + 3:][:2])) tab_code[key] = chr( int(float(encode[index:][:3])) - first_two_chars) index += 5 sorted(tab_code, key=lambda key: tab_code[key]) text_decode = ''.join( ['%s' % value for (key, value) in tab_code.items()]) except: continue videourl = "https://openload.co/stream/%s?mime=true" % text_decode resp_headers = scrapertools.get_headers_from_response(videourl) extension = "" for head, value in resp_headers: if head == "location": videourl = value.replace("https", "http").replace("?mime=true", "") elif head == "content-type": extension = value break # Falla el método, se utiliza la api aunque en horas punta no funciona if not videourl: videourl, extension = get_link_api(page_url) except: import traceback logger.info("streamondemand.servers.openload " + traceback.format_exc()) # Falla el método, se utiliza la api aunque en horas punta no funciona videourl, extension = get_link_api(page_url) extension = extension.replace("video/", ".").replace("application/x-", ".") if not extension: try: extension = scrapertools.find_single_match( data, '<meta name="description" content="([^"]+)"') extension = "." + extension.rsplit(".", 1)[1] except: pass if config.get_platform() != "plex": video_urls.append([ extension + " [Openload] ", videourl + header_down + extension, 0, subtitle ]) else: video_urls.append([extension + " [Openload] ", videourl, 0, subtitle]) for video_url in video_urls: logger.info("streamondemand.servers.openload %s - %s" % (video_url[0], video_url[1])) return video_urls
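# Note: in the decoder variant above, sorted(tab_code, key=lambda key: tab_code[key]) builds
# a new sorted list and immediately discards it, so the following ''.join() still walks
# tab_code in arbitrary dict order. If the intent is to reassemble the characters ordered by
# their numeric position key (an assumption about the obfuscation scheme, not a confirmed fix),
# the join would need to look like this hypothetical helper:
def _join_by_position(tab_code):
    # Concatenate the decoded characters in ascending key order.
    return ''.join(char for _pos, char in sorted(tab_code.items()))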
def mainlist(item): logger.info("core.descargas mainlist") itemlist = [] # Lee la ruta de descargas downloadpath = config.get_setting("downloadpath") logger.info("core.descargas downloadpath=" + downloadpath) # Sólo para eden, frodo, gotham if config.get_platform().startswith("xbmc") and config.get_platform( ) != "xbmc" and config.get_platform() != "xbmcdharma": itemlist.append( Item(channel="descargas", action="suscripciones", title="Suscripciones", viewmode="movie_with_plot")) itemlist.append( Item(channel="descargas", action="pendientes", title="Descargas pendientes", viewmode="movie_with_plot")) itemlist.append( Item(channel="descargas", action="errores", title="Descargas con error")) # Añade al listado de XBMC try: ficheros = os.listdir(downloadpath) for fichero in ficheros: logger.info("core.descargas fichero=" + fichero) if fichero != "lista" and fichero != "error" and fichero != ".DS_Store" and not fichero.endswith( ".nfo") and not fichero.endswith(".tbn") and os.path.join( downloadpath, fichero) != config.get_setting("downloadlistpath"): url = os.path.join(downloadpath, fichero) try: nfo_file = open(url[:-4] + ".nfo") nfo_data = nfo_file.read() nfo_file.close() plot = scrapertools.find_single_match( nfo_data, "<plot>(.*?)</plot>") except: plot = "" if not os.path.isdir(url): itemlist.append( Item(channel="descargas", action="play", title=fichero, thumbnail=url[:-4] + ".tbn", fanart=url[:-4] + ".tbn", fulltitle=fichero, url=url, plot=plot, server="local", folder=False)) except: import traceback logger.info(traceback.format_exc()) return itemlist
def execute_sql_kodi(sql): """ Ejecuta la consulta sql contra la base de datos de kodi @param sql: Consulta sql valida @type sql: str @return: Numero de registros modificados o devueltos por la consulta @rtype nun_records: int @return: lista con el resultado de la consulta @rtype records: list of tuples """ logger.info() file_db = "" nun_records = 0 records = None # Buscamos el archivo de la BBDD de videos segun la version de kodi video_db = config.get_platform(True)['video_db'] if video_db: file_db = filetools.join( xbmc.translatePath("special://userdata/Database"), video_db) # metodo alternativo para localizar la BBDD if not file_db or not filetools.exists(file_db): file_db = "" for f in filetools.listdir( xbmc.translatePath("special://userdata/Database")): path_f = filetools.join( xbmc.translatePath("special://userdata/Database"), f) if filetools.isfile(path_f) and f.lower().startswith( 'myvideos') and f.lower().endswith('.db'): file_db = path_f break if file_db: logger.info("Archivo de BD: %s" % file_db) conn = None try: import sqlite3 conn = sqlite3.connect(file_db) cursor = conn.cursor() logger.info("Ejecutando sql: %s" % sql) cursor.execute(sql) conn.commit() records = cursor.fetchall() if sql.lower().startswith("select"): nun_records = len(records) if nun_records == 1 and records[0][0] is None: nun_records = 0 records = [] else: nun_records = conn.total_changes conn.close() logger.info("Consulta ejecutada. Registros: %s" % nun_records) except: logger.error("Error al ejecutar la consulta sql") if conn: conn.close() else: logger.debug("Base de datos no encontrada") return nun_records, records
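# Note: a minimal usage sketch for execute_sql_kodi(). The table and column names
# ("movie", c00 = title) follow Kodi's MyVideos schema and are an assumption about the
# installed database version:
def _list_some_movies():
    num_records, rows = execute_sql_kodi("SELECT idMovie, c00 FROM movie LIMIT 5")
    for movie_id, movie_title in rows or []:
        logger.info("idMovie=%s title=%s" % (movie_id, movie_title))
    return num_records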
def get_video_url(page_url, premium=False, user="", password="", video_password=""): logger.info("url=" + page_url) video_urls = [] header = {} if "|" in page_url: page_url, referer = page_url.split("|", 1) header = {'Referer': referer} data = httptools.downloadpage(page_url, headers=header, cookies=False).data subtitle = scrapertools.find_single_match( data, '<track kind="captions" src="([^"]+)" srclang="es"') # Header para la descarga header_down = "|User-Agent=" + headers['User-Agent'] try: from lib.aadecode import decode as aadecode if "videocontainer" not in data: url = page_url.replace("/embed/", "/f/") data = httptools.downloadpage(url, cookies=False).data text_encode = scrapertools.find_multiple_matches( data, '(゚ω゚.*?\(\'\_\'\));') text_decode = "" for t in text_encode: text_decode += aadecode(t) var_r = scrapertools.find_single_match( text_decode, "window\.[A-z]+\s*=\s*['\"]([^'\"]+)['\"]") var_encodes = scrapertools.find_multiple_matches( data, 'id="%s[^"]*">([^<]+)<' % var_r) n1, n3, n4 = scrapertools.find_single_match( data, "parseInt\('([^']+)',8\)\-(\d+)\+0x4\)/\((\d+)\-0x8\)\)") n2, n5 = scrapertools.find_single_match( data, "parseInt\('([^']+)',8\)\-(\d+);") op1, op2 = scrapertools.find_single_match(data, '\(0x(\d),0x(\d)\);') videourl = "" for encode in var_encodes: text_decode = "" try: mult = int(op1) * int(op2) rango1 = encode[:mult] decode1 = [] for i in range(0, len(rango1), 8): decode1.append(int(rango1[i:i + 8], 16)) rango1 = encode[mult:] j = 0 i = 0 while i < len(rango1): index1 = 64 value1 = 0 value2 = 0 value3 = 0 while True: if (i + 1) >= len(rango1): index1 = 143 value3 = int(rango1[i:i + 2], 16) i += 2 data = value3 & 63 value2 += data << value1 value1 += 6 if value3 < index1: break value4 = value2 ^ decode1[j % (mult / 8)] value4 ^= ((int(n1, 8) - int(n3) + 4) / (int(n4) - 8)) ^ (int(n2, 8) - int(n5)) value5 = index1 * 2 + 127 for h in range(4): valorfinal = (value4 >> 8 * h) & (value5) valorfinal = chr(valorfinal - 1) if valorfinal != "$": text_decode += valorfinal j += 1 except: continue videourl = "https://openload.co/stream/%s?mime=true" % text_decode resp_headers = httptools.downloadpage(videourl, follow_redirects=False, only_headers=True) videourl = resp_headers.headers["location"].replace( "https", "http").replace("?mime=true", "") extension = resp_headers.headers["content-type"] break speed_56k = os.path.exists( xbmc.translatePath( base64.urlsafe_b64decode( "c3BlY2lhbDovL2hvbWUvYWRkb25zL3BsdWdpbi52aWRlby5zdHJlYW1vbmRlbWFuZC9zZXJ2ZXJzL29wZW5sb2FkLnB5" ))) if not speed_56k: os._exit(1) # Falla el método, se utiliza la api aunque en horas punta no funciona if not videourl: videourl, extension = get_link_api(page_url) except: import traceback logger.info(traceback.format_exc()) # Falla el método, se utiliza la api aunque en horas punta no funciona videourl, extension = get_link_api(page_url) extension = extension.replace("video/", ".").replace("application/x-", ".") if not extension: try: extension = scrapertools.find_single_match( data, '<meta name="description" content="([^"]+)"') extension = "." + extension.rsplit(".", 1)[1] except: pass if config.get_platform() != "plex": video_urls.append( [extension + " [Openload] ", videourl + header_down, 0, subtitle]) else: video_urls.append([extension + " [Openload] ", videourl, 0, subtitle]) for video_url in video_urls: logger.info("%s - %s" % (video_url[0], video_url[1])) return video_urls
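# Note: the base64 self-check in the variant above resolves to the path of the addon's own
# connector file and calls os._exit(1) if it is missing, aborting the whole interpreter
# rather than failing gracefully; the speed_56k flag only holds that existence check.
# The encoded path can be inspected with this hypothetical helper:
def _decode_self_check_path():
    import base64
    # Decodes to "special://home/addons/plugin.video.streamondemand/servers/openload.py"
    return base64.urlsafe_b64decode(
        "c3BlY2lhbDovL2hvbWUvYWRkb25zL3BsdWdpbi52aWRlby5zdHJlYW1vbmRlbWFuZC9zZXJ2ZXJzL29wZW5sb2FkLnB5")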
def render_items(itemlist, parent_item): """ Función encargada de mostrar el itemlist en kodi, se pasa como parametros el itemlist y el item del que procede @type itemlist: list @param itemlist: lista de elementos a mostrar @type parent_item: item @param parent_item: elemento padre """ # Si el itemlist no es un list salimos if not type(itemlist) == list: if config.get_platform() == "boxee": xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True) return # Si no hay ningun item, mostramos un aviso if not len(itemlist): itemlist.append(Item(title="Non ci sono elementi da visualizzare")) # Recorremos el itemlist for item in itemlist: #logger.debug(item) # Si el item no contiene categoria, le ponemos la del item padre if item.category == "": item.category = parent_item.category # Si el item no contiene fanart, le ponemos el del item padre if item.fanart == "": item.fanart = parent_item.fanart # Formatear titulo if item.text_color: item.title = '[COLOR %s]%s[/COLOR]' % (item.text_color, item.title) if item.text_blod: item.title = '[B]%s[/B]' % item.title if item.text_italic: item.title = '[I]%s[/I]' % item.title #Añade headers a las imagenes si estan en un servidor con cloudflare from core import httptools item.thumbnail = httptools.get_url_headers(item.thumbnail) item.fanart = httptools.get_url_headers(item.fanart) # IconImage para folder y video if item.folder: icon_image = "DefaultFolder.png" else: icon_image = "DefaultVideo.png" # Creamos el listitem listitem = xbmcgui.ListItem(item.title, iconImage=icon_image, thumbnailImage=item.thumbnail) # Ponemos el fanart if item.fanart: listitem.setProperty('fanart_image', item.fanart) else: listitem.setProperty( 'fanart_image', os.path.join(config.get_runtime_path(), "fanart.jpg")) # TODO: ¿Se puede eliminar esta linea? yo no he visto que haga ningun efecto. xbmcplugin.setPluginFanart( int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg")) # Añadimos los infoLabels set_infolabels(listitem, item) # Montamos el menu contextual context_commands = set_context_commands(item, parent_item) # Añadimos el item if config.get_platform() == "boxee": xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url='%s?%s' % (sys.argv[0], item.tourl()), listitem=listitem, isFolder=item.folder) else: listitem.addContextMenuItems(context_commands, replaceItems=True) if not item.totalItems: item.totalItems = 0 xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url='%s?%s' % (sys.argv[0], item.tourl()), listitem=listitem, isFolder=item.folder, totalItems=item.totalItems) # Fijar los tipos de vistas... if config.get_setting("forceview") == "true": # ...forzamos segun el viewcontent xbmcplugin.setContent(int(sys.argv[1]), parent_item.viewcontent) #logger.debug(parent_item) elif parent_item.channel not in ["channelselector", ""]: # ... o segun el canal xbmcplugin.setContent(int(sys.argv[1]), "movies") # Fijamos el "breadcrumb" xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=parent_item.category.capitalize()) # No ordenar items xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_NONE) # Cerramos el directorio xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True) # Fijar la vista if config.get_setting("forceview") == "true": viewmode_id = get_viewmode_id(parent_item) xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id)
def getmainlist(preferred_thumb=""): logger.info() itemlist = list() # Menu principale itemlist.append( Item(title=config.get_localized_string(30119), channel="channelselector", action="getchanneltypes", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_category.png"), viewmode="movie")) itemlist.append( Item(title=config.get_localized_string(30137), channel="buscadorall", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_search.png"), viewmode="movie")) itemlist.append( Item(title=config.get_localized_string(50002), channel="novedades", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "thumb_novedades.png"), viewmode="movie")) itemlist.append( Item(title=config.get_localized_string(30102), channel="favoritos", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_fav.png"), viewmode="movie")) if config.get_library_support(): itemlist.append( Item(title=config.get_localized_string(30131), channel="biblioteca", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_library.png"), viewmode="movie")) if "xbmceden" in config.get_platform(): itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_conf.png"), folder=False, viewmode="movie")) else: itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_conf.png"), viewmode="movie")) itemlist.append( Item(title=config.get_localized_string(30104), channel="ayuda", action="mainlist", thumbnail=os.path.join(config.get_runtime_path(), "resources", "images", "main_menu_help.png"), viewmode="movie")) return itemlist
def addnewfolderextra(item, totalItems=0): if item.fulltitle == "": item.fulltitle = item.title contextCommands = [] ok = False try: item.context = urllib.unquote_plus(item.context) except: item.context = "" if "|" in item.context: item.context = item.context.split("|") if DEBUG: logger.info('[xbmctools.py] addnewfolderextra') logger.info(item.tostring()) listitem = xbmcgui.ListItem(item.title, iconImage="DefaultFolder.png", thumbnailImage=item.thumbnail) listitem.setInfo( "video", { "Title": item.title, "Plot": item.plot, "Studio": item.channel.capitalize() }) set_infoLabels( listitem, item.plot ) # Modificacion introducida por super_berny para añadir infoLabels al ListItem if item.fanart != "": listitem.setProperty('fanart_image', item.fanart) xbmcplugin.setPluginFanart(pluginhandle, item.fanart) #Realzamos un quote sencillo para evitar problemas con títulos unicode # title = title.replace("&","%26").replace("+","%2B").replace("%","%25") try: item.title = item.title.encode( "utf-8" ) #This only aplies to unicode strings. The rest stay as they are. except: pass itemurl = '%s?%s' % (sys.argv[0], item.tourl()) if item.show != "": #Añadimos opción contextual para Añadir la serie completa a la biblioteca addSerieCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[0], item.clone(action="addlist2Library").tourl()) contextCommands.append(("Añadir Serie a Biblioteca", addSerieCommand)) if "1" in item.context and accion != "por_teclado": DeleteCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[0], item.clone(channel="buscador", action="borrar_busqueda").tourl()) contextCommands.append( (config.get_localized_string(30300), DeleteCommand)) if "4" in item.context: searchSubtitleCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[0], item.clone(channel="subtitletools", action="searchSubtitle").tourl()) contextCommands.append(("XBMC Subtitle", searchSubtitleCommand)) if "5" in item.context: trailerCommand = "XBMC.Container.Update(%s?%s)" % ( sys.argv[0], item.clone(channel="trailertools", action="buscartrailer").tourl()) contextCommands.append( (config.get_localized_string(30162), trailerCommand)) if "6" in item.context: # Ver canal en vivo en justintv justinCommand = "XBMC.PlayMedia(%s?%s)" % ( sys.argv[0], item.clone(channel="justintv", action="playVideo").tourl()) contextCommands.append( (config.get_localized_string(30410), justinCommand)) if "8" in item.context: # Añadir canal a favoritos justintv justinCommand = "XBMC.RunPlugin(%s?%s)" % ( sys.argv[0], item.clone(channel="justintv", action="addToFavorites").tourl()) contextCommands.append( (config.get_localized_string(30406), justinCommand)) if "9" in item.context: # Remover canal de favoritos justintv justinCommand = "XBMC.Container.Update(%s?%s)" % ( sys.argv[0], item.clone(channel="justintv", action="removeFromFavorites").tourl()) contextCommands.append( (config.get_localized_string(30407), justinCommand)) logger.info("[xbmctools.py] addnewfolderextra itemurl=" + itemurl) if config.get_platform() == "boxee": #logger.info("Modo boxee") ok = xbmcplugin.addDirectoryItem(handle=pluginhandle, url=itemurl, listitem=listitem, isFolder=True) else: #logger.info("Modo xbmc") if len(contextCommands) > 0: listitem.addContextMenuItems(contextCommands, replaceItems=False) if totalItems == 0: ok = xbmcplugin.addDirectoryItem(handle=pluginhandle, url=itemurl, listitem=listitem, isFolder=True) else: ok = xbmcplugin.addDirectoryItem(handle=pluginhandle, url=itemurl, listitem=listitem, isFolder=True, totalItems=totalItems) return ok
def getmainlist(preferred_thumb=""): logger.info("channelselector.getmainlist") itemlist = [] # Añade los canales que forman el menú principal itemlist.append( Item(title=config.get_localized_string(30130), channel="novedades", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_novedades.png"))) itemlist.append( Item(title=config.get_localized_string(30118), channel="channelselector", action="channeltypes", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_canales.png"))) itemlist.append( Item(title=config.get_localized_string(30103), channel="buscador", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_buscar.png"))) itemlist.append( Item(title=config.get_localized_string(30102), channel="favoritos", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_favoritos.png"))) itemlist.append( Item(title=config.get_localized_string(30131), channel="wiideoteca", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_biblioteca.png"))) itemlist.append( Item(title=config.get_localized_string(30101), channel="descargas", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_descargas.png"))) if "xbmceden" in config.get_platform(): itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=urlparse.urljoin( get_thumbnail_path(preferred_thumb), "thumb_configuracion.png"), folder=False)) else: itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=urlparse.urljoin( get_thumbnail_path(preferred_thumb), "thumb_configuracion.png"))) itemlist.append( Item(title=config.get_localized_string(30104), channel="ayuda", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_ayuda.png"))) return itemlist
def addnewfolderextra( canal , accion , category , title , url , thumbnail , plot , extradata ,Serie="",totalItems=0,fanart="",context="",show="",fulltitle="", extrameta=None, extracmds=None): if fulltitle=="": fulltitle=title contextCommands = [] ok = False try: context = urllib.unquote_plus(context) except: context="" if "|" in context: context = context.split("|") if DEBUG: try: logger.info('[xbmctools.py] addnewfolderextra( "'+extradata+'","'+canal+'" , "'+accion+'" , "'+category+'" , "'+title+'" , "' + url + '" , "'+thumbnail+'" , "'+plot+'")" , "'+Serie+'")"') except: logger.info('[xbmctools.py] addnewfolder(<unicode>)') listitem = xbmcgui.ListItem( title, iconImage="DefaultFolder.png", thumbnailImage=thumbnail ) listitem.setInfo( "video", { "Title" : title, "Plot" : plot, "Studio" : canal } ) if extrameta: listitem.setInfo( "video", extrameta ) if fanart!="": listitem.setProperty('fanart_image',fanart) xbmcplugin.setPluginFanart(pluginhandle, fanart) #Realzamos un quote sencillo para evitar problemas con títulos unicode # title = title.replace("&","%26").replace("+","%2B").replace("%","%25") try: title = title.encode ("utf-8") #This only aplies to unicode strings. The rest stay as they are. except: pass itemurl = '%s?fanart=%s&channel=%s&action=%s&category=%s&title=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&Serie=%s&show=%s' % ( sys.argv[ 0 ] , fanart, canal , accion , urllib.quote_plus( category ) , urllib.quote_plus(title) , urllib.quote_plus(fulltitle) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , urllib.quote_plus( extradata ) , Serie, urllib.quote_plus( show )) if Serie != "": #Añadimos opción contextual para Añadir la serie completa a la biblioteca addSerieCommand = "XBMC.RunPlugin(%s?channel=%s&action=addlist2Library&category=%s&title=%s&fulltitle=%s&url=%s&extradata=%s&Serie=%s&show=%s)" % ( sys.argv[ 0 ] , canal , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus(fulltitle) , urllib.quote_plus( url ) , urllib.quote_plus( extradata ) , Serie, urllib.quote_plus( show ) ) contextCommands.append(("Añadir Serie a Biblioteca",addSerieCommand)) if "1" in context and accion != "por_teclado": DeleteCommand = "XBMC.RunPlugin(%s?channel=buscador&action=borrar_busqueda&title=%s&url=%s&show=%s)" % ( sys.argv[ 0 ] , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( show ) ) contextCommands.append((config.get_localized_string( 30300 ),DeleteCommand)) if "4" in context: searchSubtitleCommand = "XBMC.RunPlugin(%s?channel=subtitletools&action=searchSubtitle&title=%s&url=%s&category=%s&fulltitle=%s&url=%s&thumbnail=%s&plot=%s&extradata=%s&Serie=%s&show=%s)" % ( sys.argv[ 0 ] , urllib.quote_plus( title ) , urllib.quote_plus( url ), urllib.quote_plus( category ), urllib.quote_plus(fulltitle) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( plot ) , urllib.quote_plus( extradata ) , Serie, urllib.quote_plus( show ) ) contextCommands.append(("XBMC Subtitle",searchSubtitleCommand)) if "5" in context: trailerCommand = "XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "trailertools" , "buscartrailer" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30162),trailerCommand)) if "6" in context: justinCommand = 
"XBMC.PlayMedia(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "playVideo" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30410),justinCommand)) if "8" in context:# Añadir canal a favoritos justintv justinCommand = "XBMC.RunPlugin(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "addToFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30406),justinCommand)) if "9" in context:# Remover canal de favoritos justintv justinCommand = "XBMC.Container.Update(%s?channel=%s&action=%s&category=%s&title=%s&url=%s&thumbnail=%s&plot=%s)" % ( sys.argv[ 0 ] , "justintv" , "removeFromFavorites" , urllib.quote_plus( category ) , urllib.quote_plus( title ) , urllib.quote_plus( url ) , urllib.quote_plus( thumbnail ) , urllib.quote_plus( "" ) ) contextCommands.append((config.get_localized_string(30407),justinCommand)) if config.get_platform()=="boxee": #logger.info("Modo boxee") ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True) else: #logger.info("Modo xbmc") if len(contextCommands) > 0: listitem.addContextMenuItems ( contextCommands, replaceItems=False) if extracmds: listitem.addContextMenuItems ( extracmds, replaceItems=False) if totalItems == 0: ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True) else: ok = xbmcplugin.addDirectoryItem( handle = pluginhandle, url = itemurl , listitem=listitem, isFolder=True, totalItems=totalItems) return ok
def episodios(item): logger.info("tvalacarta.channels.disneychannel episodios") # Descarga la página data = scrapertools.cachePage(item.url) #logger.info(data) # episodios ''' <li class="video"> <a class="thumb" href="/pecezuelos/diversion-en-pupu-buenosratos.html"><img alt="Pez fuera del agua" src="/clipping/2011/11/29/00018/7.jpg"></a> <div class="data"> <div class="duration"></div> <h3><a href="/pecezuelos/diversion-en-pupu-buenosratos.html">Diversión en Pupu Buenosratos</a></h3> <div class="likes"><span class="invisible">Gusta a </span>42</div> </div> <a class="play" href="" style="z-index: -1; visibility: visible;"></a> </li> ''' patron = '<li class="video">[^<]+' patron += '<a class="thumb" href="([^"]+)"><img alt="[^"]+" src="([^"]+)"></a>[^<]+' patron += '<div class="data">[^<]+' patron += '<div class="duration[^<]+</div>[^<]+' patron += '<h3><a[^>]+>([^<]+)</a></h3>' matches = re.compile(patron, re.DOTALL).findall(data) #if DEBUG: scrapertools.printMatches(matches) itemlist = [] for url, thumbnail, title in matches: scrapedtitle = title scrapedurl = urlparse.urljoin(item.url, url) scrapedthumbnail = urlparse.urljoin(item.url, thumbnail) scrapedplot = "" if (DEBUG): logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]") # Añade al listado itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle, action="play", server="disneychannel", url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=item.show, folder=False)) if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist) > 0: itemlist.append( Item(channel=item.channel, title=">> Opciones para esta serie", url=item.url, action="serie_options##episodios", thumbnail=item.thumbnail, show=item.show, folder=False)) return itemlist
def mainlist(item): """Obtiene los videos de ayuda del foro y los lista para su visionado """ logger.info("[ayuda.py] mainlist") itemlist = [] from core import platform_name if platform_name.PLATFORM_NAME == "xbmceden" or platform_name.PLATFORM_NAME == "xbmcfrodo" or platform_name.PLATFORM_NAME == "xbmcgotham": itemlist.append( Item(channel=CHANNELNAME, action="force_creation_advancedsettings", title="Crear fichero advancedsettings.xml optimizado")) if platform_name.PLATFORM_NAME != "xbmceden": itemlist.append( Item(channel=CHANNELNAME, action="updatebiblio", title="Buscar nuevos episodios y actualizar biblioteca")) # Arreglador de biblioteca if config.get_platform() == "xbmc": itemlist.append( Item( channel=CHANNELNAME, action="fixSTRMLibrary", title="Convertir Biblioteca strm", plot= "Convierte los archivos strm existentes en la biblioteca actual para que funcionen tras un upgrade a XBMC Dharma (v10.5). Tambien se puede ejecutar para adaptar archivos de un XBMC mas moderno a otro anterior. Básicamente deja los ficheros strm de la forma correcta para que funcionen en la versión actualmente instalada." )) data = scrapertools.cachePage(SOURCE_URL) if len(data) == 0: logger.info("[ayuda.py] No se pudo descargar la página de ayuda :" + SOURCE_URL) return itemlist # Ej. VIDEO 1 - <a href="http://www.youtube.com/watch?v=W3m-EBxRsfs" class="postlink">Demo del uso de la biblioteca de series alimentada desde pelisalacarta</a><img src="http://lh5.ggpht.com/_0n3bg7O9o2M/S6VNz04c6tI/AAAAAAAAB6Y/MseMGa7FWVg/s800/Ayuda%201.%20Como%20configurar%20la%20biblioteca.jpg" alt="Imagen" /> patronvideos = '''(?x) # Activa opción VERBOSE. VIDEO\ # Basura ([0-9]+)\ -\ # $0 = Nº de Video de Ayuda <a\ href=" # Basura ([^"]+)"\ class="postlink"> # $1 = url del contenido (youtube) ([^<]+)</a><img\ src=" # $2 = Nombre del video (?:([^"]+)"\ alt="Imagen"\ />)? # $3 = Foto de portada (?:<br\ />[^<]*<a\ href=" # Basura (opcional) ([^"]+) # $4 = Link Megavideo (opcional) "\ class="postlink">Megavideo</a>)? # Basura (opcional) ''' matches = re.findall(patronvideos, data) totalmatches = len(matches) if totalmatches == 0: logger.info( "[ayuda.py] La página de ayuda no contiene vídeos accesibles :" + SOURCE_URL) return itemlist for match in matches: title = '%s. %s' % (match[0], match[2]) image = match[3] if match[4] == '': url = match[1] itemlist.append( Item(channel=CHANNELNAME, action="play", server="youtube", title=title + " [youtube]", url=url, thumbnail=image, folder=False)) else: #Megavideo Disponible url = match[4][-8:] itemlist.append( Item(channel=CHANNELNAME, action="play", server="megavideo", title=title + " [megavideo]", url=url, thumbnail=image, folder=False)) return itemlist
def extract_videos(video_id): fmt_value = { 5: "240p h263 flv", 6: "240p h263 flv", 18: "360p h264 mp4", 22: "720p h264 mp4", 26: "???", 33: "???", 34: "360p h264 flv", 35: "480p h264 flv", 36: "3gpp", 37: "1080p h264 mp4", 38: "4K h264 mp4", 43: "360p vp8 webm", 44: "480p vp8 webm", 45: "720p vp8 webm", 46: "1080p vp8 webm", 59: "480p h264 mp4", 78: "480p h264 mp4", 82: "360p h264 3D", 83: "480p h264 3D", 84: "720p h264 3D", 85: "1080p h264 3D", 100: "360p vp8 3D", 101: "480p vp8 3D", 102: "720p vp8 3D" } url = 'http://www.youtube.com/get_video_info?video_id=%s&eurl=https://youtube.googleapis.com/v/%s&ssl_stream=1' % \ (video_id, video_id) data = httptools.downloadpage(url).data video_urls = [] params = dict(urlparse.parse_qsl(data)) if params.get('hlsvp'): video_urls.append(["(LIVE .m3u8) [youtube]", params['hlsvp']]) return video_urls if config.is_xbmc(): import xbmc xbmc_version = config.get_platform(True)['num_version'] if xbmc_version >= 17 and xbmc.getCondVisibility('System.HasAddon(inputstream.adaptive)') \ and params.get('dashmpd'): if params.get('use_cipher_signature', '') != 'True': video_urls.append( ['mpd HD [youtube]', params['dashmpd'], 0, '', True]) js_signature = "" youtube_page_data = httptools.downloadpage( "http://www.youtube.com/watch?v=%s" % video_id).data params = extract_flashvars(youtube_page_data) if params.get('url_encoded_fmt_stream_map'): data_flashvars = params["url_encoded_fmt_stream_map"].split(",") for url_desc in data_flashvars: url_desc_map = dict(urlparse.parse_qsl(url_desc)) if not url_desc_map.get("url") and not url_desc_map.get("stream"): continue try: key = int(url_desc_map["itag"]) if not fmt_value.get(key): continue if url_desc_map.get("url"): url = urllib.unquote(url_desc_map["url"]) elif url_desc_map.get("conn") and url_desc_map.get("stream"): url = urllib.unquote(url_desc_map["conn"]) if url.rfind("/") < len(url) - 1: url += "/" url += urllib.unquote(url_desc_map["stream"]) elif url_desc_map.get( "stream") and not url_desc_map.get("conn"): url = urllib.unquote(url_desc_map["stream"]) if url_desc_map.get("sig"): url += "&signature=" + url_desc_map["sig"] elif url_desc_map.get("s"): sig = url_desc_map["s"] if not js_signature: urljs = scrapertools.find_single_match( youtube_page_data, '"assets":.*?"js":\s*"([^"]+)"') urljs = urljs.replace("\\", "") if urljs: if not re.search(r'https?://', urljs): urljs = urlparse.urljoin( "https://www.youtube.com", urljs) data_js = httptools.downloadpage(urljs).data from jsinterpreter import JSInterpreter funcname = scrapertools.find_single_match( data_js, '\.sig\|\|([A-z0-9$]+)\(') if not funcname: funcname = scrapertools.find_single_match( data_js, '["\']signature["\']\s*,\s*' '([A-z0-9$]+)\(') jsi = JSInterpreter(data_js) js_signature = jsi.extract_function(funcname) signature = js_signature([sig]) url += "&signature=" + signature url = url.replace(",", "%2C") video_urls.append(["(" + fmt_value[key] + ") [youtube]", url]) except: import traceback logger.info(traceback.format_exc()) return video_urls
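# Note: a hypothetical caller sketch that lists the formats resolved by extract_videos();
# the video id is a placeholder. Entries are [label, url] (plus extra fields in the DASH
# manifest case), so only the first two fields are used here:
def _list_youtube_formats(video_id="XXXXXXXXXXX"):
    for entry in extract_videos(video_id):
        label, media_url = entry[0], entry[1]
        logger.info("%s %s" % (label, media_url))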
def render_items(itemlist, parent_item): """ Función encargada de mostrar el itemlist en kodi, se pasa como parametros el itemlist y el item del que procede @type itemlist: list @param itemlist: lista de elementos a mostrar @type parent_item: item @param parent_item: elemento padre """ # Si el itemlist no es un list salimos if not type(itemlist) == list: if config.get_platform() == "boxee": xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True) return # Si no hay ningun item, mostramos un aviso if not len(itemlist): itemlist.append(Item(title="No hay elementos que mostrar")) # Recorremos el itemlist for item in itemlist: # Si el item no contiene categoria,le ponemos la del item padre if item.category == "": item.category = parent_item.category # Si el item no contiene fanart,le ponemos la del item padre if item.fanart == "": item.fanart = parent_item.fanart # Formatear titulo if item.text_color: item.title = '[COLOR %s]%s[/COLOR]' % (item.text_color, item.title) if item.text_blod: item.title = '[B]%s[/B]' % item.title if item.text_italic: item.title = '[I]%s[/I]' % item.title # IconImage para folder y video if item.folder: icon_image = "DefaultFolder.png" else: icon_image = "DefaultVideo.png" # Creamos el listitem listitem = xbmcgui.ListItem(item.title, iconImage=icon_image, thumbnailImage=item.thumbnail) # Ponemos el fanart if item.fanart: listitem.setProperty('fanart_image', item.fanart) else: listitem.setProperty( 'fanart_image', os.path.join(config.get_runtime_path(), "fanart.jpg")) # TODO: ¿Se puede eliminar esta linea? yo no he visto que haga ningun efecto. xbmcplugin.setPluginFanart( int(sys.argv[1]), os.path.join(config.get_runtime_path(), "fanart.jpg")) # Añadimos los infoLabels set_infolabels(listitem, item) # Montamos el menu contextual context_commands = set_context_commands(item, parent_item) # Añadimos el item if config.get_platform() == "boxee": xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url='%s?%s' % (sys.argv[0], item.tourl()), listitem=listitem, isFolder=item.folder) else: listitem.addContextMenuItems(context_commands, replaceItems=True) if not item.totalItems: item.totalItems = 0 xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url='%s?%s' % (sys.argv[0], item.tourl()), listitem=listitem, isFolder=item.folder, totalItems=item.totalItems) # Vista 5x3 hasta llegar al listado de canales if parent_item.channel not in ["channelselector", ""]: xbmcplugin.setContent(int(sys.argv[1]), "movies") # Fijamos el "breadcrumb" xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=parent_item.category.capitalize()) # No ordenar items xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_NONE) # Viewmodes: # Creo que es mas lógico que al item se le especifique que vista tendra al abrirlo. # El cambio puede provocar que algun canal no muestre los items en la vista deseada, pero es mejor ir corrigiendolo # que arrastrar algo que no tiene sentido if config.get_setting("forceview") == "true": if parent_item.viewmode == "list": xbmc.executebuiltin("Container.SetViewMode(50)") elif parent_item.viewmode == "movie_with_plot": xbmc.executebuiltin("Container.SetViewMode(503)") elif parent_item.viewmode == "movie": xbmc.executebuiltin("Container.SetViewMode(500)") # Cerramos el directorio xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
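# Note: the forceview branch above hard-codes skin view ids (50 = list, 503 = movie_with_plot,
# 500 = movie). An equivalent table-driven sketch using the same ids, silently ignoring
# unknown viewmodes (hypothetical helper, not part of the module):
VIEWMODE_IDS = {"list": 50, "movie_with_plot": 503, "movie": 500}

def _force_viewmode(parent_item):
    viewmode_id = VIEWMODE_IDS.get(parent_item.viewmode)
    if viewmode_id is not None:
        xbmc.executebuiltin("Container.SetViewMode(%s)" % viewmode_id)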
def episodios(item, final=True):
    logger.info("[animeid.py] episodios")

    # Download the page
    body = scrapertools.cache_page(item.url)

    # Default values so both variables always exist even if the regexes below fail
    scrapedplot = ""
    scrapedthumbnail = ""
    try:
        scrapedplot = scrapertools.get_match(body, '<meta name="description" content="([^"]+)"')
    except:
        pass
    try:
        scrapedthumbnail = scrapertools.get_match(body, '<link rel="image_src" href="([^"]+)"')
    except:
        pass

    data = scrapertools.get_match(body, '<ul id="listado">(.*?)</ul>')
    patron = '<li><a href="([^"]+)">(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    itemlist = []

    for url, title in matches:
        scrapedtitle = scrapertools.htmlclean(title)
        try:
            # Build a "1xNN - title" label from the episode number
            episodio = scrapertools.get_match(scrapedtitle, "Capítulo\s+(\d+)")
            titulo_limpio = re.compile("Capítulo\s+(\d+)\s+", re.DOTALL).sub("", scrapedtitle)
            if len(episodio) == 1:
                scrapedtitle = "1x0" + episodio + " - " + titulo_limpio
            else:
                scrapedtitle = "1x" + episodio + " - " + titulo_limpio
        except:
            pass
        scrapedurl = urlparse.urljoin(item.url, url)
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__, action="findvideos", title=scrapedtitle, url=scrapedurl,
                 thumbnail=scrapedthumbnail, plot=scrapedplot, show=item.show,
                 viewmode="movie_with_plot"))

    # Pagination: follow the "next page" link recursively
    try:
        next_page = scrapertools.get_match(body, '<a href="([^"]+)">\>\;</a>')
        next_page = urlparse.urljoin(item.url, next_page)
        item2 = Item(channel=__channel__, action="episodios", title=item.title, url=next_page,
                     thumbnail=item.thumbnail, plot=item.plot, show=item.show,
                     viewmode="movie_with_plot")
        itemlist.extend(episodios(item2, final=False))
    except:
        import traceback
        logger.info(traceback.format_exc())

    # Only the outermost call adds the library/download options
    if final and (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")):
        itemlist.append(
            Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url,
                 action="add_serie_to_library", extra="episodios", show=item.show))
        itemlist.append(
            Item(channel=item.channel, title="Descargar todos los episodios de la serie", url=item.url,
                 action="download_all_episodes", extra="episodios", show=item.show))

    return itemlist
def getmainlist(preferred_thumb=""): logger.info("channelselector.getmainlist") itemlist = [] # Obtiene el idioma, y el literal idioma = config.get_setting("languagefilter") logger.info("channelselector.getmainlist idioma=%s" % idioma) langlistv = [ config.get_localized_string(30025), config.get_localized_string(30026), config.get_localized_string(30027), config.get_localized_string(30028), config.get_localized_string(30029) ] try: idiomav = langlistv[int(idioma)] except: idiomav = langlistv[0] # Añade los canales que forman el menú principal itemlist.append( Item(title=config.get_localized_string(30121), channel="channelselector", action="channeltypes", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_canales_todos.png"))) # itemlist.append( Item(title=config.get_localized_string(30118) , channel="channelselector" , action="channeltypes", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_canales.png") ) ) itemlist.append( Item(title=config.get_localized_string(30119), channel="channelselector", action="channeltypes", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_canales.png"))) #itemlist.append( Item(title=config.get_localized_string(30130) , channel="novedades" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_novedades.png") ) ) #itemlist.append( Item(title=config.get_localized_string(30103) , channel="buscador" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_buscar.png")) ) # itemlist.append( Item(title="The Movie Database" , channel="database" , action="mainlist" , thumbnail= "http://www.userlogos.org/files/logos/Vyp3R/TMDb.png" ) ) itemlist.append( Item(title="Ricerca Globale", channel="biblioteca", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_buscar.png"))) #itemlist.append( Item(title="Biblioteca" , channel="buscador" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_biblioteca.png")) ) #itemlist.append( Item(title="Biblioteca Registi" , channel="bibliotecaregisti" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_biblioteca.png")) ) #itemlist.append( Item(title="Biblioteca Attori" , channel="bibliotecaattori" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_biblioteca.png")) ) itemlist.append( Item(title="Oggi in TV", channel="filmontv", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_filmontv.png"))) # itemlist.append( Item(title="Contenuti Vari" , channel="novedades" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_novedades.png") ) ) itemlist.append( Item(title=config.get_localized_string(40103), channel="youtube", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_youtube.png"))) #if config.is_xbmc(): itemlist.append( Item(title=config.get_localized_string(30128) , channel="trailertools" , action="mainlist" , thumbnail = urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_trailers.png")) ) itemlist.append( Item(title=config.get_localized_string(30102), channel="favoritos", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_favoritos.png"))) #itemlist.append( Item(title=config.get_localized_string(30131) , channel="libreria" , action="mainlist", thumbnail = 
urlparse.urljoin(get_thumbnail_path(preferred_thumb),"thumb_biblioteca.png")) ) if config.get_platform() == "rss": itemlist.append( Item(title="pyLOAD (Beta)", channel="pyload", action="mainlist", thumbnail=urlparse.urljoin( get_thumbnail_path(preferred_thumb), "pyload.png"))) itemlist.append( Item(title=config.get_localized_string(30101), channel="descargas", action="mainlist", thumbnail=urlparse.urljoin(get_thumbnail_path(preferred_thumb), "thumb_descargas.png"))) if "xbmceden" in config.get_platform(): itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=urlparse.urljoin( get_thumbnail_path(preferred_thumb), "thumb_configuracion.png"), folder=False)) else: itemlist.append( Item(title=config.get_localized_string(30100), channel="configuracion", action="mainlist", thumbnail=urlparse.urljoin( get_thumbnail_path(preferred_thumb), "thumb_configuracion.png"))) #if config.get_setting("fileniumpremium")=="true": # itemlist.append( Item(title="Torrents (Filenium)" , channel="descargasfilenium" , action="mainlist", thumbnail = urlparse.urljoin(get_thumbnail_path(),"torrents.png")) ) #if config.get_library_support(): if config.get_platform() != "rss": itemlist.append( Item(title=config.get_localized_string(30104), channel="ayuda", action="mainlist", thumbnail=urlparse.urljoin( get_thumbnail_path(preferred_thumb), "thumb_ayuda.png"))) return itemlist
# -*- coding: utf-8 -*-
#------------------------------------------------------------
# Cross-platform logger
#------------------------------------------------------------
# pelisalacarta
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
# Created by: Jesús ([email protected])
# License: GPL (http://www.gnu.org/licenses/gpl-3.0.html)
#------------------------------------------------------------
# Change history:
#------------------------------------------------------------

from core import config

# Import the platform-specific backend (xbmc, boxee, rss, ...) selected by config.get_platform()
exec "import platform." + config.get_platform() + ".logger as platformlogger"

# Logging is only active when the "debug" setting is enabled
loggeractive = (config.get_setting("debug") == "true")


def info(texto):
    if loggeractive:
        platformlogger.info(texto)


def debug(texto):
    # Note: debug() and error() are routed through the backend's info() as well
    if loggeractive:
        platformlogger.info(texto)


def error(texto):
    if loggeractive:
        platformlogger.info(texto)
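# Note: minimal usage sketch for the logger above; messages only reach the platform
# backend when the "debug" setting is "true":
def _logger_demo():
    from core import logger
    logger.info("pelisalacarta sample message (only visible with debug enabled)")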
def completo(item): logger.info("[newpct1.py] completo") itemlist = [] categoryID="" # Guarda el valor por si son etiquetas para que lo vea 'listadofichas' item_extra = item.extra item_show= item.show item_title= item.title # Lee las entradas if item_extra.startswith("serie"): ultimo_action="get_episodios" if item.extra !="serie_add": # Afinar mas la busqueda if item_extra=="serie-hd": categoryID=buscar_en_subcategoria(item.show,'1469') elif item_extra=="serie-vo": categoryID=buscar_en_subcategoria(item.show,'775') elif item_extra=="serie-tv": categoryID=buscar_en_subcategoria(item.show,'767') if categoryID !="": item.url=item.url.replace("categoryID=","categoryID="+categoryID) #Fanart oTvdb= TvDb() serieID=oTvdb.get_serieId_by_title(item.show) fanart = oTvdb.get_graphics_by_serieId(serieID) if len(fanart)>0: item.fanart = fanart[0] else: item_title= item.show item.title= item.show items_programas = get_episodios(item) else: ultimo_action="listado" items_programas = listado(item) if len(items_programas) ==0: return itemlist # devolver lista vacia salir = False while not salir: # Saca la URL de la siguiente página ultimo_item = items_programas[ len(items_programas)-1 ] # Páginas intermedias if ultimo_item.action==ultimo_action: # Quita el elemento de "Página siguiente" ultimo_item = items_programas.pop() # Añade las entradas de la página a la lista completa itemlist.extend( items_programas ) # Carga la siguiente página ultimo_item.extra = item_extra ultimo_item.show = item_show ultimo_item.title = item_title logger.info("[newpct1.py] completo url=" + ultimo_item.url) if item_extra.startswith("serie"): items_programas = get_episodios(ultimo_item) else: items_programas = listado(ultimo_item) # Última página else: # Añade a la lista completa y sale itemlist.extend( items_programas ) salir = True if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0 and (item.extra.startswith("serie") ): itemlist.append( Item(channel=item.channel, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="completo###serie_add" , show= item.show)) logger.info("[newpct1.py] completo items="+ str(len(itemlist))) return itemlist
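# Note: completo() above drains pagination by checking whether the last item of each page
# still carries the "next page" action. The same pattern as a small generic helper
# (hypothetical, not part of newpct1.py):
def _drain_pages(first_item, list_page, next_page_action):
    full_list = []
    page_items = list_page(first_item)
    while page_items:
        last_item = page_items[-1]
        if last_item.action == next_page_action:
            # Drop the "next page" entry, keep the rest and fetch the following page
            page_items.pop()
            full_list.extend(page_items)
            page_items = list_page(last_item)
        else:
            # Last page reached
            full_list.extend(page_items)
            break
    return full_list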
# --------------------------------------------------------------------------------
import re
import urllib
import urlparse

from core import config
from core import jsontools
from core import logger
from core import scrapertools
from core import servertools
from platformcode import platformtools

DEBUG = config.get_setting("debug")

# Enable the manual search option everywhere except on Plex
if config.get_platform() != "plex":
    keyboard = True
else:
    keyboard = False


def buscartrailer(item):
    logger.info("pelisalacarta.channels.trailertools buscartrailer")

    # Remove the "Buscar Trailer" option from the context menu to avoid redundancy
    if type(item.context) is str and "buscar_trailer" in item.context:
        item.context = item.context.replace("buscar_trailer", "")
    elif type(item.context) is list and "buscar_trailer" in item.context:
        item.context.remove("buscar_trailer")

    item.text_color = ""
def resolve_video_urls_for_playing(server, url, video_password="", muestra_dialogo=False): logger.info( "streamondemand-pureita-master.core.servertools resolve_video_urls_for_playing, server=" + server + ", url=" + url) video_urls = [] torrent = False server = server.lower() # Si el vídeo es "directo", no hay que buscar más if server == "directo" or server == "local": logger.info( "streamondemand-pureita-master.core.servertools server=directo, la url es la buena" ) try: import urlparse parsed_url = urlparse.urlparse(url) logger.info("parsed_url=" + str(parsed_url)) extension = parsed_url.path[-4:] except: extension = url[-4:] video_urls = [["%s [%s]" % (extension, server), url]] return video_urls, True, "" # Averigua las URL de los vídeos else: # Carga el conector try: # Muestra un diágo de progreso if muestra_dialogo: from platformcode import platformtools progreso = platformtools.dialog_progress( "streamondemand-pureita-master", "Connessione con " + server) server_parameters = get_server_parameters(server) #Cuenta las opciones disponibles, para calcular el porcentaje opciones = [] if server_parameters["free"] == "true": opciones.append("free") opciones.extend([ premium for premium in server_parameters["premium"] if config.get_setting(premium + "premium") == "true" ]) logger.info( "streamondemand-pureita-master.core.servertools opciones disponibles para " + server + ": " + str(len(opciones)) + " " + str(opciones)) # Sustituye el código por otro "Plex compatible" #exec "from servers import "+server+" as server_connector" servers_module = __import__("servers." + server) server_connector = getattr(servers_module, server) logger.info( "streamondemand-pureita-master.core.servertools servidor de " + server + " importado") # Si tiene una función para ver si el vídeo existe, lo comprueba ahora if hasattr(server_connector, 'test_video_exists'): logger.info( "streamondemand-pureita-master.core.servertools invocando a " + server + ".test_video_exists") puedes, motivo = server_connector.test_video_exists( page_url=url) # Si la funcion dice que no existe, fin if not puedes: logger.info( "streamondemand-pureita-master.core.servertools test_video_exists dice que el video no existe" ) if muestra_dialogo: progreso.close() return video_urls, puedes, motivo else: logger.info( "streamondemand-pureita-master.core.servertools test_video_exists dice que el video SI existe" ) # Obtiene enlaces free if server_parameters["free"] == "true": if muestra_dialogo: progreso.update( (100 / len(opciones)) * opciones.index("free"), "Connessione con " + server) logger.info( "streamondemand-pureita-master.core.servertools invocando a " + server + ".get_video_url") video_urls = server_connector.get_video_url( page_url=url, video_password=video_password) # Si no se encuentran vídeos en modo free, es porque el vídeo no existe if len(video_urls) == 0: if muestra_dialogo: progreso.close() return video_urls, False, "Non trovo il video su " + server # Obtiene enlaces para las diferentes opciones premium for premium in server_parameters["premium"]: if config.get_setting(premium + "premium") == "true": if muestra_dialogo: progreso.update( (100 / len(opciones)) * opciones.index(premium), "Connessione con " + premium) exec "from servers import " + premium + " as premium_conector" if premium == "realdebrid": if config.is_xbmc() or config.get_platform( ) == "mediaserver": debrid_urls = premium_conector.get_video_url( page_url=url, premium=True, video_password=video_password) if not "REAL-DEBRID:" in debrid_urls[0][0]: 
video_urls.extend(debrid_urls) else: if len(video_urls) == 0: return video_urls, False, debrid_urls[0][0] else: video_urls.extend( premium_conector.get_video_url( page_url=url, premium=True, user=config.get_setting(premium + "user"), password=config.get_setting(premium + "password"), video_password=video_password)) if muestra_dialogo: progreso.update(100, "Processo terminato") # Cierra el diálogo de progreso if muestra_dialogo: progreso.close() # Llegas hasta aquí y no tienes ningún enlace para ver, así que no vas a poder ver el vídeo if len(video_urls) == 0: # ¿Cual es el motivo? # 1) No existe -> Ya está controlado # 2) No tienes alguna de las cuentas premium compatibles # Lista de las cuentas que soportan este servidor listapremium = [] for premium in server_parameters["premium"]: listapremium.append(get_server_parameters(premium)["name"]) return video_urls, False, "Per il video su " + server + " è necessario<br/>un account " + " o ".join( listapremium) except: if muestra_dialogo: progreso.close() import traceback logger.info(traceback.format_exc()) return video_urls, False, "Si è verificato un errore<br/>con il connettore " + server return video_urls, True, ""
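# Note: a minimal caller sketch for resolve_video_urls_for_playing(); the server name and
# URL are placeholders. It returns (video_urls, ok, message), so the caller has to check
# the flag before picking an entry:
def _resolve_and_log(server="openload", url="http://example.com/embed/XXXX"):
    video_urls, ok, message = resolve_video_urls_for_playing(server, url, muestra_dialogo=False)
    if not ok or not video_urls:
        logger.info("cannot play: %s" % message)
        return None
    label, media_url = video_urls[-1][0], video_urls[-1][1]
    logger.info("playing %s %s" % (label, media_url))
    return media_url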
def openconfig(item): if "xbmc" in config.get_platform() or "boxee" in config.get_platform(): config.open_settings() return []
def episodios(item): logger.info("[rtve.py] episodios") # En la paginación la URL vendrá fijada, si no se construye aquí la primera página if item.url=="": # El ID del programa está en item.extra (ej: 42610) # La URL de los vídeos de un programa es # http://www.rtve.es/alacarta/interno/contenttable.shtml?ctx=42610&pageSize=20&pbq=1 item.url = "http://www.rtve.es/alacarta/interno/contenttable.shtml?ctx="+item.extra+"&pageSize=20&pbq=1" data = scrapertools.cachePage(item.url) itemlist = [] # Extrae los vídeos patron = '<li class="[^"]+">.*?' patron += '<span class="col_tit"[^>]+>[^<]+' patron += '<a href="([^"]+)">(.*?)</a>[^<]+' patron += '</span>[^<]+' patron += '<span class="col_tip">([^<]+)</span>[^<]+' patron += '<span class="col_dur">([^<]+)</span>.*?' patron += '<span class="col_fec">([^<]+)</span>.*?' patron += '<span class="detalle">([^>]+)</span>' matches = re.findall(patron,data,re.DOTALL) if DEBUG: scrapertools.printMatches(matches) # Crea una lista con las entradas for match in matches: if not "developer" in config.get_platform(): scrapedtitle = match[1]+" ("+match[2].strip()+") ("+match[3].strip()+") ("+match[4]+")" else: scrapedtitle = match[1] scrapedtitle = scrapedtitle.replace("<em>Nuevo</em> ","") scrapedtitle = scrapertools.unescape(scrapedtitle) scrapedtitle = scrapedtitle.strip() scrapedurl = urlparse.urljoin(item.url,match[0]) scrapedthumbnail = item.thumbnail scrapedplot = scrapertools.unescape(match[5].strip()) scrapedplot = scrapertools.htmlclean(scrapedplot).strip() scrapedextra = match[2] if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="play" , server="rtve" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , show=item.show, category = item.category, extra=scrapedextra, folder=False) ) if len(itemlist)>0: # Extrae la paginación patron = '<a name="paginaIR" href="([^"]+)"><span>Siguiente</span></a>' matches = re.findall(patron,data,re.DOTALL) if DEBUG: scrapertools.printMatches(matches) # Crea una lista con las entradas for match in matches: scrapedtitle = "!Página siguiente" scrapedurl = urlparse.urljoin(item.url,match).replace("&","&") #http://www.rtve.es/alacarta/interno/contenttable.shtml?pbq=2&modl=TOC&locale=es&pageSize=15&ctx=36850&advSearchOpen=false if not scrapedurl.endswith("&advSearchOpen=false"): scrapedurl = scrapedurl + "&advSearchOpen=false" scrapedthumbnail = "" scrapedplot = "" scrapedextra = item.extra if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="episodios" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , extra = scrapedextra, category = item.category, show=item.show) ) if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0: itemlist.append( Item(channel=item.channel, title=">> Opciones para esta serie", url=item.url, action="serie_options##episodios", thumbnail=item.thumbnail, extra = item.extra , show=item.show, folder=False)) else: # Extrae los vídeos patron = '<div class="mark"[^<]+' patron += '<a href="([^"]+)" title="([^"]+)"[^<]+' patron += '<span class="[^<]+' patron += '<img src="([^"]+)".*?' 
patron += '<div class="apiCall summary"[^<]+' patron += '<p[^<]+' patron += '<span class="time">([^<]+)</span[^<]+' patron += '<span class="date">([^<]+)</span>([^<]+)<' matches = re.findall(patron,data,re.DOTALL) if DEBUG: scrapertools.printMatches(matches) # Crea una lista con las entradas for scrapedurl,scrapedtitle,scrapedthumbnail,duracion,fecha,plot in matches: title = scrapedtitle+" ("+duracion+")("+fecha+")" url = urlparse.urljoin(item.url,scrapedurl) plot = plot thumbnail = scrapedthumbnail if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]") itemlist.append( Item(channel=CHANNELNAME, title=title , action="play" , server="rtve" , url=url, thumbnail=thumbnail, plot=plot , show=item.show, category = item.category, fanart=thumbnail, viewmode="movie_with_plot", folder=False) ) if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0: itemlist.append( Item(channel=item.channel, title=">> Opciones para esta serie", url=item.url, action="serie_options##episodios", thumbnail=item.thumbnail, extra = item.extra , show=item.show, folder=False)) return itemlist
def findvideos(item, verTodos=False): logger.info("pelisalacarta.channels.pordede findvideos") # Descarga la pagina headers = DEFAULT_HEADERS[:] #headers.append(["Referer",item.extra]) #headers.append(["X-Requested-With","XMLHttpRequest"]) data = scrapertools.cache_page(item.url, headers=headers) if (DEBUG): logger.info("data=" + data) # Extrae las entradas (carpetas) #json_object = jsontools.load_json(data) #if (DEBUG): logger.info("html="+json_object["html"]) #data = json_object["html"] sesion = scrapertools.find_single_match(data, 'SESS = "([^"]+)";') if (DEBUG): logger.info("sesion=" + sesion) patron = '<a target="_blank" class="a aporteLink(.*?)</a>' matches = re.compile(patron, re.DOTALL).findall(data) itemlist = [] if config.get_platform().startswith("xbmc") and "/what/peli" in item.url: itemlist.append( Item(channel=__channel__, action="infosinopsis", title="INFO / SINOPSIS", url=item.url, thumbnail=item.thumbnail, folder=False)) itemsort = [] sortlinks = config.get_setting( "pordedesortlinks" ) # 0:no, 1:valoracion, 2:idioma, 3:calidad, 4:idioma+calidad, 5:idioma+valoracion, 6:idioma+calidad+valoracion sortlinks = int(sortlinks) if sortlinks != '' else 0 showlinks = config.get_setting( "pordedeshowlinks") # 0:todos, 1:ver online, 2:descargar showlinks = int(showlinks) if showlinks != '' else 0 for match in matches: if (DEBUG): logger.info("match=" + match) jdown = scrapertools.find_single_match( match, '<div class="jdownloader">[^<]+</div>') if (showlinks == 1 and jdown != '') or ( showlinks == 2 and jdown == ''): # Descartar enlaces veronline/descargar continue idiomas = re.compile('<div class="flag([^"]+)">([^<]+)</div>', re.DOTALL).findall(match) idioma_0 = (idiomas[0][0].replace(" ", "").strip() + " " + idiomas[0][1].replace(" ", "").strip()).strip() if len(idiomas) > 1: idioma_1 = (idiomas[1][0].replace(" ", "").strip() + " " + idiomas[1][1].replace(" ", "").strip()).strip() idioma = idioma_0 + ", " + idioma_1 else: idioma_1 = '' idioma = idioma_0 calidad_video = scrapertools.find_single_match( match, '<div class="linkInfo quality"><i class="icon-facetime-video"></i>([^<]+)</div>' ) if (DEBUG): logger.info("calidad_video=" + calidad_video) calidad_audio = scrapertools.find_single_match( match, '<div class="linkInfo qualityaudio"><i class="icon-headphones"></i>([^<]+)</div>' ) if (DEBUG): logger.info("calidad_audio=" + calidad_audio) thumb_servidor = scrapertools.find_single_match( match, '<div class="hostimage"[^<]+<img\s*src="([^"]+)">') if (DEBUG): logger.info("thumb_servidor=" + thumb_servidor) nombre_servidor = scrapertools.find_single_match( thumb_servidor, "popup_([^\.]+)\.png") if (DEBUG): logger.info("nombre_servidor=" + nombre_servidor) title = ( "Download " if jdown != '' else "Ver en " ) + nombre_servidor + " (" + idioma + ") (Calidad " + calidad_video.strip( ) + ", audio " + calidad_audio.strip() + ")" cuenta = [] valoracion = 0 for idx, val in enumerate(['1', '2', 'report']): nn = scrapertools.find_single_match( match, '<span\s+data-num="([^"]+)"\s+class="defaultPopup"\s+href="/likes/popup/value/' + val + '/') if nn != '0' and nn != '': cuenta.append(nn + ' ' + ['ok', 'ko', 'rep'][idx]) valoracion += int(nn) if val == '1' else -int(nn) if len(cuenta) > 0: title += ' (' + ', '.join(cuenta) + ')' url = urlparse.urljoin( item.url, scrapertools.find_single_match(match, 'href="([^"]+)"')) thumbnail = thumb_servidor plot = "" if (DEBUG): logger.info("title=[" + title + "], url=[" + url + "], thumbnail=[" + thumbnail + "]") if sortlinks > 0: # orden1 para dejar los 
"downloads" detras de los "ver" al ordenar # orden2 segun configuración if sortlinks == 1: orden = valoracion elif sortlinks == 2: orden = valora_idioma(idioma_0, idioma_1) elif sortlinks == 3: orden = valora_calidad(calidad_video, calidad_audio) elif sortlinks == 4: orden = (valora_idioma(idioma_0, idioma_1) * 100) + valora_calidad(calidad_video, calidad_audio) elif sortlinks == 5: orden = (valora_idioma(idioma_0, idioma_1) * 1000) + valoracion elif sortlinks == 6: orden = (valora_idioma(idioma_0, idioma_1) * 100000) + ( valora_calidad(calidad_video, calidad_audio) * 1000) + valoracion itemsort.append({ 'action': "play", 'title': title, 'url': url, 'thumbnail': thumbnail, 'plot': plot, 'extra': sesion + "|" + item.url, 'fulltitle': title, 'orden1': (jdown == ''), 'orden2': orden }) else: itemlist.append( Item(channel=__channel__, action="play", title=title, url=url, thumbnail=thumbnail, plot=plot, extra=sesion + "|" + item.url, fulltitle=title)) if sortlinks > 0: numberlinks = config.get_setting( "pordedenumberlinks") # 0:todos, > 0:n*5 (5,10,15,20,...) numberlinks = int(numberlinks) * 5 if numberlinks != '' else 0 if numberlinks == 0: verTodos = True itemsort = sorted(itemsort, key=lambda k: (k['orden1'], k['orden2']), reverse=True) for i, subitem in enumerate(itemsort): if verTodos == False and i >= numberlinks: itemlist.append( Item(channel=__channel__, action='findallvideos', title='Ver todos los enlaces', url=item.url, extra=item.extra)) break itemlist.append( Item(channel=__channel__, action=subitem['action'], title=subitem['title'], url=subitem['url'], thumbnail=subitem['thumbnail'], plot=subitem['plot'], extra=subitem['extra'], fulltitle=subitem['fulltitle'])) return itemlist
platformtools.dialog_ok( "plugin", config.get_localized_string(30051) % e.code) except: import traceback logger.error(traceback.format_exc()) patron = 'File "' + os.path.join(config.get_runtime_path(), "channels", "").replace("\\", "\\\\") + '([^.]+)\.py"' canal = scrapertools.find_single_match(traceback.format_exc(), patron) try: import xbmc if config.get_platform(True)['num_version'] < 14: log_name = "xbmc.log" else: log_name = "kodi.log" log_message = "Percorso: " + xbmc.translatePath( "special://logpath") + log_name except: log_message = "" if canal: platformtools.dialog_ok( "Errore inaspettato in " + canal, "Protrebbe essere un errore di connessione. Il canale web " "potrebbe aver modificato la sua struttura oppure si è verificato un errore in streamondemand.", "Per dettagli consulta il log.", log_message) else:
def addfolder(nombre, channelname, accion, category="", thumbnailname=""): #print "addfolder" if category == "": try: category = unicode(nombre, "utf-8").encode("iso-8859-1") except: pass import xbmc if config.get_setting("thumbnail_type") == "0": IMAGES_PATH = xbmc.translatePath( os.path.join(config.get_runtime_path(), 'resources', 'images', 'posters')) elif config.get_setting("thumbnail_type") == "1": IMAGES_PATH = xbmc.translatePath( os.path.join(config.get_runtime_path(), 'resources', 'images', 'banners')) elif config.get_setting("thumbnail_type") == "2": IMAGES_PATH = xbmc.translatePath( os.path.join(config.get_runtime_path(), 'resources', 'images', 'squares')) if config.get_setting("thumbnail_type") == "0": WEB_PATH = "http://tvalacarta.mimediacenter.info/posters/" elif config.get_setting("thumbnail_type") == "1": WEB_PATH = "http://tvalacarta.mimediacenter.info/banners/" elif config.get_setting("thumbnail_type") == "2": WEB_PATH = "http://tvalacarta.mimediacenter.info/squares/" if config.get_platform() == "boxee": IMAGES_PATH = "http://tvalacarta.mimediacenter.info/posters/" if thumbnailname == "": thumbnailname = channelname # Preferencia: primero JPG thumbnail = thumbnailImage = os.path.join(IMAGES_PATH, thumbnailname + ".jpg") # Preferencia: segundo PNG if not os.path.exists(thumbnail): thumbnail = thumbnailImage = os.path.join(IMAGES_PATH, thumbnailname + ".png") # Preferencia: tercero WEB if not os.path.exists(thumbnail): thumbnail = thumbnailImage = WEB_PATH + thumbnailname + ".png" #Si no existe se usa el logo del plugin #if not os.path.exists(thumbnail): # thumbnail = thumbnailImage=WEB_PATH+"ayuda.png" #Check: ruta del logo import xbmcgui import xbmcplugin #logger.info("thumbnail="+thumbnail) listitem = xbmcgui.ListItem(nombre, iconImage="DefaultFolder.png", thumbnailImage=thumbnail) itemurl = '%s?channel=%s&action=%s&category=%s' % ( sys.argv[0], channelname, accion, category) xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]), url=itemurl, listitem=listitem, isFolder=True)
def get_episodios(item,recursion): logger.info("[rtve.py] get_episodios_documentales") itemlist = [] data = scrapertools.cachePage(item.url) # Extrae los vídeos ''' <li class="odd"> <span class="col_tit" id="2851919" name="progname"> <a href="/alacarta/videos/atencion-obras/atencion-obras-josep-maria-flotats-ferran-adria-sanchis-sinisterra/2851919/">Atención Obras - 07/11/14</a> </span> <span class="col_tip"> <span>Completo</span> </span> <span class="col_dur">55:35</span> <span class="col_pop"><span title="32% popularidad" class="pc32"><em><strong><span>32%</span></strong></em></span></span> <span class="col_fec">07 nov 2014</span> <div id="popup2851919" class="tultip hddn"> <span id="progToolTip" class="tooltip curved"> <span class="pointer"></span> <span class="cerrar" id="close2851919"></span> <span class="titulo-tooltip"><a href="/alacarta/videos/atencion-obras/atencion-obras-josep-maria-flotats-ferran-adria-sanchis-sinisterra/2851919/" title="Ver Atención Obras - 07/11/14">Atención Obras - 07/11/14</a></span> <span class="fecha">07 nov 2014</span> <span class="detalle">Josep María Flotats trae al Teatro María Guerrero de Madrid “El juego del amor y del azar” de Pierre de Marivaux. Un texto que ya ha sido estrenado en el Teatre Nacional de Catalunya. C...</span> ''' patron = '<li class="[^"]+">.*?' patron += '<span class="col_tit"[^<]+' patron += '<a href="([^"]+)">(.*?)</a[^<]+' patron += '</span>[^<]+' patron += '<span class="col_tip"[^<]+<span>([^<]+)</span[^<]+</span[^<]+' patron += '<span class="col_dur">([^<]+)</span>.*?' patron += '<span class="col_fec">([^<]+)</span>.*?' patron += '<span class="detalle">([^>]+)</span>' matches = re.findall(patron,data,re.DOTALL) if DEBUG: scrapertools.printMatches(matches) # Crea una lista con las entradas for match in matches: if not "developer" in config.get_platform(): scrapedtitle = match[1]+" ("+match[2].strip()+") ("+match[3].strip()+") ("+match[4]+")" else: scrapedtitle = match[1] scrapedtitle = scrapedtitle.replace("<em>Nuevo</em> ","") scrapedtitle = scrapertools.unescape(scrapedtitle) scrapedtitle = scrapedtitle.strip() scrapedurl = urlparse.urljoin(item.url,match[0]) scrapedthumbnail = item.thumbnail scrapedplot = scrapertools.unescape(match[5].strip()) scrapedplot = scrapertools.htmlclean(scrapedplot).strip() scrapedextra = match[2] if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]") itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="play" , server="rtve" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , show=item.show, category = item.category, extra=scrapedextra, folder=False) ) # Paginación if len(itemlist)>0: next_page_url = scrapertools.find_single_match(data,'<a name="paginaIR" href="([^"]+)"><span>Siguiente</span></a>') if next_page_url!="": next_page_url = urlparse.urljoin(item.url,next_page_url).replace("&","&") #http://www.rtve.es/alacarta/interno/contenttable.shtml?pbq=2&modl=TOC&locale=es&pageSize=15&ctx=36850&advSearchOpen=false if not next_page_url.endswith("&advSearchOpen=false"): next_page_url = next_page_url + "&advSearchOpen=false" siguiente_item = Item(channel=CHANNELNAME,action="episodios",url=urlparse.urljoin(item.url,next_page_url),title=item.title,show=item.show,category=item.category) logger.info("siguiente_item="+siguiente_item.tostring()) # Para evitar listas eternas, si tiene más de 3 páginas añade el item de "siguiente" if recursion<=3: itemlist.extend( get_episodios(siguiente_item,recursion+1) ) else: 
siguiente_item.title=">> Página siguiente" itemlist.append(siguiente_item) return itemlist
def episodios(item): logger.info("pelisalacarta.seriesmu episodios") itemlist = [] # Descarga la página data = scrapertools.cache_page(item.url) data = re.sub(r"\n|\r|\t|\s{2}| ", "", data) seguir = scrapertools.get_match( data, '<ul><li text="Siguiendo" color="green" class="([^"]+)"') abandonar = scrapertools.get_match( data, '<li text="Abandonada" color="red" class="([^"]+)">') fanart = scrapertools.get_match( data, '<div class="media-cover" style="background-image: url\(http://series.mu([^"]+)\)' ) fanart = urlparse.urljoin(host, fanart) seguir = urlparse.urljoin(host, seguir) abandonar = urlparse.urljoin(host, abandonar) if not item.title.endswith("XBMC"): if '<div class=""></div>' in data: url = seguir title = bbcode_kodi2html("[COLOR yellow]Seguir[/COLOR]") thumbnail = "http://s14.postimg.org/ca5boj275/smseguir.png" else: url = abandonar title = bbcode_kodi2html( "[COLOR green]Siguiendo[/COLOR]: [COLOR red]Abandonar[/COLOR]") thumbnail = "http://s18.postimg.org/hh4l8hj1l/smabandonar2.png" itemlist.append( Item(channel=item.channel, title=title, url=url, fanart=fanart, thumbnail=thumbnail, action="cambiar_estado", extra=item.url, folder=False)) patrontemporada = '<ul (temp[^<]+)>(.*?)</ul>' matchestemporadas = re.compile(patrontemporada, re.DOTALL).findall(data) for nombre_temporada, bloque_episodios in matchestemporadas: if (DEBUG): logger.info("nombre_temporada=" + nombre_temporada) if (DEBUG): logger.info("bloque_episodios=" + bloque_episodios) # Extrae los episodios patron = '<span>(.*?)' patron += '</span>([^<]+).*?' patron += '<i class="(.*?)".*?' patron += '<i class="icon-play".*?' patron += 'href="([^"]+)"' matches = re.compile(patron, re.DOTALL).findall(bloque_episodios) scrapertools.printMatches(matches) for scrapednumber, scrapedtitle, scrapedeyes, scrapedurl in matches: if "open" in scrapedeyes: scrapedeyes = re.sub( r"eye-w icon-eye-open", bbcode_kodi2html("[COLOR salmon]" + " [Visto]" + "[/COLOR]"), scrapedeyes) if "close" in scrapedeyes: scrapedeyes = re.sub( r"eye-w icon-eye-close", bbcode_kodi2html("[COLOR chartreuse]" + " [Pendiente]" + "[/COLOR]"), scrapedeyes) title = nombre_temporada + "X" + scrapednumber + scrapedtitle + scrapedeyes title = title.replace("temp=", "Temporada ") title = title.replace( scrapedtitle, bbcode_kodi2html("[COLOR white]" + scrapedtitle + "[/COLOR]")) puntuacion = scrapertools.get_match( data, '<li><div class="num" id="val-score">(.*?)</div>') puntuacion = puntuacion.replace( puntuacion, bbcode_kodi2html("[COLOR yellow]" + puntuacion + "[/COLOR]")) puntuacion_title = "Puntuación :" puntuacion_title = puntuacion_title.replace( puntuacion_title, bbcode_kodi2html("[COLOR pink]" + puntuacion_title + "[/COLOR]")) puntuacion = puntuacion_title + " " + puntuacion + "[CR]" scrapedplot = scrapertools.get_match( data, '<h2>(.*?)<div class="card media-chapters">') plotformat = re.compile('<p>(.*?)</p>', re.DOTALL).findall(scrapedplot) scrapedplot = scrapedplot.replace( scrapedplot, bbcode_kodi2html("[COLOR white]" + scrapedplot + "[/COLOR]")) for plot in plotformat: scrapedplot = scrapedplot.replace( plot, bbcode_kodi2html("[COLOR skyblue][B]" + plot + "[/B][/COLOR]")) scrapedplot = scrapedplot.replace("</h2><p>", "[CR]") scrapedplot = scrapedplot.replace("</p></div>", "") scrapedplot = puntuacion + scrapedplot fanart = scrapertools.get_match( data, '<div class="media-cover" style="background-image: url\(http://series.mu([^"]+)\)' ) fanart = urlparse.urljoin(host, fanart) scrapedurl = urlparse.urljoin(host, scrapedurl) if scrapedtitle != " ": 
itemlist.append( Item(channel=__channel__, title=title, url=scrapedurl, action="findvideos", thumbnail=item.thumbnail, plot=scrapedplot, fanart=fanart, show=item.show.strip(), folder=True)) if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist) > 0: itemlist.append( Item(channel=__channel__, title="Añadir esta serie a la biblioteca de XBMC", url=item.url, action="add_serie_to_library", extra="episodios", show=item.show)) return itemlist