Example #1
def mirrors(item):
	logger.info("[capitancinema.py] mirrors")

	title = item.title
	thumbnail = item.thumbnail
	plot = item.plot

	# Descarga la página
	data = scrapertools.cachePage(item.url)
	patronvideos  = '<li><strong>DISPONIBLE EN EL FORO</strong>[^<]+<a href="([^"]+)"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	itemlist = []
	if len(matches)>0:
		url = matches[0]
		data = scrapertools.cachePage(url)

		# ------------------------------------------------------------------------------------
		# Busca los enlaces a los videos
		# ------------------------------------------------------------------------------------
		listavideos = servertools.findvideos(data)

		for video in listavideos:
			scrapedtitle = title.strip() + " - " + video[0]
			scrapedurl = video[1]
			server = video[2]
			
			itemlist.append( Item(channel=CHANNELNAME, action="play" , title=scrapedtitle , url=scrapedurl, thumbnail=item.thumbnail, plot=item.plot, server=server, folder=False))

	return itemlist
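Most of the channel functions collected here follow the same skeleton: download a page with scrapertools.cachePage, pull (url, title) pairs out of the HTML with a regular expression, and turn each pair into a result entry. Below is a minimal, standard-library-only sketch of that skeleton (Python 2); the default pattern and the page_url parameter are placeholders and do not belong to any of the channels above.
# -*- coding: utf-8 -*-
# Stdlib-only sketch of the download -> regex -> result-list pattern (Python 2).
import re
import urllib2

def find_links(page_url, pattern=r'<a href="([^"]+)"[^>]*>([^<]+)</a>'):
	# Download the page
	data = urllib2.urlopen(page_url).read()
	# Extract (url, title) pairs and build the list of results
	results = []
	for scrapedurl, scrapedtitle in re.compile(pattern, re.DOTALL).findall(data):
		results.append({"title": scrapedtitle.strip(), "url": scrapedurl})
	return results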
def mirrors(params,url,category):
	logger.info("[capitancinema.py] mirrors")

	title = urllib.unquote_plus( params.get("title") )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = urllib.unquote_plus( params.get("plot") )

	# Descarga la página
	data = scrapertools.cachePage(url)
	patronvideos  = '<li><strong>DISPONIBLE EN EL FORO</strong>[^<]+<a href="([^"]+)"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	if len(matches)>0:
		url = matches[0]
		data = scrapertools.cachePage(url)

		# ------------------------------------------------------------------------------------
		# Busca los enlaces a los videos
		# ------------------------------------------------------------------------------------
		listavideos = servertools.findvideos(data)

		for video in listavideos:
			videotitle = video[0]
			url = video[1]
			server = video[2]
			xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
		# ------------------------------------------------------------------------------------

	# Cierra el directorio
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def play(params,url,category):
	logger.info("[peliculasid.py] play")
	
	
	title = unicode( xbmc.getInfoLabel( "ListItem.Title" ), "utf-8" )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	try:
		plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )
	except:
		plot = xbmc.getInfoLabel( "ListItem.Plot" )
	server = params["server"]
	if "|" in url:
		matches = url.split("|")
		patronvideos = 'file=([^\&]+)\&'
		c = 0
		listdata = []
		for match in matches:
			c += 1
			print match
			data = scrapertools.cachePage(match)
			matches2 = re.compile(patronvideos,re.DOTALL).findall(data)
			listdata.append(["Parte %d" %c,matches2[0]])
		
		url = xmltoplaylist.MakePlaylistFromList(listdata)	
	elif "iframeplayer.php" in url:      #"http://peliculasid.com/iframeplayer.php?url=aHR0cDovL3ZpZGVvLmFrLmZhY2Vib29rLmNvbS9jZnMtYWstc25jNC80MjIxNi82MS8xMjgxMTI4ODgxOTUwXzM5NTAwLm1wNA=="
		data = scrapertools.cachePage(url)
		patronvideos = 'file=([^\&]+)\&'
		matches = re.compile(patronvideos,re.DOTALL).findall(data)
		if len(matches)>0:
			url = matches[0]
	
	xbmctools.playvideo(CHANNELNAME,server,url,category,title,thumbnail,plot)
def download_server(server_name):
    logger.info("streamondemand.core.updater download_server('" + server_name + "')")

    import servertools
    remote_server_url, remote_version_url = servertools.get_server_remote_url(server_name)
    local_server_path, local_version_path, local_compiled_path = servertools.get_server_local_path(server_name)

    # Descarga el canal
    try:
        updated_server_data = scrapertools.cachePage(remote_server_url)
        outfile = open(local_server_path, "wb")
        outfile.write(updated_server_data)
        outfile.flush()
        outfile.close()
        logger.info("streamondemand.core.updater Grabado a " + local_server_path)
    except:
        import traceback
        logger.info(traceback.format_exc())

    # Descarga la version (puede no estar)
    try:
        updated_version_data = scrapertools.cachePage(remote_version_url)
        outfile = open(local_version_path, "w")
        outfile.write(updated_version_data)
        outfile.flush()
        outfile.close()
        logger.info("streamondemand.core.updater Grabado a " + local_version_path)
    except:
        import traceback
        logger.info(traceback.format_exc())

    if os.path.exists(local_compiled_path):
        os.remove(local_compiled_path)
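The download-and-write step above opens, writes, flushes and closes the file by hand; a `with` block expresses the same thing more compactly. A minimal standard-library sketch of that step follows (the helper name and its parameters are illustrative, not part of the updater module):
import urllib2

def download_to_file(remote_url, local_path):
    # Download remote_url and write the response to local_path in binary mode,
    # mirroring the download step of download_server() above.
    data = urllib2.urlopen(remote_url).read()
    with open(local_path, "wb") as outfile:
        outfile.write(data)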
def detail(params,url,category):
	logger.info("[sevillista.py] detail")

	# Recupera los parámetros
	title = urllib.unquote_plus( params.get("title") )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = urllib.unquote_plus( params.get("plot") )

	# Descarga la página de detalle, y busca el iframe
	data = scrapertools.cachePage(url)
	patron = '<iframe marginwidth="0" marginheight="0" src="([^"]+)"'
	matches = re.compile(patron,re.DOTALL).findall(data)
	if len(matches)>0:
		# Descarga el iframe
		url = matches[0]
		data = scrapertools.cachePage(url)
		
		# Busca vídeos no megavideo (playlist externa)
		patron = '<param name="flashvars" value=".amp.skin=.amp.plugins.captions.amp.file.([^\&]+)\&'
		matches = re.compile(patron,re.DOTALL).findall(data)
		if len(matches)>0:
			# Descarga la playlist
			url = matches[0]
			if url.endswith(".xml"):
				data2 = scrapertools.cachePage(url)
				# Busca los vídeos
				#<title>Parte 1</title>
				#<annotation>Castellano</annotation>
				#<location>http://video.ak.facebook.com/cfs-ak-ash1/27673/000/219/106288556079917_23239.mp4</location>
				patron  = '<title>([^<]+)</title>[^>]*'
				patron += '<annotation>([^<]+)</annotation>[^>]*'
				patron += '<location>([^<]+)</location>'
				matches = re.compile(patron,re.DOTALL).findall(data2)
				
				for match in matches:
					scrapedtitle = title + " " + match[0]+" "+match[1]+" [Directo]"
					scrapedurl = urlparse.urljoin(url,match[2])
					scrapedthumbnail = thumbnail
					scrapedplot = plot
					if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
					xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , scrapedtitle , scrapedurl , scrapedthumbnail , scrapedplot )
			else:
				xbmctools.addnewvideo( CHANNELNAME , "play" , category , "Directo" , title + " [Directo]" , url , thumbnail , plot )
	
	# ------------------------------------------------------------------------------------
	# Busca los enlaces a los videos conocidos en el iframe
	# ------------------------------------------------------------------------------------
	listavideos = servertools.findvideos(data)

	for video in listavideos:
		videotitle = video[0]
		url = video[1]
		server = video[2]
		xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
	# ------------------------------------------------------------------------------------

	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def detail(params,url,category):
	logger.info("[veranime.py] detail")

	title = urllib.unquote_plus( params.get("title") )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = urllib.unquote_plus( params.get("plot") )

	# Descarga la página
	data = scrapertools.cachePage(url)
	#logger.info(data)

	patron  = '<div id="listacapdd"><div class="listddserie">[^<]+'
	patron += '<a title="[^"]+" href="([^"]+)"><strong>[^<]+</strong></a>[^<]+'
	patron += '</div>'
	matches = re.compile(patron,re.DOTALL).findall(data)
	if len(matches)>0:
		url = matches[0]
		data = scrapertools.cachePage(url)

	# ------------------------------------------------------------------------------------
	# Busca los enlaces a los videos
	# ------------------------------------------------------------------------------------
	listavideos = servertools.findvideos(data)

	for video in listavideos:
		videotitle = video[0]
		url = video[1]
		server = video[2]
		xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
	# ------------------------------------------------------------------------------------

	# Asigna el título, desactiva la ordenación, y cierra el directorio
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #7
def detail(item):
	logger.info("[cine15.py] detail")

	title = item.title
	thumbnail = item.thumbnail
	plot = item.plot

	# Descarga la página
	data = scrapertools.cachePage(item.url)
	#logger.info(data)

	# ------------------------------------------------------------------------------------
	# Busca los enlaces a videos no megavideo (playlist xml)
	# ------------------------------------------------------------------------------------
	patronvideos  = 'flashvars[^f]+file=([^\&]+)\&amp'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	
	itemlist = []
	if len(matches)>0:
		if ("xml" in matches[0]):
			data2 = scrapertools.cachePage(matches[0])
			logger.info("data2="+data2)
			patronvideos  = '<track>[^<]+'
			patronvideos += '<title>([^<]+)</title>[^<]+'
			patronvideos += '<location>([^<]+)</location>[^<]+'
			patronvideos += '</track>'
			matches = re.compile(patronvideos,re.DOTALL).findall(data2)
			scrapertools.printMatches(matches)

			for match in matches:
				scrapedtitle = match[0]
				scrapedurl = match[1].strip()
				scrapedthumbnail = thumbnail
				scrapedplot = plot
				if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

				itemlist.append( Item(channel=CHANNELNAME, action="play" , title=scrapedtitle + " [Directo]" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))

		else:
			itemlist.append( Item(channel=CHANNELNAME, action="play" , title=title + " [Directo]" , url=matches[0], thumbnail=thumbnail, plot=plot, server="Directo", folder=False))
			
	# ------------------------------------------------------------------------------------
	# Busca los enlaces a los videos
	# ------------------------------------------------------------------------------------
	listavideos = servertools.findvideos(data)

	for video in listavideos:
		videotitle = video[0]
		url = video[1]
		server = video[2]
		itemlist.append( Item(channel=CHANNELNAME, action="play" , title=title.strip() + " - " + videotitle , url=url, thumbnail=thumbnail, plot=plot, server=server, folder=False))
	# ------------------------------------------------------------------------------------

	return itemlist
Example #8
def getlistWall(params,url,category):
	logger.info("[megalivewall.py] getlistWall")
	
	if url=="":
		url="http://www.megalive.com/"
	encontrados = set()
	# Descarga la página
	data = scrapertools.cachePage(url)
	patron = "flashvars.xmlurl = '([^']+)'"
	matches = re.compile(patron,re.DOTALL).findall(data)
	if len(matches)>0:
		xmlurl = urllib.unquote_plus(matches[0])
		#logger.info(data)
		#<image click_url="?v=7RJPHQN0" images="http://img6.megalive.com/f29efb78905a482f00dacb5f5e41e953.jpg^
		#http://img6.megalive.com/eecd5b9bda6035095ef672b7c5e6dd5a.jpg" description="Expansion Ixcan TV" time="" thumb="http://img6.megalive.com/568a3de4a6b15fddce5c0f9609334529.jpg" hq="1" icon="ml">
		# Extrae las entradas (carpetas)
		patron  = '<image click_url="\?v=([^"]+)".*?'
		patron += 'description="(?:([^"]+)|)" time="" '
		patron += 'thumb="([^"]+)" '
		patron += 'hq="([^"]+)"'
		data = scrapertools.cachePage(xmlurl)
		matches = re.compile(patron,re.DOTALL).findall(data)
		scrapertools.printMatches(matches)
		itemlist = []

		for match in matches:
			# Titulo
			if len(match[1])>0:
				scrapedtitle = decodeHtmlentities(match[1]).encode("utf-8")
			else:
				scrapedtitle = "(no title)"
			# URL
			if match[0] in encontrados:
				continue
			scrapedurl = match[0]
			encontrados.add(match[0])
			# Thumbnail
			scrapedthumbnail = match[2]
			# Argumento
			scrapedplot = ""
			if match[3]=="1":
				hq=" [HQ]"
			else:
				hq=""

			# Depuracion
			if (DEBUG):
				logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

			# Añade al listado de XBMC
			#addnewvideo( CHANNELNAME , "play" , category ,"Directo", scrapedtitle+hq , scrapedurl , scrapedthumbnail , scrapedplot )
			itemlist.append( Item(channel=CHANNELNAME, action="play" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show = scrapedtitle, folder=False , context = True))

		return itemlist
def search(params,url,category):
	xbmc.output("[animeforos.py] search")

	letras = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
        
	opciones = []
	opciones.append("Teclado (Busca en Título y datos anexos)")
	for letra in letras:
		opciones.append(letra)
	searchtype = xbmcgui.Dialog()
	seleccion = searchtype.select("Búsqueda por Teclado o por Inicial del Título:", opciones)
	xbmc.output("seleccion=%d" % seleccion)
	if seleccion == -1 :return
	if seleccion == 0:
		keyboard = xbmc.Keyboard('')
		keyboard.doModal()
		if (keyboard.isConfirmed()):
			tecleado = keyboard.getText()
			if len(tecleado)>0:	
				# Descarga la página
				data = scrapertools.cachePage(url)
				#xbmc.output(data)

				if len(tecleado) == 1:
					listaseries = findcontenidos(data,"Completo","[^<]+",tecleado,"")
					for serie in listaseries:
						addnewfolder( CHANNELNAME , "detail" , category , serie[0]+" "+serie[1]+" ["+serie[5]+"]"+" ["+serie[2]+"]"+serie[3] , serie[4] , "" , "Tipo Contenido: "+serie[5] )
				else:
					listaseries = findcontenidos(data,"Completo","[^<]+","","")				
					for serie in listaseries:
						foldertitle = serie[0]+" "+serie[1]+" ["+serie[5]+"]"+" ["+serie[2]+"]"+serie[3]
						match = re.search(tecleado,foldertitle,re.IGNORECASE)
						if (match):
							addnewfolder( CHANNELNAME , "detail" , category , foldertitle , serie[4] , "" , "Tipo Contenido: "+serie[5] )

	else:
		# Descarga la página
		data = scrapertools.cachePage(url)
		#xbmc.output(data)

		listaseries = findcontenidos(data,"Completo","[^<]+",letras[seleccion-1],"")

		for serie in listaseries:

			addnewfolder( CHANNELNAME , "detail" , category , serie[0]+" "+serie[1]+" ["+serie[5]+"]"+" ["+serie[2]+"]"+serie[3] , serie[4] , "" , "Tipo Contenido: "+serie[5] )
					
	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )

	# Disable sorting...
	xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE)

	# End of directory...
	xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )			
def play(params,url,category):
	xbmc.output("[eitb.py] play")

	title = urllib.unquote_plus( params.get("title") )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = urllib.unquote_plus( params.get("plot") )
	xbmc.output("[eitb.py] thumbnail="+thumbnail)

	# Abre dialogo
	dialogWait = xbmcgui.DialogProgress()
	dialogWait.create( 'Descargando datos del vídeo...', title )

	# --------------------------------------------------------
	# Averigua la URL y la descripcion
	# --------------------------------------------------------
	data = scrapertools.cachePage(url)
	patron = '(/commons/pet/getMedia.php\?id\=[^&]+)&'
	matches = re.compile(patron,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	try:
		url = urlparse.urljoin(url,matches[0])
	except:
		url = ""

	xbmc.output("[eitb.py] url="+url)
	data = scrapertools.cachePage(url)
	xbmc.output("[eitb.py] data="+data)
	patron  = "<media\:content url\='([^']+)'"
	matches = re.compile(patron,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	try:
		url = urlparse.urljoin("http://www.eitb.com",matches[0])
	except:
		url = ""
	xbmc.output("[eitb.py] url="+url)

	# Playlist vacia
	playlist = xbmc.PlayList( xbmc.PLAYLIST_VIDEO )
	playlist.clear()

	# Crea la entrada y la añade al playlist
	listitem = xbmcgui.ListItem( title, iconImage="DefaultVideo.png", thumbnailImage=thumbnail )
	listitem.setInfo( "video", { "Title": title, "Plot" : plot , "Studio" : CHANNELNAME , "Genre" : category } )
	playlist.add( url, listitem )
	#xbmc.Player(xbmc.PLAYER_CORE_DVDPLAYER).play("rtmp://aialanetfs.fplive.net/aialanet?slist=Jardineria/palmera-roebelen.flv", nuevoitem)

	# Cierra dialogo
	dialogWait.close()
	del dialogWait

	# Reproduce
	xbmcPlayer = xbmc.Player( xbmc.PLAYER_CORE_AUTO )
	xbmcPlayer.play(playlist)   
def performsearch(texto):
	logger.info("[tutvsite.py] performsearch")
	url = "http://www.tu.tv/buscar/?str="+texto

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae las entradas (carpetas)
	patronvideos  = '<div class="fila clearfix">[^<]+<div.*?</div>[^<]+<a href="([^"]+)"[^<]+<img src="([^"]+)".*?<span id="txtN">(.*?)</span>.*?<span class="tmp">([^<]+)</span.*?<span id="txtN">(.*?)</span>'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	
	resultados = []

	for match in matches:
		# Titulo
		try:
			scrapedtitle = unicode( match[2], "utf-8" ).encode("iso-8859-1")
		except:
			scrapedtitle = match[2]
		scrapedtitle = scrapedtitle.replace("<b>","")
		scrapedtitle = scrapedtitle.replace("</b>","")
		scrapedtitle = scrapedtitle.strip()
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = urlparse.urljoin(url,match[1])
		scrapedplot = match[4].strip()

		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		resultados.append( [CHANNELNAME , "playfolder" , "buscador" , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot ] )
		
	return resultados
def listcategorias(params,url,category):
	xbmc.output("[yotix.py] listcategorias")

	# ------------------------------------------------------
	# Descarga la página
	# ------------------------------------------------------
	data = scrapertools.cachePage(url)
	#xbmc.output(data)

	# ------------------------------------------------------
	# Extrae las entradas de la home como carpetas
	# ------------------------------------------------------
	patron  = '<a href="(/categoria/[^"]+)">([^<]+)</a>'

	matches = re.compile(patron,re.DOTALL).findall(data)
	if DEBUG: scrapertools.printMatches(matches)

	for match in matches:
		# Atributos
		scrapedtitle = match[1]
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = ""
		scrapedplot = ""
		if (DEBUG): xbmc.output("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		xbmctools.addnewfolder( CHANNELNAME , "videolist" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
	xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
def performsearch(texto):
	xbmc.output("[yotix.py] performsearch")
	url = "http://yotix.tv/?s="+texto

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae las entradas (carpetas)
	patronvideos  = '<div class="galleryitem">[^<]+'
	patronvideos += '<h1><a title="([^"]+)"[^<]+</a></h1>[^<]+'
	patronvideos += '<a href="([^"]+)"><img src="([^"]+)"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	
	resultados = []

	for match in matches:
		# Atributos
		scrapedtitle = match[0].replace("&#8211;","-")
		scrapedurl = match[1]
		scrapedthumbnail = match[2]
		scrapedplot = ""

		if (DEBUG): xbmc.output("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		resultados.append( [CHANNELNAME , "listmirrors" , "buscador" , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot ] )
		
	return resultados
Example #14
def peliscat(item):
	logger.info("[cinegratis.py] peliscat")

	url = item.url

	itemlist = []
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple" , title="Versión original" , url="http://www.cinegratis.net/index.php?module=search&title=subtitulado"))
	itemlist.append( Item(channel=CHANNELNAME, action="listsimple" , title="Versión latina"   , url="http://www.cinegratis.net/index.php?module=search&title=latino"))

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae los items
	patronvideos  = "<td align='left'><a href='([^']+)'><img src='([^']+)' border='0'></a></td>"
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	for match in matches:
		# Atributos
		patron2 = "genero/([A-Za-z\-]+)/"
		matches2 = re.compile(patron2,re.DOTALL).findall(match[0])
		scrapertools.printMatches(matches2)
		
		scrapedtitle = matches2[0]
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = urlparse.urljoin(url,match[1])
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		itemlist.append( Item(channel=CHANNELNAME, action="listvideos" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))

	return itemlist
def searchresults2(params,url,category):
	logger.info("[cineadicto.py] SearchResult")
	
	
	# Descarga la página
	data = scrapertools.cachePage(url)
	#print data
	# Extrae las entradas (carpetas)
	patronvideos  = '<div class="poster">[^<]+<a href="([^"]+)"'                          # URL
	patronvideos += '><img src="([^"]+)" width=[^\/]+\/>'                                 # THUMBNAIL
	patronvideos += '</a>[^<]+<[^>]+>[^<]+<[^>]+>[^<]+<a href="[^"]+">([^<]+)</a>'        # TITULO 
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	for match in matches:
		# Atributos
		scrapedurl = match[0]
		
		scrapedtitle =match[2]
		scrapedtitle = scrapedtitle.replace("&#8211;","-")
		scrapedtitle = scrapedtitle.replace("&nbsp;"," ")
		scrapedthumbnail = match[1]
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

	# Añade al listado de XBMC
		xbmctools.addnewfolder( CHANNELNAME , "detail" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

	# Propiedades
	xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
	xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
def performsearch(texto):
	xbmc.output("[peliculasyonkis.py] performsearch")
	url = "http://www.peliculasyonkis.com/buscarPelicula.php?s="+texto

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae las entradas (carpetas)
	patronvideos  = '<li> <a href="([^"]+)" title="([^"]+)"><img.*?src="([^"]+)"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	
	resultados = []

	for match in matches:
		# Atributos
		scrapedtitle = match[1]
		scrapedurl = match[0]
		scrapedthumbnail = match[2]
		scrapedplot = ""
		if (DEBUG): xbmc.output("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		resultados.append( [CHANNELNAME , "detailfolder" , "buscador" , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot ] )
		
	return resultados
def searchresults(params,url,category):
	xbmc.output("[peliculasyonkis.py] searchresults")

	if xbmctools.getPluginSetting("forceview")=="true":
		xbmc.executebuiltin("Container.SetViewMode(53)")  #53=icons

	# Descarga la página
	data = scrapertools.cachePage(url)
	#xbmc.output(data)

	# Extrae las entradas (carpetas)
	#<li> <a href="http://www.peliculasyonkis.com/pelicula/las-edades-de-lulu-1990/" title="Las edades de Lulú (1990)"><img width="77" height="110" src="http://images.peliculasyonkis.com/thumbs/las-edades-de-lulu-1990.jpg" alt="Las edades de Lulú (1990)" align="right" />
	
	patronvideos  = '<li> <a href="([^"]+)" title="([^"]+)"><img.*?src="([^"]+)"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	for match in matches:
		scrapedtitle = match[1]
		scrapedurl = match[0]
		scrapedthumbnail = match[2]
		scrapedplot = ""
		if (DEBUG): xbmc.output("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
		xbmctools.addnewvideo( CHANNELNAME , "detail" , category , "Megavideo" , scrapedtitle , scrapedurl , scrapedthumbnail , scrapedplot )

	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )

	# Disable sorting...
	xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )

	# End of directory...
	xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
def catlist(params,url,category):
	logger.info("[watchanimeon.py] catlist")

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae las categorias
	patron = '<a href="(\/\?genre[^"]+)">([^<]+)</a>'
	matches = re.compile(patron,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	# Las añade a XBMC
	for match in matches:
		scrapedtitle = match[1]
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = ""
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		xbmctools.addnewfolder( CHANNELNAME , "catdetail" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

	# Asigna el título, desactiva la ordenación, y cierra el directorio
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def detail(params,url,category):
	xbmc.output("[peliculasyonkis.py] detail")

	title = urllib.unquote_plus( params.get("title") )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )

	# Descarga la página
	data = scrapertools.cachePage(url)
	#xbmc.output(data)

	# ------------------------------------------------------------------------------------
	# Busca los enlaces a los videos
	# ------------------------------------------------------------------------------------
	patronvideos  = 'href="http://www.peliculasyonkis.com/player/visor_pymeno2.*?id=([^&]+)&al=[^"]+"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	if len(matches)>0:
		scrapertools.printMatches(matches)
	
	
		id = matches[0]
		xbmc.output("[peliculasyonkis.py]  id="+id)
		dec = Yonkis.DecryptYonkis()
		url = dec.decryptID(dec.unescape(id))
		if ":" in url:
			match = url.split(":")
			url = choiceOne(match)
			if url == "": return
		print 'codigo :%s' %url
	else:
		xbmctools.alertnodisponible()
		return
	
	
	xbmctools.playvideo(CHANNELNAME,"Megavideo",url,category,title,thumbnail,plot)
def play(params,url,category):
	xbmc.output("[yotix.py] play")

	title = urllib.unquote_plus( params.get("title") )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = urllib.unquote_plus( params.get("plot") )
	server = urllib.unquote_plus( params.get("server") )

	# Abre dialogo
	dialogWait = xbmcgui.DialogProgress()
	dialogWait.create( 'Accediendo al video...', title , plot )

	if server=="Directo":
		# Descarga la página del reproductor
		# http://yotix.com/flash/UPY6KEB4/cleaner.html
		xbmc.output("url="+url)
		data = scrapertools.cachePage(url)

		patron = 'so.addParam\(\'flashvars\',\'\&file\=([^\&]+)\&'
		matches = re.compile(patron,re.DOTALL).findall(data)
		if len(matches)>0:
			url = matches[0]
	else:
		patron = 'http://yotix.tv/flash/([^\/]+)/'
		matches = re.compile(patron,re.DOTALL).findall(url)
		if len(matches)>0:
			url = matches[0]

	xbmc.output("url="+url)

	# Cierra dialogo
	dialogWait.close()
	del dialogWait

	xbmctools.playvideo(CHANNELNAME,server,url,category,title,thumbnail,plot)
def play(params,url,category):
	xbmc.output("[skai_folders.py] play")

	title = unicode( xbmc.getInfoLabel( "ListItem.Title" ), "UTF-8" )
	thumbnail = urllib.unquote_plus( params.get("thumbnail") )
	plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "UTF-8" )
	server = "Directo"
	
	# --------------------------------------------------------
	# Download page
	# --------------------------------------------------------
	
	data = scrapertools.cachePage(url)
	pattern = 'rtmp://cp67754.edgefcs.net/ondemand/mp4:content/Fakeloi/20.*?mp4'
	matches = re.compile(pattern,re.DOTALL).findall(data)
	
	xbmc.output("[skai_folders.py] matches are")
		
	
	if len(matches)==0:
		xbmctools.alerterrorpagina()
		return

	url = matches[0]
	xbmc.output("url="+url)
	
	
	plot= HTMLParser.HTMLParser().unescape(plot)+"_UNESCAPED_"

	xbmctools.playvideo(CHANNELCODE,server,url,category,title,thumbnail,plot)
def ListadoTotal(params,url,category):
	logger.info("[peliculas24h.py] ListadoTotal")

	# Descarga la página
	data = scrapertools.cachePage(url)
	#logger.info(data)

	# Patron de las entradas
	patron = "<a dir='ltr' href='([^']+)'>(.*?)</a>"
	matches = re.compile(patron,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	# Añade las entradas encontradas
	for match in matches:
		# Atributos
		scrapedtitle = match[1]
		scrapedurl = match[0]
		scrapedthumbnail = ""
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		xbmctools.addnewfolder( CHANNELNAME , "detail" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

	# Asigna el título, desactiva la ordenación, y cierra el directorio
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #23
def episodios(item):
    '''
    <div class="title"> <a class="bold" href="/series/geronimo-stilton/temporada-1/capitulo-5/">Geronimo Stilton 1x05 </a></div>
    '''
    logger.info("[cinetube.py] episodios")
    itemlist = []

    # Descarga la página
    data = scrapertools.cachePage(item.url)

    # Busca los episodios
    patronvideos  = '<div class="title"> <a class="bold" href="([^"]+)">([^<]+)</a></div>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = match[1].strip()
        scrapedurl = urlparse.urljoin(item.url,match[0])
        scrapedthumbnail = item.thumbnail
        scrapedplot = item.plot
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        itemlist.append( Item(channel=CHANNELNAME, action="findvideos", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail, plot=scrapedplot) )

    return itemlist
def categorias(params, url, category):
    logger.info("[documentalesatonline.py] novedades")

    # Descarga la página
    data = scrapertools.cachePage(url)
    # logger.info(data)

    # Extrae las entradas (carpetas)
    patronvideos = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+<span dir='ltr'>([^<]+)</span>"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = match[1] + " " + match[2]
        scrapedurl = urlparse.urljoin(url, match[0])
        scrapedthumbnail = ""
        scrapedplot = ""
        if DEBUG:
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail + "]")

        # Añade al listado de XBMC
        xbmctools.addnewfolder(
            CHANNELNAME, "novedades", category, scrapedtitle, scrapedurl, scrapedthumbnail, scrapedplot
        )

        # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
def detail(params, url, category):
    logger.info("[documentalesatonline.py] detail")

    title = unicode(xbmc.getInfoLabel("ListItem.Title"), "utf-8")
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")

    # Descarga la página
    data = scrapertools.cachePage(url)
    # logger.info(data)

    # ------------------------------------------------------------------------------------
    # Busca los enlaces a los videos
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        xbmctools.addvideo(CHANNELNAME, "Megavideo - " + video[0], video[1], category, video[2])
        # ------------------------------------------------------------------------------------

        # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
Example #26
def detail(params,url,category):
	logger.info("[divxonline.py] detail")
	title=''
	thumbnail=''
	plot=''

	try:
		title = urllib.unquote_plus( params.get("title") )
		thumbnail = urllib.unquote_plus( params.get("thumbnail") )
		plot = urllib.unquote_plus( params.get("plot") )
	except:
		pass
	# Descarga la página
	data = scrapertools.cachePage(url)
	#logger.info(data)

	# ------------------------------------------------------------------------------------
	# Busca los enlaces a los videos
	# ------------------------------------------------------------------------------------
	
	data = decryptinks(data)
	listavideos = servertools.findvideos(data)

	for video in listavideos:
		videotitle = video[0]
		url = video[1]
		server = video[2]
		xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
	# ------------------------------------------------------------------------------------

	# Cierra el directorio
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #27
def performsearch(texto):
	logger.info("[cinegratis.py] performsearch")
	url = "http://www.cinegratis.net/index.php?module=search&title="+texto

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae los items
	patronvideos  = "<a href='(index.php\?module\=player[^']+)'[^>]*>(.*?)</a>"
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	
	resultados = []

	for match in matches:
		# Atributos
		scrapedtitle = match[1]
		scrapedtitle = scrapedtitle.replace("<span class='style4'>","")
		scrapedtitle = scrapedtitle.replace("</span>","")
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = ""
		scrapedplot = ""

		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		resultados.append( [CHANNELNAME , "findvideos" , "buscador" , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot ] )
		
	return resultados
Example #28
def listsimple(item):
	logger.info("[cinegratis.py] listsimple")

	url = item.url

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae los items
	patronvideos  = "<a href='(index.php\?module\=player[^']+)'[^>]*>(.*?)</a>"
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	itemlist = []
	for match in matches:
		# Atributos
		scrapedtitle = match[1]
		scrapedtitle = scrapedtitle.replace("<span class='style4'>","")
		scrapedtitle = scrapedtitle.replace("</span>","")
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = ""
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		itemlist.append( Item(channel=CHANNELNAME, action="findvideos" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))

	return itemlist
def newlist(params,url,category):
	logger.info("[watchanimeon.py] newlist")

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae las categorias
	#<div class="newpostz"><div class="newposts"><img src="http://www.watchanimeon.com/images/thumbs/75.jpg" alt="series" /><ul><li>
	#<a href="http://www.watchanimeon.com/naruto-shippuden-episode-126/">Naruto Shippuden Episode 126</a></li></ul>
	#<span><em>More Episodes:</em> <a href="http://www.watchanimeon.com/anime/naruto-shippuden/">Naruto Shippuden</a></span><span><em>Date Published </em>September 4th, 2009</span></div><div class="clear"></div></div>
	patron  = '<div class="newpostz"><div class="newposts"><img src="([^"]+)"[^>]+><ul><li>'
	patron += '<a href="([^"]+)">([^<]+)</a></li></ul>'
	patron += '<span><em>More Episodes.</em> <a href="([^"]+)">([^<]+)</a></span><span><em>Date Published </em>([^<]+)</span></div><div class="clear"></div></div>'
	matches = re.compile(patron,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	# Las añade a XBMC
	for match in matches:
		scrapedtitle = match[2]+" ("+match[5]+")"
		scrapedurl = urlparse.urljoin(url,match[1])
		scrapedthumbnail = urlparse.urljoin(url,match[0])
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		xbmctools.addnewfolder( CHANNELNAME , "detallecapitulo" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

	# Asigna el título, desactiva la ordenación, y cierra el directorio
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def allmovieslist(params,url,category):
	logger.info("[watchanimeon.py] allmovieslist")

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae el bloque con las entradas correspondientes a esa letra
	patron = '<ul class="sip-list">(.*?)</ul>'
	matches = re.compile(patron,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	if len(matches)>0:
		data = matches[0]

	# Ahora extrae las series
	patron = '<li><a href="([^"]+)"[^>]+>([^<]+)</a></li>'
	matches = re.compile(patron,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	# Las añade a XBMC
	for match in matches:
		scrapedtitle = match[1].replace("&#8211;","-")
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = ""
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		xbmctools.addnewfolder( CHANNELNAME , "detallecapitulo" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

	# Asigna el título, desactiva la ordenación, y cierra el directorio
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #31
def detail(params, url, category):
    logger.info("[filmesonlinebr.py] detail")
    adulto = config.getSetting("enableadultmode")
    title = xbmc.getInfoLabel("ListItem.Title")
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")
    if url == "":
        url = "http://www.filmesonlinebr.com/"

    # Descarga la página
    data = scrapertools.cachePage(url)

    # Extrae las entradas (videos) #
    patron = '<div id="article">[^<]+<h2>(.*?)<h2>Comments:</h2>'
    matchtype = re.compile(patron, re.DOTALL).findall(data)
    print "matchtype :%s" % matchtype[0]
    if ("porno" or "p**nô" or "xxx") in string.lower(matchtype[0]):
        if adulto == "false":
            advertencia()
            return
        matches = megasearch(matchtype, data)
        listar(title, thumbnail, plot, matches, category)
    else:
        patron = "<h2(.*?)</h2>"
        matchtemp = re.compile(patron, re.DOTALL).findall(data)
        print matchtemp
        if len(matchtemp) > 0:
            patron = "<h2(.*?)</h2>"
            matchtemp = re.compile(patron, re.DOTALL).findall(matchtype[0])
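            # One <h2> block per season ("Temporada ..."): list the links under each
            # season heading separately; on any error fall back to searching the whole page.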
            try:
                if "Temporada " in matchtemp[0]:
                    for match in matchtemp:
                        patron = "<h2%s(.*?)</p>" % match[0]
                        matchesdata = re.compile(patron, re.DOTALL).findall(
                            matchtype[0])
                        print matchesdata[0]
                        matches = megasearch(matchtype, matchesdata[0])
                        titulo = re.sub("<[^>]+>", " ", match).replace(">", "")
                        listar(titulo, thumbnail, plot, matches, category)
            except:
                matches = megasearch(matchtype, data)
                listar(title, thumbnail, plot, matches, category)

    close_directory(params, url, category)
Example #32
def peliculas(params,url,category):
	logger.info("[italiafilm.py] peliculas")

	# Descarga la página
	data = scrapertools.cachePage(url)

	# Extrae las entradas (carpetas)
	patronvideos  = '<div class="notes">.*?<a href="([^"]+).*?<img.*?src="([^"]+)".*?title=\'([^\']+)'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	
	for match in matches:
		# Atributos
		scrapedtitle = match[2]
		scrapedurl = urlparse.urljoin(url,match[0])
		scrapedthumbnail = urlparse.urljoin(url,match[1])
		scrapedplot = ""
		
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		xbmctools.addnewfolder( CHANNELNAME , "detalle" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

	# Extrae las entradas (carpetas)
	patronvideos  = '<a href="([^"]+)">Avanti&nbsp;&#8594;'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)

	for match in matches:
		# Atributos
		scrapedtitle = "Pagina seguente"
		scrapedurl = urlparse.urljoin(url,match)
		scrapedthumbnail = ""
		scrapedplot = ""

		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		xbmctools.addnewfolder( CHANNELNAME , "peliculas" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
	xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #33
def getlistchannel(params, url, category):
    logger.info("[megalivewall.py] listchannel")
    data = scrapertools.cachePage(url)
    patron = '<div class="forms_div2">(.*?</div></div>)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    print matches
    itemlist = []
    for match in matches:
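        # Each matched <div class="forms_div2"> block is one wall channel: pull its
        # thumbnail, HD flag, video id and title when present; skip it if no id is found.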
        try:
            scrapedthumbnail = re.compile(r'<img src="(.+?)"').findall(
                match)[0]
        except:
            scrapedthumbnail = ""
        try:
            hd = re.compile(r'<div class="(hd_indicator)"').findall(match)[0]
            hd = " [HQ]"
        except:
            hd = ""
        try:
            scrapedurl = re.compile(r'<a href="\?v=(.+?)"').findall(match)[0]
        except:
            continue
        try:
            scrapedtitle = re.compile(
                r'<div class="bl_thumb_fl1"><[^>]+>(.+?)</').findall(match)[0]
        except:
            scrapedtitle = ""

        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        itemlist.append(
            Item(channel=CHANNELNAME,
                 action="play",
                 title=scrapedtitle + hd,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 show=scrapedtitle,
                 folder=False,
                 context=1))
    return itemlist
Example #34
def categorias(params,url,category):
    logger.info("[sevillista.py] categorias")
    
    # Descarga la página
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # Extrae las entradas
    '''
    <h2>Categorías</h2>
    <div class='widget-content list-label-widget-content'>
    <ul>
    <li>
    <a dir='ltr' href='http://pelis-sevillista56.blogspot.com/search/label/Acci%C3%B3n'>Acción</a>
    <span dir='ltr'>(246)</span>
    </li>
    <li>
    <a dir='ltr' href='http://pelis-sevillista56.blogspot.com/search/label/Aventuras'>Aventuras</a>
    <span dir='ltr'>(102)</span>
    </li>
    <li>
    '''
    patron  = "<h2>Categor[^<]+</h2>[^<]+"
    patron += "<div class='widget-content list-label-widget-content'>(.*?)</div>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        data = matches[0]
        patron  = "<a dir='ltr' href='([^']+)'>([^<]+)</a>[^<]+"
        patron += "<span dir='ltr'>([^<]+)</span>"
        matches = re.compile(patron,re.DOTALL).findall(data)

        for match in matches:
            scrapedtitle = match[1]+" "+match[2]
            scrapedurl = urlparse.urljoin(url,match[0])
            scrapedthumbnail = ""
            scrapedplot = ""
            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
            xbmctools.addnewfolder( CHANNELNAME , "novedades" , category , scrapedtitle , scrapedurl , scrapedthumbnail , scrapedplot )

    # Cierra el directorio
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #35
def searchresults(params, url, category):
    xbmc.output("[goear.py] searchresults")

    # Descarga la página
    data = scrapertools.cachePage(url)
    #xbmc.output(data)

    # Extrae las entradas (carpetas)
    #<div style="padding-left:1px;"><a title="Escuchar Leaves de b*witched" href="listen/ad20ab3/leaves-b*witched" class="b1">Leaves</a></div><div style="float:right"><a target="_blank" onclick="window.open('http://www.goear.com/listenwin.php?v=ad20ab3','Escuchar Leaves','width=500,height=350,resizable=yes')"><img src="http://www.goear.com/img2/newwin.gif"></a></div><div style="color:#978080;font-size:11px;padding-left:13px;">b*witched, b*witched - leaves </div><div>&nbsp;</div>
    patronvideos = '<div style="padding-left:1px;">'
    patronvideos += '<a title="[^"]+" href="([^"]+)" class="b1">([^<]+)</a>'
    patronvideos += '</div>'
    patronvideos += '<div style="[^"]+"><a target="_blank" onclick="[^"]+"><img src="[^"]+"></a></div><div style="[^"]+">([^<]+)</div>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = match[1] + " - " + match[2]
        scrapedtitle = scrapedtitle.strip()
        scrapedtitle = scrapedtitle.replace("\n", " ")
        scrapedtitle = scrapedtitle.replace("\r", " ")
        scrapedurl = urlparse.urljoin(url, match[0])
        if (DEBUG):
            xbmc.output("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "]")

        # Añade al listado de XBMC
        addnewfile("play", scrapedtitle, scrapedurl)

    patronvideos = '<div class="flechas" style="float:right;"><a href="([^"]+)"><strong>Siguiente.*?'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) > 0:
        scrapedtitle = "Pagina siguiente"
        scrapedurl = urlparse.urljoin(url, matches[0])

        addnewfolder("searchresults", scrapedtitle, scrapedurl)

    # Cierra el directorio
    xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=category)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
Example #36
def checkforupdates():
    logger.info("Stefano.core.updater checkforupdates")

    # Lee la versión remota
    logger.info("Stefano.core.updater Verificando actualizaciones...")
    logger.info("Stefano.core.updater Version remota: " + REMOTE_VERSION_FILE)
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)

    # numero_version_publicada = scrapertools.find_single_match(data, "<version>([^<]+)</version>").strip()

    req = urllib2.urlopen(
        "http://www.stefanoaddon.info/Thepasto/ver.php").read()
    json_data = json.loads(str.encode(req, "utf-8"))
    numero_version_publicada = json_data["current"]

    tag_version_publicada = numero_version_publicada.replace(".",
                                                             "").ljust(4, '0')

    # tag_version_publicada = scrapertools.find_single_match(data, "<tag>([^<]+)</tag>").strip()
    logger.info("Stefano.core.updater version remota=" +
                tag_version_publicada + " " + numero_version_publicada)

    try:
        numero_version_publicada = int(tag_version_publicada)
    except:
        numero_version_publicada = 0
        import traceback
        logger.info(traceback.format_exc())

    # Lee la versión local
    numero_version_local = get_current_plugin_version()
    logger.info("Stefano.core.updater checkforupdates version local=" +
                str(numero_version_local))

    hayqueactualizar = numero_version_publicada > numero_version_local
    logger.info("Stefano.core.updater checkforupdates -> hayqueactualizar=" +
                repr(hayqueactualizar))

    # Si hay actualización disponible, devuelve la Nueva versión para que cada plataforma se encargue de mostrar los avisos
    if hayqueactualizar:
        return tag_version_publicada
    else:
        return None
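For reference, the tag computed in checkforupdates() just strips the dots from the published version string and right-pads the result to four characters before it is compared as an integer; a small worked example (the version string here is made up):
numero_version_publicada = "2.1"
tag_version_publicada = numero_version_publicada.replace(".", "").ljust(4, "0")   # "2100"
print int(tag_version_publicada)   # 2100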
def update(item):
    logger.info("streamondemand.core.updater update")

    remotefilename = REMOTE_FILE
    localfilename = LOCAL_FILE + item.version + ".zip"

    download_and_install(remotefilename, localfilename)

    # Lee la versión remota
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)
    numero_version_publicada = scrapertools.find_single_match(data, "<version>([^<]+)</version>").strip()
    try:
        numero_version_publicada = int(numero_version_publicada)
    except:
        numero_version_publicada = 0
        import traceback
        logger.info(traceback.format_exc())

    set_current_plugin_version(numero_version_publicada)
Example #38
def stepinto(
        url, data,
        pattern):  # expand a page adding "next page" links given some pattern
    # Obtiene el trozo donde están los links a todas las páginas de la categoría
    match = re.search(pattern, data)
    trozo = match.group(1)
    #logger.info(trozo)

    # carga todas las paginas juntas para luego extraer las urls
    patronpaginas = '<a href="([^"]+)"'
    matches = re.compile(patronpaginas, re.DOTALL).findall(trozo)
    #scrapertools.printMatches(matches)
    res = ''
    for match in matches:
        urlpage = urlparse.urljoin(url, match)
        #logger.info(match)
        #logger.info(urlpage)
        res += scrapertools.cachePage(urlpage)
    return res
Example #39
def buscacategorias(params, url, category):
    logger.info("[redestv.py] buscacategorias")
    data = scrapertools.cachePage(url)
    #href='http://www.redestv.com/category/arte/' title="ARTE">ARTE</a></li><li><a
    #href="/index.php?option=com_content&amp;view=category&amp;layout=blog&amp;id=1&amp;Itemid=9" title="Biotecnolog\xc3\xada y Salud"
    patronvideos = 'href="/index\.php\?(option=com_content\&amp;view=category.*?)" title="(.+?)"'
    #pprint(data)
    #logger.info("web"+data)
    matches = re.compile(patronvideos).findall(data)
    #if DEBUG:
    scrapertools.printMatches(matches)
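    # Each match is a (querystring, category title) pair; one folder entry is added per category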
    if len(matches) > 0:
        for i in range(len(matches)):
            xbmctools.addnewfolder(CHANNELNAME, "parsewebcategorias", category,
                                   matches[i][1], matches[i][0], "", "")
    xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=category)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
Example #40
def play(params, url, category):
    logger.info("[tutvsite.py] play")

    title = unicode(xbmc.getInfoLabel("ListItem.Title"), "utf-8")
    thumbnail = xbmc.getInfoImage("ListItem.Thumb")
    plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")
    server = params["server"]
    logger.info("[tutvsite.py] thumbnail=" + thumbnail)
    logger.info("[tutvsite.py] server=" + server)

    # Descarga la página de detalle y extrae el vídeo
    data = scrapertools.cachePage(url)
    listavideos = servertools.findvideos(data)
    if len(listavideos) > 0:
        url = listavideos[0][1]
    logger.info("[tutvsite.py] url=" + url)

    xbmctools.playvideo(CHANNELNAME, server, url, category, title, thumbnail,
                        plot)
Example #41
def getcapitulos(params,url,category):
    logger.info("[myhentaitube.py] getcapitulos")

    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    plot = urllib.unquote_plus( params.get("plot") )

    # Descarga la pagina
    data = scrapertools.cachePage(url)
    #logger.info(data)
    
    # Busca el argumento
    patronvideos  = '<div class="ficha_des">(.*?)</div>'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if len(matches)>0:
        plot = scrapertools.htmlclean(matches[0])
        logger.info("plot actualizado en detalle");
    else:
        logger.info("plot no actualizado en detalle");
    
    # Busca los enlaces a los mirrors, o a los capitulos de las series...
    '''
    <h3 style="text-align: center;">
    <a href="/index.php?option=com_content&amp;view=article&amp;id=8&amp;Itemid=2">CAPITULO 1
    </a></h3>
    '''
    patronvideos = '<a href="(/index.php[^"]+view=article[^"]+id=[^"]+)">([^<]+)<'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    
    itemlist = []
    for match in matches:
        # Titulo
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin(url,match[0])
        scrapedthumbnail = thumbnail
        scrapedplot = plot
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Añade al listado de XBMC
        itemlist.append( Item(channel=CHANNELNAME, action="play", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , folder=False) )

    return itemlist
Example #42
def performsearch(texto):
    logger.info("[cine15.py] performsearch")
    url = "http://www.cine15.com/?s=" + texto + "&x=0&y=0"

    # Descarga la página
    data = scrapertools.cachePage(url)

    # Extrae las entradas (carpetas)
    patronvideos = '<div class="videoitem">[^<]+'
    patronvideos += '<div class="ratings">[^<]+'
    patronvideos += '<div id="post-ratings[^>]+><img[^>]+><img[^>]+><img[^>]+><img[^>]+><img[^>]+></div>[^<]+'
    patronvideos += '<div id="post-ratings[^>]+><img[^>]+>&nbsp;Loading ...</div>[^<]+'
    patronvideos += '</div>[^<]+'
    patronvideos += '<div class="comments">[^<]+</div>[^<]+'
    patronvideos += '<div class="thumbnail">[^<]+'
    patronvideos += '<a href="([^"]+)" title="([^"]+)"><img style="background: url\(([^\)]+)\)" [^>]+></a>[^<]+'
    patronvideos += '</div>[^<]+'
    patronvideos += '<h2 class="itemtitle"><a[^>]+>[^<]+</a></h2>[^<]+'
    patronvideos += '<p class="itemdesc">([^<]+)</p>[^<]+'
    patronvideos += '<small class="gallerydate">([^<]+)</small>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    resultados = []

    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin(url, match[0])
        scrapedthumbnail = urlparse.urljoin(url, match[2])
        scrapedplot = match[3]

        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        # Add it to the XBMC listing
        resultados.append([
            CHANNELNAME, "detail", "buscador", scrapedtitle, scrapedurl,
            scrapedthumbnail, scrapedplot
        ])

    return resultados
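A short usage sketch for Example #42, assuming performsearch and xbmctools are importable from the channel module; list_search_results is a hypothetical wrapper that just renders each result row, whose layout is the one built above:

def list_search_results(texto):
    # Each performsearch row is [channel, action, category, title, url, thumbnail, plot]
    for row in performsearch(texto):
        channel, action, category, title, url, thumbnail, plot = row
        xbmctools.addnewfolder(channel, action, category, title, url, thumbnail, plot)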
Example #43
0
def singleletterserieslist(params, url, category):
    logger.info("[watchanimeon.py] singleletterserieslist")

    # The title is the chosen letter
    letra = urllib.unquote_plus(params.get("title"))

    # Download the page
    data = scrapertools.cachePage(url)

    # Extract the block with the entries for that letter
    patron = '<h3 class="postlist-title"><a name="' + letra + '"></a><p class="sep">' + letra + '</p></h3><ul class="postlist">(.*?)</ul>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        data = matches[0]

    # Now extract the series
    patron = '<li><a href="([^"]+)">([^<]+)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    # Add them to XBMC
    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin(url, match[0])
        scrapedthumbnail = ""
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        # Add it to the XBMC listing
        xbmctools.addnewfolder(CHANNELNAME, "detalleserie", category,
                               scrapedtitle, scrapedurl, scrapedthumbnail,
                               scrapedplot)

    # Set the title, disable sorting and close the directory
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)
    xbmcplugin.addSortMethod(handle=pluginhandle,
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
Example #44
0
def searchresults(params, url, category):
    logger.info("[sesionvip.py] searchresults")

    # Download the page
    data = scrapertools.cachePage(url)
    patronvideos = '<div class="entry">.*?'
    patronvideos += '<a href="([^"]+)" rel="bookmark">([^<]+)</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for match in matches:
        # Title
        scrapedtitle = match[1]
        if not scrapedtitle.startswith("Descargar"):
            # Strip all the SEO prefixes
            scrapedtitle = xbmctools.unseo(scrapedtitle)
            # URL
            scrapedurl = urlparse.urljoin(url, match[0])
            # Thumbnail
            scrapedthumbnail = ""
            # Plot
            scrapedplot = ""

            # Debugging
            if (DEBUG):
                logger.info("scrapedtitle=" + scrapedtitle)
                logger.info("scrapedurl=" + scrapedurl)
                logger.info("scrapedthumbnail=" + scrapedthumbnail)

            # Add it to the XBMC listing
            xbmctools.addthumbnailfolder(CHANNELNAME, scrapedtitle, scrapedurl,
                                         scrapedthumbnail, "listmirrors")

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
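The three xbmcplugin calls that close Example #44 (and most of the list functions in this document) always appear together. A minimal sketch of that boilerplate factored into one helper, assuming sys and xbmcplugin are already imported by the channel as in the examples; close_directory is a hypothetical name:

def close_directory(category):
    handle = int(sys.argv[1])
    # Label (top-right), disable sorting, and close the directory in one call
    xbmcplugin.setPluginCategory(handle=handle, category=category)
    xbmcplugin.addSortMethod(handle=handle, sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=handle, succeeded=True)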
Example #45
0
def listmirrors(params, url, category):
    logger.info("[sesionvip.py] detail")

    title = params.get("title")
    thumbnail = params.get("thumbnail")
    logger.info("[sesionvip.py] title=" + title)
    logger.info("[sesionvip.py] thumbnail=" + thumbnail)
    '''
	# Download the page and extract the link to the next page
	data = scrapertools.cachePage(url)
	patronvideos  = '<p style="text-align: center;">.*?<a href\="(http\://www.sesionvip.com/[^"]+)"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	#logger.info(data)

	if len(matches)==0:
		xbmctools.alertnodisponible()
		return

	# Download the next page and extract the link to the mirrors
	url = matches[0]
	'''
    data = scrapertools.cachePage(url)

    # ------------------------------------------------------------------------------------
    # Find the links to the videos
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        xbmctools.addvideo(CHANNELNAME, video[0], video[1], category, video[2])
    # ------------------------------------------------------------------------------------

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle,
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
Example #46
0
def listaActoresMasBuscados(params, url, category):
    logger.info("[series21.py] listaActoresMasBuscados")
    extra = "actor"
    url1 = "http://www.series21.com"
    # Download the page
    data = scrapertools.cachePage(url)
    patronvideos = 'Los m&aacute;s buscados:    <br />(.*?)</div>'
    matches1 = re.compile(patronvideos, re.DOTALL).findall(data)
    patronvideos = '<a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(matches1[0])
    scrapertools.printMatches(matches)
    for match in matches:
        # Title
        scrapedtitle = match[1]
        # URL
        scrapedurl = urlparse.urljoin(url1, match[0])
        # Thumbnail
        scrapedthumbnail = ""

        # Plot
        scrapedplot = "Busca las Series existentes de este Actor ó Actriz"

        # Debugging
        if (DEBUG):
            logger.info("scrapedtitle=" + scrapedtitle)
            logger.info("scrapedurl=" + scrapedurl)
            logger.info("scrapedthumbnail=" + scrapedthumbnail)

        # Add it to the XBMC listing
        xbmctools.addnewfolderextra(CHANNELNAME, "listsimple", category,
                                    scrapedtitle, scrapedurl, scrapedthumbnail,
                                    scrapedplot, extra)

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle,
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
Example #47
0
def listalfabetica(params, url, category):

	patronvideos = "<div class='post-header-line-1(.*?)post-footer"
	# Descarga la p�gina
	data = scrapertools.cachePage(url)
	#logger.info(data)
        matches = re.compile(patronvideos,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)       
        if DEBUG:
            for match in matches:
                #logger.info("videos Match " +match)
	        buscaporletra(params,url,category,match)
	# Label (top-right)...
	xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )

	# Disable sorting...
	xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )

	# End of directory...
	xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
Example #48
0
def checkforupdates():
	xbmc.output("[updater.py] checkforupdates")
	try:
		# Download the file with the version from the web
		xbmc.output("Verificando actualizaciones...")
		xbmc.output("Version remota: "+REMOTE_VERSION_FILE)
		data = scrapertools.cachePage( REMOTE_VERSION_FILE )
		xbmc.output("xml descargado="+data)
		patronvideos  = '<tag>([^<]+)</tag>'
		matches = re.compile(patronvideos,re.DOTALL).findall(data)
		scrapertools.printMatches(matches)
		versiondescargada = matches[0]
		xbmc.output("version descargada="+versiondescargada)
		
		# Read the file with the installed version
		localFileName = LOCAL_VERSION_FILE
		xbmc.output("Version local: "+localFileName)
		infile = open( localFileName )
		data = infile.read()
		infile.close()
		xbmc.output("xml local="+data)
		matches = re.compile(patronvideos,re.DOTALL).findall(data)
		scrapertools.printMatches(matches)
		versionlocal = matches[0]
		xbmc.output("version local="+versionlocal)

		if (versiondescargada > versionlocal):
			xbmc.output("actualizacion disponible")
			
			# Add it to the XBMC listing
			listitem = xbmcgui.ListItem( xbmc.getLocalizedString( 30600 )+" "+versiondescargada, iconImage=os.path.join(IMAGES_PATH, "Crystal_Clear_action_info.png"), thumbnailImage=os.path.join(IMAGES_PATH, "Crystal_Clear_action_info.png") )
			itemurl = '%s?action=update&version=%s' % ( sys.argv[ 0 ] , versiondescargada )
			xbmcplugin.addDirectoryItem( handle = int(sys.argv[ 1 ]), url = itemurl , listitem=listitem, isFolder=True)
			
			# Notify with a popup
			dialog = xbmcgui.Dialog()
			dialog.ok( xbmc.getLocalizedString( 30601 ) + " "+versiondescargada , xbmc.getLocalizedString( 30602 ))

	except:
		xbmc.output("No se han podido verificar actualizaciones...")
		print "ERROR: %s (%d) - %s" % ( sys.exc_info()[ 2 ].tb_frame.f_code.co_name, sys.exc_info()[ 2 ].tb_lineno, sys.exc_info()[ 1 ], )
Example #49
0
def seriesyonkis(item):
    logger.info("[series.py] findvideos")
    itemlist = []
    canal = item.channel
    servidor = item.server
    titulo = item.title

    data = scrapertools.cachePage(item.url)

    patronvideos = 'href="http://www.seriesyonkis.com/player/visor_([^\.]+).php?(.*?)id=([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)

    if len(matches) == 0:
        print "No hay videos"
        return ""
    else:
        # Only tested with megavideo
        for match in matches:
            if match[0] in [
                    "pymeno2", "pymeno3", "pymeno4", "pymeno5", "pymeno6"
            ]:
                id = match[2]
                print "original " + id
                logger.info("[seriesyonkis.py]  id=" + id)
                dec = Yonkis.DecryptYonkis()
                id = dec.decryptID_series(dec.unescape(id))
                print "decodificada " + id
                # Append the episode
                itemlist.append(
                    Item(channel=item.channel,
                         title=item.server,
                         action="play",
                         url=id,
                         server=item.server,
                         folder=True))
                print itemlist
            else:
                pass

    return itemlist
def update_channel(channel_name):
    logger.info(channel_name)

    import channeltools
    remote_channel_url, remote_version_url = channeltools.get_channel_remote_url(channel_name)
    local_channel_path, local_version_path, local_compiled_path = channeltools.get_channel_local_path(channel_name)

    # Remote version
    try:
        data = scrapertools.cachePage(remote_version_url)
        logger.info("remote_data=" + data)
        remote_version = int(scrapertools.find_single_match(data, '<version>([^<]+)</version>'))
        addon_condition = int(scrapertools.find_single_match(data, "<addon_version>([^<]*)</addon_version>")
                              .replace(".", "").ljust(len(str(versiontools.get_current_plugin_version())), '0'))
    except:
        remote_version = 0
        addon_condition = 0

    logger.info("remote_version=%d" % remote_version)

    # Local version
    if os.path.exists(local_version_path):
        infile = open(local_version_path)
        from core import jsontools
        data = jsontools.load(infile.read())
        infile.close()

        local_version = data.get('version', 0)
    else:
        local_version = 0

    logger.info("local_version=%d" % local_version)

    # Check whether it has changed
    updated = (remote_version > local_version) and (versiontools.get_current_plugin_version() >= addon_condition)

    if updated:
        logger.info("downloading...")
        download_channel(channel_name)

    return updated
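A minimal usage sketch for update_channel, assuming this updater module and its logger are importable; the channel names below are placeholders:

for channel_name in ["capitancinema", "seriespepito"]:
    if update_channel(channel_name):
        logger.info(channel_name + " was updated")
    else:
        logger.info(channel_name + " is already up to date")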
Example #51
0
def lastepisodelist(item):
    logger.info("[seriespepito.py] lastepisodeslist")

    # Descarga la página
    data = scrapertools.cachePage(item.url)
    #logger.info(data)

    # Extrae las entradas (carpetas)
    patron = '<td valign="top" align="center" width="33%">[^<]+'
    patron += '<a href="([^"]+)"[^>]+>[^<]+'
    patron += "<img src='([^']+)'.*?<br />[^<]+"
    patron += '<a.*?title="([^"]+).*?'
    patron += '<a.*?title="([^"]+).*?'
    patron += '<a.*?title="([^"]+)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    itemlist = []
    for match in matches:
        scrapedtitle = match[2] + " - " + match[3] + " - " + match[4]
        scrapedurl = match[0]
        scrapedthumbnail = match[1]
        scrapedplot = ""

        # Adjust the encoding to UTF-8
        scrapedtitle = unicode(scrapedtitle, "iso-8859-1",
                               errors="replace").encode("utf-8")

        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        itemlist.append(
            Item(channel=CHANNELNAME,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))

    return itemlist
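Example #51 re-encodes each title from iso-8859-1 to utf-8 inline. A small sketch of that conversion as a reusable helper, under the assumption that the scraped page really is iso-8859-1; the helper name to_utf8 is hypothetical:

def to_utf8(text, source_encoding="iso-8859-1"):
    # Decode with replacement so malformed bytes never raise, then re-encode as utf-8
    if isinstance(text, unicode):
        return text.encode("utf-8")
    return unicode(text, source_encoding, errors="replace").encode("utf-8")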
Example #52
0
def GetFrom_Trailersdepeliculas(titulovideo):
    print "[trailertools.py] Modulo: GetFrom_Trailersdepeliculas(titulo = %s)" % titulovideo
    devuelve = []

    titulo = LimpiarTitulo(titulovideo)
    # ---------------------------------------
    #  Look for the video on www.trailerdepeliculas.org;
    #  the search on this page is done because it sometimes has
    #  the trailers in English that do not exist in Spanish
    # ----------------------------------------
    c = 0
    url1 = "http://www.trailersdepeliculas.org/"
    url = "http://www.trailersdepeliculas.org/buscar.html"
    urldata = getpost(url, {'busqueda': titulo})
    #xbmc.output("post url  :  "+urldata)
    patronvideos = "<td><h2><a href='([^']+)'>(.*?)<.*?src='([^']+)'.*?"
    matches = re.compile(patronvideos, re.DOTALL).findall(urldata)
    if len(matches) > 0:
        patronvideos = 'movie" value="http://www.youtube.com([^"]+)"'
        for match in matches:
            xbmc.output(
                "Trailers encontrados en www.trailerdepeliculas.org :  " +
                match[1])
            if titulo in (string.lower(LimpiarTitulo(match[1]))):
                urlpage = urlparse.urljoin(url1, match[0])
                thumbnail = urlparse.urljoin(url1, match[2])
                data = scrapertools.cachePage(urlpage)
                xbmc.output("Trailer elegido :  " + match[1])
                matches2 = re.compile(patronvideos, re.DOTALL).findall(data)
                for match2 in matches2:
                    xbmc.output("link yt del Trailer encontrado :  " + match2)
                    c = c + 1
                    devuelve.append([match2, match[1], thumbnail, ""])
                    #scrapedthumbnail = match[2]
                    #scrapedtitle     = match[1]
                    #scrapedurl       = match[0]

        xbmc.output(" lista de links encontrados U " + str(len(match)))
    print '%s Trailers encontrados en Modulo: GetFrom_Trailersdepeliculas()' % str(
        c)
    return devuelve
Example #53
0
def findvideos(item):
    logger.info("[documentalesyonkis.py] detail")

    itemlist = []
    # ------------------------------------------------------------------------------------
    # Find the links to the videos
    # ------------------------------------------------------------------------------------
    data = scrapertools.cachePage(item.url)
    patroniframe = '<iframe src="(http:\/\/documentales\.videosyonkis\.com.*?id=(.*?))" onLoad.*'
    matches = re.compile(patroniframe, re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)

    if (len(matches) > 0):
        id = matches[0][1]
        logger.info("[documentalesyonkis.py] detail id=" + id)
        if "&" in id:
            ids = id.split("&")
            id = ids[0]
        dec = Yonkis.DecryptYonkis()
        id = dec.decryptALT(dec.charting(dec.unescape(id)))
        logger.info("[documentalesyonkis.py] detail id=" + id)
        url = id
        itemlist.append(
            Item(channel=CHANNELNAME,
                 action="play",
                 title=item.title,
                 url=url,
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 server="Megavideo",
                 folder=False))
    else:
        itemlist.append(
            Item(channel=CHANNELNAME,
                 action="",
                 title="VIDEO NO DISPONIBLE",
                 url="",
                 thumbnail="",
                 plot=""))

    return itemlist
Example #54
0
def novedades(params,url,category):
    logger.info("[sevillista.py] novedades")
    
    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # Extract the entries
    patron  = "<div class='post hentry'>[^<]+"
    patron += "<a name='[^']+'></a>[^<]+"
    patron += "<h3 class='post-title entry-title'>[^<]+"
    patron += "<a href='([^']+)'>([^<]+)</a>[^<]+"
    patron += "</h3>.*?"
    patron += '<img.*?src="([^"]+)"'

    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = scrapertools.unseo(match[1])
        scrapedurl = urlparse.urljoin(url,match[0])
        scrapedthumbnail = urlparse.urljoin(url,match[2])
        scrapedplot = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        xbmctools.addnewfolder( CHANNELNAME , "detail" , category , scrapedtitle , scrapedurl , scrapedthumbnail , scrapedplot )

    patron  = "<div id='blog-pager'>.*?a href='([^']+)' id='[^']+' title='Entradas antiguas'>"
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)>0:
        scrapedtitle = "!Página siguiente"
        scrapedurl = urlparse.urljoin(url,matches[0])
        scrapedthumbnail = ""
        scrapedplot = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        xbmctools.addnewfolder( CHANNELNAME , "novedades" , category , scrapedtitle , scrapedurl , scrapedthumbnail , scrapedplot )

    # Close the directory
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #55
0
def videolist(item):
	logger.info("[yotix.py] videolist")

	# Download the page
	data = scrapertools.cachePage(item.url)
	#logger.info(data)

	# Extract the home page entries as folders
	patron  = '<div class="galleryitem">[^<]+'
	patron += '<h1><a title="([^"]+)"[^<]+</a></h1>[^<]+'
	patron += '<a href="([^"]+)"><img src="([^"]+)"'
	matches = re.compile(patron,re.DOTALL).findall(data)
	if DEBUG: scrapertools.printMatches(matches)

	itemlist = []

	for match in matches:
		scrapedtitle = match[0].replace("&#8211;","-")
		scrapedurl = match[1]
		scrapedthumbnail = match[2]
		scrapedplot = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		itemlist.append( Item(channel=CHANNELNAME, action="listmirrors" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))

	# Extract the next page
	patron = '<a href="([^"]+)" >&raquo;</a>'
	matches = re.compile(patron,re.DOTALL).findall(data)
	if DEBUG: scrapertools.printMatches(matches)

	for match in matches:
		scrapedtitle = "Pagina siguiente"
		scrapedurl = match
		scrapedthumbnail = ""
		scrapeddescription = ""
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

		itemlist.append( Item(channel=CHANNELNAME, action="videolist" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))

	return itemlist
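A short sketch of how the "Pagina siguiente" item in Example #55 could be followed to flatten all pages into one list, assuming videolist behaves as above and that Item exposes the action attribute it was constructed with; crawl_all_pages and the page cap are hypothetical:

def crawl_all_pages(item, max_pages=10):
    result = []
    current = item
    for _ in range(max_pages):
        page_items = videolist(current)
        # Keep the video entries, remember the "next page" entry if there is one
        result.extend([it for it in page_items if it.action != "videolist"])
        next_pages = [it for it in page_items if it.action == "videolist"]
        if not next_pages:
            break
        current = next_pages[0]
    return result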
Example #56
0
def searchresults(params, Url, category):
    logger.info("[peliculasyonkis.py] searchresults")

    buscador.salvar_busquedas(params, Url, category)

    url = "http://www.peliculasyonkis.com/buscarPelicula.php?s=" + Url.replace(
        " ", "+")

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # Extract the entries (folders)
    #<li> <a href="http://www.peliculasyonkis.com/pelicula/las-edades-de-lulu-1990/" title="Las edades de Lulú (1990)"><img width="77" height="110" src="http://images.peliculasyonkis.com/thumbs/las-edades-de-lulu-1990.jpg" alt="Las edades de Lulú (1990)" align="right" />

    patronvideos = '<li> <a href="([^"]+)" title="([^"]+)"><img.*?src="([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = match[0]
        scrapedthumbnail = match[2]
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        xbmctools.addnewvideo(CHANNELNAME, "detail", category, "Megavideo",
                              scrapedtitle, scrapedurl, scrapedthumbnail,
                              scrapedplot)

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
Example #57
0
def documentaldeldia(params, url, category):
    #	list(params,url,category,patronvideos)
    logger.info("[documentariestv.py] Documentaldeldia")

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    patronvideos = 'Now Playing: <a href="([^"]+)">([^<]+)</a>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for match in matches:
        # Title
        scrapedtitle = acentos(match[1])

        # URL
        scrapedurl = match[0]

        # Thumbnail
        scrapedthumbnail = ""

        # scrapedplot
        scrapedplot = ""
        if (DEBUG):
            logger.info("scrapedtitle=" + scrapedtitle)
            logger.info("scrapedurl=" + scrapedurl)
            logger.info("scrapedthumbnail=" + scrapedthumbnail)

        xbmctools.addnewfolder(CHANNELNAME, "detail", category, scrapedtitle,
                               scrapedurl, scrapedthumbnail, scrapedplot)
    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
def detail(params, url, category):
    logger.info("[peliculashd.py] detail")

    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = urllib.unquote_plus(params.get("plot"))

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    patron = '<span class="title">([^<]+)</span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        title = matches[0]

    # ------------------------------------------------------------------------------------
    # Find the links to the videos
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        videotitle = video[0]
        url = video[1]
        server = video[2]
        if server != "Megaupload":
            xbmctools.addnewvideo(__channel__, "play", category, server,
                                  title.strip() + " - " + videotitle, url,
                                  thumbnail, plot)
    # ------------------------------------------------------------------------------------

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle,
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
Example #59
0
def performsearch(texto):
    logger.info("[edumanmovies.py] performsearch")
    url = "http://edumanmovies.com/?s=" + texto

    # Download the page
    data = scrapertools.cachePage(url)

    # Extract the entries (folders)
    patronvideos = '<div class="item">[^<]+'
    patronvideos += '<div class="thumbwrap">[^<]+'
    patronvideos += '<div class="thumbnail" style="background. url\(([^\)]+)\) top left no-repeat.">[^<]+'
    patronvideos += '<a href="([^"]+)" Title="[^"]+"><img[^>]+></a>[^<]+'
    patronvideos += '</div>[^<]+'
    patronvideos += '</div>[^<]+'
    patronvideos += '<div class="content">[^<]+'
    patronvideos += '<h2><a href="[^"]+">([^<]+)</a></h2>[^<]+'
    patronvideos += '<p>([^<]+)<'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    resultados = []

    for match in matches:
        # Attributes

        scrapedtitle = match[2]
        scrapedurl = urlparse.urljoin(url, match[1])
        scrapedthumbnail = urlparse.urljoin(url, match[0])
        scrapedplot = match[3]
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        # Add it to the XBMC listing
        resultados.append([
            CHANNELNAME, "detail", "buscador", scrapedtitle, scrapedurl,
            scrapedthumbnail, scrapedplot
        ])

    return resultados
Example #60
0
def categorylist(params, url, category):
    xbmc.output("[kideoschannel.py] categorylist")

    # Load the current page
    data = scrapertools.cachePage("http://www.kideos.com/")

    # Extract the category entries
    patron = '<a href="(http://kideos.com/category/[^"]+)"><h1>([^<]+)</h1></a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for match in matches:
        # Title
        scrapedtitle = match[1]
        # URL
        scrapedurl = urlparse.urljoin(url, match[0])
        # Thumbnail
        scrapedthumbnail = ""
        # Plot
        scrapedplot = ""

        # Debugging
        if (DEBUG):
            xbmc.output("scrapedtitle=" + scrapedtitle)
            xbmc.output("scrapedurl=" + scrapedurl)
            xbmc.output("scrapedthumbnail=" + scrapedthumbnail)

        # Add it to the XBMC listing
        xbmctools.addthumbnailfolder(CHANNELNAME, scrapedtitle, scrapedurl,
                                     scrapedthumbnail, "videolist")

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle,
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)