def resuelve(url, login, password=None):

	data = scrapertools.cachePage(url)
	password_data = re.search('filepassword', data)
	if password_data is not None:
		teclado = password_mega(password)
		if teclado is not None:
			data = scrapertools.cachePage(url, post="filepassword="+teclado)
		else:
			return None
	enlace = get_filelink(data)
	

	if enlace is None:
		return None
	else:
		if login == 'premium':
			espera = handle_wait(1,'Megaupload','Cargando video.')	
		elif login == 'gratis':
			espera = handle_wait(26,'Megaupload','Cargando video.')	
		else:
			espera = handle_wait(46,'Megaupload','Cargando video.')
	
		if espera:
			return enlace
		else:
			advertencia = xbmcgui.Dialog()
			resultado = advertencia.ok('pelisalacarta','Se canceló la reproducción')		
			return None
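# Note: password_mega() is called in resuelve() above but is not part of this
# listing. A minimal sketch of such a helper, assuming the standard Kodi
# xbmc.Keyboard API; its exact behaviour is inferred from the call site and
# is an assumption, not the original implementation.
def password_mega(password=None):
    import xbmc
    # Reuse the password if one was passed in
    if password:
        return password
    # Otherwise ask the user for the file password
    teclado = xbmc.Keyboard("", "Introduce la password del fichero")
    teclado.doModal()
    if teclado.isConfirmed():
        return teclado.getText()
    return None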
def play(item):
    logger.info("[mundonick.py] play video: " + item.url)
    itemlist=[]

    permalink = 'uri=mgid:uma:video:mundonick.com:' + item.url
	
    data = scrapertools.cachePage(__urlconfig__ + permalink)
    if data == '':
        return itemlist
    #logger.info(data)

    import xml.etree.ElementTree as xmlet
    configuration = xmlet.fromstring(data)
	
    swfurl = configuration.find('.//player//URL').text
    feedurl = configuration.find('.//player//feed').text
	
    data = scrapertools.cachePage(feedurl)
    #logger.info(data)
    
    feed = xmlet.fromstring(data)
    description = feed.find('.//item/description').text.encode("utf8","ignore").replace('<i>', '').replace('</i>', ' |').replace('<br/>', ' ').replace('LA', '')
    #mediacontent = feed.find('{http://search.yahoo.com/mrss/}content').get('url')

    patron = '<media:content type="text/xml" isDefault="true"\nurl="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    if not matches:
        return itemlist
    mediacontent = matches[0]

    #data = scrapertools.cachePage(mediacontent)
    #logger.info(data)

    logger.info(description)
    itemlist.append( Item(channel=__channel__, action="play", title=description, url=mediacontent, server="mundonick", thumbnail=item.thumbnail,  folder=False) )
    return itemlist
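# A self-contained sketch of the ElementTree lookups used in play() above,
# run against a minimal sample document; the real mundonick configuration
# shape is an assumption based on the .//player//URL and .//player//feed
# queries.
def _demo_parse_player_config():
    import xml.etree.ElementTree as xmlet
    sample = ("<config><player>"
              "<URL>http://example.com/player.swf</URL>"
              "<feed>http://example.com/feed.xml</feed>"
              "</player></config>")
    configuration = xmlet.fromstring(sample)
    swfurl = configuration.find('.//player//URL').text
    feedurl = configuration.find('.//player//feed').text
    return swfurl, feedurl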
def videos(item):

	logger.info("[islapeliculas.py] videos")
	# Descarga la página
	data = scrapertools.cachePage(item.url)
	patron = '(modules.php\?name=Anime-Online&func=JokeView&jokeid=.*?&amp;Es=\d)'
	matches = re.compile(patron,re.DOTALL).findall(data)
	scrapertools.printMatches(matches)
	for match in matches:
		url = urlparse.urljoin('http://www.buenaisla.com/',match)
		url = url.replace('&amp;','&')
		data2= scrapertools.cachePage(url)
		data = data + data2
			
	title= item.title
	scrapedthumbnail = item.thumbnail
	listavideos = servertools.findvideos(data)

	itemlist = []
	for video in listavideos:
		invalid = video[1]
		invalid = invalid[0:8]
		if invalid != "FN3WE43K" and invalid != "9CC3F8&e":
			scrapedtitle = title.strip() + " - " + video[0]
			videourl = video[1]
			server = video[2]
			if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"], thumbnail=["+scrapedthumbnail+"]")

			# Añade al listado de XBMC
			itemlist.append( Item(channel=CHANNELNAME, action="play", title=scrapedtitle , url=videourl , thumbnail=scrapedthumbnail , server=server , folder=False) )

	return itemlist
def mainlist(item):

    logger.info("[oncetvmex.py] getplaylists")

    # Obtiene el feed segun el API de YouTube
    if item.title == "!Página siguiente":
        data=scrapertools.cachePage(item.url)
    else:
        data = scrapertools.cachePage('http://gdata.youtube.com/feeds/api/users/CanalOnceIPN/playlists?v=2&alt=json&start-index=1&max-results=30')

    #logger.info(data)
    import json
    playlists = json.loads(data)
    if playlists is None: playlists = []

    itemlist = []
    for playlist in playlists['feed']['entry']:
        scrapedtitle = playlist['title']['$t'].encode("utf8","ignore")
        scrapedurl = playlist['content']['src'].encode("utf8","ignore") + '&alt=json'
        scrapedthumbnail = playlist['media$group']['media$thumbnail'][1]['url']
        itemlist.append( Item(channel=__channel__, title=scrapedtitle , action="playlist" , url=scrapedurl, thumbnail=scrapedthumbnail, folder=True) )

    for link in playlists['feed']['link']:
        if (link['rel'] == 'next'):
            scrapedurl = link['href']
            itemlist.append( Item(channel=__channel__, action="mainlist", title="!Página siguiente" , url=scrapedurl, folder=True) ) 

    return itemlist
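# The "next page" handling above follows the GData convention of a feed-level
# link with rel="next". An offline sketch of that traversal over sample data
# (the dict below is illustrative, not a real API response):
def _demo_next_link():
    playlists = {'feed': {'link': [
        {'rel': 'self', 'href': 'http://example.com/feed?start-index=1'},
        {'rel': 'next', 'href': 'http://example.com/feed?start-index=31'},
    ]}}
    for link in playlists['feed']['link']:
        if link['rel'] == 'next':
            return link['href']
    return None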
def completo(item):
    logger.info("[cinetube.py] completo()")
    
    url = item.url
    siguiente = True
    itemlist = []
    
    data = scrapertools.cachePage(url)
    patronpag  = '<li class="navs"><a class="pag_next" href="([^"]+)"></a></li>'
    while siguiente:
    
        patron = '<!--SERIE-->.*?<a href="([^"]+)" .*?>([^<]+)</a></span></li>.*?<!--FIN SERIE-->'
        matches = re.compile(patron,re.DOTALL).findall(data)
        for match in matches:
            scrapedtitle = match[1]
            # Convierte desde UTF-8 y quita entidades HTML
            scrapedtitle = unicode( scrapedtitle, "iso-8859-1" , errors="replace" ).encode("utf-8")
            scrapedtitle = scrapertools.entityunescape(scrapedtitle)
            fulltitle = scrapedtitle
            
            scrapedplot = ""
            scrapedurl = urlparse.urljoin(url,match[0])
            scrapedthumbnail = ""    

            itemlist.append( Item(channel=__channel__, action="temporadas", title=scrapedtitle , fulltitle=fulltitle , url=scrapedurl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=scrapedtitle, show=scrapedtitle) )

        # Extrae el paginador
        matches = re.compile(patronpag,re.DOTALL).findall(data)
        if len(matches)==0:
            siguiente = False
        else:
            data = scrapertools.cachePage(urlparse.urljoin(url,matches[0]))

    return itemlist
def findvideos(item):
    logger.info("pelisalacarta.channels.fullmatches findvideos")
    itemlist = []
    if item.extra == "":
        data = scrapertools.cachePage(item.url)
        data = scrapertools.decodeHtmlentities(data)
        acp = "&acp_pid="+scrapertools.find_single_match(data,'<input id="acp_post".*?value="([^"]+)"/>')
        acp_shortcode = "&acp_shortcode="+scrapertools.find_single_match(data,'<input id="acp_shortcode".*?value="([^"]+)"/>')
        matches = scrapertools.find_multiple_matches(data, 'id="item.*?"><a href="([^"]+)"><div.*?>(.*?)</div>')
        if len(matches) > 1:
            for scrapedurl, scrapedtitle in matches:
                scrapedtitle = scrapedtitle \
                            .replace("HL ", "Resumen ").replace("Extended","Extendido") \
                            .replace("1st half ", "1ª parte ").replace("2nd half ","2ª parte ") \
                            .replace("Pre-Match", "Pre-partido").replace("Post-Match","Post-Partido")
                post = "acp_currpage=" + scrapedurl.replace("#","") + acp + acp_shortcode + "&action=pp_with_ajax"
                itemlist.append(Item(channel=__channel__, title=scrapedtitle, url=item.url, action="findvideos", thumbnail=item.thumbnail, extra=post, folder=True))
        else:
            itemlist = servertools.find_video_items(data=data)
            for videoitem in itemlist:
                videoitem.channel = __channel__
    else:
        post = item.extra
        data = scrapertools.cachePage("http://www.fullmatchesandshows.com/wp-admin/admin-ajax.php", post=post)
        itemlist = servertools.find_video_items(data=data)
        for videoitem in itemlist:
            videoitem.channel = __channel__

    return itemlist
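# The AJAX POST body in findvideos() is built by string concatenation. An
# equivalent sketch using urllib.urlencode (Python 2), with hypothetical
# placeholder values for the acp_* fields:
def _demo_build_post(currpage, acp_pid, acp_shortcode):
    import urllib
    params = [("acp_currpage", currpage.replace("#", "")),
              ("acp_pid", acp_pid),
              ("acp_shortcode", acp_shortcode),
              ("action", "pp_with_ajax")]
    return urllib.urlencode(params)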
def episodios(item):
    logger.info("[dlmore.py] episodios")
    itemlist = []

    # Descarga la página
    data = scrapertools.cachePage(item.url)
    patron  = '<a href="(\./ajax/fiche_serie.ajax.php\?id=[^"]+)" name="lien" class="[^"]+">([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = match[1]
        scrapedurl = urlparse.urljoin("http://www.dl-more.eu/",match[0])

        # Read episode iframe
        # http://www.dl-more.eu/ajax/fiche_serie.ajax.php?id=203&saison=1
        # http://www.dl-more.eu/series/203/ajax/fiche_serie.ajax.php?id=203&saison=1
        # Use a separate variable so the listing page kept in "data" is not clobbered
        data_iframe = scrapertools.cachePage(scrapedurl)

        # Search videos in iframe
        videoitems = servertools.find_video_items(data=data_iframe)
        
        # Assigns channel name and appends season to episode title
        for videoitem in videoitems:
            videoitem.channel=__channel__
            videoitem.title = scrapedtitle + videoitem.title
        
        # All episodes from all seasons in the same list
        itemlist.extend( videoitems )

    return itemlist
def play(item):
    logger.info("documaniatv.play")
    itemlist = []

    data = scrapertools.cachePage(item.url)
    var_url, ajax = scrapertools.find_single_match(data, 'preroll_timeleft.*?url:([^+]+)\+"([^"]+)"')
    url_base = scrapertools.find_single_match(data, 'var.*?' + var_url + '="([^"]+)"')
    patron = 'preroll_timeleft.*?data:\{"([^"]+)":"([^"]+)","' \
             '([^"]+)":"([^"]+)","([^"]+)":"([^"]+)","([^"]+)"' \
             ':"([^"]+)","([^"]+)":"([^"]+)"\}'
    match = scrapertools.find_single_match(data, patron)
    params = "{0}={1}&{2}={3}&{4}={5}&{6}={7}&{8}={9}".format(match[0],match[1],match[2],
                                                              match[3],match[4],match[5],
                                                              match[6],match[7],match[8],
                                                              match[9])
    url = url_base + ajax + "?" + params
    data1 = scrapertools.cachePage(url)

    patron = '<iframe src="(.*?)"'
    match = re.compile(patron,re.DOTALL).findall(data1)
    if not match:
        return itemlist
    logger.info(match[0])

    # Busca los enlaces a los videos
    video_itemlist = servertools.find_video_items(data=match[0])
    for video_item in video_itemlist:
        itemlist.append( Item(channel=__channel__ , action="play" , server=video_item.server, title=item.title+video_item.title,url=video_item.url, thumbnail=video_item.thumbnail, plot=video_item.plot, folder=False))

    return itemlist
def play(item):
    logger.info("[rtvv.py] play")

    url = item.url
    
    # Descarga pagina detalle
    #file: "/rtvvcontent/playlist/RTVVID20110207_0082/",
    #http://www.rtvv.es/rtvvcontent/playlist/RTVVID20110207_0082/
    data = scrapertools.cachePage(url)
    patron = 'file: "(/rtvvcontent/playlist/[^"]+)",'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        url = urlparse.urljoin(url,matches[0])
    logger.info("[rtvv.py] url="+url)

    # Extrae la URL del video
    #<media:content url="http://rtvv.ondemand.flumotion.com/rtvv/ondemand/pro/RTVVID20110207_0082-0.mp4"/>
    data = scrapertools.cachePage(url)
    patron = '<media.content url="([^"]+)"/>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        url = matches[0]

    itemlist = []
    itemlist.append( Item(channel=CHANNELNAME, title=item.title , action="play" , server="directo" , url=url, thumbnail=item.thumbnail, plot=item.plot , show=item.show , folder=False) )

    return itemlist
def mirrors(item):
    logger.info("[capitancinema.py] mirrors")

    title = item.title
    thumbnail = item.thumbnail
    plot = item.plot

    # Descarga la página
    data = scrapertools.cachePage(item.url)
    patronvideos  = '<li><strong>DISPONIBLE EN EL FORO</strong>[^<]+<a href="([^"]+)"'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    itemlist = []
    if len(matches)>0:
        url = matches[0]
        data = scrapertools.cachePage(url)

        # ------------------------------------------------------------------------------------
        # Busca los enlaces a los videos
        # ------------------------------------------------------------------------------------
        listavideos = servertools.findvideos(data)

        for video in listavideos:
            scrapedtitle = title.strip() + " - " + video[0]
            scrapedurl = video[1]
            server = video[2]
            
            itemlist.append( Item(channel=CHANNELNAME, action="play" , title=scrapedtitle , url=scrapedurl, thumbnail=item.thumbnail, plot=item.plot, server=server, folder=False))

    return itemlist
def entradas(item):
    logger.info("pelisalacarta.channels.grabaciones_deportivas entradas")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    data = scrapertools.decodeHtmlentities(data)
    title = item.title.replace("+++ ","")
    ymd = scrapertools.find_single_match(data, '<div id="vafs".*?value="([^"]+)"')
    cat = scrapertools.find_single_match(data, '<label for="s([^"]+)">(?:<b>|)'+title+'(?:</b>|)</label>')

    item.extra = cat
    item.url = item.url + ymd
    itemlist = partidos(item)

    if itemlist[0].action == "": return itemlist
    if not "Primer día con vídeos disponibles" in itemlist[0].title: itemlist.insert(0, Item(channel=__channel__, title="--Hoy--", url="", action="", thumbnail=item.thumbnail, folder=False)) 
    itemlist.append(Item(channel=__channel__, title=bbcode_kodi2html("     [COLOR red]***Elegir Fecha***[/COLOR]"), url="", action="", thumbnail=item.thumbnail, folder=False))
    matches = scrapertools.find_multiple_matches(data, '<a class="small"href="([^"]+)".*?<b>(.*?)</b>')
    length = len(itemlist)
    for scrapedurl, scrapedtitle in matches:
        if scrapedtitle == "Hoy": continue
        scrapedurl = host_live + scrapedurl
        itemlist.insert(length, Item(channel=__channel__, title=scrapedtitle, url=scrapedurl, action="partidos", extra=cat, thumbnail=item.thumbnail, folder=True))

    calendar = scrapertools.cachePage("http://livetv.sx/ajax/vacal.php?cal&lng=es")
    matches = scrapertools.find_multiple_matches(calendar, "load\('([^']+)'\).*?<b>(.*?)</b>")
    for scrapedurl, scrapedtitle in matches:
        scrapedurl = host_live + scrapedurl
        itemlist.append(Item(channel=__channel__, title=scrapedtitle, url=scrapedurl, action="calendario", extra=cat, thumbnail=item.thumbnail, folder=True))

    return itemlist
def mainlist(item):
    logger.info("[sieterm.py] mainlist")

    if item.url=="":
        item.url="http://www.7rm.es/servlet/rtrm.servlets.ServletLink2?METHOD=LSTBLOGALACARTA&sit=c,6&serv=BlogPortal2&orden=2"

    # Descarga la página
    data = scrapertools.cachePage(item.url)
    #logger.info(data)

    # Lee los primeros programas
    itemlist = getprogramas(item,data)

    # Busca la página siguiente
    salir = False
    while not salir:
        patron = '<a class="list-siguientes" href="([^"]+)" title="Ver siguientes a la cartas">Siguiente</a>'
        matches = re.compile(patron,re.DOTALL).findall(data)
        
        if len(matches)==0:
            salir = True
        else:
            item.url = urlparse.urljoin(item.url,matches[0])
            data = scrapertools.cachePage(item.url)
            itemlist.extend( getprogramas(item,data) )

    return itemlist
def acciones_playlist(item):
    logger.info("pelisalacarta.channels.documaniatv acciones_playlist")
    itemlist = []
    if item.title == "Crear una nueva playlist y añadir el documental":
        texto = dialog_input(heading="Introduce el título de la nueva playlist")
        if texto != "":
            post= "p=playlists&do=create-playlist&title=%s&visibility=1&video-id=%s&ui=video-watch" % (texto, item.id)
            data = scrapertools.cachePage(item.url, headers=headers, post=post)

    elif item.title != "Me gusta":
        if "Eliminar" in item.title: action = "remove-from-playlist"
        else: action = "add-to-playlist"
        post = "p=playlists&do=%s&playlist-id=%s&video-id=%s" % (action, item.list_id, item.id)
        data = scrapertools.cachePage(item.url, headers=headers, post=post)
    else:
        item.url = "http://www.documaniatv.com/ajax.php?vid=%s&p=video&do=like" % item.id
        data = scrapertools.cachePage(item.url, headers=headers)

    try:
        dialog_notification(item.title, "Se ha añadido/eliminado correctamente")
        import xbmc
        xbmc.executebuiltin("Container.Refresh")
    except:
        itemlist.append( Item(channel=item.channel, action=""  , title="Se ha añadido/eliminado correctamente", url="", folder=False))
        return itemlist
def play(item):
    logger.info("[rtva.py] play")

    url = item.url

    # Descarga pagina detalle
    #http://www.canalsuralacarta.es/television/video/jamaica/2590/12
    #_url_xml_datos=http://www.canalsuralacarta.es/webservice/video/2590"
    data = scrapertools.cachePage(url)
    patron = '_url_xml_datos=([^"]+)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)>0:
        url = urlparse.urljoin(url,matches[0])
    logger.info("[rtva.py] url="+url)

    # Extrae la URL del video
    #http://ondemand.rtva.ondemand.flumotion.com/rtva/ondemand/flash8/programas/andaluces-por-el-mundo/20110509112657-7-andaluces-por-el-mundo-jamaica-10-05-11.flv
    #http://ondemand.rtva.ondemand.flumotion.com/rtva/ondemand/flash8/programas/andaluces-por-el-mundo/20110509112657-7-andaluces-por-el-mundo-jamaica-10-05-11.flv
    data = scrapertools.cachePage(url)
    patron = '<url>([^<]+)</url>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches)>0:
        url = matches[-1]

    itemlist = []
    itemlist.append( Item(channel=CHANNELNAME, title=item.title , action="play" , server="directo" , url=url, thumbnail=item.thumbnail, plot=item.plot , show=item.show , folder=False) )

    return itemlist
def fanart(item):
    logger.info("pelisalacarta.peliculasdk fanart")
    itemlist = []
    url = item.url
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
    title = scrapertools.get_match(data,'<div id="titleopcions">Ver película(.*?)\(')
    title = re.sub(r"3D|SBS|-", "", title)
    title = title.replace('Reparado','')
    title = title.replace(' ','%20')
    url="http://api.themoviedb.org/3/search/movie?api_key=57983e31fb435df4df77afb854740ea9&query=" + title + "&language=es&include_adult=false"
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
    patron = '"page":1.*?"backdrop_path":"(.*?)".*?,"id"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)==0:
        item.extra = item.thumbnail
    else:
        for fan in matches:
            fanart="https://image.tmdb.org/t/p/original" + fan
            item.extra= fanart
    itemlist.append( Item(channel=__channel__, title =item.title , url=item.url, action="findvideos", thumbnail=item.thumbnail, fanart=item.extra, folder=True) )
    title ="Info"
    title = title.replace(title,"[COLOR skyblue]"+title+"[/COLOR]")
    itemlist.append( Item(channel=__channel__, action="info" , title=title , url=item.url, thumbnail=item.thumbnail, fanart=item.extra, folder=False ))


    return itemlist
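# The TMDB response above is JSON but is scraped with a regex. A sketch of
# the same backdrop lookup using the json module, assuming the documented
# /3/search/movie response shape (a "results" list whose entries carry
# "backdrop_path"):
def _get_backdrop(data):
    import json
    response = json.loads(data)
    for movie in response.get("results", []):
        if movie.get("backdrop_path"):
            return "https://image.tmdb.org/t/p/original" + movie["backdrop_path"]
    return None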
def get_main_page():

    file_name = os.path.join( config.get_data_path() , "tnu.cached" )
    logger.info("tvalacarta.channels.tnu get_main_page file_name="+file_name)

    if not os.path.exists(file_name):
        logger.info("tvalacarta.channels.tnu get_main_page no existe")
        data = scrapertools.cachePage("http://www.tnu.com.uy/videoteca/")
        f = open(file_name,"w")
        f.write(data)
        f.close()
        return data

    # Calcula la antiguedad del fichero
    file_timestap = os.path.getmtime(file_name)
    file_datetime = datetime.datetime.fromtimestamp(file_timestap)
    now_datetime = datetime.datetime.now()

    # Si tiene más de 3 horas (total_seconds(); con .seconds se perderían los días)
    diferencia = (now_datetime - file_datetime).total_seconds()

    if diferencia > 60*60*3:
        logger.info("tvalacarta.channels.tnu get_main_page tiene más de 3 horas, lee de nuevo y actualiza la cache")
        data = scrapertools.cachePage("http://www.tnu.com.uy/videoteca/")
        f = open(file_name,"w")
        f.write(data)
        f.close()
        return data
    else:
        logger.info("tvalacarta.channels.tnu get_main_page tiene menos de 3 horas, devuelve la cache")
        f = open(file_name,"r")
        data = f.read()
        f.close()
        return data
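# The freshness test in get_main_page() can be factored into a small helper.
# A sketch using file mtime against time.time(), keeping the 3-hour threshold
# from the code above:
def _cache_is_fresh(file_name, max_age_seconds=60 * 60 * 3):
    import os, time
    if not os.path.exists(file_name):
        return False
    return (time.time() - os.path.getmtime(file_name)) < max_age_seconds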
def play(item):
    logger.info("documaniatv.play")
    itemlist = []

    # Descarga la pagina
    data1 = scrapertools.cachePage(item.url) 
    logger.info(data1)
    patron= 'itemprop="embedURL" content="(.*?)"'
    matc = re.compile(patron,re.DOTALL).findall(data1)
    if not matc:
        return itemlist
    logger.info(matc[0])
 
    data = scrapertools.cachePage(matc[0])
    logger.info(data)

    # Busca los enlaces a los videos
    video_itemlist = servertools.find_video_items(data=data)
    for video_item in video_itemlist:
        itemlist.append( Item(channel=__channel__ , action="play" , server=video_item.server, title=item.title+video_item.title,url=video_item.url, thumbnail=video_item.thumbnail, plot=video_item.plot, folder=False))

    # Extrae los enlaces a los videos (Directo)
    patronvideos = "src= '([^']+)'"
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    if len(matches)>0:
        if not "www.youtube" in matches[0]:
            itemlist.append( Item(channel=__channel__ , action="play" , server="Directo", title=item.title+" [directo]",url=matches[0], thumbnail=item.thumbnail, plot=item.plot))

    return itemlist
def detail(params,url,category):
    logger.info("[veranime.py] detail")

    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    plot = urllib.unquote_plus( params.get("plot") )

    # Descarga la página
    data = scrapertools.cachePage(url)
    #logger.info(data)

    patron  = '<div id="listacapdd"><div class="listddserie">[^<]+'
    patron += '<a title="[^"]+" href="([^"]+)"><strong>[^<]+</strong></a>[^<]+'
    patron += '</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        url = matches[0]
        data = scrapertools.cachePage(url)

    # ------------------------------------------------------------------------------------
    # Busca los enlaces a los videos
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        videotitle = video[0]
        url = video[1]
        server = video[2]
        xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
    # ------------------------------------------------------------------------------------

    # Asigna el título, desactiva la ordenación, y cierra el directorio
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def programas(item):
    logger.info("[rtve.py] programas")
    
    # En la paginación la URL vendrá fijada, si no se construye aquí la primera página
    if not item.url.startswith("http"):
        item.url = "http://www.rtve.es/alacarta/programas/"+item.extra+"/?pageSize=100&order=1&criteria=asc&emissionFilter=all"
    logger.info("[rtve.py] programas url="+item.url) 

    itemlist = []
    data = scrapertools.cachePage(item.url)
    itemlist.extend(addprogramas(item,data))
    salir = False

    while not salir:
        # Extrae el enlace a la página siguiente
        patron  = '<a name="paginaIR" href="[^"]+" class="active"><span>[^<]+</span></a>[^<]+'
        patron += '<a name="paginaIR" href="([^"]+)"><span>'
    
        matches = re.findall(patron,data,re.DOTALL)
        if DEBUG: scrapertools.printMatches(matches)

        if len(matches)>0:
            # Carga la página siguiente
            url = urlparse.urljoin(item.url,matches[0]).replace("&amp;","&")
            data = scrapertools.cachePage(url)
            
            # Extrae todos los programas
            itemlist.extend(addprogramas(item,data))
        else:
            salir = True

    return itemlist
def get_video_url( page_url , premium = False , user="" , password="", video_password="", page_data="" ):
    logger.info("[extremaduratv.py] get_video_url(page_url='%s')" % page_url)
    video_urls = []

    # Descarga la página como navegador web
    #http://www.canalextremadura.es/alacarta/tv/videos/extremadura-desde-el-aire
    #<div id="mediaplayer" rel="rtmp://canalextremadurafs.fplive.net/canalextremadura/#tv/S-B5019-006.mp4#535#330"></div>
    data = scrapertools.cachePage(page_url)
    patron  = '<div id="mediaplayer" rel="([^"]+)"></div>'
    matches = re.findall(patron,data,re.DOTALL)

    for url in matches:
        partes = url.split("#")
        url = partes[0]+partes[1]
        logger.info("url="+url)
        video_urls.append( [ "RTMP [extremaduratv]" , url.replace(" ","%20") ] )

    # Descarga la página como ipad
    headers = []
    headers.append( ["User-Agent","Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10"] )
    data = scrapertools.cachePage(page_url,headers=headers)
    logger.info("data="+data)
    patron = "<video.*?src ='([^']+)'"
    matches = re.findall(patron,data,re.DOTALL)

    for url in matches:
        video_urls.append( [ "iPhone [extremaduratv]" , url ] )

    return video_urls
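# The rel attribute above packs "rtmp-base#path#width#height" into a single
# string. An offline sketch of the split performed in get_video_url(), using
# the sample value from the comment:
def _demo_split_rel():
    rel = "rtmp://canalextremadurafs.fplive.net/canalextremadura/#tv/S-B5019-006.mp4#535#330"
    partes = rel.split("#")
    # base + path -> rtmp://canalextremadurafs.fplive.net/canalextremadura/tv/S-B5019-006.mp4
    return partes[0] + partes[1]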
def play(item):
    logger.info("[tupornotv.py] play")
    itemlist = []
    
    # Lee la pagina del video
    data = scrapertools.cachePage(item.url)
    codVideo = scrapertools.get_match(data,'body id="([^"]+)"')
    logger.info("codVideo="+codVideo)
    
    # Lee la pagina con el codigo
    # http://tuporno.tv/flvurl.php?codVideo=188098&v=MAC%2011,5,502,146
    url = "http://tuporno.tv/flvurl.php?codVideo="+codVideo+"&v=MAC%2011,5,502,146"
    data = scrapertools.cachePage(url)
    logger.info("data="+data)
    kpt = scrapertools.get_match(data,"kpt\=(.+?)\&")
    logger.info("kpt="+kpt)
    
    # Decodifica
    import base64
    url = base64.decodestring(kpt)
    logger.info("url="+url)

    itemlist.append( Item(channel=item.channel, action="play", title=item.title , url=url , thumbnail=item.thumbnail , plot=item.plot, server="Directo", folder=False) )

    return itemlist
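# flvurl.php returns the final video URL base64-encoded in the "kpt"
# parameter. A self-contained sketch of that decode step (Python 2
# base64.decodestring, as used above; the sample value is illustrative):
def _demo_decode_kpt():
    import base64
    kpt = base64.encodestring("http://example.com/video.flv")
    return base64.decodestring(kpt)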
def usuario(item):
    logger.info("pelisalacarta.channels.documaniatv usuario")
    itemlist = []
    data = scrapertools.cachePage(item.url, headers=headers)
    profile_id = scrapertools.find_single_match(data, 'data-profile-id="([^"]+)"')
    url = "http://www.documaniatv.com/ajax.php?p=profile&do=profile-load-playlists&uid=%s" % profile_id

    data = scrapertools.cachePage(url, headers=headers)
    data = jsontools.load_json(data)
    data = data['html']

    patron = '<div class="pm-video-thumb">.*?src="([^"]+)".*?' \
             '<span class="pm-pl-items">(.*?)</span>(.*?)</div>' \
             '.*?<h3.*?href="([^"]+)".*?title="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedthumbnail, items, videos, scrapedurl, scrapedtitle in matches:
        scrapedtitle = scrapedtitle.replace("Historia",'Historial')
        scrapedtitle += " ("+items+videos+")"
        if "no-thumbnail" in scrapedthumbnail:
            scrapedthumbnail = ""
        else:
            scrapedthumbnail += "|"+headers[0][0]+"="+headers[0][1]
        itemlist.append( Item(channel=item.channel, action="playlist", title=scrapedtitle , url=scrapedurl , thumbnail=scrapedthumbnail , fanart=scrapedthumbnail, folder=True) )

    return itemlist
def play(item):
    logger.info("[a3media.py] play")

    '''
<section class="mod_player">
	<div id="capa_modulo_player" episode="20131030-EPISODE-00002-false"></div> 
    '''

    data = scrapertools.cachePage(item.url)
    logger.info(data)

    patron = '<div id="[^"]+" episode="([^"]+)"></div>'

    episode = scrapertools.get_match(data,patron)
    itemlist = []

    if len(episode)>0:
        token = d(episode, "puessepavuestramerced")
        url = "http://servicios.atresplayer.com/api/urlVideoLanguage/%s/%s/%s/es" % (episode, "android_tablet", token)
        data = scrapertools.cachePage(url)
        logger.info(data)
        lista = load_json(data)
        if lista is not None:
            #item.url = lista['resultObject']['es']
            item.url = lista['resultDes']
            if item.url == "Idioma inválido":     #### DRM encrypted
                item.url = "El video no puede verse en este sistema"
            itemlist.append(item)

    return itemlist
def porgeneros(item):
    logger.info("[zpeliculas.py] porgeneros")

    # Descarga la página
    data = scrapertools.cachePage(item.url)
    data = scrapertools.get_match(data,'<div class="shortmovies">(.*?)<div class="navigation ignore-select" align="center">')
    
    '''
    <div class="leftpane">
    <div class="movieposter" title="Descargar El último pasajero">
    <a href="http://www.zpeliculas.com/peliculas/p-accion/1525-el-ltimo-pasajero.html"><img src="http://i.imgur.com/NW3xI3E.jpg" width="110" height="150" alt="El último pasajero" title="Descargar El último pasajero" /></a>
    <div class="shortname">El último pasajero</div>
    <div class="BDRip">BDRip</div>
    </div>
    </div>
    <div class="rightpane">
    <div style="display:block;overflow:hidden;">
    <h2 class="title" title="El último pasajero"><a href="http://www.zpeliculas.com/peliculas/p-accion/1525-el-ltimo-pasajero.html">El último pasajero</a></h2>
    <div style="height:105px; overflow:hidden;">
    <div class="small">
    <div class="cats" title="Genero"><a href="http://www.zpeliculas.com/peliculas/p-accion/">Accion</a>, <a href="http://www.zpeliculas.com/peliculas/p-intriga/">Intriga</a>, <a href="http://www.zpeliculas.com/peliculas/p-thriller/">Thriller</a></div>
    <div class="year" title="A&ntilde;o">2013</div>
    <div class="ESP" title="Idioma">ESP</div>
    <div class="FA" title="El último pasajero FA Official Website"><a href="http://www.filmaffinity.com/es/film419883.html" target="_blank" title="El último pasajero en filmaffinity">El último pasajero en FA</a></div>
    </div>
    </div>
    <div class="clear" style="height:2px;">
    '''
    patron  = '<div class="leftpane">.*?<a href="(.*?)"><img src="(.*?)".*?alt="(.*?)".*?<div class="shortname">.*?</div>.*?<div.*?>(.*?)</div>.*?<div class="rightpane">.*?<div class="year" title="A&ntilde;o">(.*?)<.*?"Idioma">(.*?)</div>'

    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    itemlist = []
    
    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedcalidad, scrapedyear, scrapedidioma in matches:
        title = scrapedtitle
        logger.info("title="+scrapedtitle)
        title = title + ' ('+scrapedyear+') ['+scrapedidioma+'] ['+scrapedcalidad+']'
        url = scrapedurl
        thumbnail = scrapedthumbnail
        plot = ""
        plot = unicode( plot, "iso-8859-1" , errors="replace" ).encode("utf-8")
        if (DEBUG): logger.info("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        
        itemlist.append( Item(channel=__channel__, action="findvideos" , title=title , url=url, thumbnail=thumbnail, plot=plot, show=title, viewmode="movie", fanart=thumbnail))
    data = scrapertools.cachePage(item.url)
    data = scrapertools.get_match(data,'<div class="navigation ignore-select" align="center">.*?<div class="clear"></div>(.*?)<div class="clear"></div>')
    #<span>1</span> <a href="http://www.zpeliculas.com/peliculas/p-accion/page/2/">2</a>

    patron='<span>.*?</span>.*?href="(.*?)"'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for scrapedurl2 in matches:
        pagina = scrapedurl2
        if "Anterior" not in pagina:
            itemlist.append( Item(channel=__channel__, action="porgeneros" , title="Página siguiente >>" , url=pagina, thumbnail="", plot="", viewmode="movie"))
    
    return itemlist
def series(item,extended=True):
    logger.info("pelisalacarta.channels.tumejortv series")

    url = item.url
    # Descarga la pagina
    if item.extra=="":
        data = scrapertools.cachePage(url)
    else:
        data = scrapertools.cachePage(url,post=item.extra)
    #logger.info(data)

    # Extrae las series
    '''
    <div class="antlo_dir_all_container">
    <div rel="tag" data-href="http://www.tumejortv.com/series/G-C-B---Golfas--Cursis-Y-Beatas-/" class="antlo_dir_pic_container color2" alt="G.C.B. (Golfas, Cursis Y Beatas)" title="G.C.B. (Golfas, Cursis Y Beatas)">
    <div class="antlo_dir_bandera"><img src="http://www.tumejortv.com/images/flags/f_estrenos_nuevo.png" alt="G.C.B. (Golfas, Cursis Y Beatas)" title="G.C.B. (Golfas, Cursis Y Beatas)"/></div>
    <div class="antlo_dir_img_container"><a href="http://www.tumejortv.com/series/G-C-B---Golfas--Cursis-Y-Beatas-/"><img src="http://www.tumejortv.com/images/posters/bXc4yUxJvPx4Hszf.jpeg" alt="G.C.B. (Golfas, Cursis Y Beatas)"/></a>
    <div class="antlo_pic_more_info"><span class="color2">Serie  <img src="http://www.tumejortv.com/images/idioma/antlo-es.png" alt="Español" title="Español"/><img src="http://www.tumejortv.com/images/general/posee_trailer.png" alt="Trailer" title="Trailer" style="margin: 0 3px;"/></span></div></div><p>
    <div class="antlo_dir_box_text_container"><h3 class="antlo_dir_video_title"><span style="font-size:1px;color:#3E3E3E;">Serie </span><br/><a href="http://www.tumejortv.com/series/G-C-B---Golfas--Cursis-Y-Beatas-/"> G.C.B. (Golfas, Cursis Y Beata...</a></h3>
    <h4 class="antlo_dir_video_cat">Temporada <span class="white">1</span> Capítulo <span class="white">10</span></h4><h5 class="antlo_dir_video_calidad">HDTV</h5></div></p></div></div>
    '''
    patron  = '<div class="antlo_dir_all_container">'
    patron += '<div rel="tag" data-href="([^"]+)".*?'
    patron += '<div class="antlo_dir_img_container"><a[^<]+<img src="([^"]+)"[^>]+></a>'
    patron += '<div class="antlo_pic_more_info"><span class="col[^"]+">([^>]+)<img src="[^"]+" alt="([^"]+)".*?</span></div></div><p>'
    patron += '<div class="antlo_dir_box_text_container"><h3 class="antlo_dir_video_title"><span[^<]+</span><br/><a[^>]+>([^<]+)</a></h3>'
    patron += '<h4 class="antlo_dir_video_cat">(.*?)<h5 class="antlo_dir_video_calidad">([^<]+)</h5'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for url,thumbnail,tipo,idioma,titulo,categoria,calidad in matches:
        scrapedtitle = titulo.strip()
        if extended:
            scrapedtitle = scrapedtitle +" ("+idioma.strip()+") ("+scrapertools.htmlclean(calidad)+")"
        scrapedurl = url+"capitulos/"
        scrapedthumbnail = thumbnail
        scrapedplot = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        itemlist.append( Item(channel=__channel__, action="findepisodios" , title=scrapedtitle , fulltitle=scrapedtitle, url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show=titulo.strip()))

    # Ordena los listados alfabeticos
    if "filtro_letras" in item.url:
        itemlist = sorted(itemlist, key=lambda Item: Item.title)    

    # Extrae la pagina siguiente
    patron = '<a href="([^"]+)">SIGUIENTE</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        scrapedtitle = ">> Pagina siguiente"
        scrapedurl = matches[0]
        scrapedthumbnail = ""
        scrapedplot = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        itemlist.append( Item(channel=__channel__, action="series" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot))

    return itemlist
def send_to_jdownloader(item):
  #d = {"web": url}urllib.urlencode(d)
  from core import scrapertools
  if item.subtitle!="":
      data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail + " " + item.subtitle)
  else:
      data = scrapertools.cachePage(config.get_setting("jdownloader")+"/action/add/links/grabber0/start1/web="+item.url+ " " +item.thumbnail)
  return
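# The commented-out hint in send_to_jdownloader() suggests url-encoding the
# request. A sketch that quotes the space-separated links before appending
# them to the /action/add path (Python 2 urllib; the endpoint format is taken
# from the call above):
def _jd_add_links_url(base, url, thumbnail, subtitle=""):
    import urllib
    links = url + " " + thumbnail
    if subtitle != "":
        links += " " + subtitle
    return base + "/action/add/links/grabber0/start1/web=" + urllib.quote(links, safe="")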
def info_capitulos(item):
    logger.info("pelisalacarta.bricocine trailer")
    url= item.url
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
    item.category = item.extra.split("|")[0]
    item.thumbnail = item.extra.split("|")[1]
    id = item.extra.split("|")[2]
    temp = item.extra.split("|")[3]
    epi = item.extra.split("|")[4]
    title = item.extra.split("|")[5]
    url="https://www.themoviedb.org/tv/"+item.extra.split("|")[2]+item.extra.split("|")[5]+"/season/"+item.extra.split("|")[3]+"/episode/"+item.extra.split("|")[4]+"?language=en"
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
    patron = '<p><strong>Air Date:</strong>.*?content="(.*?)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)==0 :
        title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
        plot = "Este capitulo no tiene informacion..."
        plot = plot.replace(plot,"[COLOR yellow][B]"+plot+"[/B][/COLOR]")
        foto = "http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
        image="http://s6.postimg.org/ub7pb76c1/noinfo.png"

    for day in matches:
        url="http://thetvdb.com/api/GetEpisodeByAirDate.php?apikey=1D62F2F90030C444&seriesid="+item.extra.split("|")[0]+"&airdate="+day+"&language=es"
        if "Castle%20%20%20" in item.extra.split("|")[5]:
            url="http://thetvdb.com/api/GetEpisodeByAirDate.php?apikey=1D62F2F90030C444&seriesid=83462"+"&airdate="+day+"&language=es"
        
        data = scrapertools.cachePage(url)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
        patron = '<Data>.*?<EpisodeName>([^<]+)</EpisodeName>.*?'
        patron += '<Overview>(.*?)</Overview>.*?'
        
        matches = re.compile(patron,re.DOTALL).findall(data)
        if len(matches)==0 :
            title = "[COLOR red][B]LO SENTIMOS...[/B][/COLOR]"
            plot = "Este capitulo no tiene informacion..."
            plot = plot.replace(plot,"[COLOR yellow][B]"+plot+"[/B][/COLOR]")
            image="http://s6.postimg.org/ub7pb76c1/noinfo.png"
            foto="http://s6.postimg.org/nm3gk1xox/noinfosup2.png"
    
        else :
            
            
            for name_epi, info in matches:
                if "<filename>episodes" in data:
                    foto = scrapertools.get_match(data,'<Data>.*?<filename>(.*?)</filename>')
                    fanart = "http://thetvdb.com/banners/" + foto
                else:
                    fanart=item.extra.split("|")[1]
                plot = info
                plot = plot.replace(plot,"[COLOR yellow][B]"+plot+"[/B][/COLOR]")
                title = name_epi.upper()
                title = title.replace(title,"[COLOR sandybrown][B]"+title+"[/B][/COLOR]")
                image=fanart
                foto= item.extra.split("|")[1]
    ventana = TextBox2(title=title, plot=plot, thumbnail=image, fanart=foto)
    ventana.doModal()
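# The TVDB answer parsed in info_capitulos() is XML; the same fields can be
# read with ElementTree instead of a regex. A sketch over a minimal sample
# response (real responses wrap one or more <Episode> elements in <Data>):
def _demo_parse_tvdb():
    import xml.etree.ElementTree as ET
    sample = ("<Data><Episode>"
              "<EpisodeName>Piloto</EpisodeName>"
              "<Overview>Sinopsis del capitulo.</Overview>"
              "</Episode></Data>")
    root = ET.fromstring(sample)
    episode = root.find(".//Episode")
    return episode.findtext("EpisodeName"), episode.findtext("Overview")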
def videos_p(item):

	logger.info("[asiateam.py] videos peliculas")
	# Descarga la página
	data = scrapertools.cachePage(item.url)
	title = item.title
	scrapedthumbnail = item.thumbnail
	scrapedplot = ""
	subtitulo = ""
	
	# Extrae las entradas
	patronimagen  = 'titulo.png".*?<img src="(.*?)".*?>'
	matches = re.compile(patronimagen,re.DOTALL).findall(data)
	if len(matches)>0:
		scrapedthumbnail = matches[0]
	patronplot  = 'sinopsis.png".*?>.*?<font color="(?:N|n)avy".*?>(.*?)</td>'
	matches = re.compile(patronplot,re.DOTALL).findall(data)
	if len(matches)>0:
		scrapedplot =  matches[0]
		scrapedplot = re.sub("</?\w+((\s+\w+(\s*=\s*(?:\".*?\"|'.*?'|[^'\">\s]+))?)+\s*|\s*)/?>",'',scrapedplot)
		scrapedplot = scrapedplot.replace('&quot;','"')
	patronsubs = 'subtitulos.png".*?>.*<a href="http://subs.asia-team.net/file.php\?id=(.*?)".*?>'
	matches = re.compile(patronsubs,re.DOTALL).findall(data)
	if len(matches)>0:
		subtitulo =  "http://subs.asia-team.net/download.php?id="+matches[0]
	itemlist = []
	listavideos = servertools.findvideos(data)
	for video in listavideos:
		scrapedtitle = title.strip() + " - " + video[0]
		videourl = video[1]
		server = video[2]
		if server.lower() =="megaupload":
			url = "http://www.megavideo.com/?d="+videourl
			data = scrapertools.cachePage(url)		
			patronname = 'flashvars.title = "(.*?)"'
			matches = re.compile(patronname,re.DOTALL).findall(data)
			if len(matches)>0:
				titulo = matches[0]
				#logger.info("Titulo: "+titulo)			
				if titulo[-3:]=="avi" or titulo[-3:]=="mkv" or titulo[-3:]=="mp4":
						scrapedtitle = "[MV] "+ title.strip()+"-"+titulo
				
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"], thumbnail=["+scrapedthumbnail+"]")

		# Añade al listado de XBMC
		itemlist.append( Item(channel=CHANNELNAME, action="sub", title=scrapedtitle , url=videourl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=server , category=subtitulo , folder=True) )
	
	#Añade opcion para filestube y asianmovielink
	if re.search('asia-team.net',item.url) is not None:
		if re.search(' / ',title) is not None:
			title = title.split(' / ')
			buscar = title[0]
		else:
			buscar = title
		
		itemlist.append( Item(channel=CHANNELNAME, action="search", title="Buscar Película en FilesTube",  extra=buscar , folder=True) )
		
	return itemlist
def detail(item):
    logger.info("[cine15.py] detail")

    title = item.title
    thumbnail = item.thumbnail
    plot = item.plot

    # Descarga la página
    data = scrapertools.cachePage(item.url)
    #logger.info(data)

    # ------------------------------------------------------------------------------------
    # Busca los enlaces a videos no megavideo (playlist xml)
    # ------------------------------------------------------------------------------------
    patronvideos  = 'flashvars[^f]+file=([^\&]+)\&amp'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    
    itemlist = []
    if len(matches)>0:
        if ("xml" in matches[0]):
            data2 = scrapertools.cachePage(matches[0])
            logger.info("data2="+data2)
            patronvideos  = '<track>[^<]+'
            patronvideos += '<title>([^<]+)</title>[^<]+'
            patronvideos += '<location>([^<]+)</location>[^<]+'
            patronvideos += '</track>'
            matches = re.compile(patronvideos,re.DOTALL).findall(data2)
            scrapertools.printMatches(matches)

            for match in matches:
                scrapedtitle = match[0]
                scrapedurl = match[1].strip()
                scrapedthumbnail = thumbnail
                scrapedplot = plot
                if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

                itemlist.append( Item(channel=CHANNELNAME, action="play" , title=scrapedtitle + " [Directo]" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, server="Directo", folder=False))

        else:
            itemlist.append( Item(channel=CHANNELNAME, action="play" , title=title + " [Directo]" , url=matches[0], thumbnail=thumbnail, plot=plot, server="Directo", folder=False))
            
    # ------------------------------------------------------------------------------------
    # Busca los enlaces a los videos
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        videotitle = video[0]
        url = video[1]
        server = video[2]
        itemlist.append( Item(channel=CHANNELNAME, action="play" , title=title.strip() + " - " + videotitle , url=url, thumbnail=thumbnail, plot=plot, server=server, folder=False))
    # ------------------------------------------------------------------------------------

    return itemlist
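# The non-megavideo playlist handled in detail() follows an XSPF-like layout
# (<track> with <title> and <location>). An offline ElementTree sketch of the
# same extraction, on a minimal sample document:
def _demo_parse_playlist():
    import xml.etree.ElementTree as ET
    sample = ("<playlist><trackList><track>"
              "<title>Corto</title>"
              "<location> http://example.com/video.flv </location>"
              "</track></trackList></playlist>")
    tree = ET.fromstring(sample)
    return [(t.findtext("title"), t.findtext("location").strip())
            for t in tree.findall(".//track")]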
def getlistWall(params,url,category):
    logger.info("[megalivewall.py] getlistWall")
    
    if url=="":
        url="http://www.megalive.com/"
    encontrados = set()
    # Descarga la página
    data = scrapertools.cachePage(url)
    patron = "flashvars.xmlurl = '([^']+)'"
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []
    if len(matches)>0:
        xmlurl = urllib.unquote_plus(matches[0])
        #logger.info(data)
        #<image click_url="?v=7RJPHQN0" images="http://img6.megalive.com/f29efb78905a482f00dacb5f5e41e953.jpg^
        #http://img6.megalive.com/eecd5b9bda6035095ef672b7c5e6dd5a.jpg" description="Expansion Ixcan TV" time="" thumb="http://img6.megalive.com/568a3de4a6b15fddce5c0f9609334529.jpg" hq="1" icon="ml">
        # Extrae las entradas (carpetas)
        patron  = '<image click_url="\?v=([^"]+)".*?'
        patron += 'description="(?:([^"]+)|)" time="" '
        patron += 'thumb="([^"]+)" '
        patron += 'hq="([^"]+)"'
        data = scrapertools.cachePage(xmlurl)
        matches = re.compile(patron,re.DOTALL).findall(data)
        scrapertools.printMatches(matches)

        for match in matches:
            # Titulo
            if len(match[1])>0:
                scrapedtitle = decodeHtmlentities(match[1]).encode("utf-8")
            else:
                scrapedtitle = "(no title)"
            # URL
            if match[0] in encontrados:
                continue
            scrapedurl = match[0]
            encontrados.add(match[0])
            # Thumbnail
            scrapedthumbnail = match[2]
            # Argumento
            scrapedplot = ""
            if match[3]=="1":
                hq=" [HQ]"
            else:
                hq=""

            # Depuracion
            if (DEBUG):
                logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

            # Añade al listado de XBMC
            #addnewvideo( CHANNELNAME , "play" , category ,"Directo", scrapedtitle+hq , scrapedurl , scrapedthumbnail , scrapedplot )
            itemlist.append( Item(channel=CHANNELNAME, action="play" , title=scrapedtitle , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, show = scrapedtitle, folder=False , context = True))

    return itemlist
def episodios(item, data=""):
    logger.info("tvalacarta.channels.aragontv episodios")
    logger.info("tvalacarta.channels.aragontv programa [item=" +
                item.tostring() + " show=" + item.show + "]")
    itemlist = []

    # Descarga la página
    if data == "":
        data = scrapertools.cachePage(item.url)
    #logger.info(data)

    # Extrae las entradas
    '''
    <div id="idv_1186" class="vid bloque">
    <div class="imagen">
    <img title="Malanquilla y Camarillas" alt="Malanquilla y Camarillas" src="/_archivos/imagenes/galeria_5738_thumb.jpg" />			        
    <div class="play">
    <a href="/programas/pequeños-pero-no-invisibles/malanquilla-y-camarillas-27122011-2131" title="Ver video" rel="videoFacebox"><span>Ver video</span></a>
    </div>
    </div>
    <h2><a href="/programas/pequeños-pero-no-invisibles/malanquilla-y-camarillas-27122011-2131" title="Malanquilla y Camarillas" rel="videoFacebox">Malanquilla y Camarillas</a></h2>
    
    <!--<br><a href="/programas/pequeños-pero-no-invisibles/malanquilla-y-camarillas-27122011-2131" title="Malanquilla y Camarillas" rel="videoFacebox2">Malanquilla y Camarillas</a> -->
    <div class="social">
    <span class="fecha">
    27/12/2011 21:31 h<br />
    Duración: 00:49:38
    </span>
    </div>
    </div>
    '''
    patron = '<div id="[^"]+" class="vid bloque[^<]+'
    patron += '<div class="imagen[^<]+'
    patron += '<img title="[^"]+" alt="([^"]+)" src="([^"]+)"[^<]+'
    patron += '<div class="play">[^<]+'
    patron += '<a href="([^"]+)".*?'
    patron += '<span class="fecha">(.*?)</span>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    #if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for match in matches:
        # Interpreta la fecha
        patron_fecha = "\s*([^<]+)<br />\s*Duración\: ([^\s]+)"
        campos_fecha = re.compile(patron_fecha, re.DOTALL).findall(match[3])
        fecha_string = campos_fecha[0][0].strip()
        #import time
        #fecha = time.strptime(fecha_string,"%d/%m/%y %H:%M")
        duracion_string = campos_fecha[0][1].strip()

        #scrapedtitle = match[0]+" "+fecha.strftime("%d/%m/%y")+" (Duración "+duracion_string+")"
        scrapedtitle = match[0].strip() + " " + fecha_string + " (Duración " + duracion_string + ")"
        scrapedurl = urlparse.urljoin(item.url, match[2])
        scrapedthumbnail = urlparse.urljoin(item.url, match[1])
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "], show=[" +
                        item.show + "]")

        # Añade al listado
        itemlist.append(
            Item(channel=CHANNELNAME,
                 title=scrapedtitle,
                 action="play",
                 server="aragontv",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 show=item.show,
                 folder=False))

    patron = "Paginación.*?<span class='activo'>[^<]+</span>  \|  <a href='([^']+)'"
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    if len(matches) > 0:
        pageitem = Item(channel=CHANNELNAME,
                        title=">> Página siguiente",
                        action="episodios",
                        url=urlparse.urljoin(item.url, matches[0]),
                        thumbnail=item.thumbnail,
                        plot=item.plot,
                        show=item.show,
                        folder=True)
        itemlist.append(pageitem)

    return itemlist
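# The commented-out strptime in episodios() uses "%d/%m/%y", but the scraped
# dates carry a four-digit year ("27/12/2011 21:31"), so the matching format
# would be "%d/%m/%Y %H:%M". A minimal sketch:
def _demo_parse_fecha():
    import time
    fecha_string = "27/12/2011 21:31"
    return time.strptime(fecha_string, "%d/%m/%Y %H:%M")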
def set_opcion(item, seleccion, opciones, video_urls):
    logger.info("platformtools set_opcion")
    # logger.debug(item.tostring('\n'))
    salir = False
    # No ha elegido nada, lo más probable porque haya dado al ESC
    # TODO revisar
    if seleccion == -1:
        # Para evitar el error "Uno o más elementos fallaron" al cancelar la selección desde fichero strm
        listitem = xbmcgui.ListItem(item.title, iconImage="DefaultVideo.png", thumbnailImage=item.thumbnail)
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, listitem)
        return salir

    # "Enviar a JDownloader"
    if opciones[seleccion] == config.get_localized_string(30158):
        from core import scrapertools

        # TODO comprobar que devuelve 'data'
        if item.subtitle != "":
            data = scrapertools.cachePage(config.get_setting("jdownloader") + "/action/add/links/grabber0/start1/web=" +
                                          item.url + " " + item.thumbnail + " " + item.subtitle)
        else:
            data = scrapertools.cachePage(config.get_setting("jdownloader") + "/action/add/links/grabber0/start1/web=" +
                                          item.url + " " + item.thumbnail)
        salir = True

    elif opciones[seleccion]==config.get_localized_string(30164): # Borrar archivo en descargas
        # En "extra" está el nombre del fichero en favoritos
        os.remove( item.url )
        xbmc.executebuiltin( "Container.Refresh" )
        salir = True

    # Descargar
    elif opciones[seleccion]==config.get_localized_string(30153): # "Descargar"

        download_title = item.fulltitle
        if item.hasContentDetails=="true":
            download_title = item.contentTitle

        # El vídeo de más calidad es el último
        mediaurl = video_urls[len(video_urls)-1][1]

        from core import downloadtools
        keyboard = xbmc.Keyboard(download_title)
        keyboard.doModal()
        if (keyboard.isConfirmed()):
            download_title = keyboard.getText()
            devuelve = downloadtools.downloadbest(video_urls,download_title)
            
            if devuelve==0:
                advertencia = xbmcgui.Dialog()
                resultado = advertencia.ok("plugin" , "Descargado con éxito")
            elif devuelve==-1:
                advertencia = xbmcgui.Dialog()
                resultado = advertencia.ok("plugin" , "Descarga abortada")
            else:
                advertencia = xbmcgui.Dialog()
                resultado = advertencia.ok("plugin" , "Error en la descarga")
        salir = True

    elif opciones[seleccion]==config.get_localized_string(30159): #"Borrar descarga definitivamente"
        from channels import descargas
        descargas.delete_error_bookmark(urllib.unquote_plus( item.extra ))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30106)) # 'Se ha quitado de la lista'
        xbmc.executebuiltin( "Container.Refresh" )
        salir = True

    elif opciones[seleccion]==config.get_localized_string(30160): #"Pasar de nuevo a lista de descargas":
        from channels import descargas
        descargas.mover_descarga_error_a_pendiente(urllib.unquote_plus( item.extra ))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30107)) # 'Ha pasado de nuevo a la lista de descargas'
        salir = True

    # "Quitar de favoritos"
    elif opciones[seleccion] == config.get_localized_string(30154):
        from channels import favoritos
        favoritos.delFavourite(item)
        salir = True

    # "Añadir a favoritos":
    elif opciones[seleccion] == config.get_localized_string(30155):
        from channels import favoritos
        item.from_channel = "favoritos"
        favoritos.addFavourite(item)
        salir = True

    elif opciones[seleccion]==config.get_localized_string(30156): #"Quitar de lista de descargas":
        # La categoría es el nombre del fichero en la lista de descargas
        from channels import descargas
        descargas.deletebookmark((urllib.unquote_plus( item.extra )))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(config.get_localized_string(30101) , item.title , config.get_localized_string(30106)) # 'Se ha quitado de lista de descargas'

        xbmc.executebuiltin( "Container.Refresh" )
        salir = True

    elif opciones[seleccion]==config.get_localized_string(30157): #"Añadir a lista de descargas":
        from core import downloadtools

        download_title = item.fulltitle
        download_thumbnail = item.thumbnail
        download_plot = item.plot

        if item.hasContentDetails=="true":
            download_title = item.contentTitle
            download_thumbnail = item.contentThumbnail
            download_plot = item.contentPlot

        keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(download_title))
        keyboard.doModal()
        if keyboard.isConfirmed():
            download_title = keyboard.getText()

            from channels import descargas
            descargas.savebookmark(titulo=download_title,url=item.url,thumbnail=download_thumbnail,server=item.server,plot=download_plot,fulltitle=download_title)

            advertencia = xbmcgui.Dialog()
            resultado = advertencia.ok(config.get_localized_string(30101) , download_title , config.get_localized_string(30109)) # 'se ha añadido a la lista de descargas'
        salir = True

    return salir
def mainlist(item):
    logger.info()
    itemlist = []

    if item.url == "":
        item.url = "http://www.ecartelera.com/videos/"

    # ------------------------------------------------------
    # Descarga la página
    # ------------------------------------------------------
    data = scrapertools.cachePage(item.url)
    # logger.info(data)

    # ------------------------------------------------------
    # Extrae las películas
    # ------------------------------------------------------
    patron = '<div class="viditem"[^<]+'
    patron += '<div class="fimg"><a href="([^"]+)"><img alt="([^"]+)" src="([^"]+)"/><p class="length">([^<]+)</p></a></div[^<]+'
    patron += '<div class="fcnt"[^<]+'
    patron += '<h4><a[^<]+</a></h4[^<]+'
    patron += '<p class="desc">([^<]+)</p>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedtitle, scrapedthumbnail, duration, scrapedplot in matches:
        title = scrapedtitle + " (" + duration + ")"
        url = scrapedurl
        thumbnail = scrapedthumbnail
        plot = scrapedplot.strip()

        logger.debug("title=[" + title + "], url=[" + url + "], thumbnail=[" +
                     thumbnail + "]")
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=thumbnail,
                 plot=plot,
                 folder=False))

    # ------------------------------------------------------
    # Extrae la página siguiente
    # ------------------------------------------------------
    patron = '<a href="([^"]+)">Siguiente</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        scrapedtitle = "Pagina siguiente"
        scrapedurl = match
        scrapedthumbnail = ""
        scrapeddescription = ""

        # Añade al listado de XBMC
        itemlist.append(
            Item(channel=item.channel,
                 action="mainlist",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapeddescription,
                 server="directo",
                 folder=True,
                 viewmode="movie_with_plot"))

    return itemlist
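# Self-contained check of the "Siguiente" pagination pattern above (stdlib re
# only; the sample link is hypothetical):
import re
sample = '<a href="http://www.ecartelera.com/videos/2/">Siguiente</a>'
print(re.compile('<a href="([^"]+)">Siguiente</a>', re.DOTALL).findall(sample))
# -> ['http://www.ecartelera.com/videos/2/']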
Beispiel #34
def porgeneros(item):
    logger.info("[zpeliculas.py] porgeneros")

    # Descarga la página
    body = scrapertools.cachePage(item.url)
    data = scrapertools.get_match(
        body,
        '<div class="shortmovies">(.*?)<div class="navigation ignore-select" align="center">'
    )
    '''
    <div class="leftpane">
    <div class="movieposter" title="Descargar Sólo los amantes sobreviven">
    <a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html"><img src="http://i.imgur.com/NBPgXrp.jpg" width="110" height="150" alt="Sólo los amantes sobreviven" title="Descargar Sólo los amantes sobreviven" /></a>
    <div class="shortname">Sólo los amantes sobreviven</div>
    <div class="BDRip">BDRip</div>
    </div>
    </div>

    <div class="rightpane">
    <div style="display:block;overflow:hidden;">
    <h2 class="title" title="Sólo los amantes sobreviven"><a href="http://www.zpeliculas.com/peliculas/p-drama/1634-slo-los-amantes-sobreviven.html">Sólo los amantes sobreviven</a></h2>

    <div style="height:105px; overflow:hidden;">
    <div class="small">
    <div class="cats" title="Genero"><a href="http://www.zpeliculas.com/peliculas/p-drama/">Drama</a>, <a href="http://www.zpeliculas.com/peliculas/p-fantasia/">Fantasia</a>, <a href="http://www.zpeliculas.com/peliculas/p-romantica/">Romantica</a></div>
    <div class="year" title="A&ntilde;o">2013</div>
    <div class="ESP" title="Idioma">ESP</div>
    <div class="FA" title="Sólo los amantes sobreviven FA Official Website"><a href="http://www.filmaffinity.com/es/film851633.html" target="_blank" title="Sólo los amantes sobreviven en filmaffinity">Sólo los amantes sobreviven en FA</a></div>
    </div>
    </div>
    <div class="clear" style="height:2px;"></div>
    <div style="float:right">
    '''
    patron = '<div class="leftpane">(.*?)<div style="float\:right">'
    #<a href="(.*?)"><img src="(.*?)".*?alt="(.*?)".*?<div class="shortname">.*?</div>.*?<div.*?>(.*?)</div>.*?<div class="rightpane">.*?<div class="year" title="A&ntilde;o">(.*?)<.*?"Idioma">(.*?)</div>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    itemlist = []

    for match in matches:
        scrapedurl = scrapertools.find_single_match(match, '<a href="([^"]+)"')
        scrapedthumbnail = scrapertools.find_single_match(
            match, '<img src="([^"]+)"')
        scrapedtitle = scrapertools.find_single_match(
            match, '<div class="shortname">([^<]+)')
        scrapedcalidad = scrapertools.find_single_match(
            match,
            '<div class="shortname">[^<]+</div[^<]+<div class="[^"]+">([^<]+)')
        scrapedyear = scrapertools.find_single_match(
            match, '<div class="year[^>]+>([^<]+)')
        scrapedidioma = scrapertools.find_single_match(
            match,
            '<div class="year[^>]+>[^<]+</div[^<]+<div class[^>]+>([^<]+)')

        title = scrapedtitle
        logger.info("title=" + scrapedtitle)
        title = title + ' (' + scrapedyear + ') [' + scrapedidioma + '] [' + scrapedcalidad + ']'
        url = scrapedurl
        thumbnail = scrapedthumbnail
        plot = ""
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "]")

        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 show=title,
                 viewmode="movie",
                 fanart=thumbnail))

    next_page = scrapertools.find_single_match(body,
                                               '<a href="([^"]+)">Siguiente')
    if next_page != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="porgeneros",
                 title="Página siguiente >>",
                 url=next_page,
                 thumbnail="",
                 plot="",
                 show="",
                 viewmode="movie",
                 fanart=""))

    return itemlist
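# Hypothetical stand-in for scrapertools.find_single_match, to clarify the
# per-block extraction above (assumed behaviour: first capture group, or ""
# when there is no match):
import re

def find_single_match(data, patron):
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""

print(find_single_match('<div class="shortname">Prueba</div>',
                        '<div class="shortname">([^<]+)'))  # -> Prueba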
Beispiel #35
def episodios(item):
    logger.info("tvalacarta.channels.a3media episodios")

    data = scrapertools.cachePage(item.url,headers=ANDROID_HEADERS)
    #logger.info(data)
    lista = jsontools.load_json(data)

    if lista is None: lista = {}

    itemlist = []

    if lista.has_key('episodes'):
        episodes = lista['episodes']
    elif lista.has_key('items'):
        episodes = lista['items']
    else:
        episodes = []

    for entrys in episodes:
        logger.info("entrys="+repr(entrys))
        if entrys.has_key('episode'):
            entry = entrys['episode']
        else:
            # 'section' entries and anything without an episode are skipped
            continue

        if entry.has_key('type'):
            tipo = entry['type']
        else:
            tipo = "FREE"

        try:
            episode = entry['contentPk']
        except:
            episode = 0

        try :
            scrapedtitle = entry['titleSection']+" "+entry['titleDetail']
        except:
            scrapedtitle = entry['name']
        if tipo == "REGISTER":
            scrapedtitle = scrapedtitle + " (R)"
        elif tipo == "PREMIUM":
            scrapedtitle = scrapedtitle + " (P)"

        scrapedurl = "http://servicios.atresplayer.com/api/urlVideo/%s/%s/" % (episode, "android_tablet")
        extra = episode
        if entry.has_key('storyline'): scrapedplot = entry['storyline']
        else: scrapedplot = item.plot
        scrapedthumbnail = entry['urlImage'].replace('.jpg','03.jpg')

        if account:
            if tipo == "FREE" or tipo == "REGISTER": #carga los videos que gratuitos y con registro
                # Añade al listado
                itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="play" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , extra = str(extra), folder=False) )
            #    logger.debug(tipo + " -> Añadido (1)")
            #else:
            #    logger.debug(tipo + " -> No añadido (1)")
        else:
            if tipo == "FREE": #solo carga los videos que no necesitan registro ni premium
                # Añade al listado
                itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="play" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , extra = str(extra), folder=False) )
            #    logger.debug(tipo + " -> Añadido (2)")
            #else:
            #    logger.debug(tipo + " -> No añadido (2)")
    return itemlist
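# Hypothetical minimal payloads showing the two feed shapes the parser above
# accepts; real atresplayer responses carry many more fields:
lista_a = {"episodes": [{"episode": {"contentPk": 123,
                                     "name": "Episodio 1",
                                     "type": "FREE",
                                     "urlImage": "http://example.com/episodio1.jpg"}}]}
lista_b = {"items": [{"section": {}}]}  # 'section' entries are skipped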
Beispiel #36
def videolist(params, url, category):
    logger.info("[veocine.py] mainlist")

    # ------------------------------------------------------
    # Descarga la página
    # ------------------------------------------------------
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # ------------------------------------------------------
    # Extrae las películas
    # ------------------------------------------------------
    patron = '<tr.*?'
    patron += '<td.*?'
    patron += '<a href="([^"]+)">'
    patron += "<img src='([^']+)'.*?<a.*?>\s*(.*?)\s*<(.*?)"
    patron += "<img .*? alt='([^']+)' />"
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG:
        scrapertools.printMatches(matches)

    for match in matches:
        try:
            scrapedtitle = unicode(
                match[2], "utf-8").encode("iso-8859-1") + " (" + match[4] + ")"
        except:
            scrapedtitle = match[2] + " (" + match[4] + ")"
        scrapedurl = urlparse.urljoin("http://www.veocine.es/", match[0])
        scrapedthumbnail = ""

        try:
            scrapedplot = unicode(match[3], "utf-8").encode("iso-8859-1")
        except:
            scrapedplot = match[3]

        scrapedplot = scrapedplot.replace("/a>", "\n")
        scrapedplot = scrapedplot.replace("<br />", "\n")
        scrapedplot = scrapedplot.replace("<b>", "")
        scrapedplot = scrapedplot.replace("</b>", "")
        scrapedplot = scrapedplot.replace("<i>", "")
        scrapedplot = scrapedplot.replace("</i>", "")
        scrapedplot = scrapedplot.replace("<!--colorstart:#589BB9-->", "")
        scrapedplot = scrapedplot.replace("<!--colorend-->", "")
        scrapedplot = scrapedplot.replace("<!--/colorend-->", "")
        scrapedplot = scrapedplot.replace("<!--/colorstart-->", "")
        scrapedplot = scrapedplot.replace('<span style="color:#589BB9">', "")
        scrapedplot = scrapedplot.replace("</span>", "")
        scrapedplot = scrapedplot.strip()

        # Depuracion
        if DEBUG:
            logger.info("scrapedtitle=" + scrapedtitle)
            logger.info("scrapedurl=" + scrapedurl)
            logger.info("scrapedthumbnail=" + scrapedthumbnail)
            logger.info("scrapedplot=" + scrapedplot)

        # Añade al listado de XBMC
        xbmctools.addnewfolder(__channel__, "listmirrors", category,
                               scrapedtitle, scrapedurl, scrapedthumbnail,
                               scrapedplot)

    # ------------------------------------------------------
    # Extrae la página siguiente
    # ------------------------------------------------------
    patron = "<a href='([^']+)'>Siguiente</a>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG:
        scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = "Pagina siguiente"
        scrapedurl = urlparse.urljoin("http://www.veocine.es/", match)
        scrapedthumbnail = ""
        scrapeddescription = ""

        # Depuracion
        if DEBUG:
            logger.info("scrapedtitle=" + scrapedtitle)
            logger.info("scrapedurl=" + scrapedurl)
            logger.info("scrapedthumbnail=" + scrapedthumbnail)

        # Añade al listado de XBMC
        xbmctools.addthumbnailfolder(__channel__, scrapedtitle, scrapedurl,
                                     scrapedthumbnail, "mainlist")

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
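# The chain of .replace() calls above, condensed into an equivalent helper
# (editor's sketch; intended to reproduce the original sequence exactly):
def clean_plot(text):
    text = text.replace("/a>", "\n").replace("<br />", "\n")
    for tag in ("<b>", "</b>", "<i>", "</i>",
                "<!--colorstart:#589BB9-->", "<!--colorend-->",
                "<!--/colorend-->", "<!--/colorstart-->",
                '<span style="color:#589BB9">', "</span>"):
        text = text.replace(tag, "")
    return text.strip()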
Beispiel #37
def lista(item):
    logger.info("pelisalacarta.channels.divxatope lista")
    itemlist = []
    '''
    <li style="width:136px;height:263px;margin:0px 15px 0px 0px;">
    <a href="http://www.divxatope.com/descargar/374639_ahi-os-quedais-web-screener-r6-español-castellano-2014.html" title="Descargar Ahi Os Quedais Web  en DVD-Screener torrent gratis"><div  class='ribbon-estreno' ></div>                           <img class="torrent-image" src="http://www.divxatope.com/uploads/torrents/images/thumbnails2/6798_ahi--os--quedais.jpg" alt="Descargar Ahi Os Quedais Web  en DVD-Screener torrent gratis" style="width:130px;height:184px;" />
    <h2 style="float:left;width:100%;margin:3px 0px 0px 0px;padding:0px 0px 3px 0px;line-height:12px;font-size:12px;height:23px;border-bottom:solid 1px #C2D6DB;">Ahi Os Quedais Web </h2>
    <strong style="float:left;width:100%;text-align:center;color:#000;margin:0px;padding:3px 0px 0px 0px;font-size:11px;line-height:12px;">DVD-Screener<br>Español Castellano                                                       </strong>
    </a>
    </li>
    '''

    # Descarga la pagina
    if item.extra == "":
        data = scrapertools.cachePage(item.url)
    else:
        data = scrapertools.cachePage(item.url, post=item.extra)
    #logger.info("data="+data)

    patron = '<li [^<]+'
    patron += '<a href="([^"]+)".*?'
    patron += '<img class="[^"]+" src="([^"]+)"[^<]+'
    patron += '<h2[^>]+">([^<]+)</h2[^<]+'
    patron += '<strong[^>]+>(.*?)</strong>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle, calidad in matches:
        title = scrapedtitle.strip() + " (" + scrapertools.htmlclean(
            calidad) + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        plot = ""
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=title,
                 fulltitle=title,
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 folder=True))

    next_page_url = scrapertools.find_single_match(
        data, '<li><a href="([^"]+)">Next</a></li>')
    if next_page_url != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="lista",
                 title=">> Página siguiente",
                 url=urlparse.urljoin(item.url, next_page_url),
                 folder=True))
    else:
        next_page_url = scrapertools.find_single_match(
            data,
            '<li><input type="button" class="btn-submit" value="Siguiente" onClick="paginar..(\d+)'
        )
        if next_page_url != "":
            itemlist.append(
                Item(channel=__channel__,
                     action="lista",
                     title=">> Página siguiente",
                     url=item.url,
                     extra=item.extra + "&pg=" + next_page_url,
                     folder=True))

    return itemlist
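# Toy check of the two "next page" hooks used above (stdlib re; the snippets
# are hypothetical):
import re
print(re.findall('<li><a href="([^"]+)">Next</a></li>',
                 '<li><a href="/peliculas/page/2">Next</a></li>'))  # ['/peliculas/page/2']
print(re.findall("paginar..(\d+)", 'onClick="paginar(\'2\')"'))     # ['2']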
Beispiel #38
def play_video(item,
               desdefavoritos=False,
               desdedescargados=False,
               desderrordescargas=False,
               strmfile=False):
    from core import servertools

    logger.info("streamondemand.platformcode.xbmctools play_video")
    #logger.info(item.tostring('\n'))

    try:
        item.server = item.server.lower()
    except:
        item.server = ""

    if item.server == "":
        item.server = "directo"

    view = False
    # Abre el diálogo de selección
    opciones = []
    default_action = config.get_setting("default_action")
    logger.info("default_action=" + default_action)

    # Si no es el modo normal, no muestra el diálogo porque cuelga XBMC
    muestra_dialogo = (config.get_setting("player_mode") == "0"
                       and not strmfile)

    # Extrae las URL de los vídeos, y si no puedes verlo te dice el motivo
    video_urls, puedes, motivo = servertools.resolve_video_urls_for_playing(
        item.server, item.url, item.password, muestra_dialogo)

    # Si puedes ver el vídeo, presenta las opciones
    if puedes:

        for video_url in video_urls:
            opciones.append(
                config.get_localized_string(30151) + " " + video_url[0])

        if item.server == "local":
            opciones.append(config.get_localized_string(30164))
        else:
            opcion = config.get_localized_string(30153)
            opciones.append(opcion)  # "Descargar"

            if item.channel == "favoritos":
                opciones.append(config.get_localized_string(
                    30154))  # "Quitar de favoritos"
            else:
                opciones.append(
                    config.get_localized_string(30155))  # "Añadir a favoritos"

            if not strmfile:
                opciones.append(config.get_localized_string(
                    30161))  # "Añadir a Biblioteca"

            if item.channel != "descargas":
                opciones.append(config.get_localized_string(
                    30157))  # "Añadir a lista de descargas"
            else:
                if item.category == "errores":
                    opciones.append(config.get_localized_string(
                        30159))  # "Borrar descarga definitivamente"
                    opciones.append(config.get_localized_string(
                        30160))  # "Pasar de nuevo a lista de descargas"
                else:
                    opciones.append(config.get_localized_string(
                        30156))  # "Quitar de lista de descargas"

            if config.get_setting("jdownloader_enabled") == "true":
                opciones.append(config.get_localized_string(
                    30158))  # "Enviar a JDownloader"

        if default_action == "3":
            seleccion = len(opciones) - 1

        # Busqueda de trailers en youtube
        if item.channel not in ["Trailer", "ecarteleratrailers"]:
            opciones.append(
                config.get_localized_string(30162))  # "Buscar Trailer"

    # Si no puedes ver el vídeo te informa
    else:
        if item.server != "":
            advertencia = xbmcgui.Dialog()
            if "<br/>" in motivo:
                resultado = advertencia.ok(
                    "Non è possibile guardare il video perché...",
                    motivo.split("<br/>")[0],
                    motivo.split("<br/>")[1], item.url)
            else:
                resultado = advertencia.ok(
                    "Non è possibile guardare il video perché...", motivo,
                    item.url)
        else:
            advertencia = xbmcgui.Dialog()
            resultado = advertencia.ok(
                "Non è possibile guardare il video perché...",
                "Il server che lo ospita non è",
                "ancora supportato da streamondemand", item.url)

        if item.channel == "favoritos":
            opciones.append(
                config.get_localized_string(30154))  # "Quitar de favoritos"

        if item.channel == "descargas":
            if item.category == "errores":
                opciones.append(config.get_localized_string(
                    30159))  # "Borrar descarga definitivamente"
            else:
                opciones.append(config.get_localized_string(
                    30156))  # "Quitar de lista de descargas"

        if len(opciones) == 0:
            return

    # Si la accion por defecto es "Preguntar", pregunta
    if default_action == "0":  # and server!="torrent":
        dia = xbmcgui.Dialog()
        seleccion = dia.select(config.get_localized_string(30163),
                               opciones)  # "Elige una opción"
        #dia.close()
        '''
        elif default_action=="0" and server=="torrent":
            advertencia = xbmcgui.Dialog()
            logger.info("video_urls[0]="+str(video_urls[0][1]))
            if puedes and ('"status":"COMPLETED"' in video_urls[0][1] or '"percent_done":100' in video_urls[0][1]):
                listo  = "y está listo para ver"
            else:
                listo = "y se está descargando"
            resultado = advertencia.ok( "Torrent" , "El torrent ha sido añadido a la lista" , listo )
            seleccion=-1
        '''
    elif default_action == "1":
        seleccion = 0
    elif default_action == "2":
        seleccion = len(video_urls) - 1
    elif default_action == "3":
        seleccion = seleccion
    else:
        seleccion = 0

    logger.info("seleccion=%d" % seleccion)
    logger.info("seleccion=%s" % opciones[seleccion])

    # No ha elegido nada, lo más probable porque haya dado al ESC
    if seleccion == -1:
        #Para evitar el error "Uno o más elementos fallaron" al cancelar la selección desde fichero strm
        listitem = xbmcgui.ListItem(item.title,
                                    iconImage="DefaultVideo.png",
                                    thumbnailImage=item.thumbnail)
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), False,
                                  listitem)  # JUR Added
        #if config.get_setting("subtitulo") == "true":
        #    config.set_setting("subtitulo", "false")
        return

    if opciones[seleccion] == config.get_localized_string(
            30158):  # "Enviar a JDownloader"
        #d = {"web": url}urllib.urlencode(d)
        from core import scrapertools

        if item.subtitle != "":
            data = scrapertools.cachePage(
                config.get_setting("jdownloader") +
                "/action/add/links/grabber0/start1/web=" + item.url + " " +
                item.thumbnail + " " + item.subtitle)
        else:
            data = scrapertools.cachePage(
                config.get_setting("jdownloader") +
                "/action/add/links/grabber0/start1/web=" + item.url + " " +
                item.thumbnail)

        return

    if opciones[seleccion] == config.get_localized_string(30158).replace(
            "jDownloader", "pyLoad"):  # "Enviar a pyLoad"
        logger.info("Enviando a pyload...")

        if item.show != "":
            package_name = item.show
        else:
            package_name = "streamondemand"

        from core import pyload_client
        pyload_client.download(url=item.url, package_name=package_name)
        return

    elif opciones[seleccion] == config.get_localized_string(
            30164):  # Borrar archivo en descargas
        # En "extra" está el nombre del fichero en favoritos
        os.remove(item.url)
        xbmc.executebuiltin("Container.Refresh")
        return

    # Ha elegido uno de los vídeos
    elif seleccion < len(video_urls):
        mediaurl = video_urls[seleccion][1]
        if len(video_urls[seleccion]) > 3:
            wait_time = video_urls[seleccion][2]
            item.subtitle = video_urls[seleccion][3]
        elif len(video_urls[seleccion]) > 2:
            wait_time = video_urls[seleccion][2]
        else:
            wait_time = 0
        view = True

    # Descargar
    elif opciones[seleccion] == config.get_localized_string(
            30153):  # "Descargar"

        download_title = item.fulltitle
        if item.hasContentDetails == "true":
            download_title = item.contentTitle

        # El vídeo de más calidad es el último
        mediaurl = video_urls[len(video_urls) - 1][1]

        from core import downloadtools
        keyboard = xbmc.Keyboard(download_title)
        keyboard.doModal()
        if (keyboard.isConfirmed()):
            download_title = keyboard.getText()
            devuelve = downloadtools.downloadbest(video_urls, download_title)

            if devuelve == 0:
                advertencia = xbmcgui.Dialog()
                resultado = advertencia.ok("Download",
                                           "Scaricato con successo")
            elif devuelve == -1:
                advertencia = xbmcgui.Dialog()
                resultado = advertencia.ok("Download", "Download interrotto")
            else:
                advertencia = xbmcgui.Dialog()
                resultado = advertencia.ok("Download", "Errore nel download")
        return

    elif opciones[seleccion] == config.get_localized_string(
            30154):  #"Quitar de favoritos"
        from channels import favoritos
        # En "extra" está el nombre del fichero en favoritos
        favoritos.deletebookmark(urllib.unquote_plus(item.extra))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(
            config.get_localized_string(30102), item.title,
            config.get_localized_string(30105))  # 'Se ha quitado de favoritos'

        xbmc.executebuiltin("Container.Refresh")
        return

    elif opciones[seleccion] == config.get_localized_string(
            30159):  #"Borrar descarga definitivamente"
        from channels import descargas
        descargas.delete_error_bookmark(urllib.unquote_plus(item.extra))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(
            config.get_localized_string(30101), item.title,
            config.get_localized_string(30106))  # 'Se ha quitado de la lista'
        xbmc.executebuiltin("Container.Refresh")
        return

    elif opciones[seleccion] == config.get_localized_string(
            30160):  #"Pasar de nuevo a lista de descargas":
        from channels import descargas
        descargas.mover_descarga_error_a_pendiente(
            urllib.unquote_plus(item.extra))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(
            config.get_localized_string(30101), item.title,
            config.get_localized_string(
                30107))  # 'Ha pasado de nuevo a la lista de descargas'
        return

    elif opciones[seleccion] == config.get_localized_string(
            30155):  #"Añadir a favoritos":
        from channels import favoritos
        from core import downloadtools

        download_title = item.fulltitle
        download_thumbnail = item.thumbnail
        download_plot = item.plot

        if item.hasContentDetails == "true":
            download_title = item.contentTitle
            download_thumbnail = item.contentThumbnail
            download_plot = item.contentPlot

        keyboard = xbmc.Keyboard(
            downloadtools.limpia_nombre_excepto_1(download_title) + " [" +
            item.channel + "]")
        keyboard.doModal()
        if keyboard.isConfirmed():
            title = keyboard.getText()
            favoritos.savebookmark(titulo=title,
                                   url=item.url,
                                   thumbnail=download_thumbnail,
                                   server=item.server,
                                   plot=download_plot,
                                   fulltitle=title)
            advertencia = xbmcgui.Dialog()
            resultado = advertencia.ok(
                config.get_localized_string(30102), title,
                config.get_localized_string(
                    30108))  # 'se ha añadido a favoritos'
        return

    elif opciones[seleccion] == config.get_localized_string(
            30156):  #"Quitar de lista de descargas":
        # La categoría es el nombre del fichero en la lista de descargas
        from channels import descargas
        descargas.deletebookmark((urllib.unquote_plus(item.extra)))

        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(
            config.get_localized_string(30101), item.title,
            config.get_localized_string(
                30106))  # 'Se ha quitado de lista de descargas'

        xbmc.executebuiltin("Container.Refresh")
        return

    elif opciones[seleccion] == config.get_localized_string(
            30157):  #"Añadir a lista de descargas":
        from core import downloadtools

        download_title = item.fulltitle
        download_thumbnail = item.thumbnail
        download_plot = item.plot

        if item.hasContentDetails == "true":
            download_title = item.contentTitle
            download_thumbnail = item.contentThumbnail
            download_plot = item.contentPlot

        keyboard = xbmc.Keyboard(
            downloadtools.limpia_nombre_excepto_1(download_title))
        keyboard.doModal()
        if keyboard.isConfirmed():
            download_title = keyboard.getText()

            from channels import descargas
            descargas.savebookmark(titulo=download_title,
                                   url=item.url,
                                   thumbnail=download_thumbnail,
                                   server=item.server,
                                   plot=download_plot,
                                   fulltitle=download_title)

            advertencia = xbmcgui.Dialog()
            resultado = advertencia.ok(
                config.get_localized_string(30101), download_title,
                config.get_localized_string(
                    30109))  # 'se ha añadido a la lista de descargas'
        return

    elif opciones[seleccion] == config.get_localized_string(
            30161):  # "Añadir a Biblioteca":  # Library

        titulo = item.fulltitle
        if titulo == "":
            titulo = item.title
        #library.savelibrary(titulo,item.url,item.thumbnail,item.server,item.plot,canal=item.channel,category=item.category,Serie=item.show)
        # TODO ¿SOLO peliculas?
        #logger.debug(item.tostring('\n'))
        new_item = item.clone(title=titulo,
                              action="play_from_library",
                              category="Cine",
                              fulltitle=item.fulltitle,
                              channel=item.channel)
        #logger.debug(new_item.tostring('\n'))
        insertados, sobreescritos, fallidos = library.save_library_movie(
            new_item)

        advertencia = xbmcgui.Dialog()
        if fallidos == 0:
            advertencia.ok(config.get_localized_string(30131), titulo,
                           config.get_localized_string(
                               30135))  # 'se ha añadido a la biblioteca'
        return

    elif opciones[seleccion] == config.get_localized_string(
            30162):  #"Buscar Trailer":
        config.set_setting("subtitulo", "false")
        xbmc.executebuiltin("XBMC.RunPlugin(%s?%s)" %
                            (sys.argv[0],
                             item.clone(channel="trailertools",
                                        action="buscartrailer",
                                        contextual=True).tourl()))
        return

    # Si no hay mediaurl es porque el vídeo no está :)
    logger.info("streamondemand.platformcode.xbmctools mediaurl=" + mediaurl)
    if mediaurl == "":
        if server == "unknown":
            alertUnsopportedServer()
        else:
            alertnodisponibleserver(item.server)
        return

    # Si hay un tiempo de espera (como en megaupload), lo impone ahora
    if wait_time > 0:
        continuar = handle_wait(wait_time, item.server, "Cargando vídeo...")
        if not continuar:
            return

    # Obtención datos de la Biblioteca (solo strms que estén en la biblioteca)
    if strmfile:
        xlistitem = getLibraryInfo(mediaurl)
    else:
        play_title = item.fulltitle
        play_thumbnail = item.thumbnail
        play_plot = item.plot

        if item.hasContentDetails == "true":
            play_title = item.contentTitle
            play_thumbnail = item.contentThumbnail
            play_plot = item.contentPlot

        try:
            xlistitem = xbmcgui.ListItem(play_title,
                                         iconImage="DefaultVideo.png",
                                         thumbnailImage=play_thumbnail,
                                         path=mediaurl)
        except:
            xlistitem = xbmcgui.ListItem(play_title,
                                         iconImage="DefaultVideo.png",
                                         thumbnailImage=play_thumbnail)

        xlistitem.setInfo(
            "video", {
                "Title": play_title,
                "Plot": play_plot,
                "Studio": item.channel,
                "Genre": item.category
            })

        #set_infoLabels(listitem,plot) # Modificacion introducida por super_berny para añadir infoLabels al ListItem

    # Lanza el reproductor

    if strmfile and not item.from_biblioteca:  #Si es un fichero strm no hace falta el play
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), True, xlistitem)
        if item.subtitle != "":
            xbmc.sleep(2000)
            xbmc.Player().setSubtitles(item.subtitle)

    #Movido del conector "torrent" aqui
    elif item.server == "torrent":

        #Opciones disponibles para Reproducir torrents
        torrent_options = []
        torrent_options.append(["Client  (necessario libtorrent)"])
        torrent_options.append(["Client interno MCT (necessario libtorrent)"])

        #Plugins externos se pueden añadir otros
        if xbmc.getCondVisibility(
                'System.HasAddon("plugin.video.xbmctorrent")'):
            torrent_options.append([
                "Plugin esterno: xbmctorrent",
                "plugin://plugin.video.xbmctorrent/play/%s"
            ])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.pulsar")'):
            torrent_options.append([
                "Plugin esterno: pulsar",
                "plugin://plugin.video.pulsar/play?uri=%s"
            ])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.quasar")'):
            torrent_options.append([
                "Plugin esterno: quasar",
                "plugin://plugin.video.quasar/play?uri=%s"
            ])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.stream")'):
            torrent_options.append([
                "Plugin esterno: stream",
                "plugin://plugin.video.stream/play/%s"
            ])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.torrenter")'):
            torrent_options.append([
                "Plugin esterno: torrenter",
                "plugin://plugin.video.torrenter/?action=playSTRM&url=%s"
            ])
        if xbmc.getCondVisibility('System.HasAddon("plugin.video.torrentin")'):
            torrent_options.append([
                "Plugin esterno: torrentin",
                "plugin://plugin.video.torrentin/?uri=%s&image="
            ])

        if len(torrent_options) > 1:
            seleccion = xbmcgui.Dialog().select(
                "Aprire torrent con...",
                [opcion[0] for opcion in torrent_options])
        else:
            seleccion = 0

        #Plugins externos
        if seleccion > 1:
            mediaurl = urllib.quote_plus(item.url)
            xbmc.executebuiltin("PlayMedia(" +
                                torrent_options[seleccion][1] % mediaurl + ")")

        if seleccion == 1:
            from platformcode import mct
            mct.play(mediaurl,
                     xbmcgui.ListItem("",
                                      iconImage=item.thumbnail,
                                      thumbnailImage=item.thumbnail),
                     subtitle=item.subtitle)

        #Reproductor propio (libtorrent)
        if seleccion == 0:
            import time
            videourl = None
            played = False

            #Importamos el cliente
            from btserver import Client

            #Iniciamos el cliente:
            c = Client(url=mediaurl,
                       is_playing_fnc=xbmc.Player().isPlaying,
                       wait_time=None,
                       timeout=5,
                       temp_path=os.path.join(config.get_data_path(),
                                              "torrent"))

            #Mostramos el progreso
            progreso = xbmcgui.DialogProgress()
            progreso.create("streamondemand - Torrent", "Avviando...")

            #Mientras el progreso no sea cancelado ni el cliente cerrado
            while not progreso.iscanceled() and not c.closed:

                try:
                    #Obtenemos el estado del torrent
                    s = c.status

                    #Montamos las tres lineas con la info del torrent
                    txt = '%.2f%% de %.1fMB %s | %.1f kB/s' % \
                    (s.progress_file, s.file_size, s.str_state, s._download_rate)
                    txt2 =  'S: %d(%d) P: %d(%d) | DHT:%s (%d) | Trakers: %d' % \
                    (s.num_seeds, s.num_complete, s.num_peers, s.num_incomplete, s.dht_state, s.dht_nodes, s.trackers)
                    txt3 = 'Origen Peers TRK: %d DHT: %d PEX: %d LSD %d ' % \
                    (s.trk_peers,s.dht_peers, s.pex_peers, s.lsd_peers)

                    progreso.update(s.buffer, txt, txt2, txt3)

                    time.sleep(1)

                    #Si el buffer se ha llenado y la reproduccion no ha sido iniciada, se inicia
                    if s.buffer == 100 and not played:

                        #Cerramos el progreso
                        progreso.close()

                        #Obtenemos el playlist del torrent
                        videourl = c.get_play_list()

                        #Iniciamos el reproductor
                        playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
                        playlist.clear()
                        playlist.add(videourl, xlistitem)
                        xbmcPlayer = xbmc.Player()
                        xbmcPlayer.play(playlist)

                        #Marcamos como reproducido para que no se vuelva a iniciar
                        played = True

                        #Y esperamos a que el reproductor se cierre
                        while xbmc.Player().isPlaying():
                            time.sleep(1)

                        #Cuando este cerrado,  Volvemos a mostrar el dialogo
                        progreso.create("streamondemand - Torrent",
                                        "Avviando...")

                except:
                    import traceback
                    logger.info(traceback.format_exc())
                    break

            progreso.update(100, "Terminato, elimina dati", " ", " ")

            #Detenemos el cliente
            if not c.closed:
                c.stop()

            #Y cerramos el progreso
            progreso.close()

            return

    else:
        logger.info("player_mode=" + config.get_setting("player_mode"))
        logger.info("mediaurl=" + mediaurl)
        if config.get_setting(
                "player_mode") == "3" or "megacrypter.com" in mediaurl:
            import download_and_play
            download_and_play.download_and_play(
                mediaurl, "download_and_play.tmp",
                config.get_setting("downloadpath"))
            return

        elif config.get_setting("player_mode") == "0" or (
                config.get_setting("player_mode") == "3"
                and mediaurl.startswith("rtmp")):
            # Añadimos el listitem a una lista de reproducción (playlist)
            playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
            playlist.clear()
            playlist.add(mediaurl, xlistitem)

            # Reproduce
            playersettings = config.get_setting('player_type')
            logger.info(
                "streamondemand.platformcode.xbmctools playersettings=" +
                playersettings)

            if config.get_system_platform() == "xbox":
                player_type = xbmc.PLAYER_CORE_AUTO
                if playersettings == "0":
                    player_type = xbmc.PLAYER_CORE_AUTO
                    logger.info(
                        "streamondemand.platformcode.xbmctools PLAYER_CORE_AUTO"
                    )
                elif playersettings == "1":
                    player_type = xbmc.PLAYER_CORE_MPLAYER
                    logger.info(
                        "streamondemand.platformcode.xbmctools PLAYER_CORE_MPLAYER"
                    )
                elif playersettings == "2":
                    player_type = xbmc.PLAYER_CORE_DVDPLAYER
                    logger.info(
                        "streamondemand.platformcode.xbmctools PLAYER_CORE_DVDPLAYER"
                    )

                xbmcPlayer = xbmc.Player(player_type)
            else:
                xbmcPlayer = xbmc.Player()

            xbmcPlayer.play(playlist)

            if item.channel == "cuevana" and item.subtitle != "":
                logger.info("subtitulo=" + subtitle)
                if item.subtitle != "" and (
                        opciones[seleccion].startswith("Ver")
                        or opciones[seleccion].startswith("Watch")):
                    logger.info(
                        "streamondemand.platformcode.xbmctools Con subtitulos")
                    setSubtitles()

        elif config.get_setting("player_mode") == "1":
            logger.info("mediaurl :" + mediaurl)
            logger.info("Tras setResolvedUrl")
            xbmcplugin.setResolvedUrl(int(sys.argv[1]), True,
                                      xbmcgui.ListItem(path=mediaurl))

        elif config.get_setting("player_mode") == "2":
            xbmc.executebuiltin("PlayMedia(" + mediaurl + ")")

    if item.subtitle != "" and view:
        logger.info("Subtítulos externos: " + item.subtitle)
        xbmc.Player().setSubtitles(item.subtitle)
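# Editor's condensation of the default_action mapping implemented above
# (sketch only; -1 stands for "ask the user via dialog", and download_index
# is the index computed before the "Buscar Trailer" entry is appended):
def pick_option(default_action, num_video_urls, download_index):
    if default_action == "0":
        return -1
    if default_action == "1":
        return 0                   # first URL, lowest quality
    if default_action == "2":
        return num_video_urls - 1  # last URL, highest quality
    if default_action == "3":
        return download_index
    return 0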
Beispiel #39
def enlaces(item):
    logger.info("[somosmovies.py] enlaces")
    itemlist = []

    data = scrapertools.cachePage(item.url)
    '''
    <fieldset id="enlaces">
    <legend>Enlaces</legend><br />
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 1</b>: <small>30 Days Without an Accident</small></div><div class="tres"><a href="http://bit.ly/1aIiGdq" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/GY8PWg" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/15CGs8G" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/17RTYZl" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/ognvK7" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 2</b>: Infected</div><div class="tres"><a href="http://bit.ly/1fyubIg" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1a9voBA" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/19pmMpo" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1aYd0be" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/rI9OL7" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 3</b>: Isolation</div><div class="tres"><a href="http://bit.ly/1fyucfd" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/17UzXLX" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/17tmo9Y" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1eqtMEL" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/2f3Jj5" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 4</b>: Indifference</div><div class="tres"><a href="http://bit.ly/1aPKmwf" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/185vLcB" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/1iJ5mGm" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1hadtPR" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/lYoQoo" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 5</b>: Internment</div><div class="tres"><a href="http://bit.ly/1aYcERL" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/HSRa1F" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/1dilJZe" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1iG6sWi" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/0tHIKr" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 6</b>: Live Bait</div><div class="tres"><a href="http://bit.ly/17Z1EUf" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1ddc0Ym" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/I0GBKK" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/1jx50TF" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/mgXyof" target="_blank">TurboBit</a></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 7</b>: Dead Weight</div><div class="tres"><a href="http://bit.ly/17UwbIi" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/17NZj1D" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/1aTE4vw" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://bit.ly/IhQa8C" target="_blank">180upload</a> <b class="sep">|</b> <a href="http://goo.gl/ZiSH47" target="_blank">TurboBit</a> <b style="font-style:italic;color:red;">Nuevo!</b></div>
    </div>
    <div class="clearfix uno">
    <div class="dos"><b> Episodio 8</b>: Too Far Gone</div><div class="tres"><i style="font-style:italic">Disponible el 02 de Diciembre.</i></div>
    </div>
    </fieldset>
    '''
    '''
    <fieldset id="enlaces">
    <h5 class='h5'>Season 1</h5>
    <div class="clearfix uno">
    <div class="dos"><b> Capítulo 1</b>: Yesterday's Jam</div><div class="tres"><a href="http://bit.ly/14OorEU" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/Z2uWNc" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/11nIqHi" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/XYo0jN" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 2</b>: Calamity Jen</div><div class="tres"><a href="http://bit.ly/XecqUq" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10algD1" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/YTsGe4" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/16xaKYZ" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 3</b>: Fifty-Fifty</div><div class="tres"><a href="http://bit.ly/12i5mq8" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10aljyA" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/12gnyo1" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/10xM8LC" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 4</b>: The Red Door</div><div class="tres"><a href="http://bit.ly/10al5Yg" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10wyHMz" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/10rHP5P" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/10xM9PW" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 5</b>: The Haunting of Bill Crouse</div><div class="tres"><a href="http://bit.ly/10wyAjT" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/XecCmO" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/XYoPt0" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/14OpPXW" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 6</b>: Aunt Irma Visits</div><div class="tres"><a href="http://bit.ly/17dCeEj" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/12i5JRM" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/10amVIA" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/17dDdUU" target="_blank">FreakShare</a></div>
    </div>
    <h5 class='h5'>Season 2</h5>
    <div class="clearfix uno">
    <div class="dos"><b> Capítulo 1</b>: The Work Outing</div><div class="tres"><a href="http://bit.ly/XOrCcl" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/10wDjCe" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/12ibnDi" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/17dEXgU" target="_blank">FreakShare</a></div>
    <div class="dos"><b> Capítulo 2</b>: Return of the Golden Child</div><div class="tres"><a href="http://bit.ly/16p6Tvh" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/13SeTJq" target="_blank">PutLocker</a> <b class="sep">|</b> <a href="http://bit.ly/10zwtuf" target="_blank">SockShare</a> <b class="sep">|</b> <a href="http://bit.ly/XqnsZ7" target="_blank">FreakShare</a></div>
    '''
    '''
    <fieldset id="enlaces">
    <legend>Enlaces</legend><br />
    <div class="clearfix uno">
    <div class="dos">
    <b>AVI</b> <small>480p</small></div>
    <div class="tres">
    <a href="http://bit.ly/1dQbvlS" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/Nd96Hh" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1d3a534" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://goo.gl/TOipXB" target="_blank">TurboBit</a> <b class="sep">|</b> <a href="http://bit.ly/1oUWtPP" target="_blank">FreakShare</a>
    </div>
    </div>
    <div class="clearfix uno">
    <div class="dos">
    <b>MP4</b> <small>1080p</small></div>
    <div class="tres">
    <a href="http://bit.ly/1c40BEG" target="_blank">1Fichier</a> <b class="sep">|</b> <a href="http://bit.ly/OcZDki" target="_blank">MEGA</a> <b class="sep">|</b> <a href="http://bit.ly/1gjElZY" target="_blank">4Shared</a> <b class="sep">|</b> <a href="http://goo.gl/fc43B2" target="_blank">TurboBit</a> <b class="sep">|</b> <a href="http://bit.ly/1e9GxAq" target="_blank">FreakShare</a>
    </div>
    </div>
    </fieldset>
    '''
    # Se queda con la caja de enlaces
    data = scrapertools.get_match(
        data,
        '<fieldset id="enlaces"[^<]+<legend>Enlaces</legend>(.*?)</fieldset>')
    patron = '<div class="dos"[^<]+<b>([^<]+)</b>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for title in matches:
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title="Enlaces " + title.strip(),
                 url=item.url,
                 extra=title,
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 folder=True))

    return itemlist
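# Toy run of the links-box extraction above, using re directly instead of
# scrapertools.get_match (the HTML is a trimmed, hypothetical version of the
# real page):
import re
html = ('<fieldset id="enlaces"><legend>Enlaces</legend>'
        '<div class="dos"><b> Episodio 1</b>: Pilot</div></fieldset>')
box = re.search('<fieldset id="enlaces"[^<]+<legend>Enlaces</legend>(.*?)</fieldset>',
                html, re.DOTALL).group(1)
print(re.findall('<div class="dos"[^<]+<b>([^<]+)</b>', box))  # [' Episodio 1']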
Beispiel #40
def novedades_documentales(item):
    logger.info("[shurweb.py] novedades_documentales")
    data = scrapertools.cachePage(item.url)
    data = scrapertools.unescape(data)
    data = scrapertools.get_match(data,'<div class="tab-pane fade" id="docus">(.*?)<div class="panel panel-primary">')
    return peliculas(item,data=data)
Beispiel #41
def peliculas(item):
    logger.info("[somosmovies.py] peliculas")
    itemlist = []

    # Descarga la página
    data = scrapertools.cachePage(item.url)
    logger.info("data=" + data)

    # Extrae las entradas
    '''
    <article CLASS='post crp'>
    <header><h3 CLASS='post-title entry-title item_name'>
    <a href='http://www.somosmovies.com/2013/11/elysium-2013_24.html' title='Elysium (2013)'>Elysium (2013)</a>
    </h3>
    </header>
    <section CLASS='post-body entry-content clearfix'>
    <a href='http://www.somosmovies.com/2013/11/elysium-2013_24.html' title='Elysium (2013)'><center>
    <img border="0" src="http://1.bp.blogspot.com/-J15zDm0KXVA/UoOmwu563kI/AAAAAAAALqw/zBww3WoCyEw/s1600/Poster.Elysium.2013.jpg" style="display: block; height: 400px; width: 312px;">
    </center>
    </a>
    <div CLASS='es-LAT'></div>
    <div CLASS='pie-post'>
    <div style='float:left'>
    <div class='fb-like' data-href='http://www.somosmovies.com/2013/11/elysium-2013_24.html' data-layout='button_count' data-send='false' data-show-faces='false' data-width='120'></div>
    </div>
    </div>
    <div STYLE='clear: both;'></div>
    </section>
    </article>
    '''
    patron = "<article(.*?)</article>"
    matches = re.compile(patron, re.DOTALL).findall(data)

    for match in matches:
        logger.info("match=" + match)
        scrapedtitle = scrapertools.get_match(
            match, "<a href='[^']+' title='([^']+)'")
        scrapedurl = urlparse.urljoin(
            item.url,
            scrapertools.get_match(match, "<a href='([^']+)' title='[^']+'"))
        scrapedplot = ""
        try:
            scrapedthumbnail = urlparse.urljoin(
                item.url,
                scrapertools.get_match(match, '<img border="0" src="([^"]+)"'))
        except:
            scrapedthumbnail = ""
        try:
            idioma = scrapertools.get_match(
                match, "</center[^<]+</a[^<]+<div CLASS='([^']+)'></div>")
            scrapedtitle = scrapedtitle + " (" + idioma.upper() + ")"
        except:
            pass
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        # Añade a XBMC
        itemlist.append(
            Item(channel=__channel__,
                 action="enlaces",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    # Extrae el paginador
    #<a CLASS='blog-pager-older-link' href='http://www.somosmovies.com/search?updated-max=2012-08-22T23:10:00-05:00&amp;max-results=16' id='Blog1_blog-pager-older-link' title='Siguiente Película'>Siguiente &#187;</a>
    patronvideos = "<a CLASS='blog-pager-older-link' href='([^']+)' id='Blog1_blog-pager-older-link' title='Siguiente"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        #http://www.somosmovies.com/search/label/Peliculas?updated-max=2010-12-20T08%3A27%3A00-06%3A00&max-results=12
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        scrapedurl = scrapedurl.replace("%3A", ":")
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title=">> Página siguiente",
                 url=scrapedurl,
                 folder=True))

    return itemlist
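# The Blogger paginator link arrives with ":" percent-encoded; the cleanup
# above boils down to this (example value from the comment in the code):
url = "http://www.somosmovies.com/search/label/Peliculas?updated-max=2010-12-20T08%3A27%3A00-06%3A00&max-results=12"
print(url.replace("%3A", ":"))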
Beispiel #42
def novedades(item):
    logger.info("[filmenoi.py] novedades")
    itemlist = []

    # Descarga la página
    data = scrapertools.cachePage(item.url)

    #esta es para web situl Cinemaxx.rs
    #patron  = '<ul class="pm-ul-browse-videos thumbnails" id="pm-grid">[^<]+'
    #patron = '<li>[^<]+'
    #patron += '<div class="pm-li-video">[^<]+'
    #patron += '.*?<a href="([^"]+)".*?[^<]+<img src="([^"]+)" alt="([^"]+)".*?</li>'

    #esta es para web Filme-noi.com

    #patron = '<div class="home_posts_thumbnail">[^<]+'
    #patron += '<a href="([^"]+)".*?[^<]+<img src="([^"]+)" alt="([^"]+)".*?</div>'
    patron = '<div class="home_posts_thumbnail">[^<]+'
    patron += '<a href="([^"]+)"[^<]+<img src="([^"]+)" alt="([^"]+)"'

    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        thumbnail = scrapertools.find_single_match(
            scrapedthumbnail,
            "(http\://www.filme-net.com/wp-content/uploads/.*?.jpg)")
        scrapedplot = ""
        #if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        if (DEBUG):
            logger.info(
                "url=[" + scrapedurl + "], thumbnail=[" + scrapedthumbnail +
                "], title=[" + scrapedtitle +
                "]")  # Falla en sacar las imagenes por que tienen espacios
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    patron = "<a href='([^']+)'>\&rsaquo\;</a>"  #Falla no pone pagina siguente
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)
    #if DEBUG: scrapertools.printMatches(item.url)

    for match in matches:
        scrapedtitle = "> Inainte"
        scrapedplot = ""
        scrapedurl = urlparse.urljoin(item.url, match)
        scrapedthumbnail = ""
        if (DEBUG):
            logger.info("url=[" + scrapedurl + "], thumbnail=[" +
                        scrapedthumbnail + "], title=[" + scrapedtitle + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="novedades",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 folder=True))

    return itemlist
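
# A self-contained check of the Filme-noi patron used above, run against a
# hypothetical snippet shaped like the site's markup. The "[^<]+" runs are
# what let the match span newlines; re.DOTALL only affects ".".
import re

sample = '''<div class="home_posts_thumbnail">
<a href="http://example.com/film-1/"><img src="http://example.com/t1.jpg" alt="Film 1" /></a>'''
patron = '<div class="home_posts_thumbnail">[^<]+'
patron += '<a href="([^"]+)"[^<]+<img src="([^"]+)" alt="([^"]+)"'
print(re.compile(patron, re.DOTALL).findall(sample))
# -> [('http://example.com/film-1/', 'http://example.com/t1.jpg', 'Film 1')]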
Beispiel #43
def series(item):
    logger.info()
    itemlist = []

    data = scrapertools.cachePage(item.url)

    # Pager
    #<div class="paginator"> &nbsp;<a href="/lista-de-series/C/">&lt;</a>&nbsp;<a href="/lista-de-series/C/">1</a>&nbsp;<strong>2</strong>&nbsp;<a href="/lista-de-series/C/200">3</a>&nbsp;<a href="/lista-de-series/C/200">&gt;</a>&nbsp; </div>
    matches = re.compile('<a href="([^"]+)">></a>', re.S).findall(data)
    #matches = re.compile('<div class="paginator">.*?<a href="([^"]+)".*?</div>', re.S).findall(data)
    if len(matches) > 0:
        paginador = Item(
            channel=item.channel,
            action="series",
            title="!Página siguiente",
            url=urlparse.urljoin(item.url, matches[0]),
            thumbnail=item.thumbnail,
            plot="",
            extra="",
            show=item.show,
            fanart=
            "http://pelisalacarta.mimediacenter.info/fanart/seriesyonkis.jpg")
    else:
        paginador = None

    if paginador is not None:
        itemlist.append(paginador)

    #<div id="main-section" class="lista-series">.*?</div>
    #matches = re.compile('<div id="main-section" class="lista-series">.*?</div>', re.S).findall(data)
    matches = re.compile('<ul id="list-container".*?</ul>', re.S).findall(data)
    #scrapertools.printMatches(matches)
    for match in matches:
        data = match
        break

    #<li><a href="/serie/al-descubierto" title="Al descubierto">Al descubierto</a></li>
    #matches = re.compile('<li>.*?href="([^"]+)".*?title="([^"]+)".*?</li>', re.S).findall(data)
    matches = re.compile('title="([^"]+)" href="([^"]+)"', re.S).findall(data)
    #scrapertools.printMatches(matches)

    for match in matches:
        #itemlist.append( Item(channel=item.channel, action="episodios" , title=match[1], fulltitle=match[1] , url=urlparse.urljoin(item.url,match[0]), thumbnail="", plot="", extra = "" , show=match[1],fanart="http://pelisalacarta.mimediacenter.info/fanart/seriesyonkis.jpg" ))
        itemlist.append(
            Item(
                channel=item.channel,
                action="episodios",
                title=match[0],
                fulltitle=match[0],
                url=urlparse.urljoin(item.url, match[1]),
                thumbnail="",
                plot="",
                extra="",
                show=match[0],
                fanart=
                "http://pelisalacarta.mimediacenter.info/fanart/seriesyonkis.jpg"
            ))

    if len(itemlist) > 0 and config.get_platform() in (
            "wiimc", "rss") and item.channel != "wiideoteca":
        itemlist.append(
            Item(channel=item.channel,
                 action="add_serie_to_wiideoteca",
                 title=">> Agregar Serie a Wiideoteca <<",
                 fulltitle=item.fulltitle,
                 url=item.url,
                 thumbnail="",
                 plot="",
                 extra=""))

    if paginador is not None:
        itemlist.append(paginador)

    return itemlist
Beispiel #44
def findvideos(item):
    logger.info("pelisalacarta.channels.divxatope findvideos")
    itemlist = []

    # Rewrite the URL so it points at the streaming page
    item.url = item.url.replace("divxatope.com/descargar/",
                                "divxatope.com/ver-online/")
    '''
    <div class="box1"><img src='http://www.divxatope.com/uploads/images/gestores/thumbs/1411605666_nowvideo.jpg' width='33' height='33'></div>
    <div class="box2">nowvideo</div>
    <div class="box3">Español Castel</div>
    <div class="box4">DVD-Screene</div>
    <div class="box5"><a href="http://www.nowvideo.ch/video/affd21b283421" rel="nofollow" target="_blank">Ver Online</a></div>
    '''
    # Download the page
    data = scrapertools.cachePage(item.url)

    link = scrapertools.find_single_match(
        data, 'href="http://tumejorserie.*?url=([^"]+)"')
    if link != "":
        #link = "http://www.divxatope.com/"+link
        logger.info("pelisalacarta.channels.divxatope torrent=" + link)
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server="torrent",
                 title="Vídeo en torrent",
                 fulltitle=item.title,
                 url=link,
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 folder=False))

    patron = "<div class=\"box1\"[^<]+<img[^<]+</div[^<]+"
    patron += '<div class="box2">([^<]+)</div[^<]+'
    patron += '<div class="box3">([^<]+)</div[^<]+'
    patron += '<div class="box4">([^<]+)</div[^<]+'
    patron += '<div class="box5">(.*?)</div[^<]+'
    patron += '<div class="box6">([^<]+)<'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    itemlist_ver = []
    itemlist_descargar = []

    for servername, idioma, calidad, scrapedurl, comentarios in matches:
        title = "Mirror en " + servername + " (" + calidad + ")" + " (" + idioma + ")"
        if comentarios.strip() != "":
            title = title + " (" + comentarios.strip() + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = ""
        plot = ""
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "]")
        new_item = Item(channel=__channel__,
                        action="extract_url",
                        title=title,
                        fulltitle=title,
                        url=url,
                        thumbnail=thumbnail,
                        plot=plot,
                        folder=True)
        if comentarios.startswith("Ver en"):
            itemlist_ver.append(new_item)
        else:
            itemlist_descargar.append(new_item)

    for new_item in itemlist_ver:
        itemlist.append(new_item)

    for new_item in itemlist_descargar:
        itemlist.append(new_item)

    if len(itemlist) == 0:
        itemlist = servertools.find_video_items(data=data)
        for videoitem in itemlist:
            videoitem.title = "Enlace encontrado en " + videoitem.server + " (" + scrapertools.get_filename_from_url(
                videoitem.url) + ")"
            videoitem.fulltitle = item.fulltitle
            videoitem.thumbnail = item.thumbnail
            videoitem.channel = __channel__

    return itemlist
Beispiel #45
def getApiTime():
    stime = scrapertools.cachePage("http://servicios.atresplayer.com/api/admin/time",headers=ANDROID_HEADERS)
    return long(stime) / 1000L
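
# The atresplayer time endpoint above returns the server epoch in milliseconds
# as a bare string; the long division truncates it to whole seconds. An
# offline illustration with a made-up payload:
stime = "1438948123456"      # hypothetical response body
print(long(stime) / 1000L)   # -> 1438948123
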
def series(item, extended=True):
    logger.info("pelisalacarta.channels.tumejortv series")

    url = item.url
    # Download the page
    if item.extra == "":
        data = scrapertools.cachePage(url)
    else:
        data = scrapertools.cachePage(url, post=item.extra)
    #logger.info(data)

    # Extract the series
    '''
    <div class="antlo_dir_all_container">
    <div rel="tag" data-href="http://www.tumejortv.com/series/G-C-B---Golfas--Cursis-Y-Beatas-/" class="antlo_dir_pic_container color2" alt="G.C.B. (Golfas, Cursis Y Beatas)" title="G.C.B. (Golfas, Cursis Y Beatas)">
    <div class="antlo_dir_bandera"><img src="http://www.tumejortv.com/images/flags/f_estrenos_nuevo.png" alt="G.C.B. (Golfas, Cursis Y Beatas)" title="G.C.B. (Golfas, Cursis Y Beatas)"/></div>
    <div class="antlo_dir_img_container"><a href="http://www.tumejortv.com/series/G-C-B---Golfas--Cursis-Y-Beatas-/"><img src="http://www.tumejortv.com/images/posters/bXc4yUxJvPx4Hszf.jpeg" alt="G.C.B. (Golfas, Cursis Y Beatas)"/></a>
    <div class="antlo_pic_more_info"><span class="color2">Serie  <img src="http://www.tumejortv.com/images/idioma/antlo-es.png" alt="Español" title="Español"/><img src="http://www.tumejortv.com/images/general/posee_trailer.png" alt="Trailer" title="Trailer" style="margin: 0 3px;"/></span></div></div><p>
    <div class="antlo_dir_box_text_container"><h3 class="antlo_dir_video_title"><span style="font-size:1px;color:#3E3E3E;">Serie </span><br/><a href="http://www.tumejortv.com/series/G-C-B---Golfas--Cursis-Y-Beatas-/"> G.C.B. (Golfas, Cursis Y Beata...</a></h3>
    <h4 class="antlo_dir_video_cat">Temporada <span class="white">1</span> Capítulo <span class="white">10</span></h4><h5 class="antlo_dir_video_calidad">HDTV</h5></div></p></div></div>
    '''
    patron = '<div class="antlo_dir_all_container">'
    patron += '(?:<ul>.*?</ul>)?'
    patron += '<div rel="tag" data-href="([^"]+)".*?'
    patron += '<div class="antlo_dir_img_container"><a[^<]+<img src="([^"]+)"[^>]+></a>'
    patron += '<div class="antlo_pic_more_info"><span class="col[^"]+">([^>]+)<img src="[^"]+" alt="([^"]+)".*?</span></div></div><p>'
    patron += '<div class="antlo_dir_box_text_container"><h3 class="antlo_dir_video_title"><span[^<]+</span><br/><a[^>]+>([^<]+)</a></h3>'
    patron += '<h4 class="antlo_dir_video_cat">(.*?)<h5 class="antlo_dir_video_calidad">([^<]+)</h5'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for url, thumbnail, tipo, idioma, titulo, categoria, calidad in matches:
        scrapedtitle = unicode(titulo.strip(), "utf-8")
        if extended:
            # Decode only the new chunk: scrapedtitle is already unicode, and
            # unicode() raises TypeError when fed a unicode object
            scrapedtitle += unicode(
                " (" + idioma.strip() + ") (" +
                scrapertools.htmlclean(calidad) + ")", "utf-8")
        scrapedurl = url + "capitulos/"
        scrapedthumbnail = thumbnail
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        itemlist.append(
            Item(channel=__channel__,
                 action="findepisodios",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 show=unicode(titulo.strip(), "utf-8")))

    # Sort the alphabetical listings
    if "filtro_letras" in item.url:
        itemlist = sorted(itemlist, key=lambda Item: Item.title)

    # Extract the next page
    patron = '<a href="([^"]+)">SIGUIENTE</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedtitle = u"Pagina siguiente >>"
        scrapedurl = matches[0]
        scrapedthumbnail = ""
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        itemlist.append(
            Item(channel=__channel__,
                 action="series",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))

    return itemlist
def agregadas(item):
    logger.info("[sintonizzate.py] agregadas")

    # Download the page
    data = scrapertools.cachePage(item.url)
    data = scrapertools.get_match(data, ' <ul class="Aportes (.*?)</ul>')
    '''
    <div class="peli_img_img"> 
    <a href="http://sintonizzate.me/pelicula/641/mortal-kombat-1995.html" title="Mortal Kombat (1995)"><img src="http://sintonizzate.me/files/uploads/641.jpg" alt="Mortal Kombat (1995)" /></a> 
    </div> 
    '''
    patron = '<div class="peli_img_img">.*?href="(.*?)".*?src="(.*?)".*?alt="(.*?)"'
    patron += '.*?Sinopsis.*?<p>(.*?)</p>.*?<strong>Genero</strong>:(.*?), (.*?)</div>.*?<strong>Idioma</strong>: (.*?)</div>.*?<strong>Calidad</strong>: (.*?)</div>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    itemlist = []
    for scrapedurl, scrapedthumbnail, scrapedtitle, sinopsis, categoria, scrapedyear, idioma, calidad in matches:
        logger.info("title=" + scrapedtitle)
        title = scrapertools.htmlclean(scrapedtitle).strip()
        title = title.replace("\r", "").replace("\n", "")
        title = title.replace(" Online", "")
        title = unicode(title, "iso-8859-1", errors="replace").encode("utf-8")
        title = re.compile("\s+", re.DOTALL).sub(" ", title)
        title = title + " [" + idioma + "][" + calidad + "]"
        logger.info("title=" + title)
        year = scrapedyear
        url = scrapedurl
        thumbnail = scrapedthumbnail
        plot = sinopsis
        plot = unicode(plot, "iso-8859-1", errors="replace").encode("utf-8")
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "]")
        if "Programa TV" not in categoria:
            if "Serie TV" not in categoria:
                itemlist.append(
                    Item(channel=__channel__,
                         action="findvideos",
                         language=idioma,
                         title=title,
                         category=categoria,
                         url=url,
                         thumbnail=thumbnail,
                         plot=plot,
                         show=title,
                         viewmode="movie",
                         fanart=thumbnail))
    data = scrapertools.cachePage(item.url)
    data = scrapertools.get_match(data, '<!--<nav>-->(.*?)<!--</nav>-->')
    patron = 'href="(.*?)" >(.*?)</a></li>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for siguientes, buena in matches:
        siguiente = "http://www.sintonizzate.me/" + siguientes
        if "Siguiente" in buena:
            itemlist.append(
                Item(
                    channel=__channel__,
                    action="agregadas",
                    title="Siguiente >>",
                    url=siguiente,
                    thumbnail="",
                    plot="",  # don't reuse leftovers from the movie loop above
                    show="",
                    viewmode="movie",
                    fanart=
                    "http://pelisalacarta.mimediacenter.info/fanart/seriespepito.jpg"
                ))

    return itemlist
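
# The decode/re-encode idiom used above, in isolation: the site serves
# iso-8859-1 while the add-on works in utf-8, and errors="replace" keeps one
# bad byte from aborting the whole listing. The sample bytes are made up.
raw = "Pel\xedcula a\xf1adida"   # "Película añadida" encoded as iso-8859-1
print(unicode(raw, "iso-8859-1", errors="replace").encode("utf-8"))
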
def peliculas(item):
    logger.info("pelisalacarta.channels.tumejortv peliculas")

    url = item.url
    # Download the page
    if item.extra == "":
        data = scrapertools.cachePage(url)
    else:
        data = scrapertools.cachePage(url, post=item.extra)
    #logger.info(data)

    # Extract the movies
    patron = '<div class="antlo_dir_all_container">'
    patron += '(?:<ul>.*?</ul>)?'
    patron += '<div rel="tag" data-href="([^"]+)".*?'
    patron += '<div class="antlo_dir_img_container"><a[^<]+<img src="([^"]+)"[^>]+></a>'
    patron += '<div class="antlo_pic_more_info"><span class="color1">([^>]+)<img src="[^"]+" alt="([^"]+)".*?</span></div></div><p>'
    patron += '<div class="antlo_dir_box_text_container"><h3 class="antlo_dir_video_title"><span[^<]+</span><br/><a[^>]+>([^<]+)</a></h3>'
    patron += '<span class="antlo_dir_video_cat">([^<]+)</span><h5 class="antlo_dir_video_calidad">([^<]+)</h5>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for url, thumbnail, tipo, idioma, titulo, categoria, calidad in matches:
        scrapedtitle = unicode(
            titulo + " (" + idioma.strip() + ") (" + calidad + ")", "utf-8")
        scrapedurl = url + "enlaces/"
        scrapedthumbnail = thumbnail
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        itemlist.append(
            Item(channel=__channel__,
                 action="findvideospeliculas",
                 title=scrapedtitle,
                 fulltitle=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))

    if "filtro_letras" in item.url:
        itemlist = sorted(itemlist, key=lambda Item: Item.title)

    # Extract the next page
    patron = '<a href="([^"]+)">SIGUIENTE</a>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        scrapedtitle = u"Página siguiente >>"
        scrapedurl = matches[0]
        scrapedthumbnail = ""
        scrapedplot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")

        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot))

    return itemlist
Beispiel #49
def novedades(item):
    logger.info("[newhd.py] novedades")

    # Download the page
    data = scrapertools.cachePage(item.url)

    # Extract the entries
    '''
    <table width="100%" border="0" cellspacing="0" cellpadding="0" style="background-color:#ffffff;cursor:pointer;"
    id="9111" 
    onmouseover="colorFade('9111','background','ffffff','eff6f9')" 
    onmouseout="colorFade('9111','background','eff6f9','ffffff',25,50)">
    <tr valign="middle">
    <td width="1%" class="box" bgcolor="#FFFFFF"><div onClick="desplegarContraer('911',this);" class="linkContraido"><img src="/templates/newhd/images/mas.png" border="0"></div></td>
    <td width="85%" height="100%" class="box"><div onClick="desplegarContraer('911',this);" class="linkContraido">&nbsp;&nbsp;<font color="#83a0ba"><a>Salvar al soldado Ryan</a></font> </div></td>
    <td width="14%" align="right"><div align="right"><a href="http://www.newhd.org/online/online-belico/911-salvar-al-soldado-ryan.html"><img src="/templates/newhd/images/completo.png" onMouseOver="this.src='/templates/newhd/images/completoon.png';" onMouseOut="this.src='/templates/newhd/images/completo.png';" width="129" height="15" border="0"/></a></div></td>
    </tr>
    <td height="1" colspan="4" background="/templates/newhd/images/dotted.gif"><img src="/templates/newhd/images/spacer.gif" width="1" height="1" /></td>
    </tr>
    </table>
    <div id="911" class='elementoOculto'><table width="100%" class="box"><br><tr>
    <td width="14%" rowspan="6" align="left" valign="top"><img src="/uploads/thumbs/1319662843_salvar_al_soldado_ryan-738956437-large.jpg" width="112" height="154" border="0" align="top" /></td>
    <td height="122" colspan="4" valign="top"><div id="news-id-911" style="display:inline;">Durante la invasión de Normandía, en plena Segunda Guerra Mundial, a un grupo de soldados americanos se le encomienda una peligrosa misión: poner a salvo al soldado James Ryan. Los hombres de la patrulla del capitán John Miller deben arriesgar sus vidas para encontrar a este soldado, cuyos tres hermanos han muerto en la guerra. Lo único que se sabe del soldado Ryan es que se lanzó con su escuadrón de paracaidistas detrás de las líneas enemigas.</div><font style="text-transform: uppercase;">&nbsp;</font></td>
    <tr>
    <tr>
    <td height="20" valign="bottom" class="rating"><img src="/templates/newhd/images/floder.gif" width="20" height="16" align="absbottom" />&nbsp;Category: <font style="text-transform: uppercase;"><a href="http://www.newhd.org/online/">HD Online</a> &raquo; <a href="http://www.newhd.org/online/online-belico/">Belico</a></font></td>
    <td align="right" valign="bottom"> <a href="http://nowtrailer.tv/view/1060/Saving-Private-Ryan-1998-Official-Trailer.html" target="_blank"><img src="/templates/newhd/images/trailer.gif" alt="Trailer" width="37" height="15" border="0"></a> </td>
    <tr>
    <td height="1" background="/templates/newhd/images/dot_dark.gif"></td>    
    <td height="1"  background="/templates/newhd/images/dot_dark.gif"></td>
    <tr>
    <td width="73%" height="20" valign="bottom" class="rating"><div id='ratig-layer-911'><div class="rating" style="float:left;">
    <ul class="unit-rating">
    <li class="current-rating" style="width:0px;">0</li>
    <li><a href="#" title="Bad" class="r1-unit" onclick="dleRate('1', '911'); return false;">1</a></li>
    <li><a href="#" title="Poor" class="r2-unit" onclick="dleRate('2', '911'); return false;">2</a></li>
    <li><a href="#" title="Fair" class="r3-unit" onclick="dleRate('3', '911'); return false;">3</a></li>
    <li><a href="#" title="Good" class="r4-unit" onclick="dleRate('4', '911'); return false;">4</a></li>
    <li><a href="#" title="Excellent" class="r5-unit" onclick="dleRate('5', '911'); return false;">5</a></li>
    </ul>
    </div>
    
    patron  = '<table width="100\%" border="0" cellspacing="0" cellpadding="0".*?'
    patron += '<font[^<]+<a>([^<]+)</a>.*?'
    patron += '<a href="(http://www.newhd.org/online/[^"]+)"><img.*?<img.*?'
    patron += '<img src="([^"]+)".*?'
    patron += '<div id="news-id[^"]+" style="display\:inline\;">([^<]+)<'
    '''
    patron = '<tr valign="middle">(.*?)</a></font></td>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for match in matches:
        try:
            scrapedurl = re.compile(r'href="(.+?)"').findall(match)[0]
        except:
            continue
        try:
            scrapedtitle = re.compile(r'<a>(.+?)</a>').findall(match)[0]
        except:
            scrapedtitle = "untitle"
        try:
            scrapedthumbnail = urlparse.urljoin(
                item.url,
                re.compile(r'html"><img src="([^"]+)" width=').findall(match)
                [0])
        except:
            scrapedthumbnail = ""
        try:
            scrapedplot = re.compile(r'(<td height="122".+?)<').findall(
                match)[0]
            scrapedplot = re.sub("<[^>]+>", " ", scrapedplot).strip()
        except:
            scrapedplot = ""
        logger.info(scrapedtitle)

        # Add to the listing
        itemlist.append(
            Item(channel=__channel__,
                 action="videos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 context='4',
                 folder=True))

    # Extract the next-page marker
    patronvideos = '<a href="([^"]+)"><span class="thide pnext">Next</span>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedtitle = "Página siguiente"
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        scrapedthumbnail = ""
        itemlist.append(
            Item(channel=__channel__,
                 title=scrapedtitle,
                 action="novedades",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 folder=True))

    return itemlist
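
# The loop above repeats the same try/except-per-field dance. A hypothetical
# helper (not part of scrapertools) expressing that idiom once:
import re

def first_match(pattern, text, default=""):
    # Return the first captured group, or the default when nothing matches.
    matches = re.compile(pattern, re.DOTALL).findall(text)
    return matches[0] if matches else default

block = '<td><a href="http://www.newhd.org/online/x.html"><a>Some title</a>'
print(first_match(r'href="(.+?)"', block))              # the detail URL
print(first_match(r'<a>(.+?)</a>', block, "untitle"))   # same fallback as above
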
def peliculas(item):
    logger.info("[cineonlineeu.py] peliculas")
    itemlist = []

    # Download the page
    data = scrapertools.cachePage(item.url)

    # Extract the entries (folders)
    patron = "<div class='post bar hentry'>[^<]+"
    patron += "<a name='[^']+'></a>[^<]+"
    patron += "<h3 class='post-title entry-title'>[^<]+"
    patron += "<a href='([^']+)'>([^<]+)</a>[^<]+"
    patron += '</h3>.*?<img.*?src="([^"]+)"'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle, scrapedthumbnail in matches:
        plot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fanart=scrapedthumbnail,
                 plot=plot,
                 viewmode="movie",
                 folder=True))

    patron = "<div class='item-content'>[^<]+"
    patron += "<div class='item-thumbnail'>[^<]+"
    patron += "<a href='([^']+)'[^<]+"
    patron += "<img.*?src='([^']+)'[^<]+"
    patron += "</a>[^<]+"
    patron += "</div>[^<]+"
    patron += "<div class='item-title'><a[^>]+>([^<]+)</a></div>"
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        plot = ""
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="findvideos",
                 title=scrapedtitle,
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fanart=scrapedthumbnail,
                 plot=plot,
                 folder=True))

    # Extract the pager
    patronvideos = "<a class='blog-pager-older-link' href='([^']+)'"
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    if len(matches) > 0:
        scrapedurl = urlparse.urljoin(item.url, matches[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="peliculas",
                 title="Página siguiente >>",
                 url=scrapedurl,
                 folder=True))

    return itemlist
Beispiel #51
def episodios(item, load_all_pages=False):
    logger.info("tvalacarta.channels.sieterm episodios")

    # Download the page
    data = scrapertools.cachePage(item.url)
    #logger.info(data)

    # Extract the videos
    '''
    <dt class="alacarta-video"><a href="http://..." title="...">Murcianos por el mundo: Cracovia</a> · 12/05/2010 · (5411 veces visto)</dt>
    <dd style="height:100%; overflow:hidden">
    <a href="http://www.7rm.es/servlet/rtrm.servlets.ServletLink2?METHOD=DETALLEALACARTA&amp;sit=c,6,ofs,10&amp;serv=BlogPortal2&amp;orden=1&amp;idCarta=40&amp;mId=4182&amp;autostart=TV" title="Ver v&iacute;deo">
    <img src="http://mediateca.regmurcia.com/MediatecaCRM/ServletLink?METHOD=MEDIATECA&amp;accion=imagen&amp;id=4182" alt="Murcianos por el mundo: Cracovia" title="Murcianos por el mundo: Cracovia" style="width:95px" />
    </a>
    Esta semana nos desplazamos al sur de Polonia, a Cracovia y Wroclaw, para conocer cómo viven seis murcianos en una de las ciudades más importantes de Polonia y Patrimonio de la Humanidad.
    <a href="http://ficheros.7rm.es:3025/Video/4/1/4182_BAJA.mp4">
    <img src="/images/bajarArchivo.gif" alt="Descargar Archivo" title="Descargar Archivo" style="margin:0;padding:0 5px 0 0;vertical-align:middle;border:none" />
    </a>
    </dd>
    '''
  
    '''
    <dt class="alacarta-video"><a href="http://www.7rm.es/servlet/rtrm.servlets.ServletLink2?METHOD=DETALLEALACARTA&amp;sit=c,6,ofs,0&amp;serv=BlogPortal2&amp;orden=2&amp;idCarta=36&amp;mId=3214&amp;autostart=TV" title="Ver v&iacute;deo">De la tierra al mar</a> · 22/12/2009 · (1072 veces visto)</dt>
    <dd style="height:100%; overflow:hidden">
    <a href="http://www.7rm.es/servlet/rtrm.servlets.ServletLink2?METHOD=DETALLEALACARTA&amp;sit=c,6,ofs,0&amp;serv=BlogPortal2&amp;orden=2&amp;idCarta=36&amp;mId=3214&amp;autostart=TV" title="Ver v&iacute;deo">
    <img src="http://mediateca.regmurcia.com/MediatecaCRM/ServletLink?METHOD=MEDIATECA&amp;accion=imagen&amp;id=3214" alt="De la tierra al mar" title="De la tierra al mar" style="width:95px" />
    </a>
    En este programa conocemos a Plácido, joven agricultor que nos mostrará la mala situación en que se encuentra el sector, informamos de la campaña 'Dale vida a tu árbol', asistimos a la presentación del libro 'Gestión ambiental. Guía fácil para empresas y profesionales', y nos hacemos eco del malestar de nuestros agricultores con la nueva normativa europea en materia de fitosanitarios, que entrará en vigor en junio de 2011.
    <a href="http://ficheros.7rm.es:3025/Video/3/2/3214_BAJA.mp4">
    <img src="/images/bajarArchivo.gif" alt="Descargar Archivo" title="Descargar Archivo" style="margin:0;padding:0 5px 0 0;vertical-align:middle;border:none" />
    </a>
    </dd>
    '''
    patron  = '<dt class="alacarta-video"><a href="([^"]+)" title="[^"]+">([^<]+)</a>.*?([0-9\/]+).*?</dt>[^<]+'
    patron += '<dd style="[^<]+">[^<]+'
    patron += '<a href="[^"]+" title="[^"]+">[^<]+'
    patron += '<img src="([^"]+)"[^<]+'
    patron += '</a>([^<]+)<a href="([^"]+)">'
    matches = re.compile(patron,re.DOTALL).findall(data)
    #scrapertools.printMatches(matches)
    itemlist = []
    for match in matches:
        # Video attributes
        scrapedtitle = unicode( match[1].strip()+" ("+match[2]+")" , "iso-8859-1" , errors="ignore").encode("utf-8")
        scrapedurl = urlparse.urljoin(item.url,match[5]).replace("&amp;","&")
        scrapedthumbnail = urlparse.urljoin(item.url,match[3]).replace("&amp;","&")
        scrapedplot = unicode( match[4].strip()  , "iso-8859-1" , errors="ignore").encode("utf-8")
        scrapedpage = urlparse.urljoin(item.url,match[0]).replace("&amp;","&")
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], page=["+scrapedpage+"], thumbnail=["+scrapedthumbnail+"]")

        # Try to extract the air date from the title
        aired_date = scrapertools.parse_date(scrapedtitle)
        #logger.info("aired_date="+aired_date)

        # Add to the XBMC listing
        itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="play" , server="sieterm" , url=scrapedpage, thumbnail=scrapedthumbnail, fanart=scrapedthumbnail, plot=scrapedplot , show = item.show , page=scrapedpage, viewmode="movie_with_plot", aired_date=aired_date, folder=False) )

    # Look for the next page
    next_page_url = scrapertools.find_single_match(data,'<a class="list-siguientes" href="([^"]+)" title="Ver siguientes archivos">')
    if next_page_url!="":
        next_page_url = urlparse.urljoin(item.url,next_page_url)
        next_page_item = Item(channel=CHANNELNAME, title=">> Página siguiente" , action="episodios" , url=next_page_url , show=item.show, folder=True)

        if load_all_pages:
            itemlist.extend(episodios(next_page_item,load_all_pages))
        else:
            itemlist.append( next_page_item )

    return itemlist
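
# The load_all_pages flag above turns pagination into recursion: each call
# lists its own episodes, then extends the result with the next page's. A
# minimal sketch of that control flow over fake in-memory pages:
fake_pages = {"p1": (["ep1", "ep2"], "p2"), "p2": (["ep3"], None)}

def episodios_demo(url, load_all_pages=True):
    episodes, next_page_url = fake_pages[url]
    itemlist = list(episodes)
    if next_page_url is not None:
        if load_all_pages:
            itemlist.extend(episodios_demo(next_page_url, load_all_pages))
        else:
            itemlist.append(">> next page item")
    return itemlist

print(episodios_demo("p1"))  # -> ['ep1', 'ep2', 'ep3']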
Beispiel #52
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("[rtve.py] get_video_url(page_url='%s')" % page_url)

    # Extract the video ID from the URL
    logger.info("url=" + page_url)
    codigo = scrapertools.find_single_match(page_url, 'http://.*?/([0-9]+)')
    url = ""
    itemlist = []
    logger.info("assetid=" + codigo)

    # Code taken from PyDownTV, thanks @aabilio :)
    # https://github.com/aabilio/PyDownTV2/blob/master/spaintvs/tve.py
    # -- May 24, 2013 method
    videoID = codigo
    logger.info("Probando método de 24 de uno de Mayo de 2013")
    tipo = "videos"
    url = "http://www.rtve.es/ztnr/movil/thumbnail/default/%s/%s.png" % (
        tipo, videoID)

    logger.info("Probando url:" + url)
    print("Manager default")
    from base64 import b64decode as decode
    tmp_ = decode(scrapertools.cachePage(url))
    if tmp_ == "":
        url = "http://www.rtve.es/ztnr/movil/thumbnail/anubis/%s/%s.png" % (
            tipo, videoID)
        tmp_ = decode(scrapertools.cachePage(url))
        print("Manager anubis")
    tmp = re.findall(".*tEXt(.*)#[\x00]*([0-9]*).*", tmp_)[0]
    tmp = [n for n in tmp]
    cyphertext = tmp[0]
    key = tmp[1]
    tmp = [0 for n in range(500)]  # scratch list; not used below

    # Credits: http://sgcg.es/articulos/2012/09/11/nuevos-cambios-en-el-mecanismo-para-descargar-contenido-multimedia-de-rtve-es-2/
    intermediate_cyphertext = ""
    increment = 1
    text_index = 0
    while text_index < len(cyphertext):
        text_index = text_index + increment
        try:
            intermediate_cyphertext = intermediate_cyphertext + cyphertext[
                text_index - 1]
        except:
            pass
        increment = increment + 1
        if increment == 5: increment = 1

    plaintext = ""
    key_index = 0
    increment = 4
    while key_index < len(key):
        key_index = key_index + 1
        text_index = int(key[key_index - 1]) * 10
        key_index = key_index + increment
        try:
            text_index = text_index + int(key[key_index - 1])
        except:
            pass
        text_index = text_index + 1
        increment = increment + 1
        if increment == 5: increment = 1
        try:
            plaintext = plaintext + intermediate_cyphertext[text_index - 1]
        except:
            pass

    urlVideo = plaintext
    if urlVideo != "":
        url_video = urlVideo.replace("www.rtve.es", "media5.rtve.es")

        # -- CarlosJDelgado ([email protected]) -- Gets the tokenized url after a change on rtve
        # Index and slice the same string: mixing urlVideo and url_video here
        # shifted the cut by the difference in host-name length
        resources_path = url_video[url_video.find("/resources"):]
        url_auth = ("http://flash.akamaihd.multimedia.cdn.rtve.es/auth" +
                    resources_path +
                    "?v=2.6.8&fp=WIN%2016,0,0,305&r=TDBDO&g=UZEYDOLYKFLY")
        logger.info("url_auth=" + url_auth)

        urlVideo = url_video[:url_video.find("/resources")] + urllib2.urlopen(
            url_auth).read()

    else:
        logger.info("No se pudo encontrar el enlace de descarga")
    url = urlVideo

    logger.info("url=" + url)

    # -- May 24, 2013 method END
    '''
    if url=="":
        url = "http://www.rtve.es/ztnr/consumer/xl/video/alta/" + codigo + "_es_292525252525111"
        logger.info("url="+url)

        location = scrapertools.get_header_from_response(url,header_to_get="location")

        if location != "":
            url = location.replace("www.rtve.es", "media5.rtve.es")

    if url=="":
        data = scrapertools.cache_page("http://web.pydowntv.com/api?url="+page_url)
        url = scrapertools.get_match(data,'"url_video"\: \["([^"]+)"\]')

    if url=="":
        try:
            # Compone la URL
            #http://www.rtve.es/swf/data/es/videos/alacarta/5/2/5/1/741525.xml
            url = 'http://www.rtve.es/swf/data/es/videos/alacarta/'+codigo[-1:]+'/'+codigo[-2:-1]+'/'+codigo[-3:-2]+'/'+codigo[-4:-3]+'/'+codigo+'.xml'
            logger.info("[rtve.py] url="+url)
    
            # Descarga el XML y busca el vídeo
            #<file>rtmp://stream.rtve.es/stream/resources/alacarta/flv/6/9/1270911975696.flv</file>
            data = scrapertools.cachePage(url)
            #print url
            #print data
            patron = '<file>([^<]+)</file>'
            matches = re.compile(patron,re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            if len(matches)>0:
                #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
                url = matches[0]
            else:
                url = ""
            
            patron = '<image>([^<]+)</image>'
            matches = re.compile(patron,re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            #print len(matches)
            #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
            thumbnail = matches[0]
        except:
            url = ""
    
    # Hace un segundo intento
    if url=="":
        try:
            # Compone la URL
            #http://www.rtve.es/swf/data/es/videos/video/0/5/8/0/500850.xml
            url = 'http://www.rtve.es/swf/data/es/videos/video/'+codigo[-1:]+'/'+codigo[-2:-1]+'/'+codigo[-3:-2]+'/'+codigo[-4:-3]+'/'+codigo+'.xml'
            logger.info("[rtve.py] url="+url)

            # Descarga el XML y busca el vídeo
            #<file>rtmp://stream.rtve.es/stream/resources/alacarta/flv/6/9/1270911975696.flv</file>
            data = scrapertools.cachePage(url)
            patron = '<file>([^<]+)</file>'
            matches = re.compile(patron,re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
            url = matches[0]
        except:
            url = ""
    
    if url=="":

        try:
            # Compone la URL
            #http://www.rtve.es/swf/data/es/videos/video/0/5/8/0/500850.xml
            url = 'http://www.rtve.es/swf/data/es/videos/video/'+codigo[-1:]+'/'+codigo[-2:-1]+'/'+codigo[-3:-2]+'/'+codigo[-4:-3]+'/'+codigo+'.xml'
            logger.info("[rtve.py] url="+url)

            # Descarga el XML y busca el assetDataId
            #<plugin ... assetDataId::576596"/>
            data = scrapertools.cachePage(url)
            #logger.info("[rtve.py] data="+data)
            patron = 'assetDataId\:\:([^"]+)"'
            matches = re.compile(patron,re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            #url = matches[0].replace('rtmp://stream.rtve.es/stream/','http://www.rtve.es/')
            codigo = matches[0]
            logger.info("assetDataId="+codigo)
            
            #url = http://www.rtve.es/scd/CONTENTS/ASSET_DATA_VIDEO/6/9/5/6/ASSET_DATA_VIDEO-576596.xml
            url = 'http://www.rtve.es/scd/CONTENTS/ASSET_DATA_VIDEO/'+codigo[-1:]+'/'+codigo[-2:-1]+'/'+codigo[-3:-2]+'/'+codigo[-4:-3]+'/ASSET_DATA_VIDEO-'+codigo+'.xml'
            logger.info("[rtve.py] url="+url)
            
            data = scrapertools.cachePage(url)
            #logger.info("[rtve.py] data="+data)
            patron  = '<field>[^<]+'
            patron += '<key>ASD_FILE</key>[^<]+'
            patron += '<value>([^<]+)</value>[^<]+'
            patron += '</field>'
            matches = re.compile(patron,re.DOTALL).findall(data)
            scrapertools.printMatches(matches)
            codigo = matches[0]
            logger.info("[rtve.py] url="+url)
            
            #/deliverty/demo/resources/mp4/4/3/1290960871834.mp4
            #http://media4.rtve.es/deliverty/demo/resources/mp4/4/3/1290960871834.mp4
            #http://www.rtve.es/resources/TE_NGVA/mp4/4/3/1290960871834.mp4
            url = "http://www.rtve.es/resources/TE_NGVA"+codigo[-26:]

        except:
            url = ""
    '''

    logger.info("[rtve.py] url=" + url)
    '''
    if url=="":
        logger.info("[rtve.py] Extrayendo URL tipo iPad")
        headers = []
        headers.append( ["User-Agent","Mozilla/5.0 (iPad; U; CPU OS 3_2 like Mac OS X; en-us) AppleWebKit/531.21.10 (KHTML, like Gecko) Version/4.0.4 Mobile/7B334b Safari/531.21.10"] )
        location = scrapertools.get_header_from_response(page_url,headers=headers,header_to_get="location")
        logger.info("[rtve.py] location="+location)
        
        data = scrapertools.cache_page(location,headers=headers)
        logger.info("[rtve.py] data="+data)
        #<a href="/usuarios/sharesend.shtml?urlContent=/resources/TE_SREP63/mp4/4/8/1334334549284.mp4" target
        url = scrapertools.get_match(data,'<a href="/usuarios/sharesend.shtml\?urlContent\=([^"]+)" target')
        logger.info("[rtve.py] url="+url)
        #http://www.rtve.es/resources/TE_NGVA/mp4/4/8/1334334549284.mp4
        url = urlparse.urljoin("http://www.rtve.es",url)
        logger.info("[rtve.py] url="+url)
    '''

    video_urls = []
    video_urls.append(["[rtve]", url])

    for video_url in video_urls:
        logger.info("[rtve.py] %s - %s" % (video_url[0], video_url[1]))

    return video_urls
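
# A toy run of the first descrambling loop above: positions are visited with
# strides 1,2,3,4 repeating, so from a 10-character sample only the 1st, 3rd,
# 6th and 10th characters survive. Same logic, rewritten without try/except:
def deinterleave(cyphertext):
    out, text_index, increment = "", 0, 1
    while text_index < len(cyphertext):
        text_index += increment
        if text_index <= len(cyphertext):
            out += cyphertext[text_index - 1]
        increment = increment + 1 if increment < 4 else 1
    return out

print(deinterleave("abcdefghij"))  # -> "acfj"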
Beispiel #53
def set_opcion(item, seleccion, opciones, video_urls):
    logger.info("platformtools set_opcion")
    # logger.debug(item.tostring('\n'))
    salir = False
    # Nothing was chosen, most likely because the user pressed ESC
    # TODO review
    if seleccion == -1:
        # Avoids the "One or more items failed" error when the selection is cancelled from a strm file
        listitem = xbmcgui.ListItem(item.title,
                                    iconImage="DefaultVideo.png",
                                    thumbnailImage=item.thumbnail)
        xbmcplugin.setResolvedUrl(int(sys.argv[1]), False, listitem)

    # "Enviar a JDownloader"
    if opciones[seleccion] == config.get_localized_string(30158):
        from core import scrapertools

        # TODO check what comes back in 'data'
        if item.subtitle != "":
            data = scrapertools.cachePage(
                config.get_setting("jdownloader") +
                "/action/add/links/grabber0/start1/web=" + item.url + " " +
                item.thumbnail + " " + item.subtitle)
        else:
            data = scrapertools.cachePage(
                config.get_setting("jdownloader") +
                "/action/add/links/grabber0/start1/web=" + item.url + " " +
                item.thumbnail)
        salir = True

    # "Descargar"
    elif opciones[seleccion] == config.get_localized_string(30153):
        item.video_urls = video_urls
        from channels import descargas
        descargas.save_download(item)
        salir = True

    # "Quitar de favoritos"
    elif opciones[seleccion] == config.get_localized_string(30154):
        from channels import favoritos
        favoritos.delFavourite(item)
        salir = True

    # "Añadir a favoritos":
    elif opciones[seleccion] == config.get_localized_string(30155):
        from channels import favoritos
        item.from_channel = "favoritos"
        favoritos.addFavourite(item)
        salir = True

    # "Añadir a Biblioteca":  # Library
    elif opciones[seleccion] == config.get_localized_string(30161):
        titulo = item.fulltitle
        if titulo == "":
            titulo = item.title

        new_item = item.clone(title=titulo,
                              action="play_from_library",
                              category="Cine",
                              fulltitle=item.fulltitle,
                              channel=item.channel)

        from platformcode import library
        library.add_pelicula_to_library(new_item)

        salir = True

    # "Buscar Trailer":
    elif opciones[seleccion] == config.get_localized_string(30162):
        config.set_setting("subtitulo", "false")
        xbmc.executebuiltin("XBMC.RunPlugin(%s?%s)" %
                            (sys.argv[0],
                             item.clone(channel="trailertools",
                                        action="buscartrailer",
                                        contextual=True).tourl()))
        salir = True

    return salir
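
# The "Enviar a JDownloader" branch above drives JDownloader's classic Remote
# Control plugin, which takes the links appended to the /action/add/links/...
# path. A hedged offline sketch of the same call; the host, the link and the
# plugin being enabled are all assumptions:
import urllib2

jd_host = "http://127.0.0.1:10025"   # hypothetical "jdownloader" setting
link = "http://example.com/video.mp4"
try:
    urllib2.urlopen(jd_host + "/action/add/links/grabber0/start1/web=" + link)
except Exception:
    pass  # JDownloader not running or the Remote Control plugin is off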
Beispiel #54
def episodios(item):
    logger.info("[rtvv.py] episodios")

    itemlist = []

    # Download the page and extract the videos
    data = scrapertools.cachePage(item.url)
    '''
    <div class="mg fl">
    <a title="Tornar al niu" href="/va/alqueriablanca/LAlqueria-Blanca-Tornar-niu-Cap_13_477082294.html">
    <img src="/alqueriablanca/LAlqueria-Blanca-Tornar-niu-Cap_RTVVID20110508_0069_3.jpg" width="145" height="109" alt="L&acute;Alqueria Blanca - Tornar al niu - Cap. 152" />
    </a>
    </div>    
    <div clasS="mt">
    <h3 class="title"><a href="/va/alqueriablanca/LAlqueria-Blanca-Tornar-niu-Cap_13_477082294.html">Cap. 152 - Tornar al niu</a></h3>
    <p class="section"><a href="/alqueriablanca/">L'Alqueria Blanca </a><span class="date">08.05.2011 / 22h14</span></p>
    <p class="body">
    Elena ix en llibertat, per&ograve; el pas pel calab&oacute;s deixa en ella una empremta profunda que la duu a prendre decisions dr&agrave;stiques. Don Mauro s&acute;enfronta al bisbe per defendre el seu suport a Elena i Robert. Sanitat tanca cautelarment la f&agrave;brica de calcer. Davant l&acute;actitud de Bali, &eacute;s Narc&iacute;s el qui mou els fils per tal que es re&ograve;briga. Jaume i Asun avancen la tornada i aix&ograve; porta Teresa a accelerar els preparatius de la boda.
    </p>
    '''
    patron = '<div class="mg fl">[^<]+'
    patron += '<a[^>]+>[^<]+'
    patron += '<img src="([^"]+)"[^<]+>[^<]+'
    patron += '</a>[^<]+'
    patron += '</div>[^<]+'
    patron += '<div clasS="mt">[^<]+'
    patron += '<h3 class="title"><a href="([^"]+)">([^<]+)</a></h3>[^<]+'
    patron += '<p class="section"><a[^>]+>[^<]+</a><span class="date">([^<]+)</span></p>[^<]+'
    patron += '<p class="body">([^<]+)</p>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for match in matches:
        scrapedtitle = match[2] + " (" + match[3] + ")"
        scrapedurl = urlparse.urljoin(item.url, match[1])
        scrapedthumbnail = urlparse.urljoin(item.url, match[0])
        scrapedplot = match[4]
        if (DEBUG):
            logger.info("title=[" + scrapedtitle + "], url=[" + scrapedurl +
                        "], thumbnail=[" + scrapedthumbnail + "]")
        itemlist.append(
            Item(channel=CHANNELNAME,
                 title=scrapedtitle,
                 action="play",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 plot=scrapedplot,
                 show=item.show,
                 page=scrapedurl,
                 category=item.category,
                 folder=False))

    patron = '<div class="md-item">[^<]+'
    patron += '<div class="thumb-mediateca bspace6">[^<]+'
    patron += '<div class="mg">[^<]+'
    patron += '<a href="([^"]+)" title="([^"]+)">[^<]+'
    patron += '<img src="([^"]+)".*?'
    patron += '<var class="date">([^<]+)</var>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedfecha in matches:
        title = scrapedtitle + " (" + scrapedfecha + ")"
        url = urlparse.urljoin(item.url, scrapedurl)
        thumbnail = urlparse.urljoin(item.url, scrapedthumbnail)
        itemlist.append(
            Item(channel=CHANNELNAME,
                 title=title,
                 action="play",
                 url=url,
                 thumbnail=thumbnail,
                 show=item.show,
                 page=url,
                 category=item.category,
                 folder=False))

    patron = '<span class="next"><a.*?href="([^"]+)">Siguiente</a></span>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        logger.info("Página siguiente " + matches[0])
        itemlist.extend(
            episodios(
                Item(url=urlparse.urljoin(item.url, matches[0]),
                     show=item.show)))

    patron = '<a class="ctrl ctrl-next[^"]+" href="([^"]+)" title="Anar a la p'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        logger.info("Página siguiente " + matches[0])
        itemlist.append(
            Item(channel=CHANNELNAME,
                 title=">> Página siguiente",
                 action="episodios",
                 url=urlparse.urljoin(item.url, matches[0]),
                 show=item.show))

    return itemlist
Beispiel #55
def episodios(item):
    logger.info("[boing.py] episodios")

    # Download the page
    #http://www.boing.es/serie/hora-de-aventuras
    #http://www.boing.es/videos/hora-de-aventuras
    data = scrapertools.cachePage(item.url.replace("/serie/","/videos/"))
    #logger.info(data)
    bloque = scrapertools.get_match(data,'<div class="Contenedor100">(.*?)<\!-- \/Contenedor100 -->',1)
    logger.info(str(bloque))

    # Extract the videos
    '''
    <div class="pic"><div class="pic2"><div class="pic3">    
    <a href="/serie/geronimo-stilton/video/top-model">
    <img class="bcvid" height="73" width="130" src="http://i.cdn.turner.com/tbseurope/big/Boing_ES/thumbs/SP_SA_GERSTI0017_01.jpg" />
    </a>
    </div></div></div>
    <div class="series"><a href="/serie/geronimo-stilton">Gerónimo Stilton</a></div>
    <div class="title"><a href="/serie/geronimo-stilton/video/top-model">Top Model</a></div>
    '''
    '''
    <div class="pic"><div class="pic2"><div class="pic3">
    
    <a href="/serie/generator-rex/video/hombre-contra-hombre">
    <img style="margin-top:10px" height="73" width="130" src="http://i.cdn.turner.com/tbseurope/big/Boing_ES_16_9/thumbs/SP_SA_GENREX0047_01.jpg" />
    </a>
    
    
    </div></div></div>
    <div class="stars"><form action="/videos/generator-rex"  accept-charset="UTF-8" method="post" id="fivestar-custom-widget" class="fivestar-widget">
    <div><div class="fivestar-form-vote-18249 clear-block"><input type="hidden" name="content_type" id="edit-content-type" value="node"  />
    <input type="hidden" name="content_id" id="edit-content-id" value="18249"  />
    <div class="fivestar-form-item  fivestar-average-stars"><div class="form-item" id="edit-vote-wrapper">
    <span class='edit-vote-design'><span class='form-item-value-design1'><span class='form-item-value-design2'><span class='form-item-value-design3'> <input type="hidden" name="vote_count" id="edit-vote-count" value="0"  />
    <input type="hidden" name="vote_average" id="edit-vote-average" value="76.25"  />
    <input type="hidden" name="auto_submit_path" id="edit-auto-submit-path" value="/fivestar/vote/node/18249/vote"  class="fivestar-path" />
    <select name="vote" class="form-select" id="edit-vote-1" ><option value="-">Select rating</option><option value="20">Give it 1/5</option><option value="40">Give it 2/5</option><option value="60">Give it 3/5</option><option value="80" selected="selected">Give it 4/5</option><option value="100">Give it 5/5</option></select><input type="hidden" name="auto_submit_token" id="edit-auto-submit-token" value="36639bc15e086e0bfc3d93bfec3d5287"  class="fivestar-token" />
    
    </span></span></span></span></div>
    </div><input type="hidden" name="destination" id="edit-destination" value="videos/generator-rex"  />
    <input type="submit" name="op" id="edit-fivestar-submit" value="Rate"  class="form-submit fivestar-submit" />
    <input type="hidden" name="form_build_id" id="form-d62c4ce5673f9173ca3edb7e81986457" value="form-d62c4ce5673f9173ca3edb7e81986457"  />
    <input type="hidden" name="form_id" id="edit-fivestar-custom-widget" value="fivestar_custom_widget"  />
    </div>
    </div></form></div>
    <div class="series"><a href="/serie/generator-rex">Generator Rex</a></div>
    <div class="title"><a href="/serie/generator-rex/video/hombre-contra-hombre">Hombre contra hombre</a></div>
    '''
    '''
    <div class="pic3">
    
    <a href="/serie/monster-high/video/monster-high-superpillada" class="imagecache imagecache-130x73 imagecache-linked imagecache-130x73_linked"><img src="http://www.boing.es/sites/default/files/imagecache/130x73/pantallazo2mh.jpg" alt="" title=""  class="imagecache imagecache-130x73" width="130" height="73" /></a>      		      		
    
    </div></div></div>
    <div class="stars"><form action="/videos/monster-high"  accept-charset="UTF-8" method="post" id="fivestar-custom-widget" class="fivestar-widget">
    <div><div class="fivestar-form-vote-24388 clear-block"><input type="hidden" name="content_type" id="edit-content-type" value="node"  />
    <input type="hidden" name="content_id" id="edit-content-id" value="24388"  />
    <div class="fivestar-form-item  fivestar-average-stars"><div class="form-item" id="edit-vote-wrapper">
    <span class='edit-vote-design'><span class='form-item-value-design1'><span class='form-item-value-design2'><span class='form-item-value-design3'> <input type="hidden" name="vote_count" id="edit-vote-count" value="0"  />
    <input type="hidden" name="vote_average" id="edit-vote-average" value="67.9646"  />
    <input type="hidden" name="auto_submit_path" id="edit-auto-submit-path" value="/fivestar/vote/node/24388/vote"  class="fivestar-path" />
    <select name="vote" class="form-select" id="edit-vote-1" ><option value="-">Select rating</option><option value="20">Give it 1/5</option><option value="40">Give it 2/5</option><option value="60">Give it 3/5</option><option value="80" selected="selected">Give it 4/5</option><option value="100">Give it 5/5</option></select><input type="hidden" name="auto_submit_token" id="edit-auto-submit-token" value="219ac03ae7ca6956d5484acb00454195"  class="fivestar-token" />
    
    </span></span></span></span></div>
    </div><input type="hidden" name="destination" id="edit-destination" value="videos/monster-high"  />
    <input type="submit" name="op" id="edit-fivestar-submit" value="Rate"  class="form-submit fivestar-submit" />
    <input type="hidden" name="form_build_id" id="form-9e308b4823178e9cbca63316130d805e" value="form-9e308b4823178e9cbca63316130d805e"  />
    <input type="hidden" name="form_id" id="edit-fivestar-custom-widget" value="fivestar_custom_widget"  />
    </div>
    </div></form></div>
    <div class="series"><a href="/serie/monster-high">Monster High</a></div>
    <div class="title"><a href="/serie/monster-high/video/monster-high-superpillada">Monster High: Superpillada</a></div>
    
    '''
    patron  = '<div class="pic3"[^<]+'
    patron += '<a href="([^"]+)"[^<]+<img style="[^"]+" height="\d+" width="\d+" src="([^"]+)".*?'
    patron += '<div class="title"><a[^>]+>([^<]+)</a>'
    matches = re.compile(patron,re.DOTALL).findall(bloque)
    scrapertools.printMatches(matches)
    #if DEBUG: scrapertools.printMatches(matches)

    if len(matches)==0:
        patron  = '<div class="pic3"[^<]+'
        patron += '<a href="([^"]+)"[^<]+<img src="([^"]+)".*?'
        patron += '<div class="title"><a[^>]+>([^<]+)</a>'
        matches = re.compile(patron,re.DOTALL).findall(bloque)
        scrapertools.printMatches(matches)
        #if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for scrapedurl,scrapedthumbnail,scrapedtitle in matches:
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        url = urlparse.urljoin(item.url,scrapedurl)
        itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="play", server="boing" , url=url, thumbnail=scrapedthumbnail, page=url, show = item.show, folder=False) )

    return itemlist
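
# The boing scraper above tries a strict patron first (thumbnails carrying an
# inline style attribute) and only then falls back to a looser one. The same
# two-stage idiom in isolation, over a hypothetical snippet that only the
# loose pattern matches:
import re

def match_with_fallback(data):
    strict = r'<a href="([^"]+)"[^<]+<img style="[^"]+" height="\d+" width="\d+" src="([^"]+)"'
    loose = r'<a href="([^"]+)"[^<]+<img src="([^"]+)"'
    matches = re.findall(strict, data, re.DOTALL)
    if len(matches) == 0:
        matches = re.findall(loose, data, re.DOTALL)
    return matches

print(match_with_fallback('<a href="/v/1"><img src="t.jpg" />'))  # -> [('/v/1', 't.jpg')]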
Beispiel #56
def episodios(item):
    logger.info("tvalacarta.cctvspan episodios")
    itemlist = []
    '''
    <div class="text_lt">
    <a guid="40f061633e614ffe829ab3df91279b44" style="cursor:pointer;" onclick="loadvideo('40f061633e614ffe829ab3df91279b44')"><img src="http://p2.img.cctvpic.com/photoworkspace/2015/03/15/2015031515100374890.bmp" width="96" height="75" class="l" /></a>
    <h3><a onclick="loadvideo('40f061633e614ffe829ab3df91279b44')" style="cursor:pointer;">EXTRANJEROS EN CHINA 03/15/2015 Liz Vargas, Profesora de la Universidad de Estudios Internacionales de Beijing</a></h3>
    '''
    # Download the page
    data = scrapertools.cachePage(item.url)
    patron = '<div class="text_lt"[^<]+'
    patron += '<a guid="([^"]+)"[^<]+<img src="([^"]+)"[^<]+</a[^<]+'
    patron += '<h3><a[^>]+>([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for guid, scrapedthumbnail, scrapedtitle in matches:

        title = scrapertools.htmlclean(scrapedtitle)
        url = guid
        thumbnail = scrapedthumbnail
        aired_date = scrapertools.parse_date(scrapedtitle, "mdy")
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server="cntv",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 show=item.show,
                 aired_date=aired_date,
                 folder=False))
    '''
    <span class="text_lt">
    <h3><a href="http://cctv.cntv.cn/2015/03/31/VIDE1427774161717552.shtml" target="_blank">ECONOMÍA  AL DÍA 03/31/2015 11:00</a></h3>
    '''
    patron = '<span class="text_lt"[^<]+'
    patron += '<h3><a href="([^"]+)"[^>]+>([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for scrapedurl, scrapedtitle in matches:

        title = scrapertools.htmlclean(scrapedtitle)
        url = scrapedurl
        thumbnail = ""
        aired_date = scrapertools.parse_date(scrapedtitle, "mdy")
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server="cntv",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 show=item.show,
                 aired_date=aired_date,
                 folder=False))
    '''
    <li>
    <a href="http://cctv.cntv.cn/2015/08/21/VIDE1440121441066290.shtml" target="_blank">
    <img src="http://p1.img.cctvpic.com/photoworkspace/2015/08/21/2015082114203738064.jpg" width="151" height="110" />
    </a>
    <div class="tp1"><a href="http://cctv.cntv.cn/2015/08/21/VIDE1440121441066290.shtml" target="_blank">
    </a>
    </div>
    <div class="tp2">
    <a href="http://cctv.cntv.cn/2015/08/21/VIDE1440121441066290.shtml" target="_blank">
    NIHAO CHINA 08/21/2015 Viajando y Aprendiendo Chino-Palabras y frases sobre mobiliarios
    </a></div></li>
    '''
    patron = '<li[^<]+'
    patron += '<a href="([^"]+)"[^<]+'
    patron += '<img src="([^"]+)"[^<]+'
    patron += '</a[^<]+'
    patron += '<div class="tp1"><a[^<]+'
    patron += '</a[^<]+'
    patron += '</div[^<]+'
    patron += '<div class="tp2"[^<]+'
    patron += '<a[^>]+>([^<]+)</a>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:

        title = scrapertools.htmlclean(scrapedtitle)
        url = scrapedurl
        thumbnail = scrapedthumbnail
        aired_date = scrapertools.parse_date(scrapedtitle, "mdy")
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server="cntv",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 show=item.show,
                 aired_date=aired_date,
                 folder=False))

    # If nothing matched, check whether this is a series page
    if len(itemlist) == 0:
        itemlist = episodios_serie(item, data)

    return itemlist
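
# A minimal, hedged sketch of the scraping technique used above (standalone,
# stdlib "re" only): each '[^<]+' hop in the pattern skips one run of tag
# attributes/whitespace, so the regex lifts (guid, thumbnail, title) straight
# out of the sample markup quoted in the docstring.
import re

sample_html = '''<div class="text_lt">
<a guid="40f061633e614ffe829ab3df91279b44" style="cursor:pointer;" onclick="loadvideo('40f061633e614ffe829ab3df91279b44')"><img src="http://p2.img.cctvpic.com/photoworkspace/2015/03/15/2015031515100374890.bmp" width="96" height="75" class="l" /></a>
<h3><a onclick="loadvideo('40f061633e614ffe829ab3df91279b44')" style="cursor:pointer;">EXTRANJEROS EN CHINA 03/15/2015</a></h3>'''

patron_demo = '<div class="text_lt"[^<]+'
patron_demo += '<a guid="([^"]+)"[^<]+<img src="([^"]+)"[^<]+</a[^<]+'
patron_demo += '<h3><a[^>]+>([^<]+)</a>'
# Prints one (guid, thumbnail_url, title) tuple
print re.findall(patron_demo, sample_html, re.DOTALL)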
Beispiel #57
0
def series(item):
    logger.info("pelisalacarta.channels.tusnovelas series")
    itemlist = []

    # Download the page
    data = scrapertools.cachePage(item.url)
    '''
    <div class="pelis">
    <a href="novela/lo-que-la-vida-me-robo.html"><img class="port" src="img/photos/portadas_160x240/72.jpg" alt="Lo que la vida me robó"  width="160" height="240px"  /></a>
    <!-- Descripción -->
    <div class="pelis-desc">
    <h3>Lo que la vida me robó</h3>
    <p class="desc-mid">
    La vida le jugo a Monserrat una mala pasada, su madre la obligo a casarse con un hombre a quién ella no ama, todo por salvar a su familia de la miseria ya que este hombre es rico.
    <br /><br />
    Por este matrimonio debe renunciar al amor real, el cual será encarcelado injustamente producto de una trampa por parte de la madre de Monserrat. Pero la vida muchas veces es bastante complicada y ella podría encontrar el amor en donde menos lo espera.                </p>
    <p class="desc-low">
    <span class="desc-item"><span class="bold">Actores y Actrices: </span> Daniela Castro, Angelique Boyer, Sebastián Rulli, Luis Roberto Guzmán, Sergio Sendel, Rogelio Guerra, Eric del Castillo, Gabriela Rivero, Grettell Valdez, Lisset Gutiérrez Salazar, Alberto Estrella, Ana Bertha Espín, Juan Carlos Barreto, Luis Uribe, Osvaldo Benavides, Verónica Jaspeado, Margarita Magaña.</span>
    <span class="desc-item"><span class="bold">Canal: </span> El Canal de las Estrellas</span>
    <span class="desc-item"><span class="bold">País </span> México </span>
    </p>
    </div>
    <!-- Fin Descripción -->
    </div><!--end .pelis-->
    '''
    patron = '<div class="pelis"[^<]+'
    patron += '<a href="([^"]+)"><img class="port" src="([^"]+)"[^<]+</a[^<]+'
    patron += '<!-- Des[^<]+'
    patron += '<div class="pelis-desc"[^<]+'
    patron += '<h3>([^<]+)</h3>(.*?)</div>'

    matches = re.compile(patron, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle, scrapedplot in matches:
        url = urlparse.urljoin("http://tusnovelas.com/", scrapedurl)
        thumbnail = urlparse.urljoin("http://tusnovelas.com/",
                                     scrapedthumbnail)
        title = scrapedtitle
        plot = scrapertools.htmlclean(scrapedplot)
        if (DEBUG):
            logger.info("title=[" + title + "], url=[" + url +
                        "], thumbnail=[" + thumbnail + "]")
        itemlist.append(
            Item(channel=__channel__,
                 action="episodios",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=thumbnail,
                 plot=plot,
                 viewmode="movie_with_plot",
                 folder=True))

    next_page_url = scrapertools.find_single_match(
        data, '<a href="([^"]+)">Siguiente</a>')
    if next_page_url != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="series",
                 title=">> Página siguiente",
                 url=next_page_url,
                 folder=True))

    return itemlist
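
# Hedged sketch of the pagination check above. It assumes (not verified here)
# that scrapertools.find_single_match is a thin wrapper over re.search that
# returns "" when nothing matches, which is why the "Siguiente" link lookup
# can be compared against "" and degrades gracefully on the last page.
import re

def find_single_match_demo(data, patron):
    match = re.search(patron, data, re.DOTALL)
    return match.group(1) if match else ""

print find_single_match_demo('<a href="pagina-2.html">Siguiente</a>',
                             '<a href="([^"]+)">Siguiente</a>')  # pagina-2.html
print find_single_match_demo('sin enlace',
                             '<a href="([^"]+)">Siguiente</a>')  # "" -> no next item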
Beispiel #58
0
def findvideos(item):
    logger.info("pelisalacarta.channels.pelisplus findvideos")
    itemlist = []
    datas = scrapertools.cache_page(item.url)

    patron = "<iframe.*?src='([^']+)' frameborder='0' allowfullscreen.*?"
    matches = re.compile(patron, re.DOTALL).findall(datas)

    for scrapedurl in matches:

        # Only fetch known mirrors; each substring must be tested against
        # scrapedurl explicitly (a bare string literal before "or" is always
        # truthy and would match every iframe).
        if 'elreyxhd' in scrapedurl or 'pelisplus.biz' in scrapedurl:
            data = scrapertools.cachePage(scrapedurl, headers=headers)
            quote = scrapertools.find_single_match(data,
                                                   'sources.*?file.*?http')

            if "'" in quote:
                patronr = "file:'([^']+)',label:'([^']+)',type:.*?'.*?}"
            elif '"' in quote:
                patronr = 'file:"([^"]+)",label:"([^"]+)",type:.*?".*?}'
            else:
                # No recognizable sources block on this mirror; skip it so
                # patronr is never referenced before assignment.
                continue
            matchesr = re.compile(patronr, re.DOTALL).findall(data)

            for scrapedurl, scrapedcalidad in matchesr:
                logger.info(scrapedurl + ' ' + scrapedcalidad)
                url = scrapedurl
                title = item.contentTitle + ' (' + scrapedcalidad + ')'
                thumbnail = item.thumbnail
                fanart = item.fanart
                if (DEBUG):
                    logger.info("title=[" + title + "], url=[" + url +
                                "], thumbnail=[" + thumbnail + "])")
                itemlist.append(
                    Item(channel=item.channel,
                         action="play",
                         title=title,
                         url=url,
                         thumbnail=thumbnail,
                         fanart=fanart))

    from core import servertools
    itemlist.extend(servertools.find_video_items(data=datas))

    for videoitem in itemlist:

        videoitem.channel = item.channel
        if videoitem.server != '':
            videoitem.thumbnail = servertools.guess_server_thumbnail(
                videoitem.server)
        else:
            videoitem.thumbnail = item.thumbnail
        videoitem.action = 'play'
        videoitem.fulltitle = item.title

        if 'redirector' not in videoitem.url and 'youtube' not in videoitem.url:
            videoitem.title = item.contentTitle + ' (' + videoitem.server + ')'

    # Flag YouTube links as trailers and move them to the end of the list
    for n, videoitem in enumerate(itemlist):
        if 'youtube' in videoitem.url:
            videoitem.title = '[COLOR orange]Trailer en' + ' (' + videoitem.server + ')[/COLOR]'
            itemlist[n], itemlist[-1] = itemlist[-1], itemlist[n]

    # When building the library entry, drop the trailer (moved to the end above)
    if item.extra == 'findvideos' and itemlist and 'youtube' in itemlist[-1].url:
        itemlist.pop()

    if 'serie' not in item.url:
        if config.get_library_support() and len(itemlist) > 0 and item.extra != 'findvideos':
            itemlist.append(
                Item(channel=item.channel,
                     title='[COLOR yellow]Añadir esta película a la biblioteca[/COLOR]',
                     url=item.url,
                     action="add_pelicula_to_library",
                     extra="findvideos",
                     contentTitle=item.contentTitle))

    return itemlist
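
# Hedged sketch (invented sample data, stdlib "re" only) of the quote-sniffing
# step in findvideos above: probe which quoting style the embedded player's
# "sources" block uses, then pick the matching file/label extraction pattern.
import re

sample = "sources: [{file:'http://cdn.example/v.mp4',label:'720p',type:'video/mp4'}]"
quote = re.search('sources.*?file.*?http', sample).group(0)
if "'" in quote:
    patronr = "file:'([^']+)',label:'([^']+)'"
else:
    patronr = 'file:"([^"]+)",label:"([^"]+)"'
print re.findall(patronr, sample)  # -> [('http://cdn.example/v.mp4', '720p')]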
Beispiel #59
0
def get_page(url):

    data = scrapertools.cachePage(
        "http://ssl-proxy.my-addr.org/myaddrproxy.php/" + url)

    return data
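
# Usage note for get_page above: myaddrproxy.php proxies by simple path
# prefixing, so get_page("http://example.com/a") requests
# http://ssl-proxy.my-addr.org/myaddrproxy.php/http://example.com/a
# (useful when the target host refuses direct requests).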
Beispiel #60
0
def play(item):
    import xbmc
    import xbmcgui

    logger.info("[turbonick.py] play")

    # Open the progress dialog
    dialogWait = xbmcgui.DialogProgress()
    dialogWait.create('Descargando datos del vídeo...', item.title)

    # --------------------------------------------------------
    # Download the detail page
    # --------------------------------------------------------
    data = scrapertools.cachePage(item.url)
    patron = '<src>([^<]+)</src>'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    url = matches[0]
    #rtmp://cp35019.edgefcs.net/ondemand/mtviestor/_!/intlnick/es/AVATAR/AVATAR1A_OD_640.flv
    #DEBUG: Protocol : RTMP
    #DEBUG: Hostname : cp35019.edgefcs.net
    #DEBUG: Port     : 1935
    #DEBUG: Playpath : mtviestor/_!/intlnick/es/AVATAR/AVATAR1A_OD_640
    #DEBUG: tcUrl    : rtmp://cp35019.edgefcs.net:1935/ondemand
    #DEBUG: app      : ondemand
    #DEBUG: flashVer : LNX 9,0,124,0
    #DEBUG: live     : no
    #DEBUG: timeout  : 300 sec
    # The first 35 characters are the tcUrl ("rtmp://cp35019.edgefcs.net/ondemand",
    # see the rtmpdump trace above); the rest, up to the last dot, is the playpath.
    cabecera = url[:35]
    logger.info("cabecera=" + cabecera)
    finplaypath = url.rfind(".")
    playpath = url[35:finplaypath]
    logger.info("playpath=" + playpath)

    logger.info("url=" + url)

    # Start with an empty playlist
    playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
    playlist.clear()

    # Create the list item and add it to the playlist
    url = cabecera
    listitem = xbmcgui.ListItem(item.title,
                                iconImage="DefaultVideo.png",
                                thumbnailImage=item.thumbnail)
    listitem.setProperty(
        "SWFPlayer",
        "http://es.turbonick.nick.com/global/apps/broadband/swf/bb_flv_player.swf"
    )
    #listitem.setProperty("Playpath","14314/plus/plustv/PO778395")
    listitem.setProperty("Playpath", playpath)
    listitem.setProperty("Hostname", "cp35019.edgefcs.net")
    listitem.setProperty("Port", "1935")
    #listitem.setProperty("tcUrl","rtmp://od.flash.plus.es/ondemand")
    listitem.setProperty("tcUrl", cabecera)
    listitem.setProperty("app", "ondemand")
    listitem.setProperty("flashVer", "LNX 9,0,124,0")
    #listitem.setProperty("pageUrl","LNX 9,0,124,0")

    listitem.setInfo(
        "video", {
            "Title": item.title,
            "Plot": item.plot,
            "Studio": CHANNELNAME,
            "Genre": item.category
        })
    playlist.add(url, listitem)

    # Close the dialog
    dialogWait.close()
    del dialogWait

    # Play
    xbmcPlayer = xbmc.Player(xbmc.PLAYER_CORE_AUTO)
    xbmcPlayer.play(playlist)

    return []
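
# Hedged sketch of the RTMP URL split that play() above hard-codes as url[:35],
# using the sample URL from the rtmpdump trace instead of a magic offset
# (demo only; the app name "ondemand" is taken from that trace):
url = "rtmp://cp35019.edgefcs.net/ondemand/mtviestor/_!/intlnick/es/AVATAR/AVATAR1A_OD_640.flv"
app = "ondemand"
cut = url.find("/" + app + "/") + len("/" + app)
tcurl = url[:cut]                       # rtmp://cp35019.edgefcs.net/ondemand
playpath = url[cut + 1:url.rfind(".")]  # mtviestor/_!/intlnick/es/AVATAR/AVATAR1A_OD_640
print tcurl
print playpath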