def findvideos(item):
    """Extract video links (with a quality label) from the tabbed player markup.

    Returns a list of playable deep-copies of *item*, one per video that
    servertools recognises in each tab/server/url match of the page.
    """
    data = scrapertools.cachePage(item.url)
    from servers import servertools
    import copy
    itemlist = []
    patronvideos  = '</div>\n<div class="tab-pane reproductor repron" id="([^"]+)">\n<div class="calishow">([^<]+)</div>\n'
    patronvideos += '<iframe[^>]+src="([^"]+)"[^>]+></iframe>\n'
    patronvideos += '<div class="clear"></div>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    for id, server, url in matches:
        # The quality text lives in the tab header that points at this pane.
        patroncalidad = '<a href="#'+id+'" data-toggle="tab" alt="[^"]+" title="[^"]+">\n<span class="[^"]+" style="margin-right:5px"></span>\n([^\n]+)\n</a>'
        calidad = re.compile(patroncalidad, re.DOTALL).findall(data)
        # Guard against a missing quality tab instead of raising IndexError.
        calidad_txt = calidad[0] if calidad else ""
        if server == "netu":
            # netu embeds must be rewritten to the canonical watch URL.
            url = "http://netu.tv/watch_video.php?v=" + url.split("=")[1]
        listavideos = servertools.findvideos(url)
        for video in listavideos:
            NuevoItem = copy.deepcopy(item)
            NuevoItem.title = "Ver en: [" + video[2] + "]" + " (" + calidad_txt + ")"
            NuevoItem.url = video[1]
            NuevoItem.server = video[2]
            NuevoItem.action = "play"
            NuevoItem.folder = False
            itemlist.append(NuevoItem)
    return itemlist
def findvideostv(item):
    """Build the per-episode list (online + download links) for a TV season.

    Looks up episode metadata on TMDB when available and sorts the result
    by episode number, then title.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []
    # item.title looks like "Temporada N" -> keep the season number.
    season = item.title.split(" ")[1]
    thumbnail = item.thumbnail
    # id -> text maps for language and quality
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)

    # Fix: define otmdb up-front so the infoepi() calls below fail cleanly in
    # their own try/except instead of relying on a swallowed NameError when
    # the TMDB lookup itself failed.
    otmdb = None
    try:
        from core.tmdb import Tmdb
        otmdb = Tmdb(texto_buscado=item.fulltitle, tipo="tv")
    except:
        pass

    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)".*?episode="([^"]+)" season="'+season+'" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, episode, language, url in matches:
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "[COLOR sandybrown][B]Episodio "+episode+"[/B][/COLOR] "
            titulo += "Enlace encontrado en [COLOR green][B]"+enlaces[0][0]+"[/B][/COLOR] [COLOR magenta]["+idioma+"][/COLOR] ["+calidad_videos.get(quality)+"]"
            servidor = enlaces[0][2]
            try:
                item.plot, thumbnail = infoepi(otmdb, season, episode)
            except:
                pass
            itemlist.append(Item(channel=__channel__, action="play", server=servidor, title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle, thumbnail=thumbnail, fanart=item.fanart, plot=str(item.plot), extra=episode, folder=False))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)".*?episode="([^"]+)" season="'+season+'" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, episode, language, url in matches:
        mostrar_server = True
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            servidor = enlaces[0][2]
            # Optionally hide links hosted on premium-only servers.
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "[COLOR sandybrown][B]Episodio "+episode+"[/B][/COLOR] "
                titulo += "Enlace encontrado en [COLOR green][B]"+enlaces[0][0]+"[/B][/COLOR] ["+idioma+"] ["+calidad_videos.get(quality)+"]"
                try:
                    item.plot, thumbnail = infoepi(otmdb, season, episode)
                except:
                    pass
                itemlist.append(Item(channel=__channel__, action="play", server=servidor, title=titulo, url=enlaces[0][1], fulltitle=item.fulltitle, thumbnail=thumbnail, fanart=item.fanart, plot=str(item.plot), extra=episode, folder=False))

    # Sort by episode number, then title (renamed lambda arg to avoid
    # shadowing the `item` parameter).
    itemlist.sort(key=lambda it: (int(it.extra), it.title))
    return itemlist
def play(item):
    """Rewrite local redirector URLs to their real server URLs, then list
    every playable video found in the page (skipping known-invalid ids)."""
    logger.info("[peliculasaudiolatino.py] play")
    itemlist = []

    # Each local redirector prefix maps to the real server's URL prefix.
    reemplazos = [
        ("http://www.peliculasaudiolatino.tv/show/mv.php?url=", "http://www.megavideo.com/?v="),
        ("http://www.peliculasaudiolatino.tv/show/videobb.php?url=", "http://www.videobb.com/watch_video.php?v="),
        ("http://www.peliculasaudiolatino.tv/show/vidbux.php?url=", "http://www.vidbux.com/"),
        ("http://www.peliculasaudiolatino.tv/show/vidxden.php?url=", "http://www.vidxden.com/"),
        ("http://www.peliculasaudiolatino.tv/show/videozer.php?url=", "http://www.videozer.com/video/"),
        ("http://www.peliculasaudiolatino.tv/v/pl/play.php?url=", "http://www.putlocker.com/embed/"),
        ("http://www.peliculasaudiolatino.tv/v/mv/play.php?url=", "http://www.modovideo.com/frame.php?v="),
        ("http://www.peliculasaudiolatino.tv/v/ss/play.php?url=", "http://www.sockshare.com/embed/"),
        ("http://www.peliculasaudiolatino.tv/v/vb/play.php?url=", "http://vidbull.com/"),
        ("http://www.peliculasaudiolatino.tv/show/sockshare.php?url=", "http://www.sockshare.com/embed/"),
        ("http://www.peliculasaudiolatino.tv/show/moevide.php?url=", "http://moevideo.net/?page=video&uid="),
        ("http://www.peliculasaudiolatino.tv/show/novamov.php?url=", "http://www.novamov.com/video/"),
        ("http://www.peliculasaudiolatino.tv/show/movshare.php?url=", "http://www.movshare.net/video/"),
        ("http://www.peliculasaudiolatino.tv/show/divxstage.php?url=", "http://www.divxstage.net/video/"),
    ]
    data2 = scrapertools.cache_page(item.url)
    for viejo, nuevo in reemplazos:
        data2 = data2.replace(viejo, nuevo)

    for video in servertools.findvideos(data2):
        # Skip known placeholder/invalid video ids.
        prefijo = video[1][0:8]
        if prefijo != "FN3WE43K" and prefijo != "9CC3F8&e":
            scrapedtitle = item.title + video[0]
            videourl = video[1]
            server = video[2]
            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"]")

            # Añade al listado de XBMC
            itemlist.append(Item(channel=__channel__, action="play", title=scrapedtitle, fulltitle=item.fulltitle, url=videourl, server=server, folder=False))

    return itemlist
def detail(params, url, category):
    # Detail view: adds one XBMC listing entry per video link found on the
    # page, then closes the directory with sorting disabled.
    logger.info("[pintadibujos.py] detail")

    # Router parameters arrive URL-encoded.
    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # ------------------------------------------------------------------------------------
    # Find the video links
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        # video[0]=name, video[1]=url, video[2]=server (per usage here)
        xbmctools.addnewvideo(CHANNELNAME, "play", category, video[2],
                              title + " - " + video[0], video[1], thumbnail,
                              "")
    # ------------------------------------------------------------------------------------

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle,
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
Example #5
0
def findvideos(item, channelitemlist=None):
    """Return playable items, either by scraping item.url or by re-labelling
    an already-built channel item list.

    channelitemlist -- optional list of channel Items; when given, no page
    is downloaded and each entry is copied with this item's title/thumbnail.
    """
    logger.info("findvideos")
    itemlist = []
    if channelitemlist is None:  # fix: identity comparison instead of ==
        from core import scrapertools
        from servers import servertools
        from servers import longurl
        import copy
        data = scrapertools.cache_page(item.url)
        # Expand shortened links before scanning for known servers.
        data = longurl.get_long_urls(data)
        listavideos = servertools.findvideos(data)
        for video in listavideos:
            NuevoItem = copy.deepcopy(item)
            NuevoItem.title = item.title
            NuevoItem.fulltitle = "Ver en: [" + video[2] + "]"
            NuevoItem.url = video[1]
            NuevoItem.server = video[2]
            NuevoItem.action = "play"
            NuevoItem.folder = False
            itemlist.append(NuevoItem)
    else:
        import copy
        for itemvideo in channelitemlist:
            NuevoItem = copy.deepcopy(itemvideo)
            NuevoItem.fulltitle = itemvideo.title
            NuevoItem.title = item.title
            NuevoItem.thumbnail = item.thumbnail
            itemlist.append(NuevoItem)

    return itemlist
def listvideos(item):
    """Parse entry paragraphs from the page and emit one playable item per
    video link found inside each entry."""
    logger.info("[Filmfab:py] listvideos")

    url = item.url

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # One match per entry paragraph
    patronvideos = '<p>(<a href="[^"]+"><strong>.*?)</p>'
    entradas = re.compile(patronvideos, re.DOTALL).findall(data)
    scrapertools.printMatches(entradas)

    itemlist = []
    for entrada in entradas:
        # Entry title. Fix: default to "" when the pattern does not match,
        # instead of leaking the previous entry's title (or raising
        # NameError on the very first entry).
        patrontitulo = '<a href="[^"]+"><strong>([^<]+)<'
        matches = re.compile(patrontitulo, re.DOTALL).findall(entrada)
        scrapertools.printMatches(matches)
        scrapedtitle = matches[0].strip() if len(matches) > 0 else ""

        # Video links inside this entry
        import servertools
        listavideos = servertools.findvideos(entrada)

        for video in listavideos:
            # Fix: build each title from the entry title only; the original
            # appended every video's name cumulatively across the loop.
            titulo = scrapedtitle + " - " + video[0]
            scrapedurl = video[1]
            server = video[2]

            itemlist.append(Item(channel=CHANNELNAME, action="play", title=titulo, url=scrapedurl, server=server, folder=False))

    return itemlist
Example #7
0
def play(item):
    """Resolve item.url (following one HTTP redirect) into playable items,
    falling back to scanning the URL itself when the redirect yields nothing.
    """
    logger.info("pelisalacarta.bricocine findvideos")
    # Follow the redirect and look for video links in the Location header.
    media_url = scrapertools.get_header_from_response(item.url, header_to_get="location")
    itemlist = servertools.find_video_items(data=media_url)

    if len(itemlist) == 0:
        # Fallback: scan the original URL directly.
        itemlist = servertools.find_video_items(data=item.url)
        # Fix: the original then called servertools.findvideos(data) with
        # `data` only defined inside this branch (NameError when the first
        # lookup succeeded) and looped over the result assigning locals that
        # were never used; that dead code is removed.

    return itemlist
def videos(item):
    """Collect the episode sub-pages linked from item.url and list every
    recognised video link (skipping known-invalid ids)."""
    logger.info("[islapeliculas.py] videos")
    # Download the page
    data = scrapertools.cachePage(item.url)
    patron = '(modules.php\?name=Anime-Online&func=JokeView&jokeid=.*?&amp;Es=\d)'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    for match in matches:
        # Append each linked sub-page so one scan covers everything.
        url = urlparse.urljoin('http://www.buenaisla.com/', match)
        url = url.replace('&amp;', '&')
        data2 = scrapertools.cachePage(url)
        data = data + data2

    title = item.title
    scrapedthumbnail = item.thumbnail
    listavideos = servertools.findvideos(data)

    itemlist = []
    for video in listavideos:
        # Skip known placeholder/invalid video ids.
        cabecera = video[1][0:8]
        if cabecera != "FN3WE43K" and cabecera != "9CC3F8&e":
            scrapedtitle = title.strip() + " - " + video[0]
            videourl = video[1]
            server = video[2]
            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"], thumbnail=["+scrapedthumbnail+"]")

            # Añade al listado de XBMC
            itemlist.append(Item(channel=CHANNELNAME, action="play", title=scrapedtitle, url=videourl, thumbnail=scrapedthumbnail, server=server, folder=False))

    return itemlist
Example #9
0
def play(item):
    """Turn every video link found in the page at item.url into a
    "Trailer - <name>" play item (keeping item.url as the target)."""
    logger.info("pelisalacarta.bricocine findvideos")

    itemlist = servertools.find_video_items(data=item.url)
    data = scrapertools.cache_page(item.url)

    for video in servertools.findvideos(data):
        nombre = scrapertools.unescape(video[0])
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server=video[2],
                 title="Trailer - " + nombre,
                 url=item.url,
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 fulltitle=item.title,
                 fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg",
                 folder=False))

    return itemlist
Example #10
0
def findvideos(item, channel):
    """Download item.url and emit one playable Item (for *channel*) per
    video link recognised by servertools."""
    logger.info("findvideos")

    from core import scrapertools
    from servers import servertools

    # Descarga la pagina
    pagina = scrapertools.cachePage(item.url)

    resultado = []
    for video in servertools.findvideos(pagina):
        resultado.append(
            Item(channel=channel,
                 action="play",
                 title=video[0],
                 url=video[1],
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 server=video[2],
                 fulltitle=item.fulltitle,
                 folder=False))

    return resultado
Example #11
0
def find_video_items(item=None, data=None, channel=""):
    """Scan *data* (or the page at item.url when data is None) and return
    one playable Item per video link found."""
    logger.info("[launcher.py] findvideos")

    # Download the page only when no markup was handed in.
    if data is None:
        from core import scrapertools
        data = scrapertools.cache_page(item.url)

    from core.item import Item
    from servers import servertools
    listavideos = servertools.findvideos(data)

    if item is None:
        item = Item()

    itemlist = []
    for video in listavideos:
        nuevo = Item(channel=item.channel,
                     title=item.title.strip() + " - " + video[0],
                     action="play",
                     server=video[2],
                     page=item.page,
                     url=video[1],
                     thumbnail=item.thumbnail,
                     show=item.show,
                     plot=item.plot,
                     folder=False)
        itemlist.append(nuevo)

    return itemlist
Example #12
0
def find_video_items(item=None, data=None, channel=""):
    """Generic scan: one "Enlace encontrado en <server>" Item per video link
    found in *data* (the page at item.url is fetched when data is None)."""
    logger.info("[servertools.py] find_video_items")

    # Fetch the page only if the caller did not pass the markup directly.
    if data is None:
        from core import scrapertools
        data = scrapertools.cache_page(item.url)

    from core.item import Item
    from servers import servertools
    videos = servertools.findvideos(data)

    if item is None:
        item = Item()

    resultado = []
    for video in videos:
        servidor = video[2]
        resultado.append(Item(
            channel=item.channel,
            title="Enlace encontrado en " + servidor,
            action="play",
            server=servidor,
            url=video[1],
            thumbnail="http://media.tvalacarta.info/servers/server_" + servidor + ".png",
            show=item.show,
            plot=item.plot,
            parentContent=item,
            folder=False))

    return resultado
Example #13
0
def trailer(item):
    """List trailer links found in the page at item.url; add a "no trailer"
    notice entry when none are found."""
    logger.info("pelisalacarta.bricocine trailer")

    itemlist = []
    data = get_page(item.url)

    # Fix: removed the unused `patron` iframe regex the original compiled
    # but never applied.

    # Busca los enlaces a los videos
    listavideos = servertools.findvideos(data)
    if len(listavideos) == 0:
        itemlist.append(Item(channel=__channel__, title="[COLOR gold][B]Esta pelicula no tiene trailer,lo sentimos...[/B][/COLOR]", thumbnail="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", fanart="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg", folder=False))

    for video in listavideos:
        videotitle = scrapertools.unescape(video[0])
        url = video[1]
        server = video[2]

        title = "[COLOR crimson]Trailer - [/COLOR]"
        itemlist.append(Item(channel=__channel__, action="play", server=server, title=title + videotitle, url=url, thumbnail=item.extra, plot=item.plot, fulltitle=item.title, fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False))
    return itemlist
Example #14
0
def detail(params, url, category):
    # Detail view (cineblog01): every video link found on the page becomes a
    # "Megavideo - <name>" entry; the directory is closed with sorting off.
    xbmc.output("[cineblog01.py] detail")

    title = params.get("title")
    thumbnail = params.get("thumbnail")
    xbmc.output("[cineblog01.py] title=" + title)
    xbmc.output("[cineblog01.py] thumbnail=" + thumbnail)

    # Download the page
    data = scrapertools.cachePage(url)
    #xbmc.output(data)

    # ------------------------------------------------------------------------------------
    # Find the video links
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        # NOTE(review): the label hard-codes "Megavideo" even though video[2]
        # carries the real server name — looks stale; confirm before changing.
        xbmctools.addvideo(CHANNELNAME, "Megavideo - " + video[0], video[1],
                           category, video[2])
    # ------------------------------------------------------------------------------------

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=pluginhandle, category=category)

    # Disable sorting...
    xbmcplugin.addSortMethod(handle=pluginhandle,
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory...
    xbmcplugin.endOfDirectory(handle=pluginhandle, succeeded=True)
Example #15
0
def findvideos(item):
    """Fetch item.url and wrap every recognised video link as a play Item
    carrying this item's thumbnail and plot."""
    from core.item import Item

    Log("[__init__.py] findvideos")

    # Descarga la pagina
    from core import scrapertools
    html = scrapertools.cachePage(item.url)

    from servers import servertools
    encontrados = servertools.findvideos(html)

    itemlist = []
    for video in encontrados:
        itemlist.append(
            Item(channel=item.channel,
                 action="play",
                 title=video[0],
                 url=video[1],
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 server=video[2],
                 folder=False))

    return itemlist
def detail(params,url,category):
    # Detail view (veranime): follows the download-list link when present and
    # adds one XBMC entry per video found on the resulting page.
    logger.info("[veranime.py] detail")

    # Router parameters arrive URL-encoded.
    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    plot = urllib.unquote_plus( params.get("plot") )

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # Link to the page that actually hosts the episode links.
    patron  = '<div id="listacapdd"><div class="listddserie">[^<]+'
    patron += '<a title="[^"]+" href="([^"]+)"><strong>[^<]+</strong></a>[^<]+'
    patron += '</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        # Replace the page data with the linked page before scanning.
        url = matches[0]
        data = scrapertools.cachePage(url)

    # ------------------------------------------------------------------------------------
    # Find the video links
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        videotitle = video[0]
        url = video[1]
        server = video[2]
        xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
    # ------------------------------------------------------------------------------------

    # Set the category label, disable sorting, and close the directory
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def videos(item):
    """List the audio variants of a video behind HTTP Basic auth.

    Each "url"/"audio" pair found in the page becomes one play Item, resolved
    through servertools to its real server URL.
    """
    logger.info("[liberateca.py] videos")

    # Fetch the page with HTTP Basic authentication.
    # (Fix: removed the py2-only `print data` / `print videos` debug
    # statements, which are syntax errors under Python 3.)
    authStr = base64.encodestring('%s:%s' % (LOGIN, PASSWORD))[:-1]
    data = scrapertools.cachePage(item.url, headers=[["Authorization", "Basic %s" % authStr]])

    # Pairs of video url + audio language
    patronvideos  = '"url": "([^"]+)",[^"]+'
    patronvideos += '"audio": "([^"]+)"'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if DEBUG: scrapertools.printMatches(matches)

    itemlist = []
    for match in matches:
        scrapedurl = urlparse.urljoin(item.url, match[0])
        scrapedtitle = "Audio " + match[1]
        scrapedplot = ""
        scrapedthumbnail = ""
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Resolve through servertools; keep only resolvable links.
        videos = servertools.findvideos(scrapedurl)
        if len(videos) > 0:
            server = videos[0][2]
            scrapedurl = videos[0][1]
            itemlist.append(Item(channel=CHANNELNAME, action="play", title=scrapedtitle+" ["+server+"]", url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, server=server, folder=False))

    return itemlist
def detail(params, url, category):
    """Show a movie's detail: scrape thumbnail/title/plot and add one XBMC
    entry per video mirror found on the page."""
    logger.info("[descargapelis.py] detail")

    # Router parameters arrive URL-encoded.
    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))
    plot = ""

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # thumbnail / title / plot block
    patron = '<table width="100%" cellpadding="0" cellspacing="0">[^<]+?'
    patron += '<tr>[^<]+?<td align="center"><img src="(.+?)".+?'
    patron += '<td align="justify" valign="top" class="texto_peli"><b>Sinopsis de (.+?):</b>(.+?)<br />'
    matches = re.compile(patron, re.DOTALL).findall(data)
    if len(matches) > 0:
        if DEBUG:
            scrapertools.printMatches(matches)
        # Fix: only dereference matches[0] when the pattern matched; the
        # original did so unconditionally and raised IndexError on pages
        # without this table. The router-supplied title/thumbnail and empty
        # plot are kept as fallbacks.
        thumbnail = matches[0][0]
        title = matches[0][1]
        plot = matches[0][2]

    listavideos = servertools.findvideos(data)
    for video in listavideos:
        xbmctools.addnewvideo(CHANNELNAME, "play", CHANNELNAME, video[2], title + " (" + video[2] + ")", video[1], thumbnail, plot)

    # Label (top-right)...
    xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=category)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
Example #19
0
def findvideos(item):
    """Download item.url and convert every recognised video link into a
    single-line play Item."""
    from core.item import Item

    Log("[__init__.py] findvideos")

    # Descarga la pagina
    from core import scrapertools
    pagina = scrapertools.cachePage(item.url)

    from servers import servertools

    itemlist = []
    for video in servertools.findvideos(pagina):
        nuevo = Item(channel=item.channel, action="play", title=video[0], url=video[1], thumbnail=item.thumbnail, plot=item.plot, server=video[2], folder=False)
        itemlist.append(nuevo)

    return itemlist
def findvideos(item):
    """Parse a cinevos page: optional subtitle link, plot text, then one
    play Item per video link (Megaupload links get a fixed label)."""
    logger.info("[cinevos.py] findvideos")
    # Download the page
    data = scrapertools.cachePage(item.url)
    logger.info(data)

    # Optional subtitle file
    subs = re.compile('<a href="(http://www.cinevos.com/sub/[^"]+)"', re.DOTALL).findall(data)
    scrapertools.printMatches(subs)
    sub = ""
    if len(subs):
        sub = subs[0]
        logger.info("con subtitulo :%s" % sub)

    # Description block, stripped of HTML tags
    descripcion = re.compile('<p>(<div.*?</div>) </p>', re.DOTALL).findall(data)
    scrapertools.printMatches(descripcion)
    plot = ""
    if len(descripcion):
        plot = re.sub("<[^>]+>", "", descripcion[0])

    # Video links
    itemlist = []
    for video in servertools.findvideos(data):
        videotitle = scrapertools.unescape(video[0])
        if "Megaupload" in videotitle:
            videotitle = item.title + " - [Megaupload]"
        else:
            videotitle = item.title + " - " + videotitle
        itemlist.append(Item(channel=CHANNELNAME, action="play", server=video[2], title=videotitle, url=video[1], thumbnail=item.thumbnail, plot=plot, subtitle=sub, folder=False))
    return itemlist
def ddpostdetail(params,url,category):
    # Direct-download post detail: scrapes thumbnail and plot, adds one XBMC
    # entry per video link, plus an "add all to download list" entry.
    logger.info("[mcanime.py] ddpostdetail")

    # Router parameters arrive URL-encoded; plot comes from the ListItem.
    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # Series picture from the encyclopedia
    patron = '<img src="([^"]+)" width="300".*?class="title_pic" />'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        thumbnail = matches[0]
    
    # Plot - post body, HTML-cleaned and flattened to a single line
    patron = '<div id="download_detail">(.*?)</div>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        plot = scrapertools.htmlclean(matches[0])
        plot = plot.replace("\r\n"," ")
        plot = plot.replace("\r"," ")
        plot = plot.replace("\n"," ")
        plot = plot.strip()

    # ------------------------------------------------------------------------------------
    # Find the video links
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    i = 1

    for video in listavideos:
        # Title carries a running index: "<title> (N) <video name>".
        # The iso-8859-1 re-encode is presumably for the old GUI's charset
        # — TODO confirm; non-utf8 input falls back to the raw string.
        try:
            fulltitle = unicode( title.strip() + " (%d) " + video[0], "utf-8" ).encode("iso-8859-1")
        except:
            fulltitle = title.strip() + " (%d) " + video[0]
        fulltitle = fulltitle % i
        i = i + 1
        videourl = video[1]
        server = video[2]
        #logger.info("videotitle="+urllib.quote_plus( videotitle ))
        #logger.info("plot="+urllib.quote_plus( plot ))
        #plot = ""
        #logger.info("title="+urllib.quote_plus( title ))

        xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , fulltitle , videourl , thumbnail , plot )
    # ------------------------------------------------------------------------------------

    # ------------------------------------------------------------------------------------
    # Add the "add all videos to the download list" entry
    # (the entry label below is a runtime string and is kept byte-identical,
    # mojibake included)
    # ------------------------------------------------------------------------------------
    xbmctools.addnewvideo( CHANNELNAME , "addalltodownloadlist" , title , "" , "(AÒadir todos los vÌdeos a la lista de descarga)" , url , thumbnail , plot )
    
    # Close the directory
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def mirrors(item):
    """Follow the "DISPONIBLE EN EL FORO" link on the page and list every
    video mirror found on the linked forum page."""
    logger.info("[capitancinema.py] mirrors")

    # Download the page
    data = scrapertools.cachePage(item.url)
    enlaces = re.compile('<li><strong>DISPONIBLE EN EL FORO</strong>[^<]+<a href="([^"]+)"', re.DOTALL).findall(data)

    itemlist = []
    if len(enlaces) > 0:
        # The actual video links live on the linked forum page.
        pagina_foro = scrapertools.cachePage(enlaces[0])

        for video in servertools.findvideos(pagina_foro):
            itemlist.append(
                Item(channel=CHANNELNAME,
                     action="play",
                     title=item.title.strip() + " - " + video[0],
                     url=video[1],
                     thumbnail=item.thumbnail,
                     plot=item.plot,
                     server=video[2],
                     folder=False))

    return itemlist
def findvideos(item):
    """List every video mirror found in the page at item.url as a play Item
    titled "<item title> (<server>)"."""
    logger.info("[tumejortv.py] findvideos")

    # Download the page
    data = scrapertools.cachePage(item.url)
    #logger.info(data)

    # Fix: the original also extracted a plot from '<div id="blogitem">...'
    # into a local that was never used (items carry item.plot) and that was
    # unbound when the pattern missed; the dead extraction is removed.
    listavideos = servertools.findvideos(data)

    itemlist = []
    for video in listavideos:
        scrapedtitle = item.title + " (" + video[2] + ")"
        scrapedurl = video[1]
        server = video[2]
        itemlist.append(Item(channel=CHANNELNAME, action="play", title=scrapedtitle, url=scrapedurl, thumbnail=item.thumbnail, plot=item.plot, server=server, folder=False))

    return itemlist
Example #24
0
def find_video_items(item=None, data=None, channel=""):
    """Return one playable Item per video link found in *data* (the page at
    item.url is fetched when data is None)."""
    logger.info("[launcher.py] findvideos")

    # Download the page only when no markup was supplied.
    if data is None:
        from core import scrapertools
        data = scrapertools.cache_page(item.url)

    from core.item import Item
    from servers import servertools
    encontrados = servertools.findvideos(data)

    if item is None:
        item = Item()

    itemlist = []
    prefijo = item.title.strip()
    for video in encontrados:
        titulo = "%s - %s" % (prefijo, video[0].strip())
        itemlist.append(Item(channel=item.channel, title=titulo, action="play", server=video[2], page=item.page, url=video[1], thumbnail=item.thumbnail, show=item.show, plot=item.plot, folder=False))

    return itemlist
def detail(params,url,category):
    # Detail view: adds one XBMC entry per video link found on the page,
    # then closes the directory with sorting disabled.
    logger.info("[pintadibujos.py] detail")

    # Router parameters arrive URL-encoded.
    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # ------------------------------------------------------------------------------------
    # Find the video links
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        # video[0]=name, video[1]=url, video[2]=server (per usage here)
        xbmctools.addnewvideo( CHANNELNAME , "play" , category , video[2] , title + " - " + video[0] , video[1] , thumbnail , "" )
    # ------------------------------------------------------------------------------------

    # Label (top-right)...
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )

    # Disable sorting...
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )

    # End of directory...
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #26
0
def find_video_items(item=None, data=None, channel=""):
    """Build "Enlace encontrado en <server>" play Items for every video link
    servertools recognises in *data* (item.url is fetched when data is None)."""
    logger.info("[servertools.py] find_video_items")

    if data is None:
        # No markup supplied: download the page referenced by the item.
        from core import scrapertools
        data = scrapertools.cache_page(item.url)

    from core.item import Item
    from servers import servertools

    if item is None:
        item = Item()

    itemlist = []
    for video in servertools.findvideos(data):
        server = video[2]
        icono = "http://media.tvalacarta.info/servers/server_%s.png" % server
        titulo = "Enlace encontrado en %s" % server
        itemlist.append(Item(channel=item.channel, title=titulo, action="play", server=server, url=video[1], thumbnail=icono, show=item.show, plot=item.plot, parentContent=item, folder=False))

    return itemlist
Example #27
0
def trailer(item):
    """List trailer Items for item.url, or a notice entry when none are found."""

    logger.info("pelisalacarta.bricocine trailer")

    itemlist = []
    page = get_page( item.url )

    # Trailer iframe pattern (kept from the original; detection is actually
    # delegated to the server modules below).
    patron = "<iframe width='570' height='400' src='//([^']+)"

    # Let the server modules locate every playable link.
    videos = servertools.findvideos(page)
    if len(videos) == 0:
        itemlist.append( Item(channel=__channel__, title="[COLOR gold][B]Esta pelicula no tiene trailer,lo sentimos...[/B][/COLOR]", thumbnail ="http://s6.postimg.org/fay99h9ox/briconoisethumb.png", fanart ="http://s6.postimg.org/uie8tu1jl/briconoisefan.jpg",folder=False) )

    for rawtitle, videourl, server in videos:
        cleaned = scrapertools.unescape(rawtitle)
        itemlist.append( Item(channel=__channel__, action="play", server=server, title="[COLOR crimson]Trailer - [/COLOR]" + cleaned, url=videourl, thumbnail=item.extra, plot=item.plot, fulltitle=item.title, fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg", folder=False) )
    return itemlist
Example #28
0
def enlaces(item):
    """Decode the dm()-encoded links for this descargasmix item and list them.

    Links in the matched block are numbered in descending order; entries from
    a "/folder/" link keep their own titles.
    """
    logger.info("pelisalacarta.channels.descargasmix enlaces")
    itemlist = []
    data = scrapertools.cachePage(item.url)
    # Isolate the block of encoded links belonging to this item.
    patron = "(dm\(c.a\('" + item.extra.replace("+", "\+") + "'.*?)</div>"
    bloque = scrapertools.find_single_match(data, patron)
    codes = scrapertools.find_multiple_matches(bloque, 'dm\(c.a\(\'([^\']+)\'')
    restantes = len(codes)
    for code in codes:
        decoded = dm(code)
        found = servertools.findvideos(data=decoded)
        if not found:
            continue
        for link in found:
            # link is (title, url, server)
            if "/folder/" in decoded:
                titulo = link[0]
            else:
                titulo = item.title.split("-")[0] + " - Enlace " + str(restantes)
                restantes -= 1
            itemlist.append(Item(channel=__channel__,
                                 action="play",
                                 server=link[2],
                                 title=titulo,
                                 url=link[1],
                                 fulltitle=item.fulltitle,
                                 thumbnail=item.thumbnail,
                                 fanart=item.fanart,
                                 plot=item.plot,
                                 folder=False))
    # Sort alphabetically by title (lambda arg renamed so it does not shadow
    # the function's 'item' parameter).
    itemlist.sort(key=lambda it: it.title)
    return itemlist
Example #29
0
def detail(params,url,category):
    """Legacy (params/url/category) detail view: lists every video link on *url*.

    Entries are added straight to the XBMC directory via xbmctools; nothing
    is returned.
    """
    xbmc.output("[cineblog01.py] detail")

    title = params.get("title")
    thumbnail = params.get("thumbnail")
    xbmc.output("[cineblog01.py] title="+title)
    xbmc.output("[cineblog01.py] thumbnail="+thumbnail)

    # Download the page
    data = scrapertools.cachePage(url)
    #xbmc.output(data)

    # ------------------------------------------------------------------------------------
    # Find the links to the videos
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        # video is (title, url, server)
        xbmctools.addvideo( CHANNELNAME , "Megavideo - "+video[0] , video[1] , category , video[2] )
    # ------------------------------------------------------------------------------------

    # Label (top-right)...
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )

    # Disable sorting...
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )

    # End of directory...
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def detail(item):
    """Build one playable Item per video link found on item.url."""
    logger.info("[Descarregadirecta.py] detail")

    # Download the page.
    page = scrapertools.cachePage(item.url)

    # Delegate link detection to the server modules.
    found = servertools.findvideos(page)

    itemlist = []
    for video in found:
        # video is (title, url, server)
        server = video[2]
        itemlist.append(Item(channel=CHANNELNAME,
                             action="play",
                             title=item.title + " [" + server + "]",
                             url=video[1],
                             thumbnail=item.thumbnail,
                             plot=item.plot,
                             server=server,
                             folder=False))

    return itemlist
Example #31
0
def play(item):
    """Resolve item.url into playable Items, one per detected video link."""
    logger.info("pelisalacarta.channels.reyanime play")

    page = scrapertools.cache_page(item.url)
    logger.info("data=" + page)

    itemlist = []
    for videotitle, videourl, server in servertools.findvideos(page):
        entry_title = item.title + videotitle
        if (DEBUG):
            logger.info("title=[" + entry_title + "], url=[" + videourl + "]")

        # Add to the XBMC listing.
        itemlist.append(Item(channel=__channel__,
                             action="play",
                             title=entry_title,
                             fulltitle=item.fulltitle,
                             url=videourl,
                             server=server,
                             folder=False))

    return itemlist
def play(item):
    """Resolve trailer video links for a bricocine item.

    Starts with any links recognizable in the URL string itself, then scans
    the downloaded page and appends one playable Item per video found.
    """
    logger.info("pelisalacarta.bricocine findvideos")

    # Links that can be recognized directly in the URL string.
    itemlist = servertools.find_video_items(data=item.url)

    # Download the page and scan it for embedded video links.
    data = scrapertools.cache_page(item.url)
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        # video is (title, url, server)
        videotitle = scrapertools.unescape(video[0])
        # FIX: use the resolved video link (video[1]); the original reused
        # item.url, so every entry pointed back at the page instead of the
        # video (the sibling trailer() implementations use video[1]).
        url = video[1]
        server = video[2]

        itemlist.append(
            Item(
                channel=__channel__,
                action="play",
                server=server,
                title="Trailer - " + videotitle,
                url=url,
                thumbnail=item.thumbnail,
                plot=item.plot,
                fulltitle=item.title,
                fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg",
                folder=False,
            )
        )

    return itemlist
def detail(item):
    """Return playable Items, one per video link detected on item.url."""
    logger.info("[Descarregadirecta.py] detail")

    # Fetch the page and let the server modules locate the video links.
    html = scrapertools.cachePage(item.url)
    videos = servertools.findvideos(html)

    results = []
    for videotitle, videourl, server in videos:
        results.append(Item(channel=CHANNELNAME,
                            action="play",
                            title=item.title + " [" + server + "]",
                            url=videourl,
                            thumbnail=item.thumbnail,
                            plot=item.plot,
                            server=server,
                            folder=False))

    return results
Example #34
0
def links(item):
    """Follow item.url through redirects and list playable Items for any video found.

    The fetch is retried up to five times because the site intermittently
    fails; any remaining error is logged and an empty list is returned.
    """
    itemlist = []
    try:
        count = 0
        exit = False
        # FIX: pre-initialize so that when all five attempts fail the
        # logger.info line below does not raise a NameError that the outer
        # bare except silently swallows (matches the sibling links()).
        urlvideo = ""
        while(not exit and count < 5):
            # The site sometimes errors out on access; retry.
            try:
                logger.info(str(item.url))
                page = urllib2.urlopen(item.url)
                urlvideo = "\"" + page.geturl() + "\""
                logger.info(str(page.read()))
                logger.info(item.url)
                exit = True
            except:
                import traceback
                logger.info(traceback.format_exc())
                count = count + 1

        logger.info("urlvideo="+urlvideo)
        for video in servertools.findvideos(urlvideo) :
            # video is (title, url, server)
            scrapedtitle = scrapertools.htmlclean(video[0])
            scrapedurl = video[1]
            server = video[2]
            itemlist.append( Item(channel=__channel__, action="play" , title=scrapedtitle, url=scrapedurl, thumbnail=item.thumbnail, plot="", server=server, extra="", category=item.category, fanart=item.thumbnail, folder=False))
    except:
        import sys
        for line in sys.exc_info():
            logger.error( "%s" % line )

    return itemlist
def videos(item):
    """Return one playable Item per video link found on item.url."""

    logger.info("[newhd.py] videos")

    # Download the page and delegate link detection to the server modules.
    page = scrapertools.cachePage(item.url)
    found = servertools.findvideos(page)

    itemlist = []
    for video in found:
        # video is (title, url, server)
        entry_title = item.title.strip() + " - " + video[0]
        videourl = video[1]
        server = video[2]
        if (DEBUG):
            logger.info("title=[" + entry_title + "], url=[" + videourl +
                        "], thumbnail=[" + item.thumbnail + "]")

        # Add to the XBMC listing.
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 title=entry_title,
                 url=videourl,
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 server=server,
                 folder=False))

    return itemlist
def detail(params,url,category):
    """Legacy (params/url/category) detail view for anifenix.

    Downloads *url*, finds every video link on it and adds one playable
    entry per link straight to the XBMC directory; nothing is returned.
    """
    logger.info("[anifenix.py] detail")

    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    plot = urllib.unquote_plus( params.get("plot") )

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # ------------------------------------------------------------------------------------
    # Find the links to the videos
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        # video is (title, url, server)
        videotitle = video[0]
        url = video[1]
        server = video[2]
        xbmctools.addnewvideo( CHANNELNAME , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
    # ------------------------------------------------------------------------------------

    # Close the directory listing
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def videos(item):
    """Return one playable Item per video link detected on item.url."""

    logger.info("[newhd.py] videos")

    # Fetch the page and delegate link detection to the server modules.
    html = scrapertools.cachePage(item.url)

    results = []
    for videotitle, videourl, server in servertools.findvideos(html):
        label = item.title.strip() + " - " + videotitle
        if (DEBUG):
            logger.info("title=[" + label + "], url=[" + videourl + "], thumbnail=[" + item.thumbnail + "]")

        # Add to the XBMC listing.
        results.append(Item(channel=__channel__, action="play", title=label, url=videourl, thumbnail=item.thumbnail, plot=item.plot, server=server, folder=False))

    return results
Example #38
0
def findvideos(item):
    """List playable Items for every video link found on item.url."""
    logger.info("[guaridavalencianista.py] findvideos")

    page = scrapertools.cachePage(item.url)

    # Delegate video-link detection to the server modules.
    videos = servertools.findvideos(page)

    # Defensive guard kept from the original implementation.
    if item is None:
        item = Item()

    itemlist = []
    for videotitle, videourl, server in videos:
        itemlist.append(
            Item(channel=item.channel,
                 title=videotitle.strip() + " - " + item.title.strip(),
                 action="play",
                 server=server,
                 page=item.page,
                 url=videourl,
                 thumbnail=item.thumbnail,
                 show=item.show,
                 plot=item.plot,
                 folder=False))

    return itemlist
Example #39
0
def findvideos(item):
    """List playable Items found inside the post body of item.url."""
    logger.info("[discoverymx.py] findvideos")
    itemlist = []

    # Download the page and narrow it down to the post body.
    page = scrapertools.cachePage(item.url)
    page = scrapertools.get_match(
        page,
        "<div class='post-body entry-content'(.*?)<div class='post-footer'>")

    # Delegate link detection to the server modules.
    for video in servertools.findvideos(page):
        # video is (title, url, server)
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server=video[2],
                 title=scrapertools.unescape(video[0]),
                 url=video[1],
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 fulltitle=item.title,
                 folder=False))

    return itemlist
def addalltodownloadlist(params,url,category):
    """Add every video found on *url* to the download list.

    Asks the user for a title prefix, then saves one bookmark per detected
    video while showing a progress dialog.  The dialog strings below are
    mojibake in the original source and are kept byte-for-byte.
    """
    logger.info("[mcanime.py] addalltodownloadlist")

    title = urllib.unquote_plus( params.get("category") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    plot = unicode( xbmc.getInfoLabel( "ListItem.Plot" ), "utf-8" )

    # Ask for the series title, used as a prefix for every saved entry
    keyboard = xbmc.Keyboard(downloadtools.limpia_nombre_excepto_1(title))
    keyboard.doModal()
    if (keyboard.isConfirmed()):
        title = keyboard.getText()
    else:
        return

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # ------------------------------------------------------------------------------------
    # Find the links to the videos
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    # Progress dialog
    pDialog = xbmcgui.DialogProgress()
    ret = pDialog.create('pelisalacarta', 'AÒadiendo vÌdeos a la lista de descargas')
    pDialog.update(0, 'VÌdeo...')
    totalepisodes = len(listavideos)

    i = 1

    for video in listavideos:
        # Build "<prefix> (<n>) <video title>"; fall back if not valid utf-8
        try:
            fulltitle = unicode( title.strip() + " (%d) " + video[0], "utf-8" ).encode("iso-8859-1")
        except:
            fulltitle = title.strip() + " (%d) " + video[0]
        fulltitle = fulltitle % i
        i = i + 1
        url = video[1]
        server = video[2]

        # Add the link to the download list
        descargadoslist.savebookmark(fulltitle,url,thumbnail,server,plot)

        pDialog.update(i*100/totalepisodes, 'VÌdeo...',fulltitle)
        if (pDialog.iscanceled()):
            pDialog.close()
            return

    # ------------------------------------------------------------------------------------
    pDialog.close()

    advertencia = xbmcgui.Dialog()
    resultado = advertencia.ok('VÌdeos en lista de descargas' , 'Se han aÒadido todos los vÌdeos' , 'a la lista de descargas')

    # Close the directory listing
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
def findvideos(item):
    """List playable Items for item.url; youtube trailers are moved to the top."""
    itemlist = []
    from core import scrapertools
    from servers import servertools
    from servers import longurl
    import copy
    html = scrapertools.cache_page(item.url)
    html = longurl.get_long_urls(html)
    for video in servertools.findvideos(html):
        # video is (title, url, server)
        entry = copy.deepcopy(item)
        is_trailer = "youtube" in video[2]
        if is_trailer:
            entry.title = "Ver Trailer en: ["  + video[2] + "]"
        else:
            entry.title = "Ver en: ["  + video[2] + "]"
        entry.url = video[1]
        entry.server = video[2]
        entry.action = "play"
        entry.folder = False
        # Trailers are inserted first; regular links keep discovery order.
        if is_trailer:
            itemlist.insert(0, entry)
        else:
            itemlist.append(entry)

    return itemlist
Example #42
0
def trailer(item):
    """Build a list of playable trailer Items for item.url."""

    logger.info("pelisalacarta.bricocine trailer")

    itemlist = []
    page = scrapertools.cache_page(item.url)

    # Trailer iframe pattern (kept from the original; detection is actually
    # delegated to the server modules below).
    patron = "<iframe width='570' height='400' src='//([^']+)"

    # Delegate link detection to the server modules.
    for videotitle, videourl, server in servertools.findvideos(page):
        cleaned = scrapertools.unescape(videotitle)
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server=server,
                 title="[COLOR crimson]Trailer - [/COLOR]" + cleaned,
                 url=videourl,
                 thumbnail=item.thumbnail,
                 plot=item.plot,
                 fulltitle=item.title,
                 fanart="http://s23.postimg.org/84vkeq863/movietrailers.jpg",
                 folder=False))
    return itemlist
Example #43
0
def findvideos(item):
    """Build playable Items for an allpeliculas movie page.

    Collects both streaming ("movie-online-list") and download
    ("movie-downloadlink-list") entries, labels them with language and
    quality, and optionally appends the "add to library" entry.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideos")
    itemlist = []
    sinopsis = item.plot
    fanart = item.fanart
    # Fill language and quality dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.cachePage(item.url)
    data = data.replace("\n","").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    try:
        # Best-effort metadata lookup; keep the item's own plot/fanart on failure.
        sinopsis, fanart = info(item.fulltitle, "movie", scrapertools.find_single_match(sinopsis,"plot:'([^']+)'"))
    except:
        pass
    # Online links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)".*?id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, language, url in matches:
         enlaces = servertools.findvideos(data=url)
         if len(enlaces)> 0:
             idioma = IDIOMAS.get(idiomas_videos.get(language))
             titulo = "Enlace encontrado en [COLOR green][B]"+enlaces[0][0]+"[/B][/COLOR] [COLOR sandybrown]["+idioma+"][/COLOR] ["+calidad_videos.get(calidad)+"]"
             servidor = enlaces[0][2]
             itemlist.append(Item(channel=__channel__, action="play", server=servidor, title=titulo , url=enlaces[0][1] , fulltitle = item.fulltitle, thumbnail=item.thumbnail , fanart=fanart, plot=str(sinopsis) , extra=idioma, folder=False) )

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)".*?id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, language, url in matches:
        mostrar_server = True
        enlaces = servertools.findvideos(data=url)
        if len(enlaces)> 0:
            servidor = enlaces[0][2]
            if config.get_setting("hidepremium")=="true":
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "Enlace encontrado en [COLOR blue][B]"+enlaces[0][0]+"[/B][/COLOR] [COLOR sandybrown]["+idioma+"][/COLOR] ["+calidad_videos.get(calidad)+"]"
                itemlist.append(Item(channel=__channel__, action="play", server=servidor, title=titulo , url=enlaces[0][1] , fulltitle = item.fulltitle, thumbnail=item.thumbnail , fanart=fanart, plot=str(sinopsis) , extra=idioma, folder=False) )

    # Sort by language, then server (lambda arg renamed so it no longer
    # shadows the function's 'item' parameter).
    itemlist.sort(key=lambda it: (it.extra, it.server))
    # FIX: parenthesize the category test.  The original
    # 'len(itemlist) > 0 and a or b' parsed as '(len>0 and a) or b', so the
    # library option could be offered with zero links when the category was
    # "Buscador".
    if len(itemlist) > 0 and (item.category == "" or item.category == "Buscador"):
        if config.get_library_support():
            itemlist.append( Item(channel=__channel__, title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]", url=item.url, action="add_pelicula_to_library", fulltitle=item.fulltitle, show=item.fulltitle))
    return itemlist
def videos_p(item):
	"""List asiateam movie videos, with thumbnail, synopsis and subtitle links.

	Scrapes the page for a thumbnail, a cleaned synopsis and an asia-team
	subtitle download URL; megaupload links get a "[MV]" title fetched from
	megavideo.  Each entry uses action="sub" with the subtitle URL in
	``category``.
	"""
	logger.info("[asiateam.py] videos peliculas")
	# Download the page
	data = scrapertools.cachePage(item.url)
	title = item.title
	scrapedthumbnail = item.thumbnail
	scrapedplot = ""
	subtitulo = ""
	
	# Extract the entries
	patronimagen  = 'titulo.png".*?<img src="(.*?)".*?>'
	matches = re.compile(patronimagen,re.DOTALL).findall(data)
	if len(matches)>0:
		scrapedthumbnail = matches[0]
	patronplot  = 'sinopsis.png".*?>.*?<font color="(?:N|n)avy".*?>(.*?)</td>'
	matches = re.compile(patronplot,re.DOTALL).findall(data)
	if len(matches)>0:
		scrapedplot =  matches[0]
		# Strip HTML tags and unescape quotes from the synopsis
		scrapedplot = re.sub("</?\w+((\s+\w+(\s*=\s*(?:\".*?\"|'.*?'|[^'\">\s]+))?)+\s*|\s*)/?>",'',scrapedplot)
		scrapedplot = scrapedplot.replace('&quot;','"')
	patronsubs = 'subtitulos.png".*?>.*<a href="http://subs.asia-team.net/file.php\?id=(.*?)".*?>'
	matches = re.compile(patronsubs,re.DOTALL).findall(data)
	if len(matches)>0:
		subtitulo =  "http://subs.asia-team.net/download.php?id="+matches[0]
	itemlist = []
	listavideos = servertools.findvideos(data)
	for video in listavideos:
		# video is (title, url, server)
		scrapedtitle = title.strip() + " - " + video[0]
		videourl = video[1]
		server = video[2]
		if server.lower() =="megaupload":
			# Fetch the real file title from megavideo for a nicer label
			url = "http://www.megavideo.com/?d="+videourl
			data = scrapertools.cachePage(url)		
			patronname = 'flashvars.title = "(.*?)"'
			matches = re.compile(patronname,re.DOTALL).findall(data)
			if len(matches)>0:
				titulo = matches[0]
				#logger.info("Titulo: "+titulo)			
				if titulo[-3:]=="avi" or titulo[-3:]=="mkv" or titulo[-3:]=="mp4":
						scrapedtitle = "[MV] "+ title.strip()+"-"+titulo
				
		if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"], thumbnail=["+scrapedthumbnail+"]")

		# Add to the XBMC listing
		itemlist.append( Item(channel=CHANNELNAME, action="sub", title=scrapedtitle , url=videourl , thumbnail=scrapedthumbnail , plot=scrapedplot , extra=server , category=subtitulo , folder=True) )
	
	# Add the FilesTube search option (asianmovielink per original comment)
	if re.search('asia-team.net',item.url)!=None:
		if re.search(' / ',title)!=None:
			title = title.split(' / ')
			buscar = title[0]
		else:
			buscar = title
		
		itemlist.append( Item(channel=CHANNELNAME, action="search", title="Buscar Película en FilesTube",  extra=buscar , folder=True) )
		
	return itemlist
Example #45
0
def play(item):
    """Resolve a letmewatchthis link into playable Items.

    First tries the redirect 'location' header; if absent, scans the page
    itself, retrying the fetch a few times because the site is flaky.
    """
    logger.info("pelisalacarta.letmewatchthis play")

    location = scrapertools.get_header_from_response(item.url.replace(
        " ", "%20"),
                                                     header_to_get="location")
    logger.info("pelisalacarta.letmewatchthis location=" + location)

    if location != "":
        # The redirect target itself contains the video link.
        itemlist = servertools.find_video_items(data=location)
    else:
        item.url = item.url.replace(" ", "%20")
        itemlist = servertools.find_video_items(item)
        if len(itemlist) == 0:
            try:
                count = 0
                exit = False
                while (not exit and count < 5):
                    # The site sometimes errors out on access; retry up to 5 times.
                    try:
                        page = urllib2.urlopen(item.url)
                        urlvideo = page.geturl()
                        exit = True
                    except:
                        count = count + 1
                if (exit):
                    listavideos = servertools.findvideos(urlvideo)
                    for video in listavideos:
                        # video is (title, url, server)
                        scrapedtitle = item.title.strip(
                        ) + " - " + video[0].strip()
                        scrapedurl = video[1]
                        server = video[2]

                        itemlist.append(
                            Item(channel=item.channel,
                                 title=scrapedtitle,
                                 action="play",
                                 server=server,
                                 page=item.page,
                                 url=scrapedurl,
                                 thumbnail=item.thumbnail,
                                 show=item.show,
                                 plot=item.plot,
                                 folder=False))

            except:
                import sys
                for line in sys.exc_info():
                    logger.error("%s" % line)

        # Strip the "Watch Version N of ..." wrapper from each title.
        for videoitem in itemlist:
            try:
                videoitem.title = scrapertools.get_match(
                    item.title, "Watch Version \d+ of (.*)\(")
            except:
                videoitem.title = item.title

    return itemlist
def play(item):
    """Translate peliculasaudiolatino gateway URLs to real server URLs and list playable Items."""
    logger.info("channels.peliculasaudiolatino play")
    itemlist=[]

    data2 = item.url

    # Some links are javascript window.open wrappers: unwrap and fetch the real page.
    if data2.startswith("javascript"):

        item.url = scrapertools.find_single_match(data2,"window.open\('([^']+)'")
        data2 = scrapertools.cache_page(item.url)

    logger.info("data2="+data2)

    # The site hides every server behind gateway URLs; rewrite each known
    # gateway prefix into the server's direct URL.  Order is preserved from
    # the original chained .replace() calls.
    rewrites = [
        ("http://peliculasaudiolatino.com/show/vidbux.php?url=","http://www.vidbux.com/"),
        ("http://peliculasaudiolatino.com/show/vidxden.php?url=","http://www.vidxden.com/"),
        ("http://peliculasaudiolatino.com/v/pl/play.php?url=","http://www.putlocker.com/embed/"),
        ("http://peliculasaudiolatino.com/v/mv/play.php?url=","http://www.modovideo.com/frame.php?v="),
        ("http://peliculasaudiolatino.com/v/ss/play.php?url=","http://www.sockshare.com/embed/"),
        ("http://peliculasaudiolatino.com/v/vb/play.php?url=","http://vidbull.com/"),
        ("http://peliculasaudiolatino.com/v/vk/play.php?url=","http://vk.com/video_ext.php?oid="),
        ("http://peliculasaudiolatino.com/v/ttv/play.php?url=","http://www.tumi.tv/"),
        ("http://peliculasaudiolatino.com/show/sockshare.php?url=","http://www.sockshare.com/embed/"),
        ("http://peliculasaudiolatino.com/show/moevide.php?url=","http://moevideo.net/?page=video&uid="),
        ("http://peliculasaudiolatino.com/show/novamov.php?url=","http://www.novamov.com/video/"),
        ("http://peliculasaudiolatino.com/show/movshare.php?url=","http://www.movshare.net/video/"),
        ("http://peliculasaudiolatino.com/show/divxstage.php?url=","http://www.divxstage.net/video/"),
        ("http://peliculasaudiolatino.com/show/tumi.php?url=","http://www.tumi.tv/"),
        ("http://peliculasaudiolatino.com/show/playerto.php?url=","http://played.to/"),
        ("http://peliculasaudiolatino.com/show/videoweed.php?url=","http://www.videoweed.es/file/"),
        ("http://peliculasaudiolatino.com/show/netu.php?url=","http://netu.tv/watch_video.php?v="),
        ("http://peliculasaudiolatino.com/show/powvideo.php?url=","http://powvideo.net/"),
        ("http://peliculasaudiolatino.com/show/streamin.php?url=","http://streamin.to/"),
        ("http://peliculasaudiolatino.com/show/vidspot.php?url=","http://vidspot.net/"),
        ("http://peliculasaudiolatino.com/show/allmy.php?url=","http://allmyvideos.net/"),
        ('http://peliculasaudiolatino.com/show/r"></iframe>url=',"http://realvid.net/"),
        ("http://peliculasaudiolatino.com/show/roc.php?url=","http://rocvideo.net/"),
        ("http://peliculasaudiolatino.com/show/vide.php?url=","http://thevideo.me/"),
        ("http://peliculasaudiolatino.com/show/vidto.php?url=","http://vidto.me/"),
        ("http://peliculasaudiolatino.com/show/vodlocker.php?url=","http://vodlocker.com/"),
        ("http://peliculasaudiolatino.com/show/videomega.php?url=","http://videomega.tv/?ref="),
        ("http://peliculasaudiolatino.com/show/gamo.php?url=","http://gamovideo.com/"),
        ("%26","&"),
    ]
    for gateway, real in rewrites:
        data2 = data2.replace(gateway, real)
    logger.info("data2="+data2)

    for video in servertools.findvideos(data2):
        # video is (title, url, server)
        scrapedtitle = item.title+video[0]
        videourl = video[1]
        server = video[2]
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+videourl+"]")

        # Add to the XBMC listing.
        itemlist.append( Item(channel=__channel__, action="play", title=scrapedtitle , fulltitle=item.fulltitle, url=videourl , server=server , folder=False) )

    return itemlist
Example #47
0
def play(item):
    """Resolve a seriesyonkis link; handles the site's reCAPTCHA wall on XBMC.

    If no video is found, looks for the captcha login form, shows the captcha
    image via the platform keyboard, submits the answer and retries
    recursively.
    """
    logger.info("[seriesyonkis.py] play")
    itemlist = []
    data = scrapertools.cache_page(item.url)
    videos = servertools.findvideos(data)  

    if(len(videos)>0): 
        url = videos[0][1]
        server=videos[0][2]                   
        itemlist.append( Item(channel=item.channel, action="play" , title=item.title, fulltitle=item.fulltitle , url=url, thumbnail=item.thumbnail, plot=item.plot, server=server, extra=item.extra, folder=False))
    else:
        # No video found: check whether the page shows the captcha login form.
        patron='<ul class="form-login">(.*?)</ul'
        matches = re.compile(patron, re.S).findall(data)
        if(len(matches)>0):
            if "xbmc" in config.get_platform():
                data = matches[0]
                # Look for the reCAPTCHA public key
                patron='src="http://www.google.com/recaptcha/api/noscript\?k=([^"]+)"'
                pkeys = re.compile(patron, re.S).findall(data)
                if(len(pkeys)>0):
                    pkey=pkeys[0]
                    # Look for the challenge id
                    data = scrapertools.cache_page("http://www.google.com/recaptcha/api/challenge?k="+pkey)
                    patron="challenge.*?'([^']+)'"
                    challenges = re.compile(patron, re.S).findall(data)
                    if(len(challenges)>0):
                        challenge = challenges[0]
                        image = "http://www.google.com/recaptcha/api/image?c="+challenge
                        
                        # CAPTCHA: show the image and ask the user to type it
                        exec "import platformcode.captcha as plugin"
                        tbd = plugin.Keyboard("","",image)
                        tbd.doModal()
                        confirmed = tbd.isConfirmed()
                        if (confirmed):
                            tecleado = tbd.getText()
                            logger.info("tecleado="+tecleado)
                            # NOTE(review): 'playurl' is not defined anywhere in
                            # this function — this line raises NameError if
                            # reached; confirm the intended variable (item.url?).
                            sendcaptcha(playurl,challenge,tecleado)
                        del tbd 
                        # tbd no longer exists
                        if(confirmed and tecleado != ""):
                            itemlist = play(item)
            else:
                itemlist.append( Item(channel=item.channel, action="error", title="El sitio web te requiere un captcha") )

    logger.info("len(itemlist)=%s" % len(itemlist))
    return itemlist
Example #48
0
def links(item):
    """Resolve item.extra through its redirect headers and list playable Items.

    Reads the response headers of item.extra looking for a 'location'
    redirect; on repeated failure falls back to item.url.  Any remaining
    error is logged and an empty list is returned.
    """
    itemlist = []
    try:
        attempts = 0
        done = False
        urlvideo = ""
        while not done and attempts < 5 and urlvideo == "":
            # The site sometimes fails on access; retry a few times.
            try:
                headers = scrapertools.get_headers_from_response(item.extra)
                logger.info(headers)
                for header in headers:
                    if "location" in header:
                        urlvideo = "\"" + header[1] + "\""
                done = True
            except:
                import traceback
                logger.info(traceback.format_exc())
                attempts = attempts + 1
                urlvideo = item.url

        logger.info("urlvideo=" + urlvideo)
        for video in servertools.findvideos(urlvideo):
            # video is (title, url, server)
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=scrapertools.htmlclean(video[0]),
                     url=video[1],
                     thumbnail=item.thumbnail,
                     plot="",
                     server=video[2],
                     extra="",
                     category=item.category,
                     fanart=item.thumbnail,
                     folder=False))
    except:
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)

    return itemlist
Example #49
0
def links(item):
    """Follow the redirects of item.url and return one playable Item per
    recognised video link on the final URL.

    The site fails intermittently, so the fetch is retried up to 5 times.
    """
    itemlist = []
    try:
        count = 0
        exit = False
        # Bug fix: urlvideo was left undefined when all 5 attempts failed,
        # causing a NameError below that the outer except silently swallowed.
        urlvideo = ""
        while (not exit and count < 5):
            # The site intermittently rejects the request; retry
            try:
                logger.info(str(item.url))
                page = urllib2.urlopen(item.url)
                # geturl() yields the URL after all redirects
                urlvideo = "\"" + page.geturl() + "\""
                logger.info(str(page.read()))
                logger.info(item.url)
                exit = True
            except:
                import traceback
                logger.info(traceback.format_exc())
                count = count + 1

        logger.info("urlvideo=" + urlvideo)
        for video in servertools.findvideos(urlvideo):
            scrapedtitle = scrapertools.htmlclean(video[0])
            scrapedurl = video[1]
            server = video[2]
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     title=scrapedtitle,
                     url=scrapedurl,
                     thumbnail=item.thumbnail,
                     plot="",
                     server=server,
                     extra="",
                     category=item.category,
                     fanart=item.thumbnail,
                     folder=False))
    except:
        # Last-resort guard: log the failure and return what we have
        import sys
        for line in sys.exc_info():
            logger.error("%s" % line)

    return itemlist
Example #50
0
def detail(params, url, category):
    """Publish in XBMC the next-page folder and every video link found on
    the page.

    params: dict with url-quoted "title" and "thumbnail".
    """
    logger.info("[seriesonline.py] detail")

    title = urllib.unquote_plus(params.get("title"))
    thumbnail = urllib.unquote_plus(params.get("thumbnail"))

    # Download the page
    data = scrapertools.cachePage(url)

    # Pagination: look for the "Siguiente" (next) link, if any
    logger.info("Busca el enlace de página siguiente...")
    try:
        next_links = re.compile('<a href="([^"]+)">Sigu', re.DOTALL).findall(data)
        for next_link in next_links:
            addfolder("#Siguiente", urlparse.urljoin(url, next_link), "list")
    except:
        logger.info("No encuentro la pagina...")

    # Video links recognised by the server connectors
    for video in servertools.findvideos(data):
        xbmctools.addnewvideo(CHANNELNAME, "play", category, video[2],
                              title + " - " + video[0], video[1], thumbnail,
                              "")

    # Label (top-right)
    xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=category)

    # Disable sorting
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]),
                             sortMethod=xbmcplugin.SORT_METHOD_NONE)

    # End of directory
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
Example #51
0
def findvideos(item):
    """Scrape a cinevos detail page: optional subtitle link, plot text and
    one playable Item per recognised video link."""
    logger.info("[cinevos.py] findvideos")
    # Download the page
    data = scrapertools.cachePage(item.url)
    logger.info(data)

    # Optional subtitle link
    sub_matches = re.compile('<a href="(http://www.cinevos.com/sub/[^"]+)"',
                             re.DOTALL).findall(data)
    scrapertools.printMatches(sub_matches)
    sub = ""
    if sub_matches:
        sub = sub_matches[0]
        logger.info("con subtitulo :%s" % sub)

    # Plot: strip HTML tags from the description block
    plot_matches = re.compile('<p>(<div.*?</div>) </p>', re.DOTALL).findall(data)
    scrapertools.printMatches(plot_matches)
    plot = re.sub("<[^>]+>", "", plot_matches[0]) if plot_matches else ""

    # Video links recognised by the server connectors
    itemlist = []
    for video in servertools.findvideos(data):
        videotitle = scrapertools.unescape(video[0])
        if "Megaupload" in videotitle:
            videotitle = item.title + " - [Megaupload]"
        else:
            videotitle = item.title + " - " + videotitle
        itemlist.append(
            Item(channel=CHANNELNAME,
                 action="play",
                 server=video[2],
                 title=videotitle,
                 url=video[1],
                 thumbnail=item.thumbnail,
                 plot=plot,
                 subtitle=sub,
                 folder=False))
    return itemlist
Example #52
0
def detallecapitulo(params,url,category):
    """Publish in XBMC the video mirrors of an episode page plus a folder
    pointing to the full series.

    params: dict with url-quoted "title", "thumbnail" and "plot".
    url:    episode page URL (rebound inside the video loop, see NOTE below).
    """
    logger.info("[watchanimeon.py] detallecapitulo")

    title = urllib.unquote_plus( params.get("title") )
    thumbnail = urllib.unquote_plus( params.get("thumbnail") )
    plot = urllib.unquote_plus( params.get("plot") )

    # Download the page
    data = scrapertools.cachePage(url)
    #logger.info(data)

    # ------------------------------------------------------------------------------------
    # Video links recognised by the server connectors
    # ------------------------------------------------------------------------------------
    listavideos = servertools.findvideos(data)

    for video in listavideos:
        videotitle = video[0]
        # NOTE(review): this rebinds the `url` parameter, so the urljoin
        # below uses the LAST video URL (or the original page URL when no
        # videos were found). Looks unintentional -- verify before changing.
        url = video[1]
        server = video[2]
        xbmctools.addnewvideo( __channel__ , "play" , category , server , title.strip() + " - " + videotitle , url , thumbnail , plot )
    # ------------------------------------------------------------------------------------

    # Extract the link to the full series
    patron = '<a href="([^"]+)" title="View all posts in'
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    # Add the series folders to XBMC
    for match in matches:
        scrapedtitle = "Ver serie completa"
        scrapedurl = urlparse.urljoin(url,match)
        scrapedthumbnail = thumbnail
        scrapedplot = plot
        if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")

        # Add to the XBMC listing
        xbmctools.addnewfolder( __channel__ , "detalleserie" , category , scrapedtitle , scrapedurl , scrapedthumbnail, scrapedplot )

    # Label (top-right)...
    xbmcplugin.setPluginCategory( handle=pluginhandle, category=category )
    xbmcplugin.addSortMethod( handle=pluginhandle, sortMethod=xbmcplugin.SORT_METHOD_NONE )
    xbmcplugin.endOfDirectory( handle=pluginhandle, succeeded=True )
Example #53
0
def AddVideoURL(devuelve,encontrados,title,url):
    """Scan `url` for the known Megavideo/Megaupload link shapes and
    register each match through AddVideoID, then defer to the generic
    server detection for everything else."""

    # (log message, regex, server tag) triples, checked in the original order
    strict_patterns = [
        ("0) Enlace estricto a megavideo",
         'http\:\/\/www.megavideo.com\/.*?v\=([A-Z0-9a-z]{8})', 'Megavideo'),
        ("1) Enlace estricto a megavideo",
         'http\:\/\/www.megavideo.com\/v\/([A-Z0-9a-z]{8})', 'Megavideo'),
        ("2) Enlace estricto a megaupload",
         'http\:\/\/www.megaupload.com\/.*?d\=([A-Z0-9a-z]{8})', 'Megaupload'),
        ("3) Enlace estricto a megaupload",
         'http\:\/\/www.megavideo.com\/.*?d\=([A-Z0-9a-z]{8})', 'Megaupload'),
        ("4) Enlace estricto a megavideo",
         'http\:\/\/wwwstatic.megavideo.com\/mv_player.swf\?v\=([A-Z0-9a-z]{8})', 'Megavideo'),
    ]
    for message, patronvideos, server in strict_patterns:
        logger.info(message)
        for match in re.compile(patronvideos).findall(url):
            AddVideoID(devuelve, encontrados, title, match, server)

    # Generic detection for every other supported server
    videosarray = servertools.findvideos(url)
    for videoa in videosarray:
        AddVideoID(devuelve, encontrados, title, videoa[1], videoa[2])

    return
Example #54
0
def videos(item):
    """Return one playable Item per video link found on the page."""
    logger.info("[nki.py] videos")

    # Download the page
    data = scrapertools.cachePage(item.url)
    logger.info(data)

    # One playable entry per link recognised by the server connectors
    itemlist = []
    for video in servertools.findvideos(data):
        itemlist.append(
            Item(channel=item.channel,
                 title=item.title.strip() + " - " + video[0],
                 action="play",
                 server=video[2],
                 page=item.page,
                 url=video[1],
                 thumbnail=item.thumbnail,
                 show=item.show,
                 plot=item.plot,
                 folder=False))

    return itemlist
def findvideos(item):
    """Expand URL-shortener links on the page and return one playable copy
    of `item` per recognised video link."""
    logger.info("findvideos")
    from core import scrapertools
    from servers import servertools
    from servers import longurl
    import copy

    page = scrapertools.cache_page(item.url)
    # Replace shortened URLs with their final destinations before scanning
    page = longurl.get_long_urls(page)

    itemlist = []
    for video in servertools.findvideos(page):
        new_item = copy.deepcopy(item)
        new_item.title = item.title
        new_item.fulltitle = "Ver en: [" + video[2] + "]"
        new_item.url = video[1]
        new_item.server = video[2]
        new_item.action = "play"
        new_item.folder = False
        itemlist.append(new_item)

    return itemlist
Example #56
0
def play(item):
    """Resolve the final playable link behind the Cloudflare-protected page
    and return it as a single Item (empty list if nothing was found)."""
    logger.info("pelisalacarta.channels.verseriesynovelas play")
    itemlist = []
    data = anti_cloudflare(item.url)
    # The site sometimes answers with an interstitial redirect page: retry once
    if "Redireccionando" in data:
        data = anti_cloudflare(item.url)
    enlace = scrapertools.find_single_match(data, 'class="btn" href="([^"]+)"')
    location = anti_cloudflare(enlace, location=True)
    enlaces = servertools.findvideos(data=location)
    if enlaces:
        first = enlaces[0]
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server=first[2],
                 title="Enlace encontrado en " + first[0],
                 url=first[1],
                 fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart,
                 plot=item.plot,
                 folder=False))
    return itemlist
Example #57
0
def play_episodios(item):
    """Resolve the playable link for an episode, following the descargasmix
    redirect page when present."""
    logger.info("pelisalacarta.channels.descargasmix play_episodios")
    itemlist = []
    # Redirect case: fetch the intermediate page and pull out the real URL
    if "http://descargasmix" in item.url:
        DEFAULT_HEADERS.append(["Referer", item.extra])
        data = scrapertools.cachePage(item.url, headers=DEFAULT_HEADERS)
        item.url = scrapertools.find_single_match(data, 'src="([^"]+)"')
    enlaces = servertools.findvideos(data=item.url)
    if enlaces:
        first = enlaces[0]
        itemlist.append(
            Item(channel=__channel__,
                 action="play",
                 server=first[2],
                 title="Enlace encontrado en " + first[0],
                 url=first[1],
                 fulltitle=item.fulltitle,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart,
                 plot=item.plot,
                 folder=False))
    return itemlist
Example #58
0
def findvideostv(item):
    """Build the playable item list for one TV season.

    Scrapes both the online-streaming spans and the download spans of the
    page, resolving each link through servertools.findvideos and decorating
    the titles with language/quality labels.

    Assumes item.title looks like "Temporada N" -- the season number is
    taken from the second word (TODO confirm against the caller).
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []
    # Season number: second word of the title
    season = item.title.split(" ")[1]
    thumbnail = item.thumbnail
    # Fill the language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    # Best-effort TMDb handle for per-episode plot/thumbnail lookups below
    try:
        from core.tmdb import Tmdb
        otmdb = Tmdb(texto_buscado=item.fulltitle, tipo="tv")
    except:
        pass
    # Online streaming links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)".*?episode="([^"]+)" season="' + season + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, episode, language, url in matches:
        # Each span's link is resolved to a concrete server entry
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "[COLOR sandybrown][B]Episodio " + episode + "[/B][/COLOR] "
            titulo += "Enlace encontrado en [COLOR green][B]" + enlaces[0][
                0] + "[/B][/COLOR] [COLOR magenta][" + idioma + "][/COLOR] [" + calidad_videos.get(
                    quality) + "]"
            servidor = enlaces[0][2]
            # Best-effort episode metadata (mutates item.plot)
            try:
                item.plot, thumbnail = infoepi(otmdb, season, episode)
            except:
                pass
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     server=servidor,
                     title=titulo,
                     url=enlaces[0][1],
                     fulltitle=item.fulltitle,
                     thumbnail=thumbnail,
                     fanart=item.fanart,
                     plot=str(item.plot),
                     extra=episode,
                     folder=False))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)".*?episode="([^"]+)" season="' + season + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, episode, language, url in matches:
        mostrar_server = True
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            servidor = enlaces[0][2]
            # Honour the "hide premium servers" user setting
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "[COLOR sandybrown][B]Episodio " + episode + "[/B][/COLOR] "
                titulo += "Enlace encontrado en [COLOR green][B]" + enlaces[0][
                    0] + "[/B][/COLOR] [" + idioma + "] [" + calidad_videos.get(
                        quality) + "]"
                try:
                    item.plot, thumbnail = infoepi(otmdb, season, episode)
                except:
                    pass
                itemlist.append(
                    Item(channel=__channel__,
                         action="play",
                         server=servidor,
                         title=titulo,
                         url=enlaces[0][1],
                         fulltitle=item.fulltitle,
                         thumbnail=thumbnail,
                         fanart=item.fanart,
                         plot=str(item.plot),
                         extra=episode,
                         folder=False))

    # Order by episode number, then title.
    # NOTE(review): the lambda parameter shadows the outer `item`.
    itemlist.sort(key=lambda item: (int(item.extra), item.title))
    return itemlist
Example #59
0
def findvideos(item):
    """Build the playable item list for a movie.

    Scrapes the online-streaming spans and the download spans of the page,
    resolves each link through servertools.findvideos and decorates the
    titles with language/quality labels.  When links were found (and the
    item came from the main listing or the search), appends the
    "add to library" entry.
    """
    logger.info("pelisalacarta.channels.allpeliculas findvideos")
    itemlist = []
    sinopsis = item.plot
    fanart = item.fanart
    # Fill the language and quality lookup dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.cachePage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
    # Best-effort metadata lookup (plot and fanart)
    try:
        sinopsis, fanart = info(
            item.fulltitle, "movie",
            scrapertools.find_single_match(sinopsis, "plot:'([^']+)'"))
    except:
        pass
    # Online streaming links
    patron = '<span class="movie-online-list" id_movies_types="([^"]+)".*?id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, language, url in matches:
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "Enlace encontrado en [COLOR green][B]" + enlaces[0][
                0] + "[/B][/COLOR] [COLOR sandybrown][" + idioma + "][/COLOR] [" + calidad_videos.get(
                    calidad) + "]"
            servidor = enlaces[0][2]
            itemlist.append(
                Item(channel=__channel__,
                     action="play",
                     server=servidor,
                     title=titulo,
                     url=enlaces[0][1],
                     fulltitle=item.fulltitle,
                     thumbnail=item.thumbnail,
                     fanart=fanart,
                     plot=str(sinopsis),
                     extra=idioma,
                     folder=False))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)".*?id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for calidad, language, url in matches:
        mostrar_server = True
        enlaces = servertools.findvideos(data=url)
        if len(enlaces) > 0:
            servidor = enlaces[0][2]
            # Honour the "hide premium servers" user setting
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(servidor)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "Enlace encontrado en [COLOR blue][B]" + enlaces[0][
                    0] + "[/B][/COLOR] [COLOR sandybrown][" + idioma + "][/COLOR] [" + calidad_videos.get(
                        calidad) + "]"
                itemlist.append(
                    Item(channel=__channel__,
                         action="play",
                         server=servidor,
                         title=titulo,
                         url=enlaces[0][1],
                         fulltitle=item.fulltitle,
                         thumbnail=item.thumbnail,
                         fanart=fanart,
                         plot=str(sinopsis),
                         extra=idioma,
                         folder=False))

    # Order by language, then server (renamed lambda arg: the original
    # shadowed the outer `item`)
    itemlist.sort(key=lambda entry: (entry.extra, entry.server))
    # Bug fix: the original condition parsed as
    #   (len(itemlist) > 0 and item.category == "") or item.category == "Buscador"
    # which offered the library option even when no links were found.
    if len(itemlist) > 0 and item.category in ("", "Buscador"):
        if config.get_library_support():
            itemlist.append(
                Item(
                    channel=__channel__,
                    title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]",
                    url=item.url,
                    action="add_pelicula_to_library",
                    fulltitle=item.fulltitle,
                    show=item.fulltitle))
    return itemlist
Example #60
0
def findvideos(item):
    """Extract direct video URLs from the known hosts (msn.com, esmas.com,
    capitulosdenovela.net, cinechulo.com) plus any link recognised by the
    generic server connectors."""
    data = scrapertools.cache_page(item.url)
    itemlist = []
    logger.info("data=" + data)

    # (regex, item title, pass item.url as extra) for each known direct host.
    # Bug fix: the original logged "patron 3" for two different patterns
    # (copy-paste); labels are now sequential.
    # Sample URLs:
    #   http://content2.catalog.video.msn.com/e2/ds/09d8b24e-....mp4
    #   http://content4.catalog.video.msn.com/e2/ds/69c13d8c-....flv
    #   http://gbs04.esmas.com/m4v/boh/poamo/.../b1f6fddcbf-480.mp4
    #   http://capitulosdenovela.net/refugio-c001.mp4
    #   http://cinechulo.com/series/el_capo2/capitulo73.html
    direct_patterns = [
        ('(http:\//[a-z0-9\.]+msn.com/[a-z0-9\/\-]+\.mp4)',
         "Video en msn.com", False),
        ('(http:\//[a-z0-9\.]+msn.com/[a-z0-9\/\-]+\.flv)',
         "Video en msn.com", False),
        ('(http:\//[a-z0-9\.]+esmas.com/[a-z0-9\/\-]+\.mp4)',
         "Video en esmas.com", False),
        ('(http://capitulosdenovela.net/[a-z0-9A-Z\/\-]+\.mp4)',
         "Video en capitulosdenovela.net", False),
        ('(http://cinechulo.com/[A-Z0-9a-z\-\_\/]+.html)',
         "Video en cinechulo.com", True),
    ]
    for index, (patron, titulo, add_extra) in enumerate(direct_patterns):
        for scrapedurl in re.compile(patron, re.DOTALL).findall(data):
            logger.info("patron %d=%s" % (index + 1, scrapedurl))
            kwargs = dict(channel=__channel__,
                          action="play",
                          server="directo",
                          title=titulo,
                          url=scrapedurl,
                          folder=False)
            # cinechulo pages need the referring URL to resolve later
            if add_extra:
                kwargs["extra"] = item.url
            itemlist.append(Item(**kwargs))

    # Links handled by the generic server connectors
    listavideos = servertools.findvideos(data=data)
    for video in listavideos:
        itemlist.append(
            Item(channel=item.channel,
                 title="Ver en " + video[2],
                 action="play",
                 server=video[2],
                 url=video[1],
                 folder=False))

    return itemlist