Code Example #1
def busqueda(params, url, category):
    logger.info("busqueda")

    # Ask the user for the search text; bail out on cancel or empty input
    keyboard = xbmc.Keyboard('')
    keyboard.doModal()
    if not keyboard.isConfirmed():
        return
    tecleado = keyboard.getText()
    if len(tecleado) <= 0:
        return

    tecleado = tecleado.replace(" ", "+")
    data = scrapertools.cachePagePost("http://www.divxonline.info/buscador.html",
                                      'texto=' + tecleado + '&categoria=0&tipobusqueda=1&Buscador=Buscar')

    #logger.info(data)
    data = data[data.find('Se han encontrado un total de'):]
    
    #<li><a href="/pelicula/306/100-chicas-2000/">100 chicas (2000)</a></li>
    patronvideos = '<li><a href="(.+?)">(.+?)</a></li>'
    matches = re.compile(patronvideos, re.DOTALL).findall(data)
    if DEBUG:
        scrapertools.printMatches(matches)
    
    for match in matches:
        xbmctools.addnewfolder(CHANNELNAME, "listmirrors", category, match[1],
                               'http://www.divxonline.info' + match[0],
                               'scrapedthumbnail', 'scrapedplot')
    
    xbmcplugin.setPluginCategory(handle=int(sys.argv[1]), category=category)
    xbmcplugin.addSortMethod(handle=int(sys.argv[1]), sortMethod=xbmcplugin.SORT_METHOD_NONE)
    xbmcplugin.endOfDirectory(handle=int(sys.argv[1]), succeeded=True)
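Every example in this listing funnels its HTTP POST through scrapertools.cachePagePost, whose source is not shown here. As a rough sketch only (the real scrapertools implementation may differ in caching policy, headers, and error handling), the helper can be pictured as a cached POST that returns the response body:

# Hypothetical stand-in for scrapertools.cachePagePost (NOT the real
# implementation): POST the body, cache the response on disk keyed by
# an md5 of url+post, and return the page as a string.
import hashlib
import os
import urllib2

CACHE_DIR = "/tmp/plugin_cache"  # assumed cache location, illustration only

def cache_page_post(url, post):
    key = hashlib.md5(url + "|" + post).hexdigest()
    path = os.path.join(CACHE_DIR, key)
    if os.path.exists(path):
        return open(path, "rb").read()
    data = urllib2.urlopen(urllib2.Request(url, data=post)).read()
    if not os.path.isdir(CACHE_DIR):
        os.makedirs(CACHE_DIR)
    open(path, "wb").write(data)
    return data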
Code Example #2
def search(item, texto):
    logger.info("[laserietv.py] " + item.url + " search " + texto)
    itemlist = []
    url = "%s/index.php?do=search" % host
    post = "do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=" + texto
    logger.debug(post)
    data = scrapertools.cachePagePost(url, post=post)

    patron = '<div class="video-item-cover"[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedthumbnail = host + scrapedthumbnail
        logger.info(scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodi",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle), tipo='tv'))

    return itemlist
Code Example #3
def search(item, texto):
    logger.info("[laserietv.py] " + item.url + " search " + texto)
    itemlist = []
    url = "%s/index.php?do=search" % host
    post = "do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=" + texto
    logger.debug(post)
    data = scrapertools.cachePagePost(url, post=post)

    patron = '<div class="video-item-cover"[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedthumbnail = host + scrapedthumbnail
        logger.info(scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="episodios",
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=scrapedtitle,
                         show=scrapedtitle),
                    tipo='tv'))

    return itemlist
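Both search variants above splice texto into the POST body by plain string concatenation; Example #1 at least replaces spaces with "+". Building the body with urllib.urlencode escapes spaces and reserved characters as well. A small sketch (field names copied from the examples above, helper name hypothetical):

# Build the same search body with proper escaping instead of string
# concatenation (helper name is illustrative, not from the channel code).
import urllib

def build_search_post(texto):
    return urllib.urlencode({
        "do": "search",
        "subaction": "search",
        "search_start": "0",
        "full_search": "0",
        "result_from": "1",
        "story": texto,
    })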
Code Example #4
def extractVideosSection(data):
    result = re.findall('<table class="as_gridder_table">(.+?)</table>|'
                        '<table class=\'zebra\'>(.+?)<[Bb][Rr]>|'
                        'data : "(action=load[^\"]+)"', data, re.MULTILINE | re.DOTALL)

    if len(result) == 1 and result[0][2]:
        return extractVideosSection(scrapertools.cachePagePost(HOST + 'ajax.php', result[0][2]))

    row = len(result) - 2
    idx = 1 if result[row][1] else 0

    return [result[row][idx], result[row + 1][idx]]
Code Example #5
def lista_serie(item):
    logger.info("[leserie.py] lista_serie")
    itemlist = []

    post="dlenewssortby=title&dledirection=asc&set_new_sort=dle_sort_cat&set_direction_sort=dle_direction_cat"

    data =scrapertools.cachePagePost(item.url,post=post)

    patron = '<div class="video-item-cover"[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        logger.info(scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)

        try:
            tmdbtitle = scrapedtitle
            plot, fanart, poster, extrameta = info(tmdbtitle)

            itemlist.append(Item(channel=__channel__,
                                 thumbnail=poster,
                                 fanart=fanart if fanart != "" else poster,
                                 extrameta=extrameta,
                                 plot=str(plot),
                                 action="episodi",
                                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                                 url=scrapedurl,
                                 fulltitle=scrapedtitle,
                                 show=scrapedtitle,
                                 folder=True))
        except:
            # Fall back to the scraped data when the info() lookup fails
            itemlist.append(Item(channel=__channel__,
                                 action="episodi",
                                 title=scrapedtitle,
                                 url=scrapedurl,
                                 thumbnail=host + scrapedthumbnail,
                                 fulltitle=scrapedtitle,
                                 show=scrapedtitle))


    # Pagination
    #===========================================================
    patron = '<div class="pages">(.*?)</div>'
    paginazione = scrapertools.find_single_match(data, patron)
    patron = '<span>.*?</span>.*?href="([^"]+)".*?</a>'
    matches = re.compile(patron, re.DOTALL).findall(paginazione)
    scrapertools.printMatches(matches)
    #===========================================================

    if len(matches) > 0:
        paginaurl = matches[0]
        itemlist.append(Item(channel=__channel__, action="lista_serie",
                             title="[COLOR orange]Successivo>>[/COLOR]", url=paginaurl,
                             thumbnail="http://2.bp.blogspot.com/-fE9tzwmjaeQ/UcM2apxDtjI/AAAAAAAAeeg/WKSGM2TADLM/s1600/pager+old.png",
                             folder=True))
        itemlist.append(Item(channel=__channel__, action="HomePage",
                             title="[COLOR yellow]Torna Home[/COLOR]", folder=True))
    return itemlist
Code Example #6
def extractVideosSection(data):
    result = re.findall('<table class="as_gridder_table">(.+?)</table>|<table class=\'zebra\'>(.+?)<[Bb][Rr]>|'
                        'data : "(action=load[^\"]+)"', data, re.MULTILINE | re.DOTALL)

    if len(result) == 1 and result[0][2]:
        return extractVideosSection(scrapertools.cachePagePost(HOST + 'ajax.php', result[0][2]))

    row = len(result) - 2
    idx = 1 if result[row][1] else 0

    return [result[row][idx], result[row + 1][idx]]
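extractVideosSection returns the streaming table and the download table as a two-element list, recursing once through ajax.php when the page only carried the ajax payload. A usage sketch (variable names assumed; parseVideos is the helper referenced in Example #8 below):

# Given the page HTML in `data`, split out the two tables and parse
# each into playable items (names illustrative).
online_html, download_html = extractVideosSection(data)
itemlist = (parseVideos(item, "Ver", online_html) +
            parseVideos(item, "Descargar", download_html))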
Code Example #7
def lista_serie(item):
    logger.info("[leserie.py] lista_serie")
    itemlist = []

    post = "dlenewssortby=title&dledirection=asc&set_new_sort=dle_sort_cat&set_direction_sort=dle_direction_cat"

    data = scrapertools.cachePagePost(item.url, post=post)

    patron = '<div class="video-item-cover"[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedthumbnail = host + scrapedthumbnail
        logger.info(scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)
        itemlist.append(
            infoSod(Item(channel=__channel__,
                         action="episodi",
                         title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                         url=scrapedurl,
                         thumbnail=scrapedthumbnail,
                         fulltitle=scrapedtitle,
                         show=scrapedtitle,
                         viewmode="movie"),
                    tipo='tv'))

    # Pagination
    # ===========================================================
    patron = '<div class="pages">(.*?)</div>'
    paginazione = scrapertools.find_single_match(data, patron)
    patron = '<span>.*?</span>.*?href="([^"]+)".*?</a>'
    matches = re.compile(patron, re.DOTALL).findall(paginazione)
    scrapertools.printMatches(matches)
    # ===========================================================

    if len(matches) > 0:
        paginaurl = matches[0]
        itemlist.append(
            Item(
                channel=__channel__,
                action="lista_serie",
                title="[COLOR orange]Successivo>>[/COLOR]",
                url=paginaurl,
                thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/vari/successivo_P.png",
                folder=True))
        itemlist.append(
            Item(channel=__channel__,
                 action="HomePage",
                 title="[COLOR yellow]Torna Home[/COLOR]",
                 thumbnail=ThumbnailHome,
                 folder=True))
    return itemlist
Code Example #8
def findvideos(item):
    logger.info("pelisalacarta.seriesblanco findvideos")

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Make the ajax request for the links
    params = scrapertools.get_match(data, 'data : "(action=load[^\"]+)"')
    data = scrapertools.cachePagePost(HOST + 'ajax.php', params)

    online = re.findall('<table class="as_gridder_table">(.+?)</table>', data, re.MULTILINE | re.DOTALL)

    return parseVideos(item, "Ver", online[0]) + parseVideos(item, "Descargar", online[1])
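parseVideos itself is not part of this listing. Example #11 below expands the same logic inline, so a plausible reconstruction (hypothetical, derived from that example; Item, IDIOMAS, HOST and __channel__ come from the channel module) looks like:

# Hypothetical parseVideos, reconstructed from the inline loop in
# Example #11: extract date/language/link/server/uploader/quality from
# one table and emit one playable Item per row.
import re
import urlparse

def parseVideos(item, tipo, table_html):
    itemlist = []
    links = re.findall('<tr.+?<span>(.+?)</span>.*?banderas/([^\.]+)'
                       '.+?href="([^"]+).+?servidores/([^\.]+).*?</td>.*?<td>.*?'
                       '<span>(.+?)</span>.*?<span>(.*?)</span>.*?</tr>',
                       table_html, re.MULTILINE | re.DOTALL)
    for date, language, link, server, uploader, quality in links:
        if not quality:
            quality = "SD"
        title = "{0} en {1} [{2}] [{3}] ({4}: {5})".format(
            tipo, server, IDIOMAS[language], quality, uploader, date)
        itemlist.append(Item(channel=__channel__, title=title, action="play",
                             url=urlparse.urljoin(HOST, link), show=item.show))
    return itemlist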
Code Example #9
def lista_serie(item):
    logger.info("[leserie.py] lista_serie")
    itemlist = []

    post = "dlenewssortby=title&dledirection=asc&set_new_sort=dle_sort_cat&set_direction_sort=dle_direction_cat"

    data = scrapertools.cachePagePost(item.url, post=post)

    patron = '<div class="video-item-cover"[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        scrapedthumbnail = host + scrapedthumbnail
        logger.info(scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)
        itemlist.append(infoSod(
            Item(channel=__channel__,
                 action="episodi",
                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                 url=scrapedurl,
                 thumbnail=scrapedthumbnail,
                 fulltitle=scrapedtitle,
                 show=scrapedtitle, viewmode="movie"), tipo='tv'))

    # Pagination
    # ===========================================================
    patron = '<div class="pages">(.*?)</div>'
    paginazione = scrapertools.find_single_match(data, patron)
    patron = '<span>.*?</span>.*?href="([^"]+)".*?</a>'
    matches = re.compile(patron, re.DOTALL).findall(paginazione)
    scrapertools.printMatches(matches)
    # ===========================================================

    if len(matches) > 0:
        paginaurl = matches[0]
        itemlist.append(
            Item(channel=__channel__, action="lista_serie",
                 title="[COLOR orange]Successivo>>[/COLOR]", url=paginaurl,
                 thumbnail="https://raw.githubusercontent.com/orione7/Pelis_images/master/vari/successivo_P.png",
                 folder=True))
        itemlist.append(
            Item(channel=__channel__, action="HomePage", title="[COLOR yellow]Torna Home[/COLOR]",
                 thumbnail=ThumbnailHome, folder=True))
    return itemlist
Code Example #10
def search(item, texto):
    logger.info("[laserietv.py] " + item.url + " search " + texto)
    itemlist = []

    post = "do=search&subaction=search&search_start=0&full_search=0&result_from=1&story=" + texto
    logger.debug(post)
    data = scrapertools.cachePagePost(item.url, post=post)

    patron = '<div class="video-item-cover"[^<]+<a href="(.*?)">[^<]+<img src="(.*?)" alt="(.*?)">'
    matches = re.compile(patron, re.DOTALL).findall(data)
    scrapertools.printMatches(matches)

    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        logger.info(scrapedurl + " " + scrapedtitle + " " + scrapedthumbnail)

        try:
            tmdbtitle = scrapedtitle
            plot, fanart, poster, extrameta = info(tmdbtitle)

            itemlist.append(Item(channel=__channel__,
                                 thumbnail=poster,
                                 fanart=fanart if fanart != "" else poster,
                                 extrameta=extrameta,
                                 plot=str(plot),
                                 action="episodi",
                                 title="[COLOR azure]" + scrapedtitle + "[/COLOR]",
                                 url=scrapedurl,
                                 fulltitle=scrapedtitle,
                                 show=scrapedtitle,
                                 folder=True))
        except:
            # Fall back to the scraped data when the info() lookup fails
            itemlist.append(Item(channel=__channel__,
                                 action="episodi",
                                 title=scrapedtitle,
                                 url=scrapedurl,
                                 thumbnail=host + scrapedthumbnail,
                                 fulltitle=scrapedtitle,
                                 show=scrapedtitle))

    return itemlist
Code Example #11
def findvideos(item):
    logger.info("pelisalacarta.seriesblanco findvideos")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Make the ajax request for the links
    params = scrapertools.get_match(data, 'data : "(action=load[^\"]+)"')
    data = scrapertools.cachePagePost(HOST + 'ajax.php', params)

    online = re.findall('<table class="as_gridder_table">(.+?)</table>', data, re.MULTILINE | re.DOTALL)

    links = re.findall('<tr.+?<span>(.+?)</span>.*?banderas/([^\.]+).+?href="([^"]+)'
                       '.+?servidores/([^\.]+).*?</td>.*?<td>.*?<span>(.+?)</span>'
                       '.*?<span>(.*?)</span>.*?</tr>', online[0], re.MULTILINE | re.DOTALL)

    for date, language, link, server, uploader, quality in links:
        if not quality:
            quality = "SD"
        title = "{0} en {1} [{2}] [{3}] ({4}: {5})".format("Ver", server, IDIOMAS[language],
                                                           quality, uploader, date)

        itemlist.append(Item(channel=__channel__, title=title, url=urlparse.urljoin(HOST, link), action="play",
                             show=item.show))

    # Same pattern against the second table, which holds the download links
    links = re.findall('<tr.+?<span>(.+?)</span>.*?banderas/([^\.]+).+?href="([^"]+)'
                       '.+?servidores/([^\.]+).*?</td>.*?<td>.*?<span>(.+?)</span>'
                       '.*?<span>(.*?)</span>.*?</tr>', online[1], re.MULTILINE | re.DOTALL)

    for date, language, link, server, uploader, quality in links:
        if not quality:
            quality = "SD"
        title = "{0} en {1} [{2}] [{3}] ({4}: {5})".format("Descargar", server, IDIOMAS[language],
                                                           quality, uploader, date)
        itemlist.append(Item(channel=__channel__, title=title, url=urlparse.urljoin(HOST, link), action="play",
                             show=item.show))

    return itemlist
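The IDIOMAS lookup used above is defined elsewhere in the channel. Its keys are the flag-image basenames captured from banderas/<name>; a plausible shape, with illustrative values only:

# Hypothetical IDIOMAS table (keys and labels are illustrative guesses,
# not the channel's actual data): maps the flag-image basename to a
# display label.
IDIOMAS = {
    "es": "Español",
    "la": "Latino",
    "vo": "VO",
    "vos": "VOS",
    "in": "Inglés",
}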
Code Example #12
def strm_detail(item):
    logger.info("[cuevana.py] strm_detail")
    code =""
    if (item.url.startswith("http://www.cuevana.tv/list_search_info.php")):
        data = scrapertools.cachePage(item.url)
        patron = "window.location\='/series/([0-9]+)/"
        matches = re.compile(patron,re.DOTALL).findall(data)
        if len(matches)>0:
            code = matches[0]
        logger.info("code="+code)
        url = "http://www.cuevana.tv/player/source?id=%s&subs=,ES&onstart=yes&tipo=s&sub_pre=ES" % matches[0]
        serieOpelicula = True
    else:
        logger.info("url1="+item.url)
        patron = "http://www.cuevana.tv/peliculas/([0-9]+)/"
        matches = re.compile(patron,re.DOTALL).findall(item.url)
        if len(matches)>0:
            code = matches[0]
        logger.info("code="+code)
        url = "http://www.cuevana.tv/player/source?id=%s&subs=,ES&onstart=yes&sub_pre=ES#" % code
        serieOpelicula = False
    
    logger.info("url2="+url)
    data = scrapertools.cachePage(url)

    # goSource('ee5533f50eab1ef355661eef3b9b90ec','megaupload')
    patron = "goSource\('([^']+)','megaupload'\)"
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        data = scrapertools.cachePagePost("http://www.cuevana.tv/player/source_get","key=%s&host=megaupload&vars=&id=2933&subs=,ES&tipo=&amp;sub_pre=ES" % matches[0])
    logger.info("data="+data)

    # Subtitles
    if serieOpelicula:
        suburl = "http://www.cuevana.tv/files/s/sub/" + code + "_ES.srt"
    else:
        suburl = "http://www.cuevana.tv/files/sub/" + code + "_ES.srt"
    logger.info("suburl=" + suburl)
    
    # Delete the subtitulo.srt file left over from a previous playback
    ficherosubtitulo = os.path.join(config.get_data_path(), 'subtitulo.srt')
    if os.path.exists(ficherosubtitulo):
        try:
            os.remove(ficherosubtitulo)
        except IOError:
            xbmc.output("Error al eliminar el archivo subtitulo.srt " + ficherosubtitulo)
            raise

    from core import downloadtools
    downloadtools.downloadfile(suburl, ficherosubtitulo)
    config.set_setting("subtitulo", "true")

    listavideos = servertools.findvideos(data)
    
    for video in listavideos:
        server = video[2]
        if server == "Megaupload":
            scrapedtitle = item.title + " [" + server + "]"
            scrapedurl = video[1]
            thumbnail = urllib.unquote_plus(item.thumbnail)
            plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")
            # xbmctools.playvideo("cuevana","megaupload","G0NMCIXJ","Series","24","","",strmfile=True)
            xbmctools.playvideo(CHANNELNAME, server, scrapedurl, "Series", scrapedtitle,
                                item.thumbnail, item.plot, strmfile=True, subtitle=suburl)
            break  # only the first Megaupload link is played
    logger.info("[cuevana.py] strm_detail fin")
    return
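strm_detail's subtitle handling (drop the stale subtitulo.srt, download the new one, enable the subtitulo setting) reappears verbatim in Example #14 below. Factored out, it might look like this sketch (helper name hypothetical; config, logger and downloadtools are the modules the channel already imports):

# Hypothetical helper consolidating the repeated subtitle handling.
import os

def prepare_subtitle(suburl):
    ficherosubtitulo = os.path.join(config.get_data_path(), 'subtitulo.srt')
    # Remove the file left over from a previous playback
    if os.path.exists(ficherosubtitulo):
        try:
            os.remove(ficherosubtitulo)
        except IOError:
            logger.info("Error deleting " + ficherosubtitulo)
            raise
    from core import downloadtools
    downloadtools.downloadfile(suburl, ficherosubtitulo)
    config.set_setting("subtitulo", "true")
    return ficherosubtitulo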
Code Example #13
def findvideos(item):
    logger.info("[cuevana.py] findvideos")

    # True means series, False means movie
    serieOpelicula = True
    code = ""
    if item.url.startswith("http://www.cuevana.tv/list_search_info.php"):
        data = scrapertools.cachePage(item.url)
        #logger.info("data="+data)
        patron = "window.location\='/series/([0-9]+)/"
        matches = re.compile(patron,re.DOTALL).findall(data)
        if len(matches)>0:
            code = matches[0]
        logger.info("code="+code)
        url = "http://www.cuevana.tv/player/source?id=%s&subs=,ES&onstart=yes&tipo=s&sub_pre=ES" % matches[0]
        serieOpelicula = True
    else:
        # http://www.cuevana.tv/peliculas/2553/la-cienaga/
        logger.info("url1="+item.url)
        patron = "http://www.cuevana.tv/peliculas/([0-9]+)/"
        matches = re.compile(patron,re.DOTALL).findall(item.url)
        if len(matches)>0:
            code = matches[0]
        logger.info("code="+code)
        url = "http://www.cuevana.tv/player/source?id=%s&subs=,ES&onstart=yes&sub_pre=ES#" % code
        serieOpelicula = False
    
    logger.info("url2="+url)
    data = scrapertools.cachePage(url)
    #logger.info("data="+data)

    # goSource('ee5533f50eab1ef355661eef3b9b90ec','megaupload')
    patron = "goSource\('([^']+)','megaupload'\)"
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        data = scrapertools.cachePagePost("http://www.cuevana.tv/player/source_get","key=%s&host=megaupload&vars=&id=2933&subs=,ES&tipo=&amp;sub_pre=ES" % matches[0])
    logger.info("data="+data)

    # Subtitles
    if serieOpelicula:
        suburl = "http://www.cuevana.tv/files/s/sub/" + code + "_ES.srt"
    else:
        suburl = "http://www.cuevana.tv/files/sub/" + code + "_ES.srt"
    logger.info("suburl=" + suburl)
    
    # Delete the subtitulo.srt file left over from a previous playback
    ficherosubtitulo = os.path.join(config.get_data_path(), 'subtitulo.srt')
    if os.path.exists(ficherosubtitulo):
        try:
            os.remove(ficherosubtitulo)
        except IOError:
            xbmc.output("Error al eliminar el archivo subtitulo.srt " + ficherosubtitulo)
            raise

    listavideos = servertools.findvideos(data)
    
    itemlist = []
    
    for video in listavideos:
        server = video[2]
        scrapedtitle = item.title + " [" + server + "]"
        scrapedurl = video[1]
        
        itemlist.append(Item(channel=CHANNELNAME, action="play", title=scrapedtitle,
                             url=scrapedurl, thumbnail=item.thumbnail, plot=item.plot,
                             server=server, subtitle=suburl, folder=False))

    return itemlist
Code Example #14
File: moviezet.py  Project: vdeku/xbmc-tvalacarta
def strm_detail(item):
    logger.info("[moviezet.py] strm_detail")
    from core import xbmctools
    import xbmc
    
    code =""
    if (item.url.startswith("http://www.cuevana.tv/list_search_info.php")):
        data = scrapertools.cachePage(item.url)
        patron = "window.location\='/series/([0-9]+)/"
        matches = re.compile(patron,re.DOTALL).findall(data)
        if len(matches)>0:
            code = matches[0]
        logger.info("code="+code)
        url = "http://www.cuevana.tv/player/source?id=%s&subs=,ES&onstart=yes&tipo=s&sub_pre=ES" % matches[0]
        serieOpelicula = True
    else:
        logger.info("url1="+item.url)
        patron = "http://www.cuevana.tv/peliculas/([0-9]+)/"
        matches = re.compile(patron,re.DOTALL).findall(item.url)
        if len(matches)>0:
            code = matches[0]
        logger.info("code="+code)
        url = "http://www.cuevana.tv/player/source?id=%s&subs=,ES&onstart=yes&sub_pre=ES#" % code
        serieOpelicula = False
    
    logger.info("url2="+url)
    data = scrapertools.cachePage(url)

    # goSource('ee5533f50eab1ef355661eef3b9b90ec','megaupload')
    patron = "goSource\('([^']+)','megaupload'\)"
    matches = re.compile(patron,re.DOTALL).findall(data)
    if len(matches)>0:
        data = scrapertools.cachePagePost("http://www.cuevana.tv/player/source_get","key=%s&host=megaupload&vars=&id=2933&subs=,ES&tipo=&amp;sub_pre=ES" % matches[0])
    logger.info("data="+data)

    # Subtitles
    if serieOpelicula:
        suburl = "http://www.cuevana.tv/files/s/sub/" + code + "_ES.srt"
    else:
        suburl = "http://www.cuevana.tv/files/sub/" + code + "_ES.srt"
    logger.info("suburl=" + suburl)
    
    # Delete the subtitulo.srt file left over from a previous playback
    ficherosubtitulo = os.path.join(config.get_data_path(), 'subtitulo.srt')
    if os.path.exists(ficherosubtitulo):
        try:
            os.remove(ficherosubtitulo)
        except IOError:
            logger.info("Error al eliminar el archivo subtitulo.srt " + ficherosubtitulo)
            raise

    from core import downloadtools
    downloadtools.downloadfile(suburl, ficherosubtitulo)
    config.set_setting("subtitulo", "true")

    listavideos = servertools.findvideos(data)
    
    for video in listavideos:
        server = video[2]
        if server == "Megaupload":
            scrapedtitle = item.title + " [" + server + "]"
            scrapedurl = video[1]
            thumbnail = urllib.unquote_plus(item.thumbnail)
            plot = unicode(xbmc.getInfoLabel("ListItem.Plot"), "utf-8")
            xbmctools.playvideo(CHANNELNAME, server, scrapedurl, "Series", scrapedtitle,
                                item.thumbnail, item.plot, strmfile=True, subtitle=suburl)
            break  # only the first Megaupload link is played
    logger.info("[moviezet.py] strm_detail fin")
    return
Code Example #15
def findvideos(item):
    logger.info("pelisalacarta.seriesblanco findvideos")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Make the ajax request for the links
    params = scrapertools.get_match(data, 'data : "(action=load[^"]+)"')
    data = scrapertools.cachePagePost(HOST + "ajax.php", params)

    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "", data)
    data = re.sub(r"<!--.*?-->", "", data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    data = re.sub(r"<center>|</center>|</a>", "", data)
    data = re.sub(r'<div class="grid_content([^>]+)><span></span>', r'<div class="grid_content\1><span>SD</span>', data)

    """
    <td><div class="grid_content*ATTR*><span>(*FECHA*)</span></div></td>
    <td><div class="grid_content2*ATTR*><span><img src="*PATH*/(*IDIOMA*)\.*ATTR*></span></td>
    <td>
        <div class="grid_content*ATTR*>
            <span><a href="(*ENLACE*)"*ATTR*><img src='/servidores/(*SERVIDOR*).*ATTR*></span>
        </div>
    </td>
    <td><div class="grid_content*ATTR*><span>(*UPLOADER*)</span></td>
    <td><div class="grid_content*ATTR*><span>(*SUB|CALIDAD*)</span></td>
    """

    online = scrapertools.get_match(data, '<table class="as_gridder_table">(.+?)</table>')
    download = scrapertools.get_match(data, '<div class="grid_heading"><h2>Descarga</h2></div>(.*)')

    online = re.sub(
        r"<td><div class=\"grid_content[^>]+><span>([^>]+)</span></div></td><td>"
        + '<div class="grid_content2[^>]+><span><img src=".+?banderas/([^\.]+)\.[^>]+></span></td>'
        + '<td><div class="grid_content[^>]+><span><a href="([^"]+)"[^>]+>'
        "<img src='/servidores/([^\.]+)\.[^>]+></span></div></td>"
        + '<td><div class="grid_content[^>]+><span>([^>]+)</span></td>'
        + '<td><div class="grid_content[^>]+><span>([^>]+)</span></td>',
        r"<patron>\3;\2;\1;\4;\5;\6;Ver</patron>",
        online,
    )

    download = re.sub(
        r"<td><div class=\"grid_content[^>]+><span>([^>]+)</span></div></td><td>"
        + '<div class="grid_content2[^>]+><span><img src=".+?banderas/([^\.]+)\.[^>]+></span></td>'
        + '<td><div class="grid_content[^>]+><span><a href="([^"]+)"[^>]+>'
        "<img src='/servidores/([^\.]+)\.[^>]+></span></div></td>"
        + '<td><div class="grid_content[^>]+><span>([^>]+)</span></td>'
        + '<td><div class="grid_content[^>]+><span>([^>]+)</span></td>',
        r"<patron>\3;\2;\1;\4;\5;\6;Descargar</patron>",
        download,
    )

    data = online + download

    """
    <patron>*URL*;*IDIOMA*;*FECHA*;*SERVIDOR*;*UPLOADER*;*SUB|CALIDAD*;*TIPO*</patron>
    """

    patron = "<patron>([^;]+);([^;]+);([^;]+);([^;]+);([^;]+);([^;]+);([^<]+)</patron>"

    matches = re.compile(patron, re.DOTALL).findall(data)

    for (
        scrapedurl,
        scrapedidioma,
        scrapedfecha,
        scrapedservidor,
        scrapeduploader,
        scrapedsubcalidad,
        scrapedtipo,
    ) in matches:
        title = "{0} en {1} [{2}] [{3}] ({4}: {5})".format(
            scrapedtipo, scrapedservidor, IDIOMAS[scrapedidioma], scrapedsubcalidad, scrapeduploader, scrapedfecha
        )

        itemlist.append(
            Item(
                channel=__channel__, title=title, url=urlparse.urljoin(HOST, scrapedurl), action="play", show=item.show
            )
        )

    return itemlist
Code Example #16
def findvideos(item):
    logger.info("pelisalacarta.seriesblanco findvideos")

    itemlist = []

    # Download the page
    data = scrapertools.cache_page(item.url)

    # Make the ajax request for the links
    params = scrapertools.get_match(data, 'data : "(action=load[^\"]+)"')
    data = scrapertools.cachePagePost(host + 'ajax.php', params)

    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<Br>|<BR>|<br>|<br/>|<br />|-\s", "",
                  data)
    data = re.sub(r"<!--.*?-->", "", data)
    data = unicode(data, "iso-8859-1", errors="replace").encode("utf-8")

    data = re.sub(r"<center>|</center>|</a>", "", data)
    data = re.sub(r'<div class="grid_content([^>]+)><span></span>',
                  r'<div class="grid_content\1><span>SD</span>', data)
    '''
    <td><div class="grid_content*ATTR*><span>(*FECHA*)</span></div></td>
    <td>
    <div class="grid_content2*ATTR*><span><img src="*PATH*/(*IDIOMA*)\.*ATTR*></span></td>
    <td><div class="grid_content*ATTR*><span><a href="(*ENLACE*)"*ATTR*><img src='/servidores/(*SERVIDOR*).*ATTR*></span></div></td>"
    <td>
    <div class="grid_content*ATTR*><span>(*UPLOADER*)</span></td>
    <td>
    <div class="grid_content*ATTR*><span>(*SUB|CALIDAD*)</span></td>
    '''

    online = scrapertools.get_match(
        data, '<table class="as_gridder_table">(.+?)</table>')
    download = scrapertools.get_match(
        data, '<div class="grid_heading"><h2>Descarga</h2></div>(.*)')

    online = re.sub(
        r"<td><div class=\"grid_content[^>]+><span>([^>]+)</span></div></td>" + \
         "<td>" + \
         "<div class=\"grid_content2[^>]+><span><img src=\".+?banderas/([^\.]+)\.[^>]+></span></td>" + \
         "<td><div class=\"grid_content[^>]+><span><a href=\"([^\"]+)\"[^>]+><img src='/servidores/([^\.]+)\.[^>]+></span></div></td>" + \
         "<td>" + \
         "<div class=\"grid_content[^>]+><span>([^>]+)</span></td>" + \
         "<td>" + \
         "<div class=\"grid_content[^>]+><span>([^>]+)</span></td>",
        r"<patron>\3;\2;\1;\4;\5;\6;Ver</patron>",
        online
    )

    download = re.sub(
        r"<td><div class=\"grid_content[^>]+><span>([^>]+)</span></div></td>" + \
         "<td>" + \
         "<div class=\"grid_content2[^>]+><span><img src=\".+?banderas/([^\.]+)\.[^>]+></span></td>" + \
         "<td><div class=\"grid_content[^>]+><span><a href=\"([^\"]+)\"[^>]+><img src='/servidores/([^\.]+)\.[^>]+></span></div></td>" + \
         "<td>" + \
         "<div class=\"grid_content[^>]+><span>([^>]+)</span></td>" + \
         "<td>" + \
         "<div class=\"grid_content[^>]+><span>([^>]+)</span></td>",
        r"<patron>\3;\2;\1;\4;\5;\6;Descargar</patron>",
        download
    )

    data = online + download
    '''
    <patron>*URL*;*IDIOMA*;*FECHA*;*SERVIDOR*;*UPLOADER*;*SUB|CALIDAD*;*TIPO*</patron>
    '''

    patron = '<patron>([^;]+);([^;]+);([^;]+);([^;]+);([^;]+);([^;]+);([^<]+)</patron>'

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedidioma, scrapedfecha, scrapedservidor, scrapeduploader, scrapedsubcalidad, scrapedtipo in matches:
        title = scrapedtipo + " en " + scrapedservidor + " [" + idiomas[scrapedidioma] + \
                "] [" + scrapedsubcalidad + "] (" + scrapeduploader + ": " + scrapedfecha + ")"
        itemlist.append(
            Item(channel=__channel__,
                 title=title,
                 url=urlparse.urljoin(host, scrapedurl),
                 action="play",
                 show=item.show))

    return itemlist
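The re.sub trick in the last two examples rewrites each table row into a synthetic <patron>url;lang;date;server;uploader;quality;type</patron> marker so that a single flat pattern can pull the fields back out in a fixed order. A self-contained sketch of the technique on a toy input (all names and HTML invented for illustration):

# Minimal demonstration of the normalize-then-match technique used in
# Examples #15 and #16: re.sub reorders captured groups into a simple
# delimiter format, then one flat pattern extracts every record.
import re

html = '<row date="12/05" lang="es" href="/link/1" server="streamcloud"></row>' \
       '<row date="14/05" lang="it" href="/link/2" server="powvideo"></row>'

# Step 1: rewrite each row into <patron>href;lang;date;server</patron>
normalized = re.sub(r'<row date="([^"]+)" lang="([^"]+)" href="([^"]+)" server="([^"]+)"></row>',
                    r'<patron>\3;\2;\1;\4</patron>', html)

# Step 2: one simple pattern now yields the fields in a fixed order
for href, lang, date, server in re.findall(r'<patron>([^;]+);([^;]+);([^;]+);([^<]+)</patron>', normalized):
    print href, lang, date, server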