def play(item):
    logger.info("pelisalacarta.channels.descargasmix play")
    itemlist = []
    if "enlacesmix.com" in item.url:
        DEFAULT_HEADERS.append(["Referer", item.extra])
        if not item.url.startswith("http:"):
            item.url = "http:" + item.url
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        item.url = scrapertools.find_single_match(data, 'iframe src="([^"]+)"')
         
        enlaces = servertools.findvideos(data=item.url)
        if len(enlaces) > 0:
            itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))
    elif item.server == "directo":
        global DEFAULT_HEADERS
        DEFAULT_HEADERS.append(["Referer", item.extra])
        data = scrapertools.downloadpage(item.url, headers=DEFAULT_HEADERS)
        subtitulo = scrapertools.find_single_match(data, "var subtitulo='([^']+)'")
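        # DEFAULT_HEADERS[1] is assumed to be the Referer entry; point it at the current page before following the redirect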
        DEFAULT_HEADERS[1][1] = item.url
        calidades = ["1080p", "720p", "480p", "360p"]
        for calidad in calidades:
            url_redirect = scrapertools.find_single_match(data, "{file:'([^']+)',label:'" + calidad + "'")
            if url_redirect:
                url_video = scrapertools.get_header_from_response(url_redirect, header_to_get="location", headers=DEFAULT_HEADERS)
                if url_video:
                    url_video = url_video.replace(",", "%2C")
                    itemlist.append(item.clone(url=url_video, subtitle=subtitulo))
                    break
    else:
        itemlist.append(item.clone())
    
    return itemlist
def findvideos(item):
    logger.info("pelisalacarta.channels.pelisdanko findvideos")
    itemlist = []

    if item.url[-2:] == "ss":
        prefix = "strms"
    else:
        prefix = "lnks"
    # Download the page
    data = scrapertools.downloadpage(item.url)

    # Parameters for the redirect page that exposes the links
    data_slug = scrapertools.find_single_match(data, '<div id="ad" data-id="[^"]+" data-slug="([^"]+)"')
    data_id = scrapertools.find_single_match(data, '<tr class="rip hover" data-id="([^"]+)"')
    url = "http://pelisdanko.com/%s/%s/%s/%s" % (prefix, data_id, item.id_enlaces, data_slug)
    data = scrapertools.downloadpage(url, post="")

    from core import servertools
    video_item_list = servertools.find_video_items(data=data)
    for video_item in video_item_list:
        title = "[COLOR green]%s[/COLOR]    |    [COLOR darkorange][%s][/COLOR]" % (video_item.server, item.calidad)
        itemlist.append(item.clone(title=bbcode_kodi2html(title), url=video_item.url, action="play",
                                   server=video_item.server, text_color=""))

    # Opción "Añadir esta película a la biblioteca de XBMC"
    if config.get_library_support() and len(itemlist) > 0 and item.category != "Cine":
        itemlist.append(Item(channel=item.channel, title="Añadir película a la biblioteca", url=item.url,
                             infoLabels={'title': item.fulltitle}, action="add_pelicula_to_library",
                             fulltitle=item.fulltitle, text_color="green", id_enlaces=item.id_enlaces))

    return itemlist
def login():
    logger.info("pelisalacarta.channels.verseriesynovelas login")

    try:
        user = config.get_setting("verseriesynovelasuser", "verseriesynovelas")
        password = config.get_setting("verseriesynovelaspassword", "verseriesynovelas")
        if user == "" and password == "":
            return False, "Para ver los enlaces de este canal es necesario registrarse en www.verseriesynovelas.tv"
        elif user == "" or password == "":
            return False, "Usuario o contraseña en blanco. Revisa tus credenciales"
        data = scrapertools.downloadpage("http://www.verseriesynovelas.tv/")
        if user in data:
            return True, ""
        
        try:
            os.remove(os.path.join(config.get_data_path(), 'cookies', 'verseriesynovelas.tv.dat'))
        except:
            pass

        post = "log=%s&pwd=%s&redirect_to=http://www.verseriesynovelas.tv/wp-admin/&action=login" % (user, password)
        data = scrapertools.downloadpage("http://www.verseriesynovelas.tv/iniciar-sesion", post=post)
        if "La contraseña que has introducido" in data:
            logger.info("pelisalacarta.channels.verseriesynovelas Error en el login")
            return False, "Contraseña errónea. Comprueba tus credenciales"
        elif "Nombre de usuario no válido" in data:
            logger.info("pelisalacarta.channels.verseriesynovelas Error en el login")
            return False, "Nombre de usuario no válido. Comprueba tus credenciales"            
        else:
            logger.info("pelisalacarta.channels.verseriesynovelas Login correcto")
            return True, ""
    except:
        import traceback
        logger.info(traceback.format_exc())
        return False, "Error durante el login. Comprueba tus credenciales"
def get_video_url(page_url, video_password):
    video_urls = []
    media_url =""
    video_id = scrapertools.get_match(page_url,".*?video([0-9]+)")  
    url= "http://flashservice.xvideos.com/flashservices/gateway.php"
    post = "0003000000010011".decode("hex") + "flashRpc.getVideo" + "0002".decode("hex") + "/1" + "000000190A00000004020008".decode("hex") + video_id + "020000020000020000".decode("hex")
    headers = []
    headers.append(["Content-type","application/x-amf"])
    headers.append(["Content-length",str(len(post))])

    data = scrapertools.downloadpage(url,post=post, headers=headers)
    try:
      media_url = scrapertools.get_match(data,"(http\://[0-9a-z/_\.]+\.flv\?[0-9a-z&=]+)")
    except:
      pass
    if not media_url:
      try: 
        post = "0003000000010011".decode("hex") + "flashRpc.getVideo" + "0002".decode("hex") + "/1" + "000000180a00000004020007".decode("hex") + video_id + "020000020000020000".decode("hex")
        data = scrapertools.downloadpage(url,post=post, headers=headers)
        media_url = scrapertools.get_match(data,"(http\://[0-9a-z/_\.]+\.flv\?[0-9a-z&=]+)")
      except:
        pass
        
    if not media_url:
      try: 
        post = "0003000000010011".decode("hex") + "flashRpc.getVideo" + "0002".decode("hex") + "/1" + "000000170a00000004020006".decode("hex") + video_id + "020000020000020000".decode("hex")
        data = scrapertools.downloadpage(url,post=post, headers=headers)
        media_url = scrapertools.get_match(data,"(http\://[0-9a-z/_\.]+\.flv\?[0-9a-z&=]+)")
      except:
        pass
      
    print media_url
    if media_url:
        video_urls.append([scrapertools.get_filename_from_url(media_url)[-4:] + " [xvideos]", media_url])
    return video_urls
def loadlives(item):
    logger.info("[mitele.py] loadlives")
    itemlist = []
    
    import time
    tiempo = int(time.time())
    data = scrapertools.downloadpage("http://indalo.mediaset.es/mmc-player/api/mmc/v1/lives.json")
    # Programming grid
    parrilla = jsontools.load_json(data)

    channels = []
    for channel in parrilla:
        programa = channel["name"]
        canal = channel["channel"]
        if canal not in channels:
            channels.append(canal)
            title = canal.capitalize() + " [[COLOR red]" + programa + "[/COLOR]]"
            url = "http://indalo.mediaset.es/mmc-player/api/mmc/v1/%s/live/flash.json" % canal
            data_channel = scrapertools.downloadpage(url)
            embed_code = jsontools.load_json(data_channel)["locations"][0]["yoo"]
            if not embed_code:
                continue
            url = "http://player.ooyala.com/player.js?embedCode="+embed_code
            itemlist.append(item.clone(title=title, action="play", server="mitele", url=url))


    return itemlist
def play(item):
    logger.info("pelisalacarta.channels.miscelanea_p2p play")
    itemlist = []
    xbmc.executebuiltin('xbmc.PlayMedia(Stop)')
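    # Stop any current playback before resolving the P2P link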

    try:
        from servers import servertools
    except:
        from core import servertools

    data = scrapertools.downloadpage(item.url)

    # If the channel is listed on the site, look the link up manually since there may be several
    if item.extra == "dhd1":
        url = scrapertools.find_single_match(data, 'href="(acestream://[^"]+)"')
        if url == "":
            redirect = scrapertools.find_single_match(data, 'src="(http://buker[^"]+)"')
            data = scrapertools.downloadpage(redirect)
            urls = servertools.findvideosbyserver(data, "p2p")
            if urls:
                url = urls[0][1] +"|" + item.title
                itemlist.append(item.clone(url=url, server="p2p"))
        else:
            url += "|" + item.title
            itemlist.append(item.clone(url=url, server="p2p"))
    else:
        # Automate the acestream/sopcast link lookup through the p2p connector
        urls = servertools.findvideosbyserver(data, "p2p")
        if urls:
            url = urls[0][1]+"|" + item.title
            itemlist.append(item.clone(url=url, server="p2p"))
        
    return itemlist
def novedades_pokeryour(item):
    logger.info("pelisalacarta.channels.boxingclub novedades_pokeryour")
    itemlist = []
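    # The Russian page is fetched through Google Translate (ru -> es), which takes three chained requests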
    ## Request 1
    url = "http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u="+item.url
    data = scrapertools.decodeHtmlentities( scrapertools.downloadpage(url,follow_redirects=False) )
    ## Request 2
    url = scrapertools.get_match(data, ' src="([^"]+)" name=c ')
    data = scrapertools.decodeHtmlentities( scrapertools.downloadpage(url,follow_redirects=False) )
    ## Request 3
    url = scrapertools.get_match(data, 'URL=([^"]+)"')
    data = scrapertools.decodeHtmlentities( scrapertools.cachePage(url) )
    data = re.sub(r"\n|\r|\t|</span> comentario de Rusia.</span>", '', data)

    bloque_entradas = scrapertools.find_multiple_matches(data, '<div class="item column(.*?)<div class=item-separator>')
    for bloque in bloque_entradas:
        patron = 'title="([^>]+)>.*?<a href=([^>]+)>.*?' \
                 '<img src=(/sport/media/com_hwdmediashare/files/[^\s]+).*?' \
                 '<dd class=media-info-description>.*?</span>(.*?)</span>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedtitle, scrapedurl, scrapedthumbnail, scrapedplot  in matches:
            scrapedthumbnail = host + scrapedthumbnail

            scrapedtitle = scrapedtitle.replace("vídeo de alta definición","HD").replace('::"','')
            scrapedtitle = re.sub(r'(?i)- tarjeta principal|tarjeta de|tarjeta|en línea de|el vídeo|el video|vídeo|video|en línea|en ruso|::','',scrapedtitle)
            if not "/" in scrapedtitle: scrapedtitle += "/"
            scrapedtitle = "[COLOR darkorange]"+scrapedtitle.split("/",1)[0]+"/[/COLOR][COLOR red]"+scrapedtitle.split("/",1)[1]+"[/COLOR]"
            scrapedurl = scrapedurl.replace("http://translate.googleusercontent.com/translate_c?depth=2&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=","")
            scrapedurl = urllib.unquote(scrapedurl)
            itemlist.append(Item(channel=__channel__, title=scrapedtitle, action="play", url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot, folder=True))
    
    next_page = scrapertools.find_single_match(data, '<li class=pagination-next>.*?href=([^\s]+)')
    if next_page != "":
        itemlist.append(Item(channel=__channel__, title=">> Siguiente", action="novedades_pokeryour", url=next_page, thumbnail=item.thumbnail, folder=True))
    return itemlist
def play(item):
    logger.info("pelisalacarta.zentorrents findvideos")
    
    itemlist = []
    
    # Download the page
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|</p>|<p>|&amp;|amp;","",data)
    
    patron = '<div class="descargatext">.*?'
    patron += '<img alt="([^<]+)" '
    patron += 'src="([^"]+)".*?'
    patron += '<a href="/download([^"]+)"'
    
    
    matches = re.compile(patron,re.DOTALL).findall(data)
    scrapertools.printMatches(matches)
    
    for scrapedtitulo, scrapedthumbnail, scrapedurl in matches:
      scrapedurl = "http://www.zentorrents.com/download" + scrapedurl
      data = scrapertools.downloadpage(scrapedurl)
      link = scrapertools.get_match(data,'Si la descarga no comienza autom&aacute;ticamente, prueba haciendo click <a href="([^"]+)">')
      itemlist.append( Item(channel=__channel__, title =scrapedtitulo + " (Torrent)" , thumbnail=scrapedthumbnail, url=link, server="torrent", action="play", folder=False) )

    return itemlist
def get_api_url():
  global url_api
  global beeg_salt
  data = scrapertools.downloadpage("http://beeg.com")
  version = re.compile('<script src="//static.beeg.com/cpl/([\d]+).js"').findall(data)[0]
  js_url  = "http:" + re.compile('<script src="(//static.beeg.com/cpl/[\d]+.js)"').findall(data)[0]
  url_api = "https://api2.beeg.com/api/v6/"+ version
  data = scrapertools.downloadpage(js_url)
  beeg_salt = re.compile('beeg_salt="([^"]+)"').findall(data)[0]
def search_links_abando(item):
    logger.info("streamondemand.channels.trailertools search_links_abando")

    data = scrapertools.downloadpage(item.url)
    itemlist = []
    if "Lo sentimos, no tenemos trailer" in data:
        itemlist.append(item.clone(title="Nessun video disponibile", action="", text_color=""))
    else:
        if item.contextual:
            progreso = platformtools.dialog_progress("Cercando su abandomoviez", "Caricando i trailer...")
            progreso.update(10)
            i = 0
            message = "Caricando i trailer..."
        patron = '<div class="col-md-3 col-xs-6"><a href="([^"]+)".*?' \
                 'Images/(\d+).gif.*?</div><small>(.*?)</small>'
        matches = scrapertools.find_multiple_matches(data, patron)
        if len(matches) == 0:
            trailer_url = scrapertools.find_single_match(data, '<iframe.*?src="([^"]+)"')
            if trailer_url != "":
                trailer_url = trailer_url.replace("embed/", "watch?v=")
                code = scrapertools.find_single_match(trailer_url, 'v=([A-Za-z0-9\-_]+)')
                thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
                itemlist.append(item.clone(title="Trailer  [youtube]", url=trailer_url, server="youtube",
                                           thumbnail=thumbnail, action="play", text_color="white"))
        else:
            for scrapedurl, language, scrapedtitle in matches:
                if language == "1":
                    idioma = " (ESP)"
                else:
                    idioma = " (V.O)"
                scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)
                scrapedtitle = scrapertools.htmlclean(scrapedtitle) + idioma + "  [youtube]"
                if item.contextual:
                    i += 1
                    message += ".."
                    progreso.update(10 + (90*i/len(matches)), message)
                    scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle

                data_trailer = scrapertools.downloadpage(scrapedurl)
                trailer_url = scrapertools.find_single_match(data_trailer, 'iframe.*?src="([^"]+)"')
                trailer_url = trailer_url.replace("embed/", "watch?v=")
                code = scrapertools.find_single_match(trailer_url, 'v=([A-Za-z0-9\-_]+)')
                thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
                itemlist.append(item.clone(title=scrapedtitle, url=trailer_url, server="youtube", action="play",
                                           thumbnail=thumbnail, text_color="white"))
        
        if item.contextual:
            progreso.close()

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else: 
            title = "%s"
        itemlist.append(item.clone(title=title % "Ricerca manuale su Abandomoviez",
                                   action="manual_search", thumbnail="", text_color="green", extra="abandomoviez"))
    return itemlist
def filmaffinity_search(item):
    logger.info("streamondemand.channels.trailertools filmaffinity_search")

    if item.filmaffinity:
        item.url = item.filmaffinity
        return search_links_filmaff(item)

    # Check whether this is a fresh search or it comes from the "Next" option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
    else:
        params = urllib.urlencode([('stext', item.contentTitle), ('stype%5B%5D', 'title'), ('country', ''),
                                   ('genre', ''), ('fromyear', item.year), ('toyear', item.year)])
        url = "http://www.filmaffinity.com/es/advsearch.php?%s" % params
        data = scrapertools.downloadpage(url)

    itemlist = []
    patron = '<div class="mc-poster">.*?<img.*?src="([^"]+)".*?' \
             '<div class="mc-title"><a  href="/es/film(\d+).html"[^>]+>(.*?)<img'
    matches = scrapertools.find_multiple_matches(data, patron)
    # With a single result, look up its trailers directly; otherwise list every result
    if len(matches) == 1:
        item.url = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % matches[0][1]
        item.thumbnail = matches[0][0]
        if not item.thumbnail.startswith("http"):
            item.thumbnail = "http://www.filmaffinity.com" + item.thumbnail
        itemlist = search_links_filmaff(item)
    elif len(matches) > 1:
        for scrapedthumbnail, id, scrapedtitle in matches:
            if not scrapedthumbnail.startswith("http"):
                scrapedthumbnail = "http://www.filmaffinity.com" + scrapedthumbnail
            scrapedurl = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % id
            scrapedtitle = unicode(scrapedtitle, encoding="utf-8", errors="ignore")
            scrapedtitle = scrapertools.htmlclean(scrapedtitle)
            itemlist.append(item.clone(title=scrapedtitle, url=scrapedurl, text_color="white",
                                       action="search_links_filmaff", thumbnail=scrapedthumbnail))

        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">&gt;&gt;</a>')
        if next_page != "":
            next_page = urlparse.urljoin("http://www.filmaffinity.com/es/", next_page)
            itemlist.append(item.clone(title=">> Seguente", page=next_page, action="filmaffinity_search", thumbnail="",
                                       text_color=""))

    if not itemlist:
        itemlist.append(item.clone(title="Nessun risultato trovato per (%s)" % item.contentTitle,
                                   action="", thumbnail="", text_color=""))

        if keyboard:
            if item.contextual:
                title = "[COLOR green]%s[/COLOR]"
            else: 
                title = "%s"
            itemlist.append(item.clone(title=title % "Ricerca manuale su Filmaffinity",
                                       action="manual_search", text_color="green", thumbnail="", extra="filmaffinity"))
        
    return itemlist
def episodios(item):
    logger.info("pelisalacarta.channels.cinefox episodios")
    itemlist = []

    if item.extra == "ultimos":
        data = scrapertools.downloadpage(item.url, headers=headers.items())
        item.url = scrapertools.find_single_match(data, '<a href="([^"]+)" class="h1-like media-title"')
        item.url += "/episodios"

    data = scrapertools.downloadpage(item.url, headers=headers.items())

    data_season = data[:]
    headers["Referer"] = item.url

    if item.extra == "episodios" or not __menu_info__:
        action = "findvideos"
    else:
        action = "menu_info_episode"

    seasons = scrapertools.find_multiple_matches(data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
    for i, url in enumerate(seasons):
        if i != 0:
            data_season = scrapertools.downloadpage(url, headers=headers.items())
        patron = '<div class="ep-list-number">.*?href="([^"]+)">([^<]+)</a>.*?<span class="name">([^<]+)</span>'
        matches = scrapertools.find_multiple_matches(data_season, patron)
        for scrapedurl, episode, scrapedtitle in matches:
            item.contentSeason = episode.split("x")[0]
            item.contentEpisodeNumber = episode.split("x")[1]
            
            title = episode + " - " + scrapedtitle
            extra = "episode"
            if item.extra == "episodios":
                extra = "episode|"
            itemlist.append(item.clone(action=action, title=title, url=scrapedurl, text_color=color2, extra=extra, contentType="episode"))

    if item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    itemlist.reverse()
    if item.extra != "episodios":
        id = scrapertools.find_single_match(item.url, '/(\d+)/')
        data_trailer = scrapertools.downloadpage("http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id,
                                                     headers=headers.items())
        item.infoLabels["trailer"] = jsontools.load_json(data_trailer)["video"]["url"]
        itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                       text_color="magenta"))
        if config.get_library_support():
            itemlist.append(Item(channel=item.channel, action="add_serie_to_library", text_color=color5,
                                 title="Añadir serie a la biblioteca", show=item.show, thumbnail=item.thumbnail,
                                 url=item.url, fulltitle=item.fulltitle, fanart=item.fanart, extra="episodios"))

    return itemlist
def findvideos(item):
    logger.info("pelisalacarta.channels.verseriesynovelas findvideos")
    itemlist = []
    item.text_color = color3

    if item.extra == "newest" and item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    data = scrapertools.downloadpage(item.url, headers=CHANNEL_HEADERS)
    if "valida el captcha" in data:
        logueado, error = login()
        data = scrapertools.downloadpage(item.url, headers=CHANNEL_HEADERS)    
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)

    bloque = scrapertools.find_multiple_matches(data, '<tr><td data-th="Idioma">(.*?)</div>')
    for match in bloque:
        patron = 'data-th="Calidad">(.*?)<.*?' \
                 '"Servidor".*?src="http://www.google.com/s2/favicons\?domain=(.*?)\.' \
                 '.*?<td data-th="Enlace"><a href="(http://www.verseriesynovelas.tv/link/enlaces.php.*?)"'
        matches = scrapertools.find_multiple_matches(match, patron)
        for quality, server, url in matches:
            if server == "streamin":
                server = "streaminto"
            if server== "waaw":
                server = "netutv"
            if server == "ul":
                server = "uploadedto"
            try:
                servers_module = __import__("servers."+server)
                title = "Ver vídeo en "+server+"  ["+quality+"]"
                if "Español.png" in match:
                    title += " [CAST]"
                if "VOS.png" in match:
                    title += " [VOSE]"
                if "Latino.png" in match:
                    title += " [LAT]"
                if "VO.png" in match:
                    title += " [V.O]"
                itemlist.append(item.clone(action="play", title=title, url=url))
            except:
                pass

    if not itemlist: 
        itemlist.append(item.clone(action="", title="No se ha encontrado ningún enlace"))
    if item.extra != "episodios":
        url_lista = scrapertools.find_single_match(data, '<a class="regresar" href="([^"]+)"')
        if url_lista != "":
            itemlist.append(item.clone(action="episodios", title="Ir a la Lista de Capítulos", url=url_lista,
                                       text_color="red", context=""))

    return itemlist
def episodios(item):
    logger.info("pelisalacarta.channels.vixto episodios")
    itemlist = list()

    # Descarga la página
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    # Extrae las entradas (carpetas)
    bloque = scrapertools.find_single_match(data, '<strong>Temporada:(.*?)</div>')
    matches = scrapertools.find_multiple_matches(bloque, 'href="([^"]+)">(.*?)</a>')

    for scrapedurl, scrapedtitle in matches:
        title = "Temporada %s" % scrapedtitle

        new_item = item.clone(action="", title=title, text_color=color2)
        new_item.infoLabels["season"] = scrapedtitle
        new_item.infoLabels["mediatype"] = "season"
        data_season = scrapertools.downloadpage(scrapedurl)
        data_season = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data_season)
        patron = '<li class="media">.*?href="([^"]+)"(.*?)<div class="media-body">.*?href.*?>' \
                 '(.*?)</a>'
        matches = scrapertools.find_multiple_matches(data_season, patron)

        elementos = []
        for url, status, title in matches:
            if not "Enlaces Disponibles" in status:
                continue
            elementos.append(title)
            item_epi = item.clone(action="findvideos", url=url, text_color=color1)
            item_epi.infoLabels["season"] = scrapedtitle
            episode = scrapertools.find_single_match(title, 'Capitulo (\d+)')
            titulo = scrapertools.find_single_match(title, 'Capitulo \d+\s*-\s*(.*?)$')
            item_epi.infoLabels["episode"] = episode
            item_epi.infoLabels["mediatype"] = "episode"
            item_epi.title = "%sx%s  %s" % (scrapedtitle, episode.zfill(2), titulo)

            itemlist.insert(0, item_epi)
        if elementos:
            itemlist.insert(0, new_item)

    if item.infoLabels["tmdb_id"] and itemlist:
        try:
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    if itemlist:
        if config.get_library_support():
            itemlist.append(Item(channel=item.channel, title="Añadir serie a la biblioteca", text_color="green",
                                 filtro=True, action="add_serie_to_library", fulltitle=item.fulltitle,
                                 extra="episodios", url=item.url, infoLabels=item.infoLabels, show=item.show))
    else:
        itemlist.append(item.clone(title="Serie sin episodios disponibles", action="", text_color=color3))
    return itemlist
def Check():
  import guitools
  progress = guitools.Dialog_ProgressBG("Pelisalacarta","Comprobando actualizaciones...")

  DownloadServers = []
  DownloadChannels = []
  ServersPath = os.path.join( config.get_runtime_path(), "servers" )
  ServersIndexPath = os.path.join(config.get_data_path(), "Servers.json")
  ChannelsPath = os.path.join( config.get_runtime_path(), "channels" )
  ChannelsIndexPath = os.path.join(config.get_data_path(), "Channels.json")
  if not os.path.isfile(ServersIndexPath): CreateIndex(ServersIndexPath,"servers")
  if not os.path.isfile(ChannelsIndexPath): CreateIndex(ChannelsIndexPath,"channels")
  
  #Servers
  progress.Actualizar(25, "Descargando lista de Servidores...")
  RemoteJSONData = json.loads(scrapertools.downloadpage(GitApi + "servers", headers=headers))
  LocalJSONData = json.loads(open(ServersIndexPath,"r").read())
  #open(ServersIndexPath.replace(".json","-remote.json"),"w").write(json.dumps(RemoteJSONData, indent=4, sort_keys=True))
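  # Queue entries that are missing from (or changed in) the local index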
  if RemoteJSONData != LocalJSONData:
    for Server in RemoteJSONData:
      if not Server in LocalJSONData:
        DownloadServers.append(Server)
        
  #Channels
  progress.Actualizar(50, "Descargando lista de Canales...")
  RemoteJSONData = json.loads(scrapertools.downloadpage(GitApi + "channels", headers=headers))
  LocalJSONData = json.loads(open(ChannelsIndexPath,"r").read())
  #open(ChannelsIndexPath.replace(".json","-remote.json"),"w").write(json.dumps(RemoteJSONData, indent=4, sort_keys=True))
  progress.Actualizar(75, "Comprobando...")
  if RemoteJSONData != LocalJSONData:
    for Channel in RemoteJSONData:
      if not Channel in LocalJSONData:
        logger.info(Channel)
        DownloadChannels.append(Channel)
               
  if DownloadServers or DownloadChannels:    
    
    for File in  DownloadServers:   
      Progreso = DownloadServers.index(File) * 100 / (len(DownloadServers) + len(DownloadChannels))
      progress.Actualizar(Progreso ,'Actualizando Archivo: "' + File["name"] + '"')
      open(os.path.join(config.get_runtime_path(), "servers", File["name"]),"wb").write(scrapertools.downloadpage(File["download_url"]))
    for File in  DownloadChannels:   
      Progreso = (DownloadChannels.index(File) + len(DownloadServers)  ) * 100 / (len(DownloadServers) + len(DownloadChannels))
      progress.Actualizar(Progreso ,'Actualizando Archivo: "' + File["name"] + '"')
      open(os.path.join(config.get_runtime_path(), "channels", File["name"]),"wb").write(scrapertools.downloadpage(File["download_url"]))
      
    CreateIndex(ServersIndexPath,"servers")
    CreateIndex(ChannelsIndexPath,"channels")

  progress.Actualizar(100, "Todos los canales y servidores estan actualizados")
  import time
  time.sleep(3)
  progress.Cerrar()
def abandomoviez_search(item):
    logger.info("streamondemand.channels.trailertools abandomoviez_search")

    # Check whether this is a fresh search or it comes from the "Next" option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
    else:
        titulo = item.contentTitle.decode('utf-8').encode('iso-8859-1')
        post = urllib.urlencode({'query': titulo, 'searchby': '1', 'posicion': '1', 'orden': '1',
                                 'anioin': item.year, 'anioout': item.year, 'orderby': '1'})
        url = "http://www.abandomoviez.net/db/busca_titulo_advance.php"
        item.prefix = "db/"
        data = scrapertools.downloadpage(url, post=post)
        if "No hemos encontrado ninguna" in data:
            url = "http://www.abandomoviez.net/indie/busca_titulo_advance.php"
            item.prefix = "indie/"
            data = scrapertools.downloadpage(url, post=post).decode("iso-8859-1").encode('utf-8')

    itemlist = []
    patron = '(?:<td width="85"|<div class="col-md-2 col-sm-2 col-xs-3">).*?<img src="([^"]+)"' \
             '.*?href="([^"]+)">(.*?)(?:<\/td>|<\/small>)'
    matches = scrapertools.find_multiple_matches(data, patron)
    # With a single result, look up its trailers directly; otherwise list every result
    if len(matches) == 1:
        item.url = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, matches[0][1])
        item.thumbnail = matches[0][0]
        itemlist = search_links_abando(item)
    elif len(matches) > 1:
        for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
            scrapedurl = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)
            scrapedtitle = scrapertools.htmlclean(scrapedtitle)
            itemlist.append(item.clone(title=scrapedtitle, action="search_links_abando",
                                       url=scrapedurl, thumbnail=scrapedthumbnail, text_color="white"))

        next_page = scrapertools.find_single_match(data, '<a href="([^"]+)">Siguiente')
        if next_page != "":
            next_page = urlparse.urljoin("http://www.abandomoviez.net/%s" % item.prefix, next_page)
            itemlist.append(item.clone(title=">> Seguente", action="abandomoviez_search", page=next_page, thumbnail="",
                                       text_color=""))

    if not itemlist:
        itemlist.append(item.clone(title="Nessun risultato trovato", action="", thumbnail="",
                                   text_color=""))
    
        if keyboard:
            if item.contextual:
                title = "[COLOR green]%s[/COLOR]"
            else: 
                title = "%s"
            itemlist.append(item.clone(title=title % "Ricerca manuale su Abandomoviez",
                                       action="manual_search", thumbnail="", text_color="green", extra="abandomoviez"))

    return itemlist
def findvideos(item):
    logger.info("pelisalacarta.channels.cinefox findvideos")
    itemlist = []

    if not "|" in item.extra and not __menu_info__:
        data = scrapertools.downloadpage(item.url, headers=headers.items())
        year = scrapertools.find_single_match(data, '<div class="media-summary">.*?release.*?>(\d+)<')
        if year != "" and not "tmdb_id" in item.infoLabels:
            try:
                from core import tmdb
                item.infoLabels["year"] = year
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass
    
        if item.infoLabels["plot"] == "":
            sinopsis = scrapertools.find_single_match(data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    id = scrapertools.find_single_match(item.url, '/(\d+)/')
    if "|" in item.extra or not __menu_info__:
        extra = item.extra
        if "|" in item.extra:
            extra = item.extra[:-1]
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, extra, "streaming")
        itemlist.extend(get_enlaces(item, url, "Online"))
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, extra, "download")
        itemlist.extend(get_enlaces(item, url, "de Descarga"))

        if extra == "media":
            data_trailer = scrapertools.downloadpage("http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id,
                                                     headers=headers.items())
            trailer_url = jsontools.load_json(data_trailer)["video"]["url"]
            if trailer_url != "":
                item.infoLabels["trailer"] = trailer_url

            title = "Ver enlaces %s - [" + item.contentTitle + "]"
            itemlist.append(item.clone(channel="trailertools", action="buscartrailer", title="Buscar Tráiler",
                                       text_color="magenta", context=""))

            if config.get_library_support() and not "|" in item.extra:
                itemlist.append(Item(channel=item.channel, action="add_pelicula_to_library", text_color=color5,
                                     title="Añadir película a la biblioteca", url=item.url, thumbnail=item.thumbnail,
                                     fanart=item.fanart, fulltitle=item.fulltitle,
                                     extra="media|"))
    else:
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (id, item.extra, item.type)
        type = item.type.replace("streaming", "Online").replace("download", "de Descarga")
        itemlist.extend(get_enlaces(item, url, type))

    return itemlist
def get_video_url(page_url, premium=False, video_password=""):
    logger.info("pelisalacarta.servers.realdebrid get_video_url( page_url='%s' , video_password=%s)"
                % (page_url, video_password))
    
    # Check for a stored token; if there is none, run the authentication flow
    token_auth = channeltools.get_channel_setting("realdebrid_token", "realdebrid")
    if token_auth is None or token_auth == "":
        if config.is_xbmc():
            token_auth = authentication()
            if token_auth == "":
                return [["REAL-DEBRID: No se ha completado el proceso de autentificación", ""]]
        else:
            return [["Es necesario activar la cuenta. Accede al menú de ayuda", ""]]

    post_link = urllib.urlencode([("link", page_url), ("password", video_password)])
    headers["Authorization"] = "Bearer %s" % token_auth
    url = "https://api.real-debrid.com/rest/1.0/unrestrict/link"
    data = scrapertools.downloadpage(url, post=post_link, headers=headers.items())
    data = jsontools.load_json(data)
    
    # If the token is invalid or has expired, request a new one
    if "error" in data and data["error"] == "bad_token":
        debrid_id = channeltools.get_channel_setting("realdebrid_id", "realdebrid")
        secret = channeltools.get_channel_setting("realdebrid_secret", "realdebrid")
        refresh = channeltools.get_channel_setting("realdebrid_refresh", "realdebrid")

        post_token = urllib.urlencode({"client_id": debrid_id, "client_secret": secret, "code": refresh,
                                       "grant_type": "http://oauth.net/grant_type/device/1.0"})
        renew_token = scrapertools.downloadpage("https://api.real-debrid.com/oauth/v2/token", post=post_token,
                                                headers=headers.items())
        renew_token = jsontools.load_json(renew_token)
        if not "error" in renew_token:
            token_auth = renew_token["access_token"]
            channeltools.set_channel_setting("realdebrid_token", token_auth, "realdebrid")
            headers["Authorization"] = "Bearer %s" % token_auth
            data = scrapertools.downloadpage(url, post=post_link, headers=headers.items())
            data = jsontools.load_json(data)

    if "download" in data:
        return get_enlaces(data)
    else:
        if "error" in data:
            msg = data["error"].decode("utf-8","ignore")
            msg = msg.replace("hoster_unavailable", "Servidor no disponible") \
                     .replace("unavailable_file", "Archivo no disponible") \
                     .replace("hoster_not_free", "Servidor no gratuito") \
                     .replace("bad_token", "Error en el token")
            return [["REAL-DEBRID: " + msg, ""]]
        else:
            return [["REAL-DEBRID: No se ha generado ningún enlace", ""]]
def youtube_search(item):
    logger.info("streamondemand.channels.trailertools youtube_search")
    itemlist = []

    titulo = item.contentTitle
    if item.extra != "youtube":
        titulo += " trailer"
    # Check whether this is a fresh search or it comes from the "Next" option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
    else:
        titulo = urllib.quote(titulo)
        titulo = titulo.replace("%20", "+")
        data = scrapertools.downloadpage("https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q="+titulo)

    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
    patron = '<span class="yt-thumb-simple">.*?(?:src="https://i.ytimg.com/|data-thumb="https://i.ytimg.com/)([^"]+)"' \
             '.*?<h3 class="yt-lockup-title ">.*?<a href="([^"]+)".*?title="([^"]+)".*?' \
             '</a><span class="accessible-description".*?>.*?(\d+:\d+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matches:
        scrapedthumbnail = urlparse.urljoin("https://i.ytimg.com/", scrapedthumbnail)
        scrapedtitle = scrapedtitle.decode("utf-8")
        scrapedtitle = scrapedtitle + " (" + scrapedduration + ")"
        if item.contextual:
            scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
        url = urlparse.urljoin('https://www.youtube.com/', scrapedurl)
        itemlist.append(item.clone(title=scrapedtitle, action="play", server="youtube", url=url,
                                   thumbnail=scrapedthumbnail, text_color="white"))
    
    next_page = scrapertools.find_single_match(data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">'
                                                     'Siguiente')
    if next_page != "":
        next_page = urlparse.urljoin("https://www.youtube.com", next_page)
        itemlist.append(item.clone(title=">> Seguente", action="youtube_search", extra="youtube", page=next_page,
                                   thumbnail="", text_color=""))
    
    if not itemlist:
        itemlist.append(item.clone(title="Nessun risultato trovato per (%s)" % titulo,
                                   action="", thumbnail="", text_color=""))

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else: 
            title = "%s"
        itemlist.append(item.clone(title=title % "Ricerca manuale su Youtube", action="manual_search",
                                   text_color="green", thumbnail="", extra="youtube"))

    return itemlist
def jayhap_search(item):
    logger.info("streamondemand.channels.trailertools jayhap_search")
    itemlist = []

    if item.extra != "jayhap":
        item.contentTitle += " trailer"
    texto = item.contentTitle
    post = urllib.urlencode({'q': texto, 'yt': 'true', 'vm': 'true', 'dm': 'true',
                             'v': 'all', 'l': 'all', 'd': 'all'})

    # Check whether this is a fresh search or it comes from the "Next" option
    if item.page != "":
        post += urllib.urlencode(item.page)
        data = scrapertools.downloadpage("https://www.jayhap.com/load_more.php", post=post)
    else:
        data = scrapertools.downloadpage("https://www.jayhap.com/get_results.php", post=post)
    data = jsontools.load_json(data)
    for video in data['videos']:
        url = video['url']
        server = video['source'].lower()
        duration = " (" + video['duration'] + ")"
        title = video['title'].decode("utf-8") + duration + "  [" + server.capitalize() + "]"
        thumbnail = video['thumbnail']
        if item.contextual:
            title = "[COLOR white]%s[/COLOR]" % title
        itemlist.append(item.clone(action="play", server=server, title=title, url=url, thumbnail=thumbnail,
                                   text_color="white"))

    if not itemlist:
        itemlist.append(item.clone(title="Nessun risultato trovato per (%s)" % item.contentTitle,
                                   action="", thumbnail="", text_color=""))
    else:
        tokens = data['tokens']
        tokens['yt_token'] = tokens.pop('youtube')
        tokens['vm_token'] = tokens.pop('vimeo')
        tokens['dm_token'] = tokens.pop('dailymotion')
        itemlist.append(item.clone(title=">> Seguente", page=tokens, action="jayhap_search", extra="jayhap",
                                   thumbnail="", text_color=""))

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else: 
            title = "%s"
        itemlist.append(item.clone(title=title % "Ricerca manuale su Jayhap", action="manual_search",
                                   text_color="green", thumbnail="", extra="jayhap"))

    return itemlist
def series(item):
    logger.info("pelisalacarta.channels.cinefox series")
    itemlist = []
    if "valores" in item:
        itemlist.append(item.clone(action="", title=item.valores, text_color=color4))

    data = scrapertools.downloadpage(item.url)
    bloque = scrapertools.find_multiple_matches(data, ' <div class="media-card "(.*?)<div class="info-availability '
                                                      'one-line">')
    for match in bloque:
        patron = '<div class="audio-info">.*?<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
        matches = scrapertools.find_multiple_matches(match, patron)
        for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
            url = urlparse.urljoin(host, scrapedurl + "/episodios")
            itemlist.append(Item(channel=item.channel, action="episodios", title=scrapedtitle, url=url,
                                 thumbnail=scrapedthumbnail, contentTitle=scrapedtitle, fulltitle=scrapedtitle,
                                 show=scrapedtitle, text_color=color2, context="25"))

    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
    if next_page != "":
        itemlist.append(Item(channel=item.channel, action="series", title=">> Siguiente", url=next_page,
                             thumbnail=item.thumbnail, text_color=color3))

    return itemlist
def detail(item):
    logger.info("[filesmonster_catalogue.py] detail")
    itemlist = []

    data=scrapertools.downloadpage(item.url)
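    # First pass: direct filesmonster download links; a second pass below collects subfolders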
    patronvideos  = '["|\'](http\://filesmonster.com/download.php\?[^"\']+)["|\']'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)

    for url in matches:
        title = "Archivo %d: %s [filesmonster]" %(len(itemlist)+1, item.fulltitle)
        itemlist.append( Item(channel=item.channel , action="play" ,  server="filesmonster", title=title, fulltitle= item.fulltitle ,url=url, thumbnail=item.thumbnail, folder=False))



    patronvideos  = '["|\'](http\://filesmonster.com/folders.php\?[^"\']+)["|\']'
    matches = re.compile(patronvideos,re.DOTALL).findall(data)
    for url in matches: 
      if not url == item.url:
        logger.info(url)
        logger.info(item.url)
        title = "Carpeta %d: %s [filesmonster]" %(len(itemlist)+1, item.fulltitle)
        itemlist.append( Item(channel=item.channel , action="detail" ,  title=title, fulltitle= item.fulltitle ,url=url, thumbnail=item.thumbnail, folder=True))


    return itemlist
def test_video_exists(page_url):
    logger.info("fusionse.servers.stormo test_video_exists(page_url='%s')" % page_url)
    
    data = scrapertools.downloadpage(page_url)
    if "video_error.mp4" in data: return False, "[Stormo] El archivo no existe o ha sido borrado"

    return True, ""
def ultimos(item):
    logger.info("pelisalacarta.channels.cinefox ultimos")
    item.text_color = color2
    itemlist = []
    data = scrapertools.downloadpage(item.url)
    
    bloque = scrapertools.find_multiple_matches(data, ' <div class="media-card "(.*?)<div class="info-availability '
                                                      'one-line">')
    for match in bloque:
        patron = '<div class="audio-info">(.*?)<img class.*?src="([^"]+)".*?href="([^"]+)">([^<]+)</a>'
        matches = scrapertools.find_multiple_matches(match, patron)
        for idiomas, scrapedthumbnail, scrapedurl, scrapedtitle in matches:
            show = re.sub(r'(\s*[\d]+x[\d]+\s*)', '', scrapedtitle)
            audios = []
            if "medium-es" in idiomas: audios.append('CAST')
            if "medium-vs" in idiomas: audios.append('VOSE')
            if "medium-la" in idiomas: audios.append('LAT')
            if "medium-en" in idiomas: audios.append('V.O')
            title = show + " - " + re.sub(re.escape(show), '', scrapedtitle) + " [" + "/".join(audios) + "]"
            url = urlparse.urljoin(host, scrapedurl)
            itemlist.append(item.clone(action="menu_info_episode", title=title, url=url, thumbnail=scrapedthumbnail,
                                       contentTitle=show, fulltitle=show, show=show, context="25"))

    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    next_page = scrapertools.find_single_match(data, 'href="([^"]+)"[^>]+>Siguiente')
    if next_page != "":
        itemlist.append(item.clone(action="ultimos", title=">> Siguiente", url=next_page, text_color=color3))

    return itemlist
def extract_safe(item):
    logger.info("pelisalacarta.channels.puyasubs extract_safe")
    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
        from core import tmdb
        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")
    itemlist = list()
    
    # The last path segment of the safelinking URL is the protected-link hash
    link_hash = item.url.rsplit("/", 1)[1]
    headers = [['Content-Type', 'application/json;charset=utf-8']]
    post = jsontools.dump_json({"hash": link_hash})
    data = scrapertools.downloadpage("http://safelinking.net/v1/protected", post, headers)
    data = jsontools.load_json(data)

    for link in data.get("links"):
        enlace = link["url"]
        domain = link["domain"]
        title = "Ver por %s" % domain
        action = "play"
        if "mega" in domain:
            server = "mega"
            if "/#F!" in enlace:
                action = "carpeta"

        elif "1fichier" in domain:
            server = "onefichier"
            if "/dir/" in enlace:
                action = "carpeta"

        itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))
    
    return itemlist
def busqueda(item, texto=""):
    logger.info("pelisalacarta.channels.verseriesynovelas busqueda")
    itemlist = []
    item.text_color = color2

    data = scrapertools.downloadpage(item.url, headers=CHANNEL_HEADERS)
    data = data.replace("\n", "").replace("\t", "")

    bloque = scrapertools.find_single_match(data, '<ul class="list-paginacion">(.*?)</section>')
    bloque = scrapertools.find_multiple_matches(bloque, '<li><a href=(.*?)</li>')
    for match in bloque:
        patron = '([^"]+)".*?<img class="fade" src="([^"]+)".*?<h2>(.*?)</h2>'
        matches = scrapertools.find_multiple_matches(match, patron)
        for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
            # Search fix: skip entries whose title does not contain the query text
            if texto.lower() not in scrapedtitle.lower():
                continue

            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle).replace(" online", "")
            titleinfo = re.sub(r'(?i)((primera|segunda|tercera|cuarta|quinta|sexta) temporada)', "Temporada",
                               scrapedtitle)
            titleinfo = titleinfo.split("Temporada")[0].strip()
            titleinfo = re.sub(r'(\(\d{4}\))|(\(\d{4}\s*-\s*\d{4}\))', '', titleinfo)

            if DEBUG:
                logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
            itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=scrapedurl,
                                       thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, show=titleinfo,
                                       contentType="tvshow", contentTitle=titleinfo))
    # Pagination
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)">')
    if next_page != "":
        itemlist.append(item.clone(title=">> Siguiente", url=next_page))

    return itemlist
def busqueda(item):
    logger.info("pelisalacarta.channels.descargasmix busqueda")
    itemlist = []
    data = scrapertools.downloadpage(item.url)

    bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" '
                                                  'role="complementary">')
    patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
             '.*?<p class="stats">(.*?)</p>'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedcat in matches:
        scrapedthumbnail = "http:"+scrapedthumbnail.replace("-129x180", "")
        if DEBUG: logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
        if ("Películas" in scrapedcat) or ("Documentales" in scrapedcat):
            titulo = scrapedtitle.split("[")[0]
            itemlist.append(item.clone(action="findvideos", title=scrapedtitle, url=scrapedurl,
                                       thumbnail=scrapedthumbnail, fulltitle=titulo, context="05", contentTitle=titulo))
        else:
            itemlist.append(item.clone(action="episodios", title=scrapedtitle, url=scrapedurl,  context="25",
                                       thumbnail=scrapedthumbnail, fulltitle=scrapedtitle, contentTitle=scrapedtitle))

    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
    if next_page != "":
        itemlist.append(item.clone(title=">> Siguiente", url=next_page))

    return itemlist
def findvideostv(item):
    logger.info("pelisalacarta.channels.allpeliculas findvideostv")
    itemlist = []

    # Fill the language and quality dictionaries
    idiomas_videos, calidad_videos = dict_videos()

    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")
    data = scrapertools.decodeHtmlentities(data)
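    # Streaming links: the season number is interpolated into the pattern below so only the current season matches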

    patron = '<span class="movie-online-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode=' \
             '"([^"]+)" season="' + \
             item.infoLabels['season'] + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, episode, language, url in matches:
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            idioma = IDIOMAS.get(idiomas_videos.get(language))
            titulo = "Episodio "+episode+" ["
            titulo += server.capitalize()+"]   ["+idioma+"] ("+calidad_videos.get(quality)+")"
            item.infoLabels['episode'] = episode

            itemlist.append(item.clone(action="play", title=titulo, url=url))

    # Download links
    patron = '<span class="movie-downloadlink-list" id_movies_types="([^"]+)" id_movies_servers="([^"]+)".*?episode=' \
             '"([^"]+)" season="'+item.infoLabels['season'] + '" id_lang="([^"]+)".*?online-link="([^"]+)"'
    matches = scrapertools.find_multiple_matches(data, patron)
    for quality, servidor_num, episode, language, url in matches:
        mostrar_server = True
        try:
            server = SERVERS[servidor_num]
            servers_module = __import__("servers."+server)
        except:
            server = servertools.get_server_from_url(url)

        if server != "directo":
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(server)
            if mostrar_server:
                idioma = IDIOMAS.get(idiomas_videos.get(language))
                titulo = "Episodio "+episode+" "
                titulo += server.capitalize()+"   ["+idioma+"] ("+calidad_videos.get(quality)+")"
                item.infoLabels['episode'] = episode
                itemlist.append(item.clone(action="play", title=titulo, url=url))

    itemlist.sort(key=lambda it: (int(it.infoLabels['episode']), it.title))
    try:
        from core import tmdb
        tmdb.set_infoLabels(itemlist, __modo_grafico__)
    except:
        pass

    return itemlist
def play(item):
    logger.info("pelisalacarta.channels.verseriesynovelas play")
    itemlist = []
    
    data = ""
    try:
        data = scrapertools.downloadpage(item.url, headers=CHANNEL_HEADERS)
    except:
        pass

    url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
    if not url_redirect:
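        # The body may have come back gzip-compressed; decompress it and retry the match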
        try:
            import StringIO
            compressedstream = StringIO.StringIO(data)
            import gzip
            gzipper = gzip.GzipFile(fileobj=compressedstream)
            data = gzipper.read()
            gzipper.close()
            url_redirect = scrapertools.find_single_match(data, 'href="(http://www.verseriesynovelas.tv/link/enlace.php\?u=[^"]+)"')
        except:
            pass

    
    if url_redirect:
        location = scrapertools.get_header_from_response(url_redirect, headers=CHANNEL_HEADERS[:2], header_to_get="location")
        enlaces = servertools.findvideos(data=location)
        if len(enlaces) > 0:
            itemlist.append(item.clone(action="play", server=enlaces[0][2], url=enlaces[0][1]))

    return itemlist
def carpeta(item):
    logger.info("pelisalacarta.channels.puyasubs carpeta")
    itemlist = list()
    
    if item.server == "onefichier":
        data = scrapertools.downloadpage(item.url)

        patron = '<tr>.*?<a href="([^"]+)".*?>(.*?)</a>.*?<td class="normal">(.*?)</td>'
        matches = scrapertools.find_multiple_matches(data, patron)
        for scrapedurl, scrapedtitle, size in matches:
            scrapedtitle += "  (%s)   [1fichier]" % size
            itemlist.append(Item(channel=item.channel, title=scrapedtitle, url=scrapedurl, action="play",
                                 server="onefichier", text_color=color1, thumbnail=item.thumbnail,
                                 infoLabels=item.infoLabels))
    else:
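        # Mega folders are enumerated with the megaserver Client helper; each file plays as url|file_id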
        from megaserver import Client
        from platformcode import platformtools
            
        c = Client(url=item.url)
        
        files = c.get_files()
        c.stop()
        for enlace in files:
            file_id = enlace["id"]
            itemlist.append(Item(channel=item.channel, title=enlace["name"], url=item.url+"|"+file_id, action="play",
                                 server="mega", text_color=color1, thumbnail=item.thumbnail,
                                 infoLabels=item.infoLabels))

    itemlist.sort(key=lambda item: item.title)
    return itemlist
示例#31
0
def findvideos(item):
    logger.info("pelisalacarta.channels.vixto findvideos")
    itemlist = list()

    try:
        filtro_idioma = config.get_setting("filterlanguages", item.channel)
        filtro_enlaces = config.get_setting("filterlinks", item.channel)
    except:
        filtro_idioma = 3
        filtro_enlaces = 2

    dict_idiomas = {'Castellano': 2, 'Latino': 1, 'Subtitulada': 0}

    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    if not item.infoLabels["tmdb_id"]:
        year = scrapertools.find_single_match(data, 'Lanzamiento.*?(\d{4})')

        if year != "":
            item.infoLabels['filtro'] = ""
            item.infoLabels['year'] = int(year)

            # Ampliamos datos en tmdb
            try:
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass

    if not item.infoLabels['plot']:
        plot = scrapertools.find_single_match(data,
                                              '<p class="plot">(.*?)</p>')
        item.infoLabels['plot'] = plot

    if filtro_enlaces != 0:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas,
                                      "Ver Online", item)
        if list_enlaces:
            itemlist.append(
                item.clone(action="",
                           title="Enlaces Online",
                           text_color=color1,
                           text_blod=True))
            itemlist.extend(list_enlaces)
    if filtro_enlaces != 1:
        list_enlaces = bloque_enlaces(data, filtro_idioma, dict_idiomas,
                                      "Descarga Directa", item)
        if list_enlaces:
            itemlist.append(
                item.clone(action="",
                           title="Enlaces Descarga",
                           text_color=color1,
                           text_blod=True))
            itemlist.extend(list_enlaces)

    # Options to search the trailer and add the movie to the library
    if itemlist and item.contentType == "movie":
        contextual = config.is_xbmc()
        itemlist.append(
            item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta",
                       contextual=contextual))
        if item.extra != "findvideos":
            if config.get_library_support():
                itemlist.append(
                    Item(channel=item.channel,
                         title="Añadir enlaces a la biblioteca",
                         text_color="green",
                         filtro=True,
                         action="add_pelicula_to_library",
                         fulltitle=item.fulltitle,
                         extra="findvideos",
                         url=item.url,
                         infoLabels=item.infoLabels,
                         contentType=item.contentType,
                         contentTitle=item.contentTitle,
                         show=item.show))
    elif not itemlist and item.contentType == "movie":
        itemlist.append(
            item.clone(title="Película sin enlaces disponibles",
                       action="",
                       text_color=color3))

    return itemlist
示例#32
0
def epienlaces(item):
    logger.info("pelisalacarta.channels.descargasmix epienlaces")
    itemlist = []
    item.text_color = color3

    data = scrapertools.downloadpage(item.url)
    data = data.replace("\n", "").replace("\t", "")

    # Links block
    delimitador = item.extra.strip()
    delimitador = re.sub(r'(?i)(\[(?:/|)Color.*?\])', '', delimitador)
    patron = '<div class="cap">' + delimitador + '(.*?)(?:<div class="polo"|</li>)'
    bloque = scrapertools.find_single_match(data, patron)

    patron = '<div class="episode-server">.*?href="([^"]+)"' \
             '.*?data-server="([^"]+)"' \
             '.*?<div class="caliycola">(.*?)</div>'
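    # Each match yields the link url, the server id and the quality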
    matches = scrapertools.find_multiple_matches(bloque, patron)

    itemlist.append(
        item.clone(action="",
                   title="Enlaces de Descarga/Online",
                   text_color=color1))
    for scrapedurl, scrapedserver, scrapedcalidad in matches:
        if scrapedserver == "ul":
            scrapedserver = "uploadedto"
        if scrapedserver == "streamin":
            scrapedserver = "streaminto"
        titulo = "    " + scrapedserver.capitalize(
        ) + " [" + scrapedcalidad + "]"
        #Enlaces descarga
        if scrapedserver == "magnet":
            itemlist.insert(
                0,
                item.clone(action="play",
                           title=titulo,
                           server="torrent",
                           url=scrapedurl))
        else:
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    servers_module = __import__("servers." + scrapedserver)
                    if "enlacesmix.com" in scrapedurl:
                        itemlist.append(
                            item.clone(action="play",
                                       title=titulo,
                                       server=scrapedserver,
                                       url=scrapedurl,
                                       extra=item.url))
                    else:
                        enlaces = servertools.findvideos(data=scrapedurl)
                        if len(enlaces) > 0:
                            titulo = "    " + enlaces[0][2].capitalize(
                            ) + "  [" + scrapedcalidad + "]"
                            itemlist.append(
                                item.clone(action="play",
                                           server=enlaces[0][2],
                                           title=titulo,
                                           url=enlaces[0][1]))
                except:
                    pass

    if itemlist[0].server == "torrent":
        itemlist.insert(
            0, item.clone(action="",
                          title="Enlaces Torrent",
                          text_color=color1))

    return itemlist
示例#33
0
def fanart(item):
    logger.info()
    itemlist = []
    url = item.url
    data = scrapertools.cachePage(url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
    title_fan = item.extra.split("|")[1]
    title = re.sub(r'Serie Completa|Temporada.*?Completa','',title_fan)
    fulltitle = title
    title = title.replace(' ', '%20')
    title = ''.join((c for c in unicodedata.normalize('NFD',unicode(title.decode('utf-8'))) if unicodedata.category(c) != 'Mn'))
    try:
        sinopsis = scrapertools.find_single_match(data, '<span class="clms">Sinopsis: <\/span>(.*?)<\/div>')
    except:
        sinopsis = ""
    year = item.extra.split("|")[0]

    if not "series" in item.url:
        
        #filmafinity
        url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&genre=&fromyear={1}&toyear={1}".format(title, year)
        data = scrapertools.downloadpage(url)

        url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"')
        if url_filmaf:
            url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf
            data = scrapertools.downloadpage(url_filmaf)
        else:
            try:
                url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year)
                data = browser(url_bing)
                data = re.sub(r'\n|\r|\t|\s{2}|&nbsp;', '', data)

                if "myaddrproxy.php" in data:
                    subdata_bing = scrapertools.get_match(data, 'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
                    subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
                else:
                    subdata_bing = scrapertools.get_match(data, 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"')

                url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')

                if "http" not in url_filma:
                    data = scrapertools.cachePage("http://" + url_filma)
                else:
                    data = scrapertools.cachePage(url_filma)
                data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

            except:
                pass
    
        if sinopsis == " ":
           try:
            sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>')
            sinopsis = sinopsis.replace("<br><br />", "\n")
            sinopsis=re.sub(r"\(FILMAFFINITY\)<br />","",sinopsis)
           except:
              pass
        try:
            rating_filma=scrapertools.get_match(data,'itemprop="ratingValue" content="(.*?)">')
        except:
            rating_filma = "Sin puntuacion"
        
        critica=""
        patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"'
        matches_reviews = scrapertools.find_multiple_matches(data, patron)

        if matches_reviews:
            for review, autor, valoracion in matches_reviews:
                review = dhe(scrapertools.htmlclean(review))
                review += "\n" + autor +"[CR]"
                review = re.sub(r'Puntuac.*?\)','',review)
                if "positiva" in valoracion:
                    critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review
                elif "neutral" in valoracion:
                    critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review
                else:
                    critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review
        else:
            critica = "[COLOR floralwhite][B]Esta película no tiene críticas todavía...[/B][/COLOR]"
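        # Query TMDb by title and year to get the movie id and backdrop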
    
        url="http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title +"&year="+year+ "&language=es&include_adult=false"
        data = scrapertools.cachePage(url)
        data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
        patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
        matches = re.compile(patron,re.DOTALL).findall(data)
        
        
        if len(matches) == 0:
            title = re.sub(r":.*|\(.*?\)", "", title)
            url = "http://api.themoviedb.org/3/search/movie?api_key=2e2160006592024ba87ccdf78c28f49f&query=" + title + "&language=es&include_adult=false"
            data = scrapertools.cachePage(url)
            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
            patron = '"page":1.*?,"id":(.*?),.*?"backdrop_path":(.*?),'
            matches = re.compile(patron, re.DOTALL).findall(data)
            if len(matches) == 0:
                extra = item.thumbnail + "|" + "" + "|" + "" + "|" + "Sin puntuación" + "|" + rating_filma + "|" + critica
                show = item.fanart + "|" + "" + "|" + sinopsis
                posterdb = item.thumbnail
                fanart_info = item.fanart
                fanart_3 = ""
                fanart_2 = item.fanart
                category = item.thumbnail
                id_scraper = ""

                itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos", thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, show=show, category=category, library=item.library, fulltitle=fulltitle, folder=True))

        for id, fan in matches:
            
            fan = re.sub(r'\\|"','',fan)
            
            try:
                rating = scrapertools.find_single_match(data,'"vote_average":(.*?),')
            except:
                rating = "Sin puntuación"
            
            id_scraper =id+"|"+"peli"+"|"+rating+"|"+rating_filma+"|"+critica
            try:
                posterdb = scrapertools.get_match(data,'"page":1,.*?"poster_path":"\\\(.*?)"')
                posterdb =  "https://image.tmdb.org/t/p/original" + posterdb
            except:
                posterdb = item.thumbnail

            if "null" in fan:
                fanart = item.fanart
            else:
                fanart="https://image.tmdb.org/t/p/original" + fan
            item.extra= fanart
            
            url ="http://api.themoviedb.org/3/movie/"+id+"/images?api_key=2e2160006592024ba87ccdf78c28f49f"
            data = scrapertools.cachePage(url)
            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
            
            patron = '"backdrops".*?"file_path":".*?",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
            matches = re.compile(patron,re.DOTALL).findall(data)

            if len(matches) == 0:
                patron = '"backdrops".*?"file_path":"(.*?)",.*?"file_path":"(.*?)",.*?"file_path":"(.*?)"'
                matches = re.compile(patron, re.DOTALL).findall(data)
                if len(matches) == 0:
                    fanart_info = item.extra
                    fanart_3 = ""
                    fanart_2 = item.extra
            for fanart_info, fanart_3, fanart_2 in matches:
                fanart_info = "https://image.tmdb.org/t/p/original" + fanart_info
                fanart_3 = "https://image.tmdb.org/t/p/original" + fanart_3
                fanart_2 = "https://image.tmdb.org/t/p/original" + fanart_2
                if fanart == item.fanart:
                    fanart = fanart_info
            # clearart, fanart_2 and logo
            url ="http://webservice.fanart.tv/v3/movies/"+id+"?api_key=dffe90fba4d02c199ae7a9e71330c987"
            data = scrapertools.cachePage(url)
            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
            patron = '"hdmovielogo":.*?"url": "([^"]+)"'
            matches = re.compile(patron,re.DOTALL).findall(data)
        
            if '"moviedisc"' in data:
                disc = scrapertools.get_match(data,'"moviedisc":.*?"url": "([^"]+)"')
            if '"movieposter"' in data:
                poster = scrapertools.get_match(data,'"movieposter":.*?"url": "([^"]+)"')
            if '"moviethumb"' in data:
                thumb = scrapertools.get_match(data,'"moviethumb":.*?"url": "([^"]+)"')
            if '"moviebanner"' in data:
                banner= scrapertools.get_match(data,'"moviebanner":.*?"url": "([^"]+)"')
        
            if len(matches) == 0:
                extra = posterdb
                #"http://es.seaicons.com/wp-content/uploads/2015/11/Editing-Overview-Pages-1-icon.png"
                show = fanart_2 + "|" + fanart_3 + "|" + sinopsis
                category = posterdb

                itemlist.append(Item(channel=item.channel, title=item.title, action="findvideos", url=item.url, server="torrent", thumbnail=posterdb, fanart=item.extra, extra=extra, show=show, category=category, library=item.library, fulltitle=fulltitle, folder=True))
            for logo in matches:
                if '"hdmovieclearart"' in data:
                    clear=scrapertools.get_match(data,'"hdmovieclearart":.*?"url": "([^"]+)"')
                    if '"moviebackground"' in data:
                    
                        extra=clear
                        show= fanart_2+"|"+fanart_3+"|"+sinopsis
                        if '"moviedisc"' in data:
                            category= disc
                        else:
                            category= clear
                        itemlist.append( Item(channel=item.channel, title = item.title , action="findvideos", url=item.url, server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,show=show,  category= category,library=item.library,fulltitle=fulltitle,folder=True) )
                    else:
                        extra= clear
                        show=fanart_2+"|"+fanart_3+"|"+sinopsis
                        if '"moviedisc"' in data:
                            category= disc
                        else:
                            category= clear
                        itemlist.append( Item(channel=item.channel, title = item.title , action="findvideos", url=item.url, server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,show=show,  category= category,library=item.library, fulltitle=fulltitle,folder=True) )

                if '"moviebackground"' in data:

                    if '"hdmovieclearart"' in data:
                        clear=scrapertools.get_match(data,'"hdmovieclearart":.*?"url": "([^"]+)"')
                        extra=clear
                        show= fanart_2+"|"+fanart_3+"|"+sinopsis
                        if '"moviedisc"' in data:
                            category= disc
                        else:
                            category= clear
                    else:
                        extra=logo
                        show= fanart_2+"|"+fanart_3+"|"+sinopsis
                        if '"moviedisc"' in data:
                            category= disc
                        else:
                            category= logo
                                
                        itemlist.append( Item(channel=item.channel, title = item.title , action="findvideos", url=item.url, server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,show=show,  category= category,library=item.library,fulltitle=fulltitle, folder=True) )

                if '"hdmovieclearart"' not in data and '"moviebackground"' not in data:
                    extra= logo
                    show=  fanart_2+"|"+fanart_3+"|"+sinopsis
                    if '"moviedisc"' in data:
                        category= disc
                    else:
                        category= item.extra
                    itemlist.append( Item(channel=item.channel, title = item.title , action="findvideos", url=item.url, server="torrent", thumbnail=logo, fanart=item.extra, extra=extra,show=show ,  category= category, library=item.library,fulltitle=fulltitle,folder=True) )

    title_info ="Info"
   
    if posterdb == item.thumbnail:
       if '"movieposter"' in data:
           thumbnail= poster
       else:
           thumbnail = item.thumbnail
    else:
        thumbnail = posterdb



    id = id_scraper

    extra = extra+"|"+id+"|"+title.encode('utf8')

    title_info = bbcode_kodi2html("[COLOR skyblue]" + title_info + "[/COLOR]")
    itemlist.append( Item(channel=item.channel, action="info" , title=title_info , url=item.url, thumbnail=thumbnail, fanart=fanart_info, extra = extra, category=category, show = show,folder=False ))

    return itemlist
示例#34
0
def marcador(item):
    itemlist = []
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)

    fecha = scrapertools.find_single_match(data, 'Begegnungen (\d+/\d+/\d+)')
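    # Matchday date shown as a non-selectable header entry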
    itemlist.append(
        item.clone(title="----- [I]" + fecha + "[/I]  -----", action=""))
    bloques = scrapertools.find_multiple_matches(
        data,
        '<table class="livescore">.*?src="([^"]+)" title="([^"]+)".*?href="([^"]+)".*?>(.*?)</table>'
    )
    for thumb_liga, liga, url_liga, bloque in bloques:
        thumb_liga = "http:" + thumb_liga.replace("small/", "originals/")
        title = "[COLOR blue][B]" + liga + "[/B][/COLOR]"
        url_liga = "http://www.transfermarkt.es" + url_liga
        itemlist.append(
            Item(channel="futbol_window",
                 title=title,
                 thumbnail=thumb_liga,
                 url=url_liga,
                 liga=liga,
                 action="ventana_liga",
                 folder=False))

        patron = '<span class="spielzeitpunkt">(.*?)</span>.*?src="([^"]+)".*?<span class="vereinsname">.*?href="([^"]+)"' \
                 '.*?>([^<]+)</a>.*?<a title="(Previa|Crónica|Ticker en vivo).*?>([^<]+)</a>' \
                 '.*?<span class="vereinsname">.*?href="([^"]+)".*?>([^<]+)</a>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for hora, thumb_home, url_home, home, tipo, result, url_away, away in matches:
            if "live-ergebnis" in hora:
                hora = scrapertools.find_single_match(hora + "<",
                                                      '<span.*?>([^<]+)<')
            url_home = "http://www.transfermarkt.es" + url_home.replace(
                "spielplan/", "startseite/")
            url_away = "http://www.transfermarkt.es" + url_away.replace(
                "spielplan/", "startseite/")
            thumb_home = "http:" + thumb_home.replace("small/", "originals/")
            if "'" in hora:
                title = "     [COLOR red][%s][/COLOR] [COLOR darkorange]%s [COLOR red]%s[/COLOR] %s[/COLOR]" % (
                    hora.replace("Descanso'", "Descanso"), home, result, away)
            elif "-:-" in result:
                title = "     [COLOR green][%s][/COLOR] [COLOR darkorange]%s [COLOR green]%s[/COLOR] %s[/COLOR]" % (
                    hora, home, result, away)
            else:
                title = "     [COLOR gold][%s][/COLOR] [COLOR darkorange]%s [COLOR gold]%s[/COLOR] %s[/COLOR]" % (
                    hora, home, result, away)

            itemlist.append(
                Item(channel="futbol_window",
                     action="ventana",
                     title=title,
                     url_home=url_home,
                     url_away=url_away,
                     thumbnail=thumb_home,
                     evento=home + " vs " + away,
                     date=fecha,
                     time=hora,
                     deporte="futbol",
                     marcador="si",
                     context="info_partido",
                     folder=False))

    next_page = "http://www.transfermarkt.es" + scrapertools.find_single_match(
        data, '<a class="bx-next" href="([^"]+)"')
    year, month, day = scrapertools.find_single_match(
        next_page, 'datum/(\d+)-(\d+)-(\d+)')
    title = "[COLOR green]Siguiente día (%s/%s/%s)[/COLOR]" % (day, month,
                                                               year)
    itemlist.append(item.clone(title=title, url=next_page, action="marcador"))
    prev_page = "http://www.transfermarkt.es" + scrapertools.find_single_match(
        data, '<a class="bx-prev" href="([^"]+)"')
    year, month, day = scrapertools.find_single_match(
        prev_page, 'datum/(\d+)-(\d+)-(\d+)')
    title = "[COLOR red]Día anterior (%s/%s/%s)[/COLOR]" % (day, month, year)
    itemlist.append(item.clone(title=title, url=prev_page, action="marcador"))
    itemlist.append(
        item.clone(title="[COLOR gold]Ir a una fecha concreta[/COLOR]",
                   extra=fecha,
                   action="fecha"))

    return itemlist
示例#35
0
def busqueda(item):
    logger.info("pelisalacarta.channels.cinefox busqueda")
    itemlist = []

    data = scrapertools.downloadpage(item.url)
    patron = '<div class="poster-media-card">(.*?)(?:<li class="search-results-item media-item">|<footer>)'
    bloque = scrapertools.find_multiple_matches(data, patron)
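    # Each block corresponds to one search result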
    for match in bloque:
        patron = 'href="([^"]+)" title="([^"]+)".*?src="([^"]+)".*?' \
                 '<p class="search-results-main-info">.*?del año (\d+).*?' \
                 'p class.*?>(.*?)<'
        matches = scrapertools.find_multiple_matches(match, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, year, plot in matches:
            scrapedtitle = scrapedtitle.capitalize()
            item.infoLabels["year"] = year
            plot = scrapertools.htmlclean(plot)
            if "/serie/" in scrapedurl:
                action = "episodios"
                show = scrapedtitle
                scrapedurl += "/episodios"
                title = " [Serie]"
                contentType = "tvshow"
            elif "/pelicula/" in scrapedurl:
                action = "menu_info"
                show = ""
                title = " [Película]"
                contentType = "movie"
            else:
                continue
            title = scrapedtitle + title + " (" + year + ")"
            itemlist.append(
                item.clone(action=action,
                           title=title,
                           url=scrapedurl,
                           thumbnail=scrapedthumbnail,
                           contentTitle=scrapedtitle,
                           fulltitle=scrapedtitle,
                           context=["buscar_trailer"],
                           plot=plot,
                           show=show,
                           text_color=color2,
                           contentType=contentType))

    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    next_page = scrapertools.find_single_match(
        data, 'href="([^"]+)"[^>]+>Más resultados')
    if next_page != "":
        next_page = urlparse.urljoin(host, next_page)
        itemlist.append(
            Item(channel=item.channel,
                 action="busqueda",
                 title=">> Siguiente",
                 url=next_page,
                 thumbnail=item.thumbnail,
                 text_color=color3))

    return itemlist
示例#36
0
def episodios(item):
    logger.info("pelisalacarta.channels.vixto episodios")
    itemlist = list()

    # Download the page
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    # Extract the entries (folders)
    bloque = scrapertools.find_single_match(data,
                                            '<strong>Temporada:(.*?)</div>')
    matches = scrapertools.find_multiple_matches(bloque,
                                                 'href="([^"]+)">(.*?)</a>')

    for scrapedurl, scrapedtitle in matches:
        title = "Temporada %s" % scrapedtitle

        new_item = item.clone(action="", title=title, text_color=color2)
        new_item.infoLabels["season"] = scrapedtitle
        new_item.infoLabels["mediatype"] = "season"
        data_season = scrapertools.downloadpage(scrapedurl)
        data_season = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data_season)
        patron = '<li class="media">.*?href="([^"]+)"(.*?)<div class="media-body">.*?href.*?>' \
                 '(.*?)</a>'
        matches = scrapertools.find_multiple_matches(data_season, patron)

        elementos = []
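        # Keep only the episodes flagged as having available links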
        for url, status, title in matches:
            if not "Enlaces Disponibles" in status:
                continue
            elementos.append(title)
            item_epi = item.clone(action="findvideos",
                                  url=url,
                                  text_color=color1)
            item_epi.infoLabels["season"] = scrapedtitle
            episode = scrapertools.find_single_match(title, 'Capitulo (\d+)')
            titulo = scrapertools.find_single_match(
                title, 'Capitulo \d+\s*-\s*(.*?)$')
            item_epi.infoLabels["episode"] = episode
            item_epi.infoLabels["mediatype"] = "episode"
            item_epi.title = "%sx%s  %s" % (scrapedtitle, episode.zfill(2),
                                            titulo)

            itemlist.insert(0, item_epi)
        if elementos:
            itemlist.insert(0, new_item)

    if item.infoLabels["tmdb_id"] and itemlist:
        try:
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    if itemlist:
        if config.get_library_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir serie a la biblioteca",
                     text_color="green",
                     filtro=True,
                     action="add_serie_to_library",
                     fulltitle=item.fulltitle,
                     extra="episodios",
                     url=item.url,
                     infoLabels=item.infoLabels,
                     show=item.show))
    else:
        itemlist.append(
            item.clone(title="Serie sin episodios disponibles",
                       action="",
                       text_color=color3))
    return itemlist
示例#37
0
def findvideos(item):
    logger.info("pelisalacarta.channels.descargasmix findvideos")
    if item.extra and item.extra != "findvideos":
        return epienlaces(item)
    itemlist = []
    item.text_color = color3
    data = scrapertools.downloadpage(item.url)

    item.plot = scrapertools.find_single_match(data, 'SINOPSIS(?:</span>|</strong>):(.*?)</p>')
    year = scrapertools.find_single_match(data, '(?:<span class="bold">|<strong>)AÑO(?:</span>|</strong>):\s*(\d+)')
    if year != "":
        try:
            from core import tmdb
            item.infoLabels['year'] = year
            tmdb.set_infoLabels_item(item, __modo_grafico__)
        except:
            pass

    old_format = False
    # Torrent pattern (old format)
    if "Enlaces de descarga</div>" in data:
        old_format = True
        matches = scrapertools.find_multiple_matches(data, 'class="separate3 magnet".*?href="([^"]+)"')
        for scrapedurl in matches:
            title = "[Torrent] "
            title += urllib.unquote(scrapertools.find_single_match(scrapedurl, 'dn=(.*?)(?i)WWW.DescargasMix'))
            itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl, text_color="green"))
    
    # Online pattern
    data_online = scrapertools.find_single_match(data, 'Ver online</div>(.*?)<div class="section-box related-'
                                                       'posts">')
    if len(data_online) > 0:
        title = "Enlaces Online"
        if '"l-latino2"' in data_online:
            title += " [LAT]"
        elif '"l-esp2"' in data_online:
            title += " [ESP]"
        elif '"l-vose2"' in data_online:
            title += " [VOSE]"
        itemlist.append(item.clone(title=title, action="", text_color=color1))
        patron = 'make_links.*?,[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(data_online, patron)
        for code in matches:
            enlace = mostrar_enlaces(code)
            enlaces = servertools.findvideos(data=enlace[0])
            if len(enlaces) > 0:
                title = "   Ver vídeo en " + enlaces[0][2]
                itemlist.append(item.clone(action="play", server=enlaces[0][2], title=title, url=enlaces[0][1]))
    scriptg = scrapertools.find_single_match(data, "<script type='text/javascript'>str='([^']+)'")
    if scriptg:
        gvideo = urllib.unquote_plus(scriptg.replace("@", "%"))
        url = scrapertools.find_single_match(gvideo, 'src="([^"]+)"')
        if url:
            itemlist.append(item.clone(action="play", server="directo", url=url, title="   Ver vídeo en Googlevideo (Máxima calidad)", extra=item.url))

    # Download pattern
    patron = '<div class="(?:floatLeft |)double(?:nuevo|)">(.*?)</div>(.*?)' \
             '(?:<div(?: id="mirrors"|) class="(?:contentModuleSmall |)mirrors">|<div class="section-box related-posts">)'
    bloques_descarga = scrapertools.find_multiple_matches(data, patron)
    for title_bloque, bloque in bloques_descarga:
        if title_bloque == "Ver online":
            continue
        if '"l-latino2"' in bloque:
            title_bloque += " [LAT]"
        elif '"l-esp2"' in bloque:
            title_bloque += " [ESP]"
        elif '"l-vose2"' in bloque:
            title_bloque += " [VOSE]"
        itemlist.append(item.clone(title=title_bloque, action="", text_color=color1))
        if '<div class="subiendo">' in bloque:
            itemlist.append(item.clone(title="   Los enlaces se están subiendo", action=""))
            continue
        patron = 'class="separate.*? ([^"]+)".*?(?:make_links.*?,|href=)[\'"]([^"\']+)["\']'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedserver, scrapedurl in matches:
            if (scrapedserver == "ul") | (scrapedserver == "uploaded"):
                scrapedserver = "uploadedto"
            titulo = scrapedserver.capitalize()
            if titulo == "Magnet" and old_format:
                continue
            elif titulo == "Magnet" and not old_format:
                title = "   Enlace Torrent"
                itemlist.append(item.clone(action="play", server="torrent", title=title, url=scrapedurl, text_color="green"))
                continue
            mostrar_server = True
            if config.get_setting("hidepremium") == "true":
                mostrar_server = servertools.is_server_enabled(scrapedserver)
            if mostrar_server:
                try:
                    servers_module = __import__("servers."+scrapedserver)
                    # Get the number of links
                    urls = mostrar_enlaces(scrapedurl)
                    numero = str(len(urls))
                    titulo = "   "+titulo+" - Nº enlaces:"+numero
                    itemlist.append(item.clone(action="enlaces", title=titulo, extra=scrapedurl))
                except:
                    pass

    itemlist.append(item.clone(channel="trailertools", title="Buscar Tráiler", action="buscartrailer", context="",
                               text_color="magenta"))
    if item.extra != "findvideos" and config.get_library_support():
        itemlist.append(Item(channel=item.channel, title="Añadir a la biblioteca", action="add_pelicula_to_library",
                             extra="findvideos", url=item.url, infoLabels={'title': item.fulltitle},
                             fulltitle=item.fulltitle, text_color="green"))

    return itemlist
示例#38
0
def busqueda(item):
    logger.info("pelisalacarta.channels.vixto busqueda")
    itemlist = list()

    # Download the page
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    # Extract the entries (folders)
    bloque = scrapertools.find_single_match(data,
                                            '<h2>Peliculas</h2>(.*?)</div>')
    bloque += scrapertools.find_single_match(data,
                                             '<h2>Series</h2>(.*?)</div>')

    patron = '<figure class="col-lg-2.*?href="([^"]+)".*?src="([^"]+)".*?<figcaption title="([^"]+)"'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    peliculas = False
    series = False
    for scrapedurl, scrapedthumbnail, scrapedtitle in matches:
        new_item = Item(channel=item.channel,
                        contentType="movie",
                        url=scrapedurl,
                        title="   " + scrapedtitle,
                        text_color=color1,
                        context="buscar_trailer",
                        fulltitle=scrapedtitle,
                        contentTitle=scrapedtitle,
                        thumbnail=scrapedthumbnail,
                        action="findvideos")

        if "/peliculas/" in scrapedurl and not peliculas:
            itemlist.append(
                Item(channel=item.channel,
                     action="",
                     title="Películas",
                     text_color=color2))
            peliculas = True
        if "/series/" in scrapedurl and not series:
            itemlist.append(
                Item(channel=item.channel,
                     action="",
                     title="Series",
                     text_color=color2))
            series = True

        if "/series/" in scrapedurl:
            new_item.contentType = "tvshow"
            new_item.show = scrapedtitle
            new_item.action = "episodios"

        filtro_thumb = scrapedthumbnail.replace(
            "http://image.tmdb.org/t/p/w342", "")
        filtro_list = {"poster_path": filtro_thumb}
        new_item.infoLabels["filtro"] = filtro_list.items()
        itemlist.append(new_item)

    try:
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    return itemlist
示例#39
0
def filmaffinity_search(item):
    logger.info("pelisalacarta.channels.trailertools filmaffinity_search")

    # Check whether this is a fresh search or it comes from the "Next" option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
    else:
        params = urllib.urlencode([('stext', item.contentTitle),
                                   ('stype%5B%5D', 'title'), ('country', ''),
                                   ('genre', ''), ('fromyear', item.year),
                                   ('toyear', item.year)])
        url = "http://www.filmaffinity.com/es/advsearch.php?%s" % params
        data = scrapertools.downloadpage(url)

    devuelve = []
    itemlist = []
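    # Each result exposes its poster, its FilmAffinity film id and its title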
    patron = '<div class="mc-poster">.*?<img.*?src="([^"]+)".*?' \
             '<div class="mc-title"><a  href="/es/film(\d+).html"[^>]+>(.*?)<img'
    matches = scrapertools.find_multiple_matches(data, patron)
    # If there is only one result, look up its trailers directly; otherwise list all the results
    if len(matches) == 1:
        item.url = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % matches[
            0][1]
        item.thumbnail = matches[0][0]
        if not item.thumbnail.startswith("http"):
            item.thumbnail = "http://www.filmaffinity.com" + item.thumbnail
        devuelve = search_links_filmaff(item)
    elif len(matches) > 1:
        for scrapedthumbnail, id, scrapedtitle in matches:
            if not scrapedthumbnail.startswith("http"):
                scrapedthumbnail = "http://www.filmaffinity.com" + scrapedthumbnail
            scrapedurl = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % id
            scrapedtitle = unicode(scrapedtitle,
                                   encoding="utf-8",
                                   errors="ignore")
            scrapedtitle = scrapertools.htmlclean(scrapedtitle)
            itemlist.append(
                item.clone(title=scrapedtitle,
                           url=scrapedurl,
                           action="search_links_filmaff",
                           thumbnail=scrapedthumbnail,
                           text_color="white"))

        next_page = scrapertools.find_single_match(
            data, '<a href="([^"]+)">&gt;&gt;</a>')
        if next_page != "":
            next_page = urlparse.urljoin("http://www.filmaffinity.com/es/",
                                         next_page)
            itemlist.append(
                item.clone(title=">> Siguiente",
                           page=next_page,
                           action="filmaffinity_search",
                           thumbnail="",
                           text_color=""))

        if item.contextual:
            opciones = []
            for item_film in itemlist:
                opciones.append(item_film.title)
            seleccion = platformtools.dialog_select(
                "Buscando: " + item.contentTitle, opciones)
            logger.info("seleccion=%d" % seleccion)
            logger.info("seleccion=%s" % opciones[seleccion])
            if seleccion < 0:
                return
            else:
                item_film = itemlist[seleccion]
                if item_film.title == ">> Siguiente":
                    return buscartrailer(item_film)
                else:
                    devuelve = search_links_filmaff(item_film)
        else:
            devuelve = itemlist

    if not devuelve:
        devuelve.append(
            item.clone(title="La búsqueda no ha dado resultados (%s)" %
                       item.contentTitle,
                       action="",
                       thumbnail="",
                       text_color=""))

        if keyboard:
            title = "[COLOR green]%s[/COLOR]" if item.contextual else "%s"
            devuelve.append(
                item.clone(title=title % "Búsqueda Manual en Filmaffinity",
                           action="manual_search",
                           text_color="green",
                           thumbnail="",
                           extra="filmaffinity"))

    return devuelve
示例#40
0
def get_video_url(page_url, premium=False, video_password=""):
    logger.info(
        "pelisalacarta.servers.realdebrid get_video_url( page_url='%s' , video_password=%s)"
        % (page_url, video_password))

    # Check whether a saved token exists; if not, run the authentication process
    token_auth = channeltools.get_channel_setting("realdebrid_token",
                                                  "realdebrid")
    if token_auth is None or token_auth == "":
        if config.is_xbmc():
            token_auth = authentication()
            if token_auth == "":
                return [[
                    "REAL-DEBRID: No se ha completado el proceso de autentificación",
                    ""
                ]]
        else:
            return [[
                "Es necesario activar la cuenta. Accede al menú de ayuda", ""
            ]]

    post_link = urllib.urlencode([("link", page_url),
                                  ("password", video_password)])
    headers["Authorization"] = "Bearer %s" % token_auth
    url = "https://api.real-debrid.com/rest/1.0/unrestrict/link"
    data = scrapertools.downloadpage(url,
                                     post=post_link,
                                     headers=headers.items())
    data = jsontools.load_json(data)

    # If the token is wrong or has expired, request a new one
    if "error" in data and data["error"] == "bad_token":
        debrid_id = channeltools.get_channel_setting("realdebrid_id",
                                                     "realdebrid")
        secret = channeltools.get_channel_setting("realdebrid_secret",
                                                  "realdebrid")
        refresh = channeltools.get_channel_setting("realdebrid_refresh",
                                                   "realdebrid")

        post_token = urllib.urlencode({
            "client_id": debrid_id,
            "client_secret": secret,
            "code": refresh,
            "grant_type": "http://oauth.net/grant_type/device/1.0"
        })
        renew_token = scrapertools.downloadpage(
            "https://api.real-debrid.com/oauth/v2/token",
            post=post_token,
            headers=headers.items())
        renew_token = jsontools.load_json(renew_token)
        if not "error" in renew_token:
            token_auth = renew_token["access_token"]
            channeltools.set_channel_setting("realdebrid_token", token_auth,
                                             "realdebrid")
            headers["Authorization"] = "Bearer %s" % token_auth
            data = scrapertools.downloadpage(url,
                                             post=post_link,
                                             headers=headers.items())
            data = jsontools.load_json(data)

    if "download" in data:
        return get_enlaces(data)
    else:
        if "error" in data:
            msg = data["error"].decode("utf-8", "ignore")
            msg = msg.replace("hoster_unavailable", "Servidor no disponible") \
                     .replace("unavailable_file", "Archivo no disponible") \
                     .replace("hoster_not_free", "Servidor no gratuito") \
                     .replace("bad_token", "Error en el token")
            return [["REAL-DEBRID: " + msg, ""]]
        else:
            return [["REAL-DEBRID: No se ha generado ningún enlace", ""]]
示例#41
0
def authentication():
    logger.info("pelisalacarta.servers.realdebrid authentication")
    try:
        client_id = "YTWNFBIJEEBP6"

        # Request the url and the verification code needed to grant the app permission
        url = "http://api.real-debrid.com/oauth/v2/device/code?client_id=%s&new_credentials=yes" % (
            client_id)
        data = scrapertools.downloadpage(url, headers=headers.items())
        data = jsontools.load_json(data)
        verify_url = data["verification_url"]
        user_code = data["user_code"]
        device_code = data["device_code"]
        intervalo = data["interval"]

        dialog_auth = platformtools.dialog_progress(
            "Autentificación. No cierres esta ventana!!",
            "1. Entra en la siguiente url: %s" % verify_url,
            "2. Ingresa este código en la página y presiona Allow:  %s" %
            user_code, "3. Espera a que se cierre esta ventana")

        # Roughly every 5 seconds, check whether the user has entered the code
        while True:
            time.sleep(intervalo)
            try:
                if dialog_auth.iscanceled():
                    return ""

                url = "https://api.real-debrid.com/oauth/v2/device/credentials?client_id=%s&code=%s" \
                      % (client_id, device_code)
                data = scrapertools.downloadpage(url, headers=headers.items())
                data = jsontools.load_json(data)
                if "client_secret" in data:
                    # Code entered, exit the loop
                    break
            except:
                pass

        try:
            dialog_auth.close()
        except:
            pass

        debrid_id = data["client_id"]
        secret = data["client_secret"]

        # Request the access token, plus the refresh token for when the first one expires
        post = urllib.urlencode({
            "client_id": debrid_id,
            "client_secret": secret,
            "code": device_code,
            "grant_type": "http://oauth.net/grant_type/device/1.0"
        })
        data = scrapertools.downloadpage(
            "https://api.real-debrid.com/oauth/v2/token",
            post=post,
            headers=headers.items())
        data = jsontools.load_json(data)

        token = data["access_token"]
        refresh = data["refresh_token"]

        channeltools.set_channel_setting("realdebrid_id", debrid_id,
                                         "realdebrid")
        channeltools.set_channel_setting("realdebrid_secret", secret,
                                         "realdebrid")
        channeltools.set_channel_setting("realdebrid_token", token,
                                         "realdebrid")
        channeltools.set_channel_setting("realdebrid_refresh", refresh,
                                         "realdebrid")

        return token
    except:
        import traceback
        logger.error(traceback.format_exc())
        return ""
示例#42
0
def youtube_search(item):
    logger.info()
    itemlist = []

    titulo = item.contentTitle
    if item.extra != "youtube":
        titulo += " trailer"
    # Check whether this is a fresh search or it comes from the "Next" option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
    else:
        titulo = urllib.quote(titulo)
        titulo = titulo.replace("%20", "+")
        data = scrapertools.downloadpage(
            "https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo)
    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
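    # Results are embedded as JSON inside the HTML: thumbnail, title, duration and relative url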
    patron = """"thumbnails":\[\{"url":"(https://i.ytimg.com/vi[^"]+).*?"""
    patron += """simpleText":"([^"]+).*?"""
    patron += """simpleText":"[^"]+.*?simpleText":"([^"]+).*?"""
    patron += """url":"([^"]+)"""
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedthumbnail, scrapedtitle, scrapedduration, scrapedurl in matches:
        scrapedtitle = scrapedtitle.decode("utf-8")
        scrapedtitle = scrapedtitle + " (" + scrapedduration + ")"
        if item.contextual:
            scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
        url = urlparse.urljoin('https://www.youtube.com/', scrapedurl)
        itemlist.append(
            item.clone(title=scrapedtitle,
                       action="play",
                       server="youtube",
                       url=url,
                       thumbnail=scrapedthumbnail,
                       text_color="white"))

    next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">'
        'Siguiente')
    if next_page != "":
        next_page = urlparse.urljoin("https://www.youtube.com", next_page)
        itemlist.append(
            item.clone(title=">> Siguiente",
                       action="youtube_search",
                       extra="youtube",
                       page=next_page,
                       thumbnail="",
                       text_color=""))

    if not itemlist:
        itemlist.append(
            item.clone(title="La búsqueda no ha dado resultados (%s)" % titulo,
                       action="",
                       thumbnail="",
                       text_color=""))

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else:
            title = "%s"
        itemlist.append(
            item.clone(title=title % "Búsqueda Manual en Youtube",
                       action="manual_search",
                       text_color="green",
                       thumbnail="",
                       extra="youtube"))

    return itemlist
示例#43
0
def entradas(item):
    logger.info("pelisalacarta.channels.descargasmix entradas")
    itemlist = []
    item.text_color = color2
    data = scrapertools.downloadpage(item.url)

    bloque = scrapertools.find_single_match(data, '<div id="content" role="main">(.*?)<div id="sidebar" '
                                                  'role="complementary">')
    contenido = ["series", "deportes", "anime", 'miniseries']
    c_match = [True for match in contenido if match in item.url]
    # Pattern depends on the content type
    if True in c_match:
        patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)"' \
                 '.*?<span class="overlay(|[^"]+)">'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, scrapedinfo in matches:
            if scrapedinfo != "":
                scrapedinfo = "  [" + scrapedinfo.replace(" ", "").replace("-", " ").capitalize() + "]"
            titulo = scrapedtitle + scrapedinfo
            titulo = scrapertools.decodeHtmlentities(titulo)
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle)
            scrapedthumbnail = "http:"+scrapedthumbnail.replace("-129x180", "")
            scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0]+"/"+urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
            if "series" in item.url or "anime" in item.url:
                item.show = scrapedtitle
            itemlist.append(item.clone(action="episodios", title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       fulltitle=scrapedtitle, context="25", contentTitle=scrapedtitle))
    else:
        patron = '<a class="clip-link".*?href="([^"]+)".*?<img alt="([^"]+)" src="([^"]+)".*?<span class="cat">(.*?)</span>(.*?)</p>'
        matches = scrapertools.find_multiple_matches(bloque, patron)
        for scrapedurl, scrapedtitle, scrapedthumbnail, categoria, info in matches:
            titulo = scrapertools.decodeHtmlentities(scrapedtitle)
            scrapedtitle = scrapertools.decodeHtmlentities(scrapedtitle.split("[")[0])
            action = "findvideos"
            show = ""
            if "Series" in categoria:
                action = "episodios"
                show = scrapedtitle
            elif categoria and categoria != "peliculas" and categoria != "documentales":
                try:
                    titulo += " ["+categoria.rsplit(", ",1)[1]+"]"
                except:
                    titulo += " ["+categoria+"]"
                if 'l-espmini' in info:
                    titulo += " [ESP]"
                if 'l-latmini' in info:
                    titulo += " [LAT]"
                if 'l-vosemini' in info:
                    titulo += " [VOSE]"

            scrapedthumbnail = "http:"+scrapedthumbnail.replace("-129x180", "")
            scrapedthumbnail = scrapedthumbnail.rsplit("/", 1)[0]+"/"+urllib.quote(scrapedthumbnail.rsplit("/", 1)[1])
            if (DEBUG): logger.info("title=["+scrapedtitle+"], url=["+scrapedurl+"], thumbnail=["+scrapedthumbnail+"]")
            itemlist.append(item.clone(action=action, title=titulo, url=scrapedurl, thumbnail=scrapedthumbnail,
                                       fulltitle=scrapedtitle, context="05", contentTitle=scrapedtitle,
                                       viewmode="movie_with_plot", show=show))

    # Pagination
    next_page = scrapertools.find_single_match(data, '<a class="nextpostslink".*?href="([^"]+)"')
    if next_page != "":
        itemlist.append(item.clone(title=">> Siguiente", url=next_page, text_color=color3))

    return itemlist
def mainlist(item):
    logger.info("pelisalacarta.lfootballws lista")
    itemlist = []
    import xbmc, time
    
    xbmc.executebuiltin('Notification([COLOR crimson][B]BIENVENIDOS A...[/B][/COLOR], [COLOR yellow][B]'+'livefootballws'.upper()+'[/B][/COLOR],4000,"http://4.bp.blogspot.com/-Jtkwjc049c0/T7CKiNujy-I/AAAAAAAARPc/llNdvg_8TWk/s1600/football_128x128.png")')
    xbmc.executebuiltin("Container.Update")
    
    check = xbmc.getInfoLabel('Container.FolderPath')
    if "channelselector" in check:
        xbmc.executebuiltin('xbmc.PlayMedia('+song+')')

    
    """
        Lo que ocurre con
        url = http://translate.googleusercontent.com/translate_c?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/&usg=ALkJrhgzJfI1TDn3BxGgPbjgAHHS7J0i9g
        Redirecciones:
        1. http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/
        2. http://translate.googleusercontent.com/translate_p?nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/&depth=2&usg=ALkJrhgAAAAAVupk4tLINTbmU7JrcQdl0G4V3LtnRM1n
        3. http://translate.googleusercontent.com/translate_c?depth=2&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/&usg=ALkJrhhhRDwHSDRDN4t27cX5CYZLFFQtmA
        Lo que significa que necesitamos una key nueva cada vez en el argumento "usg" y para llegar a la url 3 debemos hacer la petición 1 y 2 con 'follow_redirects=False' o con la convinación de 'follow_redirects=False' y 'header_to_get="location"'
        """
    
    #### Option 1: 'follow_redirects=False'
    ## Request 1
    url = "http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/"
    data = dhe( scrapertools.downloadpage(url,follow_redirects=False) )#.decode('cp1251').encode('utf8')
    ## Request 2
    url = scrapertools.get_match(data, ' src="([^"]+)" name=c ')
    data = dhe( scrapertools.downloadpage(url,follow_redirects=False) )#.decode('cp1251').encode('utf8')
    ## Request 3
    url = scrapertools.get_match(data, 'URL=([^"]+)"')
    data = dhe( scrapertools.cachePage(url) )#.decode('cp1251').encode('utf8')
    
    """
        #### Opción 2: 'follow_redirects=False' y 'header_to_get="location"'
        ## Petición 1
        url = "http://translate.google.com/translate?depth=1&nv=1&rurl=translate.google.com&sl=ru&tl=es&u=http://lfootball.ws/"
        data = dhe( scrapertools.downloadpage(url,follow_redirects=False) )#.decode('cp1251').encode('utf8')
        ## Petición 2
        url = scrapertools.get_match(data, ' src="([^"]+)" name=c ')
        url = scrapertools.get_header_from_response(url, header_to_get="location")
        ## Petición 3
        data = dhe( scrapertools.cachePage(url ) )#.decode('cp1251').encode('utf8')
        """
    
    
    
    # -- a: matches currently live ------------------------------
    patronmain = 'transmisión en vivo(.*?)<h3 class=sectionName> '
    matchesmain = re.compile(patronmain, re.DOTALL).findall(data)
    for main in matchesmain:
        patron = '<img class=img src=(.*?) alt.*?<img class=img src=(.*?) alt.*?<a class=link href=.*?&u=.*?href.*?&u=(.*?)&usg.*?title="(.*?)"'
        matches = re.compile(patron, re.DOTALL).findall(main)
        for thumbnail, fanart, url, title in matches:
            title = "[COLOR chocolate][B]" + title + "[/B][/COLOR]" + " " + "[COLOR crimson][B]EN VIVO!![/B][/COLOR]"
            itemlist.append(Item(channel=__channel__, title=title, action="enlaces", url=url,
                                 thumbnail=urlparse.urljoin(host, thumbnail),
                                 fanart=urlparse.urljoin(host, fanart),
                                 fulltitle=title, folder=True))

    # -- b: upcoming matches ------------------------------------
    patronmain = 'nuestra(.*?)partido'
    matchesmain = re.compile(patronmain, re.DOTALL).findall(data)

    for main in matchesmain:
        patron = '<img class=img src=([^"]+) alt.*?src=([^"]+) '
        patron += '.*?<a class=link href=.*?&u=.*?href.*?&u=([^"]+)&usg.*?'
        patron += 'title="([^<]+)".*?'
        patron += '<span class="nameCon clear">(.*?)</li><li class=fl>'

        matches = re.compile(patron, re.DOTALL).findall(main)
        if len(matches) == 0:
            itemlist.append(Item(channel=__channel__,
                                 title="[COLOR orange][B]No hay encuentros previstos en estos momentos[/B][/COLOR]",
                                 thumbnail="http://s6.postimg.org/619q91s41/comingsoon.png",
                                 fanart="http://s6.postimg.org/7ucmxddap/soccer_field_desktop_20150703154119_5596ad1fde3e.jpg"))
        for thumbnail, fanart, url, title, liga_fecha in matches:
            try:
                liga = scrapertools.get_match(liga_fecha, '<span class=liga><span>([^<]+)')
                liga = re.sub(r'de', '', liga)
                fecha = re.sub(r'<span class=liga><span>.*?</span></span>|<span class="name fl"><span>.*?</span></span></span>|<span class=dopCon>.*?<span class=date><span>|<span class=date><span>|</span>|</a>|de|,|', '', liga_fecha)
            except:
                liga = ""
                fecha = scrapertools.get_match(liga_fecha, '<span class=dopCon><span class=date><span>([^<]+)</span>')
            # "taza" is how the translated page renders "Cup"
            if "taza" in liga:
                liga = "Cup"

            # Midnight comes through as "00:"; turn it into "24:" so the
            # two-hour shift below cannot go negative
            if "00:" in fecha:
                fecha = fecha.replace("00:", "24:")
            info = "(" + liga + ")"
            horarios = re.compile('(\d+):(\d+)', re.DOTALL).findall(fecha)
            for horas, minutos in horarios:
                # Shift the published time back two hours (site timezone offset)
                wrong_time = int(horas)
                value = 2
                correct_time = wrong_time - value
                correct_time = str(correct_time)
                ok_time = correct_time + ":" + minutos
                fecha = re.sub(r'(\d+):(\d+)|el|de|,|<span class="nameCon clear">', '', fecha).strip()

                # What remains is "<day> <month>" or "<month> <day>"
                d_m = re.compile('([^<]+) (\d+)', re.DOTALL).findall(fecha)
                if len(d_m) == 0:
                    d_m = re.compile('(\d+) ([^<]+)', re.DOTALL).findall(fecha)
                for x, y in d_m:
                    if x.isdigit():
                        dia = x.strip()
                        mes = y.strip()
                    else:
                        mes = x.strip()
                        dia = y.strip()

                    dia_mes = (dia + " " + mes).title()
                    partido = "[COLOR khaki][B]" + dia_mes + "[/B][/COLOR]" + "[COLOR yellow][B]" + " " + ok_time + "[/B][/COLOR]" + "--" + "[COLOR chocolate][B]" + title + "[/B][/COLOR]" + " " + "[COLOR lightslategray]" + info + "[/COLOR]"

                    itemlist.append(Item(channel=__channel__, title=partido, action="enlaces", url=url,
                                         thumbnail=urlparse.urljoin(host, thumbnail),
                                         fanart=urlparse.urljoin(host, fanart),
                                         fulltitle="[COLOR chocolate][B]" + title + "[/B][/COLOR]",
                                         folder=True))

    return itemlist
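
# A minimal standalone sketch of the time normalization applied above. The
# two-hour shift and the "00:" -> "24:" midnight fix are assumptions read off
# the code, and the helper name is hypothetical:
def _normaliza_hora(hora):
    if hora.startswith("00:"):
        hora = hora.replace("00:", "24:", 1)
    horas, minutos = hora.split(":")
    return str(int(horas) - 2) + ":" + minutos

# _normaliza_hora("21:30") -> "19:30"; _normaliza_hora("00:15") -> "22:15"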
示例#45
0
def play(url, xlistitem={}, is_view=None, subtitle=""):

    allocate = True
    # Fallback in case the version probe below fails (assumes a modern Kodi)
    xbmc_version = 17
    try:
        import platform
        xbmc.log(
            "XXX KODI XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        )
        xbmc.log("OS platform: %s %s" %
                 (platform.system(), platform.release()))
        xbmc.log("xbmc/kodi version: %s" %
                 xbmc.getInfoLabel("System.BuildVersion"))
        xbmc_version = int(xbmc.getInfoLabel("System.BuildVersion")[:2])
        xbmc.log("xbmc/kodi version number: %s" % xbmc_version)
        xbmc.log(
            "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX KODI XXXX"
        )

        _platform = get_platform()
        if str(_platform['system']) in [
                "android_armv7", "linux_armv6", "linux_armv7"
        ]:
            allocate = False
        # -- log ------------------------------------------------
        xbmc.log(
            "XXX platform XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
        )
        xbmc.log("_platform['system']: %s" % _platform['system'])
        xbmc.log("allocate: %s" % allocate)
        xbmc.log(
            "XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX platform XXXX"
        )
        # -- ----------------------------------------------------
    except:
        pass

    DOWNLOAD_PATH = config.get_setting("downloadpath")

    # -- adfly: ------------------------------------
    if url.startswith("http://adf.ly/"):
        try:
            data = scrapertools.downloadpage(url)
            url = decode_adfly(data)
        except:
            ddd = xbmcgui.Dialog()
            ddd.ok(
                "pelisalacarta-MCT: Sin soporte adf.ly",
                "El script no tiene soporte para el acortador de urls adf.ly.",
                "", "url: " + url)
            return

    # -- Needed by some sites (follow up to two redirects) ------
    if not url.endswith(".torrent") and not url.startswith("magnet"):
        t_file = scrapertools.get_header_from_response(
            url, header_to_get="location")
        if len(t_file) > 0:
            url = t_file
            t_file = scrapertools.get_header_from_response(
                url, header_to_get="location")
        if len(t_file) > 0:
            url = t_file

    # -- Create download folders for the files ------------------
    save_path_videos = os.path.join(DOWNLOAD_PATH, "torrent-videos")
    save_path_torrents = os.path.join(DOWNLOAD_PATH, "torrent-torrents")
    if not os.path.exists(save_path_torrents): os.mkdir(save_path_torrents)

    # -- Use a torrent file from the web, a magnet link or disk -
    if not os.path.isfile(url) and not url.startswith("magnet"):
        # -- http - create the torrent file ----------------------
        data = url_get(url)
        # -- The torrent name is the one embedded in the --------
        # -- downloaded data.                                   -
        re_name = urllib.unquote(
            scrapertools.get_match(data, ':name\d+:(.*?)\d+:'))
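        # A sketch of a likely more robust alternative, assuming this
        # python-libtorrent binding returns plain dicts from lt.bdecode:
        #     re_name = lt.bdecode(data)['info']['name']
        # would read the name field directly instead of regexing the
        # bencoded payload above.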
        torrent_file = filetools.join(save_path_torrents,
                                      filetools.encode(re_name + '.torrent'))

        f = open(torrent_file, 'wb')
        f.write(data)
        f.close()
    elif os.path.isfile(url):
        # -- file - use a torrent file already on disk ----------
        torrent_file = url
    else:
        # -- magnet ---------------------------------------------
        torrent_file = url
    # -----------------------------------------------------------

    # -- MCT - MiniClienteTorrent (mini torrent client) ---------
    ses = lt.session()

    # -- log ----------------------------------------------------
    xbmc.log("### Init session ########")
    xbmc.log(lt.version)
    xbmc.log("#########################")
    # -- --------------------------------------------------------
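    # Bootstrap the DHT through well-known public routers so that metadata
    # for magnet links can be resolved even when no tracker responds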

    ses.add_dht_router("router.bittorrent.com", 6881)
    ses.add_dht_router("router.utorrent.com", 6881)
    ses.add_dht_router("dht.transmissionbt.com", 6881)

    trackers = [
        "udp://tracker.openbittorrent.com:80/announce",
        "http://tracker.torrentbay.to:6969/announce",
        "http://tracker.pow7.com/announce",
        "udp://tracker.ccc.de:80/announce",
        "udp://open.demonii.com:1337",
        "http://9.rarbg.com:2710/announce",
        "http://bt.careland.com.cn:6969/announce",
        "http://explodie.org:6969/announce",
        "http://mgtracker.org:2710/announce",
        "http://tracker.best-torrents.net:6969/announce",
        "http://tracker.tfile.me/announce",
        "http://tracker1.wasabii.com.tw:6969/announce",
        "udp://9.rarbg.com:2710/announce",
        "udp://9.rarbg.me:2710/announce",
        "udp://coppersurfer.tk:6969/announce",
        "http://www.spanishtracker.com:2710/announce",
        "http://www.todotorrents.com:2710/announce",
    ]

    video_file = ""
    # -- magnet2torrent -----------------------------------------
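    # A CRC32 of the magnet's infohash is embedded in the filename of every
    # .torrent generated below, so a magnet converted on a previous run is
    # picked up from the cache folder instead of refetching its metadata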
    if torrent_file.startswith("magnet"):
        try:
            import zlib
            btih = hex(
                zlib.crc32(
                    scrapertools.get_match(
                        torrent_file,
                        'magnet:\?xt=urn:(?:[A-z0-9:]+|)([A-z0-9]{32})'))
                & 0xffffffff)
            files = [
                f for f in os.listdir(save_path_torrents)
                if os.path.isfile(os.path.join(save_path_torrents, f))
            ]
            for file in files:
                if btih in os.path.basename(file):
                    torrent_file = os.path.join(save_path_torrents, file)
        except:
            pass

    if torrent_file.startswith("magnet"):
        try:
            tempdir = tempfile.mkdtemp()
        except IOError:
            tempdir = os.path.join(save_path_torrents, "temp")
            if not os.path.exists(tempdir):
                os.mkdir(tempdir)
        params = {
            'save_path': tempdir,
            'trackers': trackers,
            'storage_mode': lt.storage_mode_t.storage_mode_allocate,
            'paused': False,
            'auto_managed': True,
            'duplicate_is_error': True
        }
        h = lt.add_magnet_uri(ses, torrent_file, params)
        dp = xbmcgui.DialogProgress()
        dp.create('pelisalacarta-MCT')
        while not h.has_metadata():
            message, porcent, msg_file, s, download = getProgress(
                h, "Creando torrent desde magnet")
            dp.update(porcent, message, msg_file)
            if s.state == 1: download = 1
            if dp.iscanceled():
                dp.close()
                remove_files(download, torrent_file, video_file, ses, h)
                return
            h.force_dht_announce()
            xbmc.sleep(1000)

        dp.close()
        info = h.get_torrent_info()
        data = lt.bencode(lt.create_torrent(info).generate())

        torrent_file = os.path.join(
            save_path_torrents,
            unicode(info.name() + "-" + btih, "utf-8", errors="replace") +
            ".torrent")
        f = open(torrent_file, 'wb')
        f.write(data)
        f.close()
        ses.remove_torrent(h)
        shutil.rmtree(tempdir)
    # -----------------------------------------------------------

    # -- Torrent files ------------------------------------------
    e = lt.bdecode(open(torrent_file, 'rb').read())
    info = lt.torrent_info(e)

    # -- The biggest file (or one of the biggest) is taken to   -
    # -- be the video, or at least the video used as reference  -
    # -- for the file type                                      -
    xbmc.log("##### Files ## %s ##" % len(info.files()))
    _index_file, _video_file, _size_file = get_video_file(info)

    # -- Prioritize/select the file to play ---------------------
    _index, video_file, video_size, len_files = get_video_files_sizes(info)
    if len_files == 0:
        dp = xbmcgui.Dialog().ok(
            "No se puede reproducir",
            "El torrent no contiene ningún archivo de vídeo")

    if _index == -1:
        _index = _index_file
        video_file = _video_file
        video_size = _size_file

    _video_file_ext = os.path.splitext(_video_file)[1]
    xbmc.log("##### _video_file_ext ## %s ##" % _video_file_ext)
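    # Presumably .avi/.mp4 index data sits at fixed offsets, so on platforms
    # with enough disk the file is fully pre-allocated; everything else is
    # downloaded sparse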
    if (_video_file_ext == ".avi" or _video_file_ext == ".mp4") and allocate:
        xbmc.log("##### storage_mode_t.storage_mode_allocate (" +
                 _video_file_ext + ") #####")
        h = ses.add_torrent({
            'ti': info,
            'save_path': save_path_videos,
            'trackers': trackers,
            'storage_mode': lt.storage_mode_t.storage_mode_allocate
        })
    else:
        xbmc.log("##### storage_mode_t.storage_mode_sparse (" +
                 _video_file_ext + ") #####")
        h = ses.add_torrent({
            'ti': info,
            'save_path': save_path_videos,
            'trackers': trackers,
            'storage_mode': lt.storage_mode_t.storage_mode_sparse
        })
        allocate = True
    # -----------------------------------------------------------

    # -- Sequential download - piece 1, piece 2, ... ------------
    h.set_sequential_download(True)

    h.force_reannounce()
    h.force_dht_announce()

    # -- Init variables for the automatic 'pause' used when the -
    # -- video gets close to a piece that is not yet complete   -
    is_greater_num_pieces = False
    is_greater_num_pieces_plus = False
    is_greater_num_pieces_pause = False

    porcent4first_pieces = int(video_size * 0.000000005)
    if porcent4first_pieces < 10: porcent4first_pieces = 10
    if porcent4first_pieces > 100: porcent4first_pieces = 100
    porcent4last_pieces = int(porcent4first_pieces / 2)

    num_pieces_to_resume = int(video_size * 0.0000000025)
    if num_pieces_to_resume < 5: num_pieces_to_resume = 5
    if num_pieces_to_resume > 25: num_pieces_to_resume = 25
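    # Worked example: for a ~2 GB video, 2e9 * 5e-9 = 10 pieces are required
    # up front (clamped to [10, 100]) and half as many, 5, at the tail; after
    # an automatic pause, 2e9 * 2.5e-9 = 5 contiguous pieces (clamped to
    # [5, 25]) must complete before playback resumes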

    xbmc.log("##### porcent4first_pieces ## %s ##" % porcent4first_pieces)
    xbmc.log("##### porcent4last_pieces ## %s ##" % porcent4last_pieces)
    xbmc.log("##### num_pieces_to_resume ## %s ##" % num_pieces_to_resume)

    # -- Prioritize or select the pieces of the file to play    -
    # -- via 'file_priorities'                                  -
    piece_set = set_priority_pieces(h, _index, video_file, video_size,
                                    porcent4first_pieces, porcent4last_pieces,
                                    allocate)

    # -- Create the progress dialog for the first loop ----------
    dp = xbmcgui.DialogProgress()
    dp.create('pelisalacarta-MCT')

    _pieces_info = {}

    # -- Two nested loops ---------------------------------------
    # -- Download - first loop                                  -
    while not h.is_seed():
        s = h.status()

        xbmc.sleep(100)

        # -- Fetch progress data --------------------------------
        message, porcent, msg_file, s, download = getProgress(h,
                                                              video_file,
                                                              _pf=_pieces_info)

        # -- If state is 'checking' there is downloaded data ----
        # -- 'download' flags whether data exists on disk for   -
        # -- the 'remove_files' dialog                          -
        if s.state == 1: download = 1

        # -- Player - play --------------------------------------
        # -- Check whether the pieces needed to start the video -
        # -- are complete                                       -
        first_pieces = True

        _c = 0
        for i in range(piece_set[0], piece_set[porcent4first_pieces]):
            first_pieces &= h.have_piece(i)
            if h.have_piece(i): _c += 1
        _pieces_info = {
            'current': 0,
            'continuous': "%s/%s" % (_c, porcent4first_pieces),
            'continuous2': "",
            'have': h.status().num_pieces,
            'len': len(piece_set)
        }

        last_pieces = True
        if not allocate:
            _c = len(piece_set) - 1
            _cc = 0
            for i in range(
                    len(piece_set) - porcent4last_pieces, len(piece_set)):
                last_pieces &= h.have_piece(i)
                if h.have_piece(i):
                    _c -= 1
                    _cc += 1
            _pieces_info['continuous2'] = "[%s/%s] " % (_cc,
                                                        porcent4last_pieces)

        if is_view != "Ok" and first_pieces and last_pieces:
            _pieces_info['continuous2'] = ""
            xbmc.log("##### porcent [%.2f%%]" % (s.progress * 100))
            is_view = "Ok"
            dp.close()

            # -- Player - watch the video -----------------------
            playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
            playlist.clear()

            ren_video_file = os.path.join(save_path_videos, video_file)
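            # xlistitem defaults to {} and playlist.add() rejects a plain
            # dict, so fall back to queueing the bare path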
            try:
                playlist.add(ren_video_file, xlistitem)
            except:
                playlist.add(ren_video_file)

            if xbmc_version < 17:
                player = play_video(xbmc.PLAYER_CORE_AUTO)
            else:
                player = play_video()
            player.play(playlist)

            # -- Cancellation counter for the automatic 'pause' -
            # -- window                                         -
            is_greater_num_pieces_canceled = 0
            continuous_pieces = 0
            porcent_time = 0.00
            current_piece = 0
            set_next_continuous_pieces = porcent4first_pieces

            # -- Keep kodi from 'resuming' a file that was      -
            # -- played earlier and then deleted; that would    -
            # -- restart playback on a piece that is not yet    -
            # -- complete and trigger the automatic 'pause'     -
            not_resume = True

            # -- Subtitles flag
            _sub = False

            # -- Second loop - Player - event handling ----------
            while player.isPlaying():
                xbmc.sleep(100)

                # -- Add subtitles
                if subtitle != "" and not _sub:
                    _sub = True
                    player.setSubtitles(subtitle)

                # -- Keep kodi from 'resuming' at the start of  -
                # -- a known file's download                    -
                if not_resume:
                    player.seekTime(0)
                    not_resume = False

                # -- Automatic 'pause' control                  -
                continuous_pieces = count_completed_continuous_pieces(
                    h, piece_set)

                if xbmc.Player().isPlaying():

                    # -- Percentage of video progress -----------
                    player_getTime = player.getTime()
                    player_getTotalTime = player.getTotalTime()
                    porcent_time = player_getTime / player_getTotalTime * 100

                    # -- Piece currently being played -----------
                    current_piece = int(porcent_time / 100 * len(piece_set))

                    # -- Control flags --------------------------
                    is_greater_num_pieces = (
                        current_piece >
                        continuous_pieces - num_pieces_to_resume)
                    is_greater_num_pieces_plus = (
                        current_piece + porcent4first_pieces >
                        continuous_pieces)
                    is_greater_num_pieces_finished = (
                        current_piece + porcent4first_pieces >= len(piece_set))

                    # -- Engage automatic 'pause' ---------------
                    if is_greater_num_pieces and not player.paused and not is_greater_num_pieces_finished:
                        is_greater_num_pieces_pause = True
                        player.pause()

                    if continuous_pieces >= set_next_continuous_pieces:
                        set_next_continuous_pieces = continuous_pieces + num_pieces_to_resume
                    next_continuous_pieces = str(
                        continuous_pieces -
                        current_piece) + "/" + str(set_next_continuous_pieces -
                                                   current_piece)
                    _pieces_info = {
                        'current': current_piece,
                        'continuous': next_continuous_pieces,
                        'continuous2': _pieces_info['continuous2'],
                        'have': h.status().num_pieces,
                        'len': len(piece_set)
                    }

                # -- Close the progress dialog ------------------
                if player.resumed:
                    dp.close()

                # -- Show the progress dialog -------------------
                if player.paused:
                    # -- Create the dialog if missing -----------
                    if not player.statusDialogoProgress:
                        dp = xbmcgui.DialogProgress()
                        dp.create('pelisalacarta-MCT')
                        player.setDialogoProgress()

                    # -- Status dialogs while watching ----------
                    if not h.is_seed():
                        # -- Fetch progress data ----------------
                        message, porcent, msg_file, s, download = getProgress(
                            h, video_file, _pf=_pieces_info)
                        dp.update(porcent, message, msg_file)
                    else:
                        dp.update(100, "Descarga completa: " + video_file)

                    # -- Progress dialog cancelled while watching -
                    # -- Continue                                 -
                    if dp.iscanceled():
                        dp.close()
                        player.pause()

                    # -- Progress dialog cancelled while watching -
                    # -- inside the automatic 'pause' window.     -
                    # -- Stop when the counter reaches 3          -
                    if dp.iscanceled() and is_greater_num_pieces_pause:
                        is_greater_num_pieces_canceled += 1
                        if is_greater_num_pieces_canceled == 3:
                            player.stop()

                    # -- Disengage automatic 'pause' and reset   -
                    # -- the cancellation counter                -
                    if not dp.iscanceled(
                    ) and not is_greater_num_pieces_plus and is_greater_num_pieces_pause:
                        dp.close()
                        player.pause()
                        is_greater_num_pieces_pause = False
                        is_greater_num_pieces_canceled = 0

                    # -- The user cancelled playback ------------
                    # -- Finish                                  -
                    if player.ended:
                        # -- Delete-files dialog ----------------
                        remove_files(download, torrent_file, video_file, ses,
                                     h)
                        return

        # -- Kodi - the playback window was closed --------------
        # -- Continue | Finish                                  -
        if is_view == "Ok" and not xbmc.Player().isPlaying():

            if info.num_files() == 1:
                # -- Continue-or-finish dialog ------------------
                d = xbmcgui.Dialog()
                ok = d.yesno('pelisalacarta-MCT', 'XBMC-Kodi Cerró el vídeo.',
                             '¿Continuar con la sesión?')
            else:
                ok = False
            # -- YES --------------------------------------------
            if ok:
                # -- Continue: ----------------------------------
                is_view = None
            else:
                # -- Finish: ------------------------------------
                # -- Check whether the video belongs to a list  -
                # -- of files                                   -
                _index, video_file, video_size, len_files = get_video_files_sizes(
                    info)
                if _index == -1 or len_files == 1:
                    # -- Delete-files dialog --------------------
                    remove_files(download, torrent_file, video_file, ses, h)
                    return
                else:
                    # -- File list: options dialog --------------
                    piece_set = set_priority_pieces(h, _index, video_file,
                                                    video_size,
                                                    porcent4first_pieces,
                                                    porcent4last_pieces,
                                                    allocate)
                    is_view = None
                    dp = xbmcgui.DialogProgress()
                    dp.create('pelisalacarta-MCT')

        # -- Show progress before watching ----------------------
        if is_view != "Ok":
            dp.update(porcent, message, msg_file)

        # -- Progress cancelled before watching -----------------
        # -- Finish                                             -
        if dp.iscanceled():
            dp.close()
            # -- Check whether the video belongs to a list of   -
            # -- files                                          -
            _index, video_file, video_size, len_files = get_video_files_sizes(
                info)
            if _index == -1 or len_files == 1:
                # -- Delete-files dialog ------------------------
                remove_files(download, torrent_file, video_file, ses, h)
                return
            else:
                # -- File list: options dialog ------------------
                piece_set = set_priority_pieces(h, _index, video_file,
                                                video_size,
                                                porcent4first_pieces,
                                                porcent4last_pieces, allocate)
                is_view = None
                dp = xbmcgui.DialogProgress()
                dp.create('pelisalacarta-MCT')

    # -- Kodi - Error? - Should never get here ------------------
    if is_view == "Ok" and not xbmc.Player().isPlaying():
        dp.close()
        # -- Diálogo eliminar archivos --------------------------
        remove_files(download, torrent_file, video_file, ses, h)

    return
示例#46
0
def youtube_search(item):
    logger.info("streamondemand.channels.trailertools youtube_search")
    itemlist = []

    titulo = item.contentTitle
    if item.extra != "youtube":
        titulo += " trailer"
    # Check whether this is a fresh search or comes from the 'Next' option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
    else:
        titulo = urllib.quote(titulo)
        titulo = titulo.replace("%20", "+")
        data = scrapertools.downloadpage(
            "https://www.youtube.com/results?sp=EgIQAQ%253D%253D&q=" + titulo)

    data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
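    # The sp parameter in the search URL above (EgIQAQ==, double URL-encoded)
    # appears to be the "type: video" results filter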
    patron = '<span class="yt-thumb-simple">.*?(?:src="https://i.ytimg.com/|data-thumb="https://i.ytimg.com/)([^"]+)"' \
             '.*?<h3 class="yt-lockup-title ">.*?<a href="([^"]+)".*?title="([^"]+)".*?' \
             '</a><span class="accessible-description".*?>.*?(\d+:\d+)'
    matches = scrapertools.find_multiple_matches(data, patron)
    for scrapedthumbnail, scrapedurl, scrapedtitle, scrapedduration in matches:
        scrapedthumbnail = urlparse.urljoin("https://i.ytimg.com/",
                                            scrapedthumbnail)
        scrapedtitle = scrapedtitle.decode("utf-8")
        scrapedtitle = scrapedtitle + " (" + scrapedduration + ")"
        if item.contextual:
            scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle
        url = urlparse.urljoin('https://www.youtube.com/', scrapedurl)
        itemlist.append(
            item.clone(title=scrapedtitle,
                       action="play",
                       server="youtube",
                       url=url,
                       thumbnail=scrapedthumbnail,
                       text_color="white"))

    next_page = scrapertools.find_single_match(
        data, '<a href="([^"]+)"[^>]+><span class="yt-uix-button-content">'
        'Siguiente')
    if next_page != "":
        next_page = urlparse.urljoin("https://www.youtube.com", next_page)
        itemlist.append(
            item.clone(title=">> Seguente",
                       action="youtube_search",
                       extra="youtube",
                       page=next_page,
                       thumbnail="",
                       text_color=""))

    if not itemlist:
        itemlist.append(
            item.clone(title="Nessun risultato trovato per (%s)" % titulo,
                       action="",
                       thumbnail="",
                       text_color=""))

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else:
            title = "%s"
        itemlist.append(
            item.clone(title=title % "Ricerca manuale su Youtube",
                       action="manual_search",
                       text_color="green",
                       thumbnail="",
                       extra="youtube"))

    return itemlist
示例#47
0
def filtro(item):
    logger.info("pelisalacarta.channels.cinefox filtro")

    list_controls = []
    valores = {}
    strings = {}
    # Use the default/saved values, or those from the custom filter
    if not item.values:
        valores_guardados = config.get_setting("filtro_defecto_" + item.extra,
                                               item.channel)
    else:
        valores_guardados = item.values
        item.values = ""
    if valores_guardados:
        dict_values = valores_guardados
    else:
        dict_values = None
    if dict_values:
        dict_values["filtro_per"] = 0

    excluidos = ['País', 'Películas', 'Series', 'Destacar']
    data = scrapertools.downloadpage(item.url)
    matches = scrapertools.find_multiple_matches(
        data, '<div class="dropdown-sub[^>]+>(\S+)(.*?)</ul>')
    i = 0
    for filtro_title, values in matches:
        if filtro_title in excluidos: continue

        filtro_title = filtro_title.replace("Tendencia", "Ordenar por")
        id = filtro_title.replace("Género",
                                  "genero").replace("Año", "year").replace(
                                      " ", "_").lower()
        list_controls.append({
            'id': id,
            'label': filtro_title,
            'enabled': True,
            'type': 'list',
            'default': 0,
            'visible': True
        })
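        # Each appended dict is one control for
        # platformtools.show_channel_settings: 'id' is the setting key,
        # 'type' is 'list'/'bool'/'label', 'default' the initial value or
        # index and, for 'list' controls, 'lvalues' holds the options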
        valores[id] = []
        valores[id].append('')
        strings[filtro_title] = []
        list_controls[i]['lvalues'] = []
        if filtro_title == "Ordenar por":
            list_controls[i]['lvalues'].append('Más recientes')
            strings[filtro_title].append('Más recientes')
        else:
            list_controls[i]['lvalues'].append('Cualquiera')
            strings[filtro_title].append('Cualquiera')
        patron = '<li>.*?(?:genre|release|quality|language|order)=([^"]+)">([^<]+)<'
        matches_v = scrapertools.find_multiple_matches(values, patron)
        for value, key in matches_v:
            if value == "action-adventure": continue
            list_controls[i]['lvalues'].append(key)
            valores[id].append(value)
            strings[filtro_title].append(key)

        i += 1

    item.valores = valores
    item.strings = strings
    if "Filtro Personalizado" in item.title:
        return filtrado(item, valores_guardados)

    list_controls.append({
        'id': 'espacio',
        'label': '',
        'enabled': False,
        'type': 'label',
        'default': '',
        'visible': True
    })
    list_controls.append({
        'id': 'save',
        'label': 'Establecer como filtro por defecto',
        'enabled': True,
        'type': 'bool',
        'default': False,
        'visible': True
    })
    list_controls.append({
        'id': 'filtro_per',
        'label': 'Guardar filtro en acceso directo...',
        'enabled': True,
        'type': 'list',
        'default': 0,
        'visible': True,
        'lvalues': ['No guardar', 'Filtro 1', 'Filtro 2', 'Filtro 3']
    })
    list_controls.append({
        'id': 'remove',
        'label': 'Eliminar filtro personalizado...',
        'enabled': True,
        'type': 'list',
        'default': 0,
        'visible': True,
        'lvalues': ['No eliminar', 'Filtro 1', 'Filtro 2', 'Filtro 3']
    })

    from platformcode import platformtools
    return platformtools.show_channel_settings(list_controls=list_controls,
                                               dict_values=dict_values,
                                               caption="Filtra los resultados",
                                               item=item,
                                               callback='filtrado')
示例#48
0
def abandomoviez_search(item):
    logger.info("streamondemand.channels.trailertools abandomoviez_search")

    # Check whether this is a fresh search or comes from the 'Next' option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
    else:
        titulo = item.contentTitle.decode('utf-8').encode('iso-8859-1')
        post = urllib.urlencode({
            'query': titulo,
            'searchby': '1',
            'posicion': '1',
            'orden': '1',
            'anioin': item.year,
            'anioout': item.year,
            'orderby': '1'
        })
        url = "http://www.abandomoviez.net/db/busca_titulo_advance.php"
        item.prefix = "db/"
        data = scrapertools.downloadpage(url, post=post)
        if "No hemos encontrado ninguna" in data:
            url = "http://www.abandomoviez.net/indie/busca_titulo_advance.php"
            item.prefix = "indie/"
            data = scrapertools.downloadpage(
                url, post=post).decode("iso-8859-1").encode('utf-8')

    itemlist = []
    patron = '(?:<td width="85"|<div class="col-md-2 col-sm-2 col-xs-3">).*?<img src="([^"]+)"' \
             '.*?href="([^"]+)">(.*?)(?:<\/td>|<\/small>)'
    matches = scrapertools.find_multiple_matches(data, patron)
    # If there is a single result, fetch its trailers directly; otherwise list every result
    if len(matches) == 1:
        item.url = urlparse.urljoin(
            "http://www.abandomoviez.net/%s" % item.prefix, matches[0][1])
        item.thumbnail = matches[0][0]
        itemlist = search_links_abando(item)
    elif len(matches) > 1:
        for scrapedthumbnail, scrapedurl, scrapedtitle in matches:
            scrapedurl = urlparse.urljoin(
                "http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)
            scrapedtitle = scrapertools.htmlclean(scrapedtitle)
            itemlist.append(
                item.clone(title=scrapedtitle,
                           action="search_links_abando",
                           url=scrapedurl,
                           thumbnail=scrapedthumbnail,
                           text_color="white"))

        next_page = scrapertools.find_single_match(
            data, '<a href="([^"]+)">Siguiente')
        if next_page != "":
            next_page = urlparse.urljoin(
                "http://www.abandomoviez.net/%s" % item.prefix, next_page)
            itemlist.append(
                item.clone(title=">> Seguente",
                           action="abandomoviez_search",
                           page=next_page,
                           thumbnail="",
                           text_color=""))

    if not itemlist:
        itemlist.append(
            item.clone(title="Nessun risultato trovato",
                       action="",
                       thumbnail="",
                       text_color=""))

        if keyboard:
            if item.contextual:
                title = "[COLOR green]%s[/COLOR]"
            else:
                title = "%s"
            itemlist.append(
                item.clone(title=title % "Ricerca manuale su Abandomoviez",
                           action="manual_search",
                           thumbnail="",
                           text_color="green",
                           extra="abandomoviez"))

    return itemlist
示例#49
0
def findvideos(item):
    logger.info()
    itemlist = []
    item.text_color = color3

    data = scrapertools.downloadpage(item.url)
    data = scrapertools.decodeHtmlentities(data)

    # Search the download/torrent section
    data_download = scrapertools.find_single_match(
        data, '<th>Episodio - Enlaces de Descarga</th>(.*?)</table>')
    patron = '<p class="item_name".*?<a href="([^"]+)".*?>([^"]+)</a>'
    matches = scrapertools.find_multiple_matches(data_download, patron)
    for scrapedurl, scrapedepi in matches:
        new_item = item.clone()
        if "Episodio" not in scrapedepi:
            scrapedtitle = "[Torrent] Episodio " + scrapedepi
        else:
            scrapedtitle = "[Torrent] " + scrapedepi
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)

        new_item.infoLabels['episode'] = scrapertools.find_single_match(
            scrapedtitle, "Episodio (\d+)")
        logger.debug("title=[" + scrapedtitle + "], url=[" + scrapedurl + "]")
        itemlist.append(
            new_item.clone(action="play",
                           title=scrapedtitle,
                           url=scrapedurl,
                           server="torrent",
                           contentType="episode"))

    # Search the online streaming section
    data_online = scrapertools.find_single_match(
        data, "<th>Enlaces de Visionado Online</th>(.*?)</table>")
    patron = '<a href="([^"]+)\\n.*?src="([^"]+)".*?' \
              'title="Enlace de Visionado Online">([^"]+)</a>'
    matches = scrapertools.find_multiple_matches(data_online, patron)

    for scrapedurl, scrapedthumb, scrapedtitle in matches:
        # Discard trailer links
        scrapedtitle = scrapertools.htmlclean(scrapedtitle)
        if scrapedthumb != "images/series/youtube.png" and scrapedtitle != "Trailer":
            new_item = item.clone()
            server = scrapertools.find_single_match(scrapedthumb,
                                                    "images/series/(.*?).png")
            title = "[" + server.capitalize() + "]" + " " + scrapedtitle

            new_item.infoLabels['episode'] = scrapertools.find_single_match(
                scrapedtitle, "Episodio (\d+)")
            itemlist.append(
                new_item.clone(action="play",
                               title=title,
                               url=scrapedurl,
                               contentType="episode"))

    # Check whether there are other seasons
    if not "No hay disponible ninguna Temporada adicional" in data:
        data_temp = scrapertools.find_single_match(
            data, '<div class="panel panel-success">(.*?)</table>')
        data_temp = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_temp)
        patron = '<tr><td><p class="item_name"><a href="([^"]+)".*?' \
                  '<p class="text-success"><strong>([^"]+)</strong>'
        matches = scrapertools.find_multiple_matches(data_temp, patron)
        for scrapedurl, scrapedtitle in matches:
            new_item = item.clone()
            url = urlparse.urljoin(URL_BASE, scrapedurl)
            scrapedtitle = scrapedtitle.capitalize()
            temporada = scrapertools.find_single_match(scrapedtitle,
                                                       "Temporada (\d+)")
            if temporada != "":
                new_item.infoLabels['season'] = temporada
                new_item.infoLabels['episode'] = ""
            itemlist.append(
                new_item.clone(action="findvideos",
                               title=scrapedtitle,
                               url=url,
                               text_color="red",
                               contentType="season"))

    try:
        from core import tmdb
        tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
    except:
        pass

    new_item = item.clone()
    if config.is_xbmc():
        new_item.contextual = True
    itemlist.append(
        new_item.clone(channel="trailertools",
                       title="Buscar Tráiler",
                       action="buscartrailer",
                       context="",
                       text_color="magenta"))
    return itemlist
示例#50
0
def search_links_abando(item):
    logger.info("streamondemand.channels.trailertools search_links_abando")

    data = scrapertools.downloadpage(item.url)
    itemlist = []
    if "Lo sentimos, no tenemos trailer" in data:
        itemlist.append(
            item.clone(title="Nessun video disponibile",
                       action="",
                       text_color=""))
    else:
        if item.contextual:
            progreso = platformtools.dialog_progress(
                "Cercando su abandomoviez", "Caricando i trailer...")
            progreso.update(10)
            i = 0
            message = "Caricando i trailer..."
        patron = '<div class="col-md-3 col-xs-6"><a href="([^"]+)".*?' \
                 'Images/(\d+).gif.*?</div><small>(.*?)</small>'
        matches = scrapertools.find_multiple_matches(data, patron)
        if len(matches) == 0:
            trailer_url = scrapertools.find_single_match(
                data, '<iframe.*?src="([^"]+)"')
            if trailer_url != "":
                trailer_url = trailer_url.replace("embed/", "watch?v=")
                code = scrapertools.find_single_match(trailer_url,
                                                      'v=([A-z0-9\-_]+)')
                thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
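                # /vi/<id>/0.jpg is the full-size default thumbnail YouTube
                # serves for any video id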
                itemlist.append(
                    item.clone(title="Trailer  [youtube]",
                               url=trailer_url,
                               server="youtube",
                               thumbnail=thumbnail,
                               action="play",
                               text_color="white"))
        else:
            for scrapedurl, language, scrapedtitle in matches:
                if language == "1":
                    idioma = " (ESP)"
                else:
                    idioma = " (V.O)"
                scrapedurl = urlparse.urljoin(
                    "http://www.abandomoviez.net/%s" % item.prefix, scrapedurl)
                scrapedtitle = scrapertools.htmlclean(
                    scrapedtitle) + idioma + "  [youtube]"
                if item.contextual:
                    i += 1
                    message += ".."
                    progreso.update(10 + (90 * i / len(matches)), message)
                    scrapedtitle = "[COLOR white]%s[/COLOR]" % scrapedtitle

                data_trailer = scrapertools.downloadpage(scrapedurl)
                trailer_url = scrapertools.find_single_match(
                    data_trailer, 'iframe.*?src="([^"]+)"')
                trailer_url = trailer_url.replace("embed/", "watch?v=")
                code = scrapertools.find_single_match(trailer_url,
                                                      'v=([A-z0-9\-_]+)')
                thumbnail = "https://img.youtube.com/vi/%s/0.jpg" % code
                itemlist.append(
                    item.clone(title=scrapedtitle,
                               url=trailer_url,
                               server="youtube",
                               action="play",
                               thumbnail=thumbnail,
                               text_color="white"))

        if item.contextual:
            progreso.close()

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else:
            title = "%s"
        itemlist.append(
            item.clone(title=title % "Ricerca manuale su Abandomoviez",
                       action="manual_search",
                       thumbnail="",
                       text_color="green",
                       extra="abandomoviez"))
    return itemlist
示例#51
0
def agenda(item):
    logger.info("deportesalacarta.channels.myfootball agenda")
    itemlist = []

    thumbs = {
        'ukraine': 'upl',
        'russia': 'russia',
        'england': 'apl',
        'spaine': 'La_Liga',
        'italy': 'serie_a',
        'germany': 'bundesliga',
        'france': 'ligue-1',
        'UEFA_Champions_League_logo.svg': 'lch',
        'el': 'le',
        'netherland': 'gollandija',
        'portugal': 'portugalija'
    }
    data = translate(item.url)
    data_ru = scrapertools.downloadpage(item.url)
    enlaces = scrapertools.find_multiple_matches(
        data_ru,
        '<div class="rewievs_tab1">\s*<a href="(http://www.myfootball.ws[^"]+)"'
    )

    dia, mes, bloque = scrapertools.find_single_match(
        data,
        '(?i)emisiones</span>.*?>(\d+) (?:(?i)de) (\w+)[^<]+</span></span>(.*?)<div class=block_full'
    )
    mes = month_convert(mes.title())
    mes = str(mes).zfill(2)
    dia = dia.zfill(2)
    patron = 'src=([^>]+)>.*?<\/span>\s*([^<]+)<.*?(\d+:\d+)'
    matches = scrapertools.find_multiple_matches(bloque, patron)
    i = 0
    for thumb, partido, hora in matches:
        partido = partido.replace(" - ", " vs ")
        partido = partido[0].upper() + partido[1:]
        horas, minutos = hora.split(":")
        fecha_actual = datetime.datetime.today()
        fecha_partido = datetime.datetime(fecha_actual.year, int(mes),
                                          int(dia), int(horas), int(minutos))
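        # Shift the published kick-off time back one hour (apparently the
        # site's timezone offset)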
        fecha_partido = fecha_partido - datetime.timedelta(hours=1)
        hora = fecha_partido.strftime("%H:%M")
        date = fecha_partido.strftime("%d/%m")

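        # Matches already under way are shown in red (live), matches that
        # started more than 2h30 ago are skipped as finished, and upcoming
        # matches are shown in green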
        if fecha_partido <= fecha_actual:
            if (fecha_partido +
                    datetime.timedelta(hours=2, minutes=30)) < fecha_actual:
                i += 1
                continue
            scrapedtitle = "[B][COLOR red]" + date + " " + hora + "[/COLOR][COLOR darkorange] " + partido + "[/COLOR][/B]"
        else:
            scrapedtitle = "[B][COLOR green]" + date + " " + hora + "[/COLOR][COLOR darkorange] " + partido + "[/COLOR][/B]"

        try:
            thumb_temp = thumb.rsplit(".",
                                      1)[0].rsplit("/",
                                                   1)[1].replace("flag_", "")
            thumb = "http://www.myfootball.ws/DPYrOE/table/new2/" + thumbs[
                thumb_temp] + ".png"
        except:
            pass

        url = enlaces[i]
        itemlist.append(
            item.clone(title=scrapedtitle,
                       action="findlives",
                       url=url,
                       thumbnail=thumb,
                       date=date,
                       time=hora,
                       evento=partido,
                       deporte="futbol",
                       context="info_partido"))
        i += 1

    if not itemlist:
        itemlist.append(
            item.clone(title="No hay partidos programados en la agenda",
                       action=""))
    else:
        itemlist.sort(key=lambda item: item.time)
    return itemlist
示例#52
0
def filmaffinity_search(item):
    logger.info("streamondemand.channels.trailertools filmaffinity_search")

    if item.filmaffinity:
        item.url = item.filmaffinity
        return search_links_filmaff(item)

    # Check whether this is a fresh search or comes from the 'Next' option
    if item.page != "":
        data = scrapertools.downloadpage(item.page)
    else:
        params = urllib.urlencode([('stext', item.contentTitle),
                                   ('stype%5B%5D', 'title'), ('country', ''),
                                   ('genre', ''), ('fromyear', item.year),
                                   ('toyear', item.year)])
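        # stype[]=title restricts matching to the title field; fromyear and
        # toyear pin the search to the known year to disambiguate remakes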
        url = "http://www.filmaffinity.com/es/advsearch.php?%s" % params
        data = scrapertools.downloadpage(url)

    itemlist = []
    patron = '<div class="mc-poster">.*?<img.*?src="([^"]+)".*?' \
             '<div class="mc-title"><a  href="/es/film(\d+).html"[^>]+>(.*?)<img'
    matches = scrapertools.find_multiple_matches(data, patron)
    # If there is a single result, fetch its trailers directly; otherwise list every result
    if len(matches) == 1:
        item.url = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % matches[
            0][1]
        item.thumbnail = matches[0][0]
        if not item.thumbnail.startswith("http"):
            item.thumbnail = "http://www.filmaffinity.com" + item.thumbnail
        itemlist = search_links_filmaff(item)
    elif len(matches) > 1:
        for scrapedthumbnail, id, scrapedtitle in matches:
            if not scrapedthumbnail.startswith("http"):
                scrapedthumbnail = "http://www.filmaffinity.com" + scrapedthumbnail
            scrapedurl = "http://www.filmaffinity.com/es/evideos.php?movie_id=%s" % id
            scrapedtitle = unicode(scrapedtitle,
                                   encoding="utf-8",
                                   errors="ignore")
            scrapedtitle = scrapertools.htmlclean(scrapedtitle)
            itemlist.append(
                item.clone(title=scrapedtitle,
                           url=scrapedurl,
                           text_color="white",
                           action="search_links_filmaff",
                           thumbnail=scrapedthumbnail))

        next_page = scrapertools.find_single_match(
            data, '<a href="([^"]+)">&gt;&gt;</a>')
        if next_page != "":
            next_page = urlparse.urljoin("http://www.filmaffinity.com/es/",
                                         next_page)
            itemlist.append(
                item.clone(title=">> Seguente",
                           page=next_page,
                           action="filmaffinity_search",
                           thumbnail="",
                           text_color=""))

    if not itemlist:
        itemlist.append(
            item.clone(title="Nessun risultato trovato per (%s)" %
                       item.contentTitle,
                       action="",
                       thumbnail="",
                       text_color=""))

        if keyboard:
            if item.contextual:
                title = "[COLOR green]%s[/COLOR]"
            else:
                title = "%s"
            itemlist.append(
                item.clone(title=title % "Ricerca manuale su Filmaffinity",
                           action="manual_search",
                           text_color="green",
                           thumbnail="",
                           extra="filmaffinity"))

    return itemlist

def fanart(item):
    # Pull every possible fanart and art
    logger.info("pelisalacarta.verseriesonlinetv fanart")
    itemlist = []
    url = item.url
    data = dhe( scrapertools.cachePage(item.url) )
    data = re.sub(r"\n|\r|\t|\s{2}|\(.*?\)|\[.*?\]|&nbsp;","",data)
    try:
        sinopsis = scrapertools.get_match(data, '<div class="sinopsis">.*?</b>(.*?)</div>')
        if " . Aquí podrán encontrar la información de toda la serie incluyendo sus temporadas y episodios." in sinopsis:
            sinopsis = ""
        else:
            sinopsis = re.sub('.. Aquí podrán encontrar la información de toda la serie incluyendo sus temporadas y episodios.', '.', sinopsis)
    except:
        sinopsis = ""

    title_fan = item.show.split("|")[0]
    title = title_fan.decode('utf8').encode('latin1')
    title = title.replace(' ', '%20')
    item.title = re.sub(r"\(.*?\)", "", item.title)
    year = item.show.split("|")[1]
    
    url = "http://www.filmaffinity.com/es/advsearch.php?stext={0}&stype%5B%5D=title&country=&ggenre=TV_SE&fromyear={1}&toyear={1}".format(title, year)
    data = scrapertools.downloadpage(url)

    url_filmaf = scrapertools.find_single_match(data, '<div class="mc-poster">\s*<a title="[^"]*" href="([^"]+)"')
    if url_filmaf:
        url_filmaf = "http://www.filmaffinity.com%s" % url_filmaf
        data = scrapertools.downloadpage(url_filmaf)
    else:
        try:
            url_bing = "http://www.bing.com/search?q=%s+%s+site:filmaffinity.com" % (title.replace(' ', '+'), year)
            data = browser(url_bing)
            data = re.sub(r'\n|\r|\t|\s{2}|&nbsp;', '', data)

            if "myaddrproxy.php" in data:
                subdata_bing = scrapertools.get_match(data, 'li class="b_algo"><div class="b_title"><h2>(<a href="/ myaddrproxy.php/http/www.filmaffinity.com/es/film.*?)"')
                subdata_bing = re.sub(r'\/myaddrproxy.php\/http\/', '', subdata_bing)
            else:
                subdata_bing = scrapertools.get_match(data, 'li class="b_algo"><h2>(<a href="http://www.filmaffinity.com/es/film.*?)"')

            url_filma = scrapertools.get_match(subdata_bing, '<a href="([^"]+)')
            if not "http" in url_filma:
                data = scrapertools.cachePage("http://" + url_filma)
            else:
                data = scrapertools.cachePage(url_filma)
            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data)
        except:
            pass
    if sinopsis == "":
        try:
            sinopsis = scrapertools.find_single_match(data, '<dd itemprop="description">(.*?)</dd>')
            sinopsis = sinopsis.replace("<br><br />", "\n")
            sinopsis = re.sub(r"\(FILMAFFINITY\)<br />", "", sinopsis)
        except:
            pass
    try:
        rating_filma = scrapertools.get_match(data, 'itemprop="ratingValue" content="(.*?)">')
    except:
        rating_filma = "Sin puntuacion"
        
    critica = ""
    patron = '<div itemprop="reviewBody">(.*?)</div>.*?itemprop="author">(.*?)\s*<i alt="([^"]+)"'
    matches_reviews = scrapertools.find_multiple_matches(data, patron)

    if matches_reviews:
        for review, autor, valoracion in matches_reviews:
            review = dhe(scrapertools.htmlclean(review))
            review += "\n" + autor + "[CR]"
            review = re.sub(r'Puntuac.*?\)', '', review)
            if "positiva" in valoracion:
                critica += "[COLOR green][B]%s[/B][/COLOR]\n" % review
            elif "neutral" in valoracion:
                critica += "[COLOR yellow][B]%s[/B][/COLOR]\n" % review
            else:
                critica += "[COLOR red][B]%s[/B][/COLOR]\n" % review
    else:
        critica = "[COLOR floralwhite][B]Esta serie no tiene críticas[/B][/COLOR]"


    ### Search on tmdb
    url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key="+api_key+"&query=" + title + "&language=es&include_adult=false&first_air_date_year="+year
    data_tmdb = scrapertools.cachePage(url_tmdb)
    data_tmdb = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_tmdb)
    patron = '"page":1.*?,"id":(.*?),"backdrop_path":(.*?),"vote_average"'
    matches = re.compile(patron, re.DOTALL).findall(data_tmdb)

    ### Search bing for the series' imdb id
    if len(matches) == 0:
        url_tmdb = "http://api.themoviedb.org/3/search/tv?api_key="+api_key+"&query=" + title + "&language=es"
        data_tmdb = scrapertools.cachePage(url_tmdb)
        data_tmdb = re.sub(r"\n|\r|\t|\s{2}|&nbsp;", "", data_tmdb)
        patron = '"page":1.*?,"id":(.*?),"backdrop_path":(.*?),"vote_average"'
        matches = re.compile(patron, re.DOTALL).findall(data_tmdb)
        if len(matches) == 0:
            urlbing_imdb = "http://www.bing.com/search?q=%s+%s+tv+series+site:imdb.com" % (title.replace(' ', '+'), year)
            data = browser(urlbing_imdb)
            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|http://ssl-proxy.my-addr.org/myaddrproxy.php/", "", data)
            try:
                subdata_imdb = scrapertools.find_single_match(data, '<li class="b_algo">(.*?)h="ID.*?<strong>.*?TV Series')
            except:
                pass

            try:
                imdb_id = scrapertools.get_match(subdata_imdb, '<a href=.*?http.*?imdb.com/title/(.*?)/.*?"')
            except:
                imdb_id = ""
            ### Get the tvdb and tmdb ids from the imdb id
            urlremotetbdb = "https://api.themoviedb.org/3/find/"+imdb_id+"?api_key="+api_key+"&external_source=imdb_id&language=es"
            data_tmdb = scrapertools.cachePage(urlremotetbdb)
            matches = scrapertools.find_multiple_matches(data_tmdb, '"tv_results":.*?"id":(.*?),.*?"poster_path":(.*?),"popularity"')

            if len(matches) == 0:
                id_tmdb = ""
                fanart_3 = ""
                extra = item.thumbnail+"|"+year+"|"+"no data"+"|"+"no data"+"|"+rating_filma+"|"+critica+"|"+""+"|"+id_tmdb
                show = item.fanart+"|"+fanart_3+"|"+sinopsis+"|"+title_fan+"|"+item.thumbnail+"|"+id_tmdb
                fanart_info = item.fanart
                fanart_2 = item.fanart
                id_scraper = " "+"|"+"serie"+"|"+rating_filma+"|"+critica+"|"+" "
                category = ""
                posterdb = item.thumbnail
                itemlist.append(Item(channel=item.channel, title=item.title, url=item.url, action="findvideos",
                                     thumbnail=item.thumbnail, fanart=item.fanart, extra=extra, category=category,
                                     show=show, folder=True))


    for id_tmdb, fan in matches:
            ### Look up the TVDB id
            urlid_tvdb = "https://api.themoviedb.org/3/tv/" + id_tmdb + "/external_ids?api_key=" + api_key + "&language=es"
            data_tvdb = scrapertools.cachePage(urlid_tvdb)
            id = scrapertools.find_single_match(data_tvdb, 'tvdb_id":(.*?),"tvrage_id"')
            if id == "null":
                id = ""
            category = id
            ### Episode count, season count and status
            url_status = "http://api.themoviedb.org/3/tv/" + id_tmdb + "?api_key=" + api_key + "&append_to_response=credits&language=es"
            data_status = scrapertools.cachePage(url_status)
            season_episodes = scrapertools.find_single_match(data_status, '"(number_of_episodes":\d+,"number_of_seasons":\d+,)"')
            season_episodes = re.sub(r'"', '', season_episodes)
            season_episodes = re.sub(r'number_of_episodes', 'Episodios ', season_episodes)
            season_episodes = re.sub(r'number_of_seasons', 'Temporadas', season_episodes)
            season_episodes = re.sub(r'_', ' ', season_episodes)
            status = scrapertools.find_single_match(data_status, '"status":"(.*?)"')
            if status == "Ended":
                status = "Finalizada"
            else:
                status = "En emisión"
            status = status + " (" + season_episodes + ")"
            status = re.sub(r',', '.', status)

            fan = re.sub(r'\\|"', '', fan)

            try:
                # Rating from TheTVDB
                url_rating_tvdb = "http://thetvdb.com/api/1D62F2F90030C444/series/" + id + "/es.xml"
                data = scrapertools.cachePage(url_rating_tvdb)
                rating = scrapertools.find_single_match(data, '<Rating>(.*?)<')
            except:
                # Fall back to the TMDB vote average
                try:
                    rating = scrapertools.get_match(data, '"vote_average":(.*?),')
                except:
                    rating = "Sin puntuación"

            id_scraper = id_tmdb + "|" + "serie" + "|" + rating_filma + "|" + critica + "|" + rating + "|" + status  # +"|"+emision
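            # These "|"-packed strings are split again by the info/temporadas actions, so field order matters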
            posterdb = scrapertools.find_single_match(data_tmdb,'"poster_path":(.*?)","popularity"')

            if "null" in posterdb:
                posterdb = item.thumbnail
            else:
                posterdb = re.sub(r'\\|"','',posterdb)
                posterdb =  "https://image.tmdb.org/t/p/original" + posterdb
            if "null" in fan:
                fanart = "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg"
            else:
                fanart="https://image.tmdb.org/t/p/original" + fan

            if fanart =="http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg":
                fanart_info = fanart
                fanart_2 = fanart
                fanart_3 = fanart
                fanart_4 = fanart
            else:
             url ="http://api.themoviedb.org/3/tv/"+id_tmdb+"/images?api_key="+api_key

             data = scrapertools.cachePage(url)
             data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)

             file_path= scrapertools.find_multiple_matches(data, '"file_path":"(.*?)"')
             if len(file_path)>= 5:
                fanart_info = "https://image.tmdb.org/t/p/original" + file_path[1]
                fanart_2 = "https://image.tmdb.org/t/p/original" + file_path[2]
                fanart_3 = "https://image.tmdb.org/t/p/original" + file_path[3]
                fanart_4 = "https://image.tmdb.org/t/p/original" + file_path[4]
                if fanart== "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg":
                    fanart= "https://image.tmdb.org/t/p/original" + fanart_info
             elif len(file_path)== 4  :
                fanart_info = "https://image.tmdb.org/t/p/original" + file_path[1]
                fanart_2 = "https://image.tmdb.org/t/p/original" + file_path[2]
                fanart_3 = "https://image.tmdb.org/t/p/original" + file_path[3]
                fanart_4 = "https://image.tmdb.org/t/p/original" + file_path[1]
                if fanart== "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg":
                    fanart= "https://image.tmdb.org/t/p/original" + fanart_info
             elif len(file_path)== 3:
                fanart_info = "https://image.tmdb.org/t/p/original" + file_path[1]
                fanart_2 = "https://image.tmdb.org/t/p/original" + file_path[2]
                fanart_3 = "https://image.tmdb.org/t/p/original" + file_path[1]
                fanart_4 = "https://image.tmdb.org/t/p/original" + file_path[0]
                if fanart== "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg":
                    fanart= "https://image.tmdb.org/t/p/original" + fanart_info
             elif len(file_path)== 2:
                fanart_info = "https://image.tmdb.org/t/p/original" + file_path[1]
                fanart_2 = "https://image.tmdb.org/t/p/original" + file_path[0]
                fanart_3 = "https://image.tmdb.org/t/p/original" + file_path[1]
                fanart_4 = "https://image.tmdb.org/t/p/original" + file_path[1]
                if fanart== "http://s6.postimg.org/qcbsfbvm9/verseriesnofan2.jpg":
                    fanart= "https://image.tmdb.org/t/p/original" + fanart_info
             else:
                fanart_info = fanart
                fanart_2 = fanart
                fanart_3 = fanart
                fanart_4 = fanart


            url ="http://webservice.fanart.tv/v3/tv/"+id+"?api_key="+api_fankey
            data = scrapertools.cachePage(url)
            data = re.sub(r"\n|\r|\t|\s{2}|&nbsp;","",data)
            patron = '"clearlogo":.*?"url": "([^"]+)"'
            matches = re.compile(patron,re.DOTALL).findall(data)
            if '"tvbanner"' in data:
                tvbanner = scrapertools.get_match(data,'"tvbanner":.*?"url": "([^"]+)"')
                tfv=tvbanner
            elif '"tvposter"' in data:
                tvposter = scrapertools.get_match(data,'"tvposter":.*?"url": "([^"]+)"')
                tfv=tvposter
            else:
                tfv = posterdb
            if '"tvthumb"' in data:
                tvthumb = scrapertools.get_match(data,'"tvthumb":.*?"url": "([^"]+)"')
            if '"hdtvlogo"' in data:
                hdtvlogo = scrapertools.get_match(data,'"hdtvlogo":.*?"url": "([^"]+)"')
            if '"hdclearart"' in data:
                hdtvclear = scrapertools.get_match(data,'"hdclearart":.*?"url": "([^"]+)"')
            if len(matches) == 0:
                if '"hdtvlogo"' in data:
                    thumbnail = hdtvlogo
                    if '"hdclearart"' in data:
                        extra = hdtvclear + "|" + year
                    else:
                        extra = thumbnail + "|" + year
                    show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4
                    itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, server="torrent", thumbnail=thumbnail, fanart=fanart, category=category, extra=extra, show=show, folder=True))
                else:
                    extra = "" + "|" + year
                    show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4
                    itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, server="torrent", thumbnail=posterdb, fanart=fanart, extra=extra, show=show, category=category, folder=True))
            for logo in matches:
                if '"hdtvlogo"' in data:
                    thumbnail = hdtvlogo
                elif '"clearlogo"' in data:
                    thumbnail = logo
                else:
                    thumbnail = item.thumbnail
                if '"clearart"' in data:
                    clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"')
                    extra = clear + "|" + year
                    show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4
                    itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, server="torrent", thumbnail=thumbnail, fanart=fanart, extra=extra, show=show, category=category, folder=True))

                if "showbackground" in data:
                    if '"clearart"' in data:
                        clear = scrapertools.get_match(data, '"clearart":.*?"url": "([^"]+)"')
                        extra = clear + "|" + year
                        show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4
                    else:
                        extra = logo + "|" + year
                        show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4
                        itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, server="torrent", thumbnail=thumbnail, fanart=fanart, extra=extra, show=show, category=category, folder=True))

                if not '"clearart"' in data and not '"showbackground"' in data:
                    if '"hdclearart"' in data:
                        extra = hdtvclear + "|" + year
                    else:
                        extra = thumbnail + "|" + year
                    show = fanart_2 + "|" + fanart_3 + "|" + sinopsis + "|" + title_fan + "|" + tfv + "|" + id_tmdb + "|" + fanart_4
                    itemlist.append(Item(channel=item.channel, title=item.title, action="temporadas", url=item.url, server="torrent", thumbnail=thumbnail, fanart=fanart, extra=extra, show=show, category=category, folder=True))
    title ="Info"
    title_info = title.replace(title,"[COLOR seagreen]"+title+"[/COLOR]")
    
    
    if '"tvposter"' in data:
            thumbnail= scrapertools.get_match(data,'"tvposter":.*?"url": "([^"]+)"')
    else:
        thumbnail = posterdb
        
    if "tvbanner" in data:
        category = tvbanner
    else:
        category = show
    if '"tvthumb"' in data:
        plot = item.plot+"|"+tvthumb
    else:
        plot = item.plot+"|"+item.thumbnail
    if '"tvbanner"' in data:
        plot=plot+"|"+ tvbanner
    elif '"tvthumb"' in data:
        plot=plot+"|"+ tvthumb
    else:
        plot=plot+"|"+ item.thumbnail


    id = id_scraper
    
    extra = extra+"|"+id+"|"+title.encode('utf8')
    
    itemlist.append( Item(channel=item.channel, action="info" , title=title_info , url=item.url, thumbnail=thumbnail, fanart=fanart_info, extra= extra, category = category,plot=plot, show= show, viewmode="movie_with_plot", folder=False ))

    return itemlist
def get_video_url(page_url,
                  premium=False,
                  user="",
                  password="",
                  video_password=""):
    logger.info("url=" + page_url)

    page_url = page_url.replace("playvid-", "")

    headers = {
        'Host': 'www.flashx.tv',
        'User-Agent':
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
        'Accept':
        'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
        'Accept-Language': 'en-US,en;q=0.5',
        'Accept-Encoding': 'gzip, deflate, br',
        'Connection': 'keep-alive',
        'Upgrade-Insecure-Requests': '1',
        'Cookie': ''
    }
    data = httptools.downloadpage(page_url,
                                  headers=headers,
                                  replace_headers=True).data
    flashx_id = scrapertools.find_single_match(data,
                                               'name="id" value="([^"]+)"')
    fname = scrapertools.find_single_match(data,
                                           'name="fname" value="([^"]+)"')
    hash_f = scrapertools.find_single_match(data,
                                            'name="hash" value="([^"]+)"')
    post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed to the video' % (
        flashx_id, urllib.quote(fname), hash_f)
    wait_time = scrapertools.find_single_match(data, "<span id='xxc2'>(\d+)")

    headers['Referer'] = "https://www.flashx.tv/"
    headers['Accept'] = "*/*"
    headers['Host'] = "www.flashx.tv"

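    # XHR the player page normally fires before requesting /dl; replayed here so the session looks like a real browser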
    coding_url = 'https://www.flashx.tv/flashx.php?fxfx=5'
    headers['X-Requested-With'] = 'XMLHttpRequest'
    httptools.downloadpage(coding_url, headers=headers, replace_headers=True)

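    # Honour the countdown the page enforces before accepting the form post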
    try:
        time.sleep(int(wait_time) + 1)
    except:
        time.sleep(6)

    headers.pop('X-Requested-With')
    headers['Content-Type'] = 'application/x-www-form-urlencoded'
    data = httptools.downloadpage('https://www.flashx.tv/dl?playnow',
                                  post,
                                  headers,
                                  replace_headers=True).data

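    # The stream URLs are hidden inside packed (p,a,c,k,e,d) JavaScript; unpack each candidate block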
    matches = scrapertools.find_multiple_matches(
        data, "(eval\(function\(p,a,c,k.*?)\s+</script>")

    video_urls = []
    for match in matches:
        try:
            match = jsunpack.unpack(match)
            match = match.replace("\\'", "'")

            # {src:\'https://bigcdn.flashx1.tv/cdn25/5k7xmlcjfuvvjuw5lx6jnu2vt7gw4ab43yvy7gmkvhnocksv44krbtawabta/normal.mp4\',type:\'video/mp4\',label:\'SD\',res:360},
            media_urls = scrapertools.find_multiple_matches(
                match, "{src:'([^']+)'.*?,label:'([^']+)'")
            subtitle = ""
            for media_url, label in media_urls:
                if media_url.endswith(".srt") and label == "Spanish":
                    try:
                        from core import filetools
                        data = scrapertools.downloadpage(media_url)
                        subtitle = os.path.join(config.get_data_path(),
                                                'sub_flashx.srt')
                        filetools.write(subtitle, data)
                    except:
                        import traceback
                        logger.info("Error al descargar el subtítulo: " +
                                    traceback.format_exc())

            for media_url, label in media_urls:
                if not media_url.endswith("png") and not media_url.endswith(
                        ".srt"):
                    video_urls.append([
                        "." + media_url.rsplit('.', 1)[1] + " [flashx]",
                        media_url, 0, subtitle
                    ])

            for video_url in video_urls:
                logger.info("%s - %s" % (video_url[0], video_url[1]))
        except:
            pass

    return video_urls
示例#55
0
def addchannel(item):
    from platformcode import platformtools
    from core import filetools
    import time, os
    logger.info("pelisalacarta.channels.configuracion addchannel")

    tecleado = platformtools.dialog_input("", "Introduzca la URL")
    if not tecleado:
        return
    logger.info("pelisalacarta.channels.configuracion url=%s" % tecleado)

    local_folder = config.get_runtime_path()
    if "canal" in item.title:
        local_folder = filetools.join(local_folder, 'channels')
        folder_to_extract = "channels"
        info_accion = "canal"
    else:
        local_folder = filetools.join(local_folder, 'servers')
        folder_to_extract = "servers"
        info_accion = "conector"

    # Detect whether the link points straight to a .py or .xml file (mainly for github links)
    try:
        extension = tecleado.rsplit(".", 1)[1]
    except:
        extension = ""

    files = []
    zip = False
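    # files collects [remote_url, local_path, filename]; zip marks a packaged download that needs extracting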
    if extension == "py" or extension == "xml":
        filename = tecleado.rsplit("/", 1)[1]
        localfilename = filetools.join(local_folder, filename)
        files.append([tecleado, localfilename, filename])
    else:
        import re
        from core import scrapertools
        # Check whether the url points to a whole github folder (channels or servers)
        if re.search(r'https://github.com/[^\s]+/' + folder_to_extract,
                     tecleado):
            try:
                data = scrapertools.downloadpage(tecleado)
                matches = scrapertools.find_multiple_matches(
                    data,
                    '<td class="content">.*?href="([^"]+)".*?title="([^"]+)"')
                for url, filename in matches:
                    url = "https://raw.githubusercontent.com" + url.replace(
                        "/blob/", "/")
                    localfilename = filetools.join(local_folder, filename)
                    files.append([url, localfilename, filename])
            except:
                import traceback
                logger.info("Detalle del error: %s" % traceback.format_exc())
                platformtools.dialog_ok(
                    "Error", "La url no es correcta o no está disponible")
                return
        else:
            filename = 'new%s.zip' % info_accion
            localfilename = filetools.join(config.get_data_path(), filename)
            files.append([tecleado, localfilename, filename])
            zip = True

    logger.info("pelisalacarta.channels.configuracion localfilename=%s" %
                localfilename)
    logger.info("pelisalacarta.channels.configuracion descarga fichero...")

    try:
        if len(files) > 1:
            lista_opciones = ["No", "Sí", "Sí (Sobrescribir todos)"]
            overwrite_all = False
        from core import downloadtools
        for url, localfilename, filename in files:
            result = downloadtools.downloadfile(url,
                                                localfilename,
                                                continuar=False)
            if result == -3:
                if len(files) == 1:
                    dyesno = platformtools.dialog_yesno("El archivo ya existe", "Ya existe el %s %s." \
                                                        " ¿Desea sobrescribirlo?" % (info_accion, filename))
                else:
                    if not overwrite_all:
                        dyesno = platformtools.dialog_select(
                            "El archivo %s ya existe, ¿desea sobrescribirlo?" %
                            filename, lista_opciones)
                    else:
                        dyesno = 1
                # Dialog cancelled
                if dyesno == -1:
                    return
                # Github-folder case: "overwrite all" option
                elif dyesno == 2:
                    overwrite_all = True
                elif dyesno:
                    hora_folder = "Copia seguridad [%s]" % time.strftime(
                        "%d-%m_%H-%M", time.localtime())
                    backup = filetools.join(config.get_data_path(), 'backups',
                                            hora_folder, folder_to_extract)
                    if not filetools.exists(backup):
                        os.makedirs(backup)
                    import shutil
                    shutil.copy2(localfilename,
                                 filetools.join(backup, filename))
                    result = downloadtools.downloadfile(url,
                                                        localfilename,
                                                        continuar=True)
                else:
                    if len(files) == 1:
                        return
                    else:
                        continue
    except:
        import traceback
        logger.info("Detalle del error: %s" % traceback.format_exc())
        return

    if zip:
        try:
            # Extract it
            logger.info(
                "pelisalacarta.channels.configuracion descomprime fichero...")
            from core import ziptools
            unzipper = ziptools.ziptools()
            logger.info(
                "pelisalacarta.channels.configuracion destpathname=%s" %
                local_folder)
            unzipper.extract(localfilename, local_folder, folder_to_extract,
                             True, True)
        except:
            import traceback
            logger.info("Detalle del error: %s" % traceback.format_exc())
            # Delete the downloaded zip
            filetools.remove(localfilename)
            platformtools.dialog_ok(
                "Error", "Se ha producido un error extrayendo el archivo")
            return

        # Delete the downloaded zip
        logger.info("pelisalacarta.channels.configuracion borra fichero...")
        filetools.remove(localfilename)
        logger.info("pelisalacarta.channels.configuracion ...fichero borrado")

    platformtools.dialog_ok(
        "Éxito", "Actualización/Instalación realizada correctamente")
示例#56
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("pelisalacarta.servers.flashx url=" + page_url)

    # Request the page once
    data = scrapertools.cache_page(page_url, headers=headers)
    # If the warning appears, load the verification page and then the original one
    if "You try to access this video with Kodi" in data:
        url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
        url_reload = "http://www.flashx.tv" + url_reload[1:]
        try:
            data = scrapertools.cache_page(url_reload, headers=headers)
            data = scrapertools.cache_page(page_url, headers=headers)
        except:
            pass

    match = scrapertools.find_single_match(data, "<script type='text/javascript'>(.*?)</script>")

    if match.startswith("eval"):
        try:
            match = jsunpack.unpack(match)
        except:
            pass

    if not "sources:[{file:" in match:
        page_url = page_url.replace("playvid-", "")
        data = scrapertools.downloadpageWithoutCookies(page_url)

        file_id = scrapertools.find_single_match(data, "'file_id', '([^']+)'")
        aff = scrapertools.find_single_match(data, "'aff', '([^']+)'")
        headers_c = [['User-Agent', 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'],
                     ['Referer', page_url],
                     ['Cookie', '; lang=1']]
        coding_url = scrapertools.find_single_match(data, '(?i)src="(http://www.flashx.tv/\w+.js\?[^"]+)"')
        if coding_url.endswith("="):
            coding_url += file_id
        coding = scrapertools.downloadpage(coding_url, headers=headers_c)

        data = scrapertools.downloadpage(page_url, headers=headers)
        flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
        fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
        hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
        post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed+to+video' % (flashx_id, urllib.quote(fname), hash_f)

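        # This variant does not scrape the countdown; wait a fixed six seconds before posting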
        time.sleep(6)
        headers.append(['Referer', page_url])
        headers.append(['Cookie', 'lang=1; file_id=%s; aff=%s' % (file_id, aff)])
        data = scrapertools.downloadpage('http://www.flashx.tv/dl', post=post, headers=headers)

        matches = scrapertools.find_multiple_matches(data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
        for match in matches:
            try:
                match = jsunpack.unpack(match)
            except:
                match = ""
            if "file" in match:
                break

        if not match:
            match = data

    # Extract the stream URL
    # {file:"http://f11-play.flashx.tv/luq4gfc7gxixexzw6v4lhz4xqslgqmqku7gxjf4bk43u4qvwzsadrjsozxoa/video1.mp4"}
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(match, '\{file\:"([^"]+)",label:"([^"]+)"')
    subtitle = ""
    for media_url, label in media_urls:
        if media_url.endswith(".srt") and label == "Spanish":
            try:
                from core import filetools
                data = scrapertools.downloadpage(media_url)
                subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt')
                filetools.write(subtitle, data)
            except:
                import traceback
                logger.info("pelisalacarta.servers.flashx Error al descargar el subtítulo: "+traceback.format_exc())
            
    for media_url, label in media_urls:
        if not media_url.endswith("png") and not media_url.endswith(".srt"):
            video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle])

    for video_url in video_urls:
        logger.info("pelisalacarta.servers.flashx %s - %s" % (video_url[0], video_url[1]))

    return video_urls
示例#57
0
def jayhap_search(item):
    logger.info("streamondemand.channels.trailertools jayhap_search")
    itemlist = []

    if item.extra != "jayhap":
        item.contentTitle += " trailer"
    texto = item.contentTitle
    post = urllib.urlencode({
        'q': texto,
        'yt': 'true',
        'vm': 'true',
        'dm': 'true',
        'v': 'all',
        'l': 'all',
        'd': 'all'
    })

    # Check whether this is a brand-new search or comes from the "Next" option
    if item.page != "":
        post += "&" + urllib.urlencode(item.page)
        data = scrapertools.downloadpage(
            "https://www.jayhap.com/load_more.php", post=post)
    else:
        data = scrapertools.downloadpage(
            "https://www.jayhap.com/get_results.php", post=post)
    data = jsontools.load_json(data)
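    # The response is JSON: 'videos' carries the results, 'tokens' the per-source pagination cursors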
    for video in data['videos']:
        url = video['url']
        server = video['source'].lower()
        duration = " (" + video['duration'] + ")"
        title = video['title'].decode(
            "utf-8") + duration + "  [" + server.capitalize() + "]"
        thumbnail = video['thumbnail']
        if item.contextual:
            title = "[COLOR white]%s[/COLOR]" % title
        itemlist.append(
            item.clone(action="play",
                       server=server,
                       title=title,
                       url=url,
                       thumbnail=thumbnail,
                       text_color="white"))

    if not itemlist:
        itemlist.append(
            item.clone(title="Nessun risultato trovato per (%s)" %
                       item.contentTitle,
                       action="",
                       thumbnail="",
                       text_color=""))
    else:
        tokens = data['tokens']
        tokens['yt_token'] = tokens.pop('youtube')
        tokens['vm_token'] = tokens.pop('vimeo')
        tokens['dm_token'] = tokens.pop('dailymotion')
        itemlist.append(
            item.clone(title=">> Seguente",
                       page=tokens,
                       action="jayhap_search",
                       extra="jayhap",
                       thumbnail="",
                       text_color=""))

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else:
            title = "%s"
        itemlist.append(
            item.clone(title=title % "Ricerca manuale su Jayhap",
                       action="manual_search",
                       text_color="green",
                       thumbnail="",
                       extra="jayhap"))

    return itemlist
示例#58
0
def get_video_url(page_url, premium=False, user="", password="", video_password=""):
    logger.info("streamondemand.servers.flashx url=" + page_url)

    # Request the page once
    data = scrapertools.downloadpageWithoutCookies(page_url)
    # If the warning appears, load the verification page and then the original one
    if "You try to access this video with Kodi" in data:
        url_reload = scrapertools.find_single_match(data, 'try to reload the page.*?href="([^"]+)"')
        url_reload = "http://www.flashx.tv" + url_reload[1:]
        try:
            data = scrapertools.downloadpageWithoutCookies(url_reload)
            data = scrapertools.downloadpageWithoutCookies(page_url)
        except:
            pass

    matches = scrapertools.find_multiple_matches(data, "<script type='text/javascript'>(.*?)</script>")
    m = ""
    for n, m in enumerate(matches):
        if m.startswith("eval"):
            try:
                m = jsunpack.unpack(m)
                fake = (scrapertools.find_single_match(m, "(\w{40,})") == "")
                if fake:
                    m = ""
                else:
                    break
            except:
                m = ""
    match = m
    if "sources:[{file:" not in match:
        page_url = page_url.replace("playvid-", "")

        headers = {'Host': 'www.flashx.tv', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/34.0.1847.137 Safari/537.36',
                  'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept-Language': 'en-US,en;q=0.5',
                  'Accept-Encoding': 'gzip, deflate, br', 'Connection': 'keep-alive', 'Upgrade-Insecure-Requests': '1',
                  'Cookie': ''}
        data = scrapertools.downloadpage(page_url, headers=headers.items())
        flashx_id = scrapertools.find_single_match(data, 'name="id" value="([^"]+)"')
        fname = scrapertools.find_single_match(data, 'name="fname" value="([^"]+)"')
        hash_f = scrapertools.find_single_match(data, 'name="hash" value="([^"]+)"')
        post = 'op=download1&usr_login=&id=%s&fname=%s&referer=&hash=%s&imhuman=Proceed+to+video' % (flashx_id, urllib.quote(fname), hash_f)
        wait_time = scrapertools.find_single_match(data, "<span id='xxc2'>(\d+)")

        file_id = scrapertools.find_single_match(data, "'file_id', '([^']+)'")
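        # The base64-encoded file id is replayed to the CDN script, the counter and flashx.php, mimicking the player's own requests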
        coding_url = 'https://files.fx.fastcontentdelivery.com/jquery2.js?fx=%s' % base64.encodestring(file_id)
        headers['Host'] = "files.fx.fastcontentdelivery.com"
        headers['Referer'] = "https://www.flashx.tv/"
        headers['Accept'] = "*/*"
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        coding_url = 'https://www.flashx.tv/counter.cgi?fx=%s' % base64.encodestring(file_id)
        headers['Host'] = "www.flashx.tv"
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        coding_url = 'https://www.flashx.tv/flashx.php?fxfx=3'
        headers['X-Requested-With'] = 'XMLHttpRequest'
        coding = scrapertools.downloadpage(coding_url, headers=headers.items())

        try:
            time.sleep(int(wait_time) + 1)
        except:
            time.sleep(6)

        headers.pop('X-Requested-With')
        headers['Content-Type'] = 'application/x-www-form-urlencoded'
        data = scrapertools.downloadpage('https://www.flashx.tv/dl?playthis', post=post, headers=headers.items())

        matches = scrapertools.find_multiple_matches(data, "(eval\(function\(p,a,c,k.*?)\s+</script>")
        for match in matches:
            if match.startswith("eval"):
                try:
                    match = jsunpack.unpack(match)
                    fake = (scrapertools.find_single_match(match, "(\w{40,})") == "")
                    if fake:
                        match = ""
                    else:
                        break
                except:
                    match = ""

        if not match:
            match = data

    # Extract the stream URL
    # {file:"http://f11-play.flashx.tv/luq4gfc7gxixexzw6v4lhz4xqslgqmqku7gxjf4bk43u4qvwzsadrjsozxoa/video1.mp4"}
    video_urls = []
    media_urls = scrapertools.find_multiple_matches(match, '\{file\:"([^"]+)",label:"([^"]+)"')
    subtitle = ""
    for media_url, label in media_urls:
        if media_url.endswith(".srt") and label == "Italian":
            try:
                from core import filetools
                data = scrapertools.downloadpage(media_url)
                subtitle = os.path.join(config.get_data_path(), 'sub_flashx.srt')
                filetools.write(subtitle, data)
            except:
                import traceback
                logger.info("streamondemand.servers.flashx Error al descargar el subtítulo: "+traceback.format_exc())

    for media_url, label in media_urls:
        if not media_url.endswith("png") and not media_url.endswith(".srt"):
            video_urls.append(["." + media_url.rsplit('.', 1)[1] + " [flashx]", media_url, 0, subtitle])

    for video_url in video_urls:
        logger.info("streamondemand.servers.flashx %s - %s" % (video_url[0], video_url[1]))

    return video_urls
示例#59
0
def listado(item):
    logger.info("pelisalacarta.channels.vixto listado")
    itemlist = list()

    item.infoLabels['mediatype'] = "movie"
    if "Estrenos" in item.title:
        bloque_head = "ESTRENOS CARTELERA"
    elif "Series" in item.title:
        bloque_head = "SERIES RECIENTES"
        item.infoLabels['mediatype'] = "tvshow"
    else:
        bloque_head = "RECIENTE PELICULAS"

    # Download the page
    data = scrapertools.downloadpage(item.url)
    data = re.sub(r"\n|\r|\t|&nbsp;|\s{2}", "", data)

    # Extract the entries (folders)
    bloque = scrapertools.find_single_match(
        data, bloque_head + '\s*</h2>(.*?)</section>')

    patron = '<div class="".*?href="([^"]+)".*?src="([^"]+)".*?<div class="calZG">(.*?)</div>' \
             '(.*?)</div>.*?href.*?>(.*?)</a>'
    matches = scrapertools.find_multiple_matches(bloque, patron)

    for scrapedurl, scrapedthumbnail, calidad, idiomas, scrapedtitle in matches:
        title = scrapedtitle
        langs = []
        if 'idio idi1' in idiomas:
            langs.append("VOS")
        if 'idio idi2' in idiomas:
            langs.append("LAT")
        if 'idio idi4' in idiomas:
            langs.append("ESP")
        if langs:
            title += "  [%s]" % "/".join(langs)
        if calidad:
            title += " %s" % calidad

        if DEBUG:
            logger.info("title=[{0}], url=[{1}], thumbnail=[{2}]".format(
                title, scrapedurl, scrapedthumbnail))

        filtro_thumb = scrapedthumbnail.replace(
            "http://image.tmdb.org/t/p/w342", "")
        filtro_list = {"poster_path": filtro_thumb}
        filtro_list = filtro_list.items()
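        # The poster path doubles as a search filter so tmdb.set_infoLabels_itemlist can match the exact entry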

        if item.contentType == "tvshow":
            new_item = item.clone(action="episodios",
                                  title=title,
                                  url=scrapedurl,
                                  thumbnail=scrapedthumbnail,
                                  fulltitle=scrapedtitle,
                                  infoLabels={'filtro': filtro_list},
                                  contentTitle=scrapedtitle,
                                  context="buscar_trailer",
                                  text_color=color1,
                                  show=scrapedtitle,
                                  text_blod=False)
        else:
            new_item = item.clone(action="findvideos",
                                  title=title,
                                  url=scrapedurl,
                                  thumbnail=scrapedthumbnail,
                                  fulltitle=scrapedtitle,
                                  infoLabels={'filtro': filtro_list},
                                  text_blod=False,
                                  contentTitle=scrapedtitle,
                                  context="buscar_trailer",
                                  text_color=color1)

        itemlist.append(new_item)

    if item.action == "listado":
        try:
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    return itemlist
示例#60
0
def authentication(item):
    logger.info()
    import urllib
    from core import channeltools
    from core import jsontools
    from core import scrapertools

    itemlist = []
    headers = {
        'User-Agent':
        'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:47.0) Gecko/20100101 Firefox/47.0'
    }
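    # Client id the plugin uses for Real-Debrid's OAuth device flow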
    client_id = "YTWNFBIJEEBP6"
    device_code = item.extra
    token = ""
    try:
        url = "https://api.real-debrid.com/oauth/v2/device/credentials?client_id=%s&code=%s" \
              % (client_id, device_code)
        data = scrapertools.downloadpage(url, headers=headers.items())
        data = jsontools.load_json(data)

        debrid_id = data["client_id"]
        secret = data["client_secret"]

        # Request the access token and the refresh token for when the first one expires
        post = urllib.urlencode({
            "client_id": debrid_id,
            "client_secret": secret,
            "code": device_code,
            "grant_type": "http://oauth.net/grant_type/device/1.0"
        })
        data = scrapertools.downloadpage(
            "https://api.real-debrid.com/oauth/v2/token",
            post=post,
            headers=headers.items())
        data = jsontools.load_json(data)

        token = data["access_token"]
        refresh = data["refresh_token"]

        config.set_setting("id", debrid_id, server="realdebrid")
        config.set_setting("secret", secret, server="realdebrid")
        config.set_setting("token", token, server="realdebrid")
        config.set_setting("refresh", refresh, server="realdebrid")

    except:
        import traceback
        logger.error(traceback.format_exc())

    if token:
        itemlist.append(
            Item(channel=item.channel,
                 action="",
                 title="Cuenta activada correctamente"))
    else:
        itemlist.append(
            Item(channel=item.channel,
                 action="",
                 title="Error en el proceso de activación, vuelve a intentarlo"
                 ))

    return itemlist
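

# A minimal companion sketch (not part of the original channel): once the device
# flow above has stored id/secret/refresh, the access token could be renewed at
# the same Real-Debrid token endpoint. Assumes config.get_setting accepts the
# same server= keyword as the set_setting calls above.
def refresh_token():
    import urllib
    from core import jsontools
    from core import scrapertools

    post = urllib.urlencode({
        "client_id": config.get_setting("id", server="realdebrid"),
        "client_secret": config.get_setting("secret", server="realdebrid"),
        "code": config.get_setting("refresh", server="realdebrid"),
        "grant_type": "http://oauth.net/grant_type/device/1.0"
    })
    data = scrapertools.downloadpage("https://api.real-debrid.com/oauth/v2/token", post=post)
    data = jsontools.load_json(data)
    token = data.get("access_token", "")
    if token:
        # Persist the renewed token next to the credentials stored by authentication()
        config.set_setting("token", token, server="realdebrid")
    return token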