Example 1
def findvideos(item):
    logger.info()

    itemlist = []

    _id = scrapertools.find_single_match(item.url, 'https://animeflv.ru/ver/([^/]+)/')
    post = "embed_id=%s" % _id
    data = httptools.downloadpage("https://animeflv.ru/get_video_info", post=post).data
    dict_data = jsontools.load_json(data)

    headers = dict()
    headers["Referer"] = item.url
    data = httptools.downloadpage("https:" + dict_data["value"], headers=headers).data
    dict_data = jsontools.load_json(data)

    list_videos = dict_data["playlist"][0]["sources"]

    if isinstance(list_videos, list):
        for video in list_videos:
            itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
                                 title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
                                 thumbnail=item.thumbnail))

    else:
        for video in list_videos.values():
            itemlist.append(Item(channel=item.channel, action="play", url=video["file"], show=re.escape(item.show),
                                 title="Ver en calidad [%s]" % video["label"], plot=item.plot, fulltitle=item.title,
                                 thumbnail=item.thumbnail))

    return itemlist
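
A standalone sketch of the same normalization, since "sources" can arrive either as a list or as a dict keyed by quality (plain Python, no plugin imports):

def iter_sources(list_videos):
    # a list is iterated as-is; a dict contributes its values
    if isinstance(list_videos, list):
        return list_videos
    return list(list_videos.values())

print(iter_sources([{"file": "a", "label": "480p"}]))
print(iter_sources({"480": {"file": "a", "label": "480p"}}))  # same single-element list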
Example 2
def loadlives(item):
    logger.info("[mitele.py] loadlives")
    itemlist = []
    
    import time
    tiempo = int(time.time())
    data = scrapertools.downloadpage("http://indalo.mediaset.es/mmc-player/api/mmc/v1/lives.json")
    # Programme schedule
    parrilla = jsontools.load_json(data)

    channels = []
    for channel in parrilla:
        programa = channel["name"]
        canal = channel["channel"]
        if canal not in channels:
            channels.append(canal)
            title = canal.capitalize() + " [[COLOR red]" + programa + "[/COLOR]]"
            url = "http://indalo.mediaset.es/mmc-player/api/mmc/v1/%s/live/flash.json" % canal
            data_channel = scrapertools.downloadpage(url)
            embed_code = jsontools.load_json(data_channel)["locations"][0]["yoo"]
            if not embed_code:
                continue
            url = "http://player.ooyala.com/player.js?embedCode="+embed_code
            itemlist.append(item.clone(title=title, action="play", server="mitele", url=url))


    return itemlist
Example 3
def get_video_url(page_url, premium=False, video_password=""):
    logger.info("pelisalacarta.servers.realdebrid get_video_url( page_url='%s' , video_password=%s)"
                % (page_url, video_password))
    
    # Check whether a saved token exists; if not, run the authentication process
    token_auth = channeltools.get_channel_setting("realdebrid_token", "realdebrid")
    if token_auth is None or token_auth == "":
        if config.is_xbmc():
            token_auth = authentication()
            if token_auth == "":
                return [["REAL-DEBRID: No se ha completado el proceso de autentificación", ""]]
        else:
            return [["Es necesario activar la cuenta. Accede al menú de ayuda", ""]]

    post_link = urllib.urlencode([("link", page_url), ("password", video_password)])
    headers["Authorization"] = "Bearer %s" % token_auth
    url = "https://api.real-debrid.com/rest/1.0/unrestrict/link"
    data = scrapertools.downloadpage(url, post=post_link, headers=headers.items())
    data = jsontools.load_json(data)
    
    # If the token is invalid or has expired, request a new one
    if "error" in data and data["error"] == "bad_token":
        debrid_id = channeltools.get_channel_setting("realdebrid_id", "realdebrid")
        secret = channeltools.get_channel_setting("realdebrid_secret", "realdebrid")
        refresh = channeltools.get_channel_setting("realdebrid_refresh", "realdebrid")

        post_token = urllib.urlencode({"client_id": debrid_id, "client_secret": secret, "code": refresh,
                                       "grant_type": "http://oauth.net/grant_type/device/1.0"})
        renew_token = scrapertools.downloadpage("https://api.real-debrid.com/oauth/v2/token", post=post_token,
                                                headers=headers.items())
        renew_token = jsontools.load_json(renew_token)
        if not "error" in renew_token:
            token_auth = renew_token["access_token"]
            channeltools.set_channel_setting("realdebrid_token", token_auth, "realdebrid")
            headers["Authorization"] = "Bearer %s" % token_auth
            data = scrapertools.downloadpage(url, post=post_link, headers=headers.items())
            data = jsontools.load_json(data)

    if "download" in data:
        return get_enlaces(data)
    else:
        if "error" in data:
            msg = data["error"].decode("utf-8","ignore")
            msg = msg.replace("hoster_unavailable", "Servidor no disponible") \
                     .replace("unavailable_file", "Archivo no disponible") \
                     .replace("hoster_not_free", "Servidor no gratuito") \
                     .replace("bad_token", "Error en el token")
            return [["REAL-DEBRID: " + msg, ""]]
        else:
            return [["REAL-DEBRID: No se ha generado ningún enlace", ""]]
Example 4
def update_json_data(dict_series, filename):
    """
    actualiza el json_data de un fichero con el diccionario pasado

    :param dict_series: diccionario con las series
    :type dict_series: dict
    :param filename: nombre del fichero para guardar
    :type filename: str
    :return: fname, json_data
    :rtype: str, dict
    """
    logger.info("[filtertools.py] update_json_data")
    if not os.path.exists(os.path.join(config.get_data_path(), "settings_channels")):
        os.mkdir(os.path.join(config.get_data_path(), "settings_channels"))
    fname = os.path.join(config.get_data_path(), "settings_channels", filename + "_data.json")
    data = filetools.read(fname)
    dict_data = jsontools.load_json(data)
    # it is a dict
    if dict_data:
        if TAG_TVSHOW_FILTER in dict_data:
            logger.info("   existe el key SERIES")
            dict_data[TAG_TVSHOW_FILTER] = dict_series
        else:
            logger.info("   NO existe el key SERIES")
            new_dict = {TAG_TVSHOW_FILTER: dict_series}
            dict_data.update(new_dict)
    else:
        logger.info("   NO es un dict")
        dict_data = {TAG_TVSHOW_FILTER: dict_series}
    json_data = jsontools.dump_json(dict_data)
    return fname, json_data
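
A hedged usage sketch for update_json_data, assuming filetools.write(path, data) persists a string the way it is used elsewhere in the project:

dict_series = {"Some Show": {"active": True}}      # hypothetical filter dict
fname, json_data = update_json_data(dict_series, "animeflv")
filetools.write(fname, json_data)                  # assumption: writes the string to fname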
Example 5
def get_filtered_tvshows(from_channel):
    """
    Obtiene las series filtradas de un canal

    :param from_channel: canal que tiene las series filtradas
    :type from_channel: str
    :return: dict con las series
    :rtype: dict
    """
    logger.info("[filtertools.py] get_filtered_tvshows")
    dict_series = {}
    name_file = from_channel

    if not os.path.exists(os.path.join(config.get_data_path(), "settings_channels")):
        os.mkdir(os.path.join(config.get_data_path(), "settings_channels"))

    fname = os.path.join(config.get_data_path(), "settings_channels", name_file + "_data.json")

    data = filetools.read(fname)
    dict_data = jsontools.load_json(data)

    check_json_file(data, fname, dict_data)

    if TAG_TVSHOW_FILTER in dict_data:
        dict_series = dict_data[TAG_TVSHOW_FILTER]

    if DEBUG:
        logger.info("json_series: {0}".format(dict_series))

    return dict_series
Example 6
def mainlist(item):
    logger.info("tvalacarta.channels.a3media mainlist")
    itemlist = []

    if account:
        log_result = login()

    if not account:
        itemlist.append( Item(channel=CHANNELNAME, title=bbcode_kodi2html("[COLOR yellow]Regístrate y habilita tu cuenta para disfrutar de más contenido[/COLOR]"), action="openconfig", folder=False) )
    elif not log_result:
        itemlist.append( Item(channel=CHANNELNAME, title=bbcode_kodi2html("[COLOR yellow]Error en el login. Comprueba tus credenciales[/COLOR]"), action="openconfig", folder=False) )

    url="http://servicios.atresplayer.com/api/mainMenu"
    data = scrapertools.cachePage(url)
    #logger.info(data)
    lista = jsontools.load_json(data)[0]
    if lista is None: lista = []

    url2="http://servicios.atresplayer.com/api/categorySections/"


    itemlist.append( Item(channel=CHANNELNAME, title="Directos", action="loadlives", folder=True) )
    itemlist.append( Item(channel=CHANNELNAME, title="Destacados", action="episodios", url="http://servicios.atresplayer.com/api/highlights", folder=True) )

    for entry in lista['menuItems']:
        eid = entry['idSection']
        scrapedtitle = entry['menuTitle']
        scrapedurl = url2 + str(eid)
        itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="secciones" , url=scrapedurl, folder=True, view="programs") )

    itemlist.append( Item(channel=CHANNELNAME, title="A.....Z" , action="secciones" , url="http://servicios.atresplayer.com/api/sortedCategorySections", folder=True) )


    return itemlist
Example 7
def play(item):
    logger.info("tvalacarta.channels.a3media play")

    itemlist = []

    # If it is a live stream, do not process it
    if item.url.startswith("rtmp://") or item.url.startswith("http://a3live-lh"):
        itemlist.append(item)
        return itemlist
    else:
        token = d(item.extra, "QWtMLXs414Yo+c#_+Q#K@NN)")
        url = item.url + token

        if account:
            cookies = os.path.join( config.get_data_path(), 'cookies.dat' )
            cookiedatafile = open(cookies,'r')
            cookiedata = cookiedatafile.read()
            cookiedatafile.close()
            jsessionid = scrapertools.find_single_match(cookiedata,"servicios.atresplayer.com.*?JSESSIONID\s+([A-Za-z0-9\+\-]+)")
            ANDROID_HEADERS.append(['Cookie','JSESSIONID='+jsessionid])

        data = scrapertools.cachePage(url,headers=ANDROID_HEADERS)
        logger.info(data)
        lista = jsontools.load_json(data)
        if lista is not None:
            item.url = lista['resultObject']['es']
            logger.info("tvalacarta.channels.a3media item.url="+item.url)
            itemlist.append(item)

        return itemlist
Example 8
def programas(item):
    logger.info("tvalacarta.channels.rtpa programas")

    itemlist = []

    data = scrapertools.cache_page(item.url)
    json_object = jsontools.load_json(data)
    #logger.info("json_object="+repr(json_object))
    #logger.info("VOD="+repr(json_object["VOD"]))

    for vodlist in json_object["VOD"]:
        
        for vod in vodlist:
            title = vod["nombre_programa"]

            # http://www.rtpa.es/programa:LA%20QUINTANA%20DE%20POLA_1329394981.html
            #url = "http://www.rtpa.es/programa:"+urllib.quote(vod["nombre_programa"])+"_"+vod["id_programa"]+".html"

            # http://www.rtpa.es/api/muestra_json_vod.php?id_programa=1293185502
            url = "http://www.rtpa.es/api/muestra_json_vod.php?id_programa="+vod["id_programa"]
            thumbnail = urllib.quote(vod["url_imagen"]).replace("//","/").replace("http%3A/","http://")
            plot = ""
            itemlist.append( Item(channel=CHANNELNAME, title=title , url=url,  thumbnail=thumbnail , plot=plot, action="episodios" , show = item.title , viewmode="movie", folder=True) )

    return itemlist
Example 9
def episodios(item):
    logger.info("tvalacarta.channels.rtpa episodios")
    itemlist = []

    if "&fin=" not in item.url:
        item.url = item.url + "&fin=1000"

    data = scrapertools.cache_page(item.url)
    json_object = jsontools.load_json(data)
    #logger.info("json_object="+repr(json_object))
    #logger.info("VOD="+repr(json_object["VOD"]))

    for vod in json_object["VOD"]:
        logger.info("vod="+repr(vod))
        title = vod["nombre_programa"]
        if vod["titulo"]!="":
            title = title + " - " + vod["titulo"]
        if vod["fecha_emision"]!="":
            title = title + " ("+scrapertools.htmlclean(vod["fecha_emision"])+")"
        url = "http://www.rtpa.es/video:"+urllib.quote(vod["nombre_programa"])+"_"+vod["id_generado"]+".html"

        try:
            url_imagen = vod["url_imagen"]
            thumbnail = urllib.quote(url_imagen).replace("//","/").replace("http%3A/","http://")
        except:
            thumbnail = ""

        plot = scrapertools.htmlclean(vod["sinopsis"])
        itemlist.append( Item(channel=CHANNELNAME, title=title , url=url,  thumbnail=thumbnail , plot=plot, fanart=thumbnail, server="rtpa", action="play" , show = item.title , viewmode="movie_with_plot", folder=False) )

    return itemlist
Example 10
def get_video_url(page_url, premium = False, user="", password="", video_password="", page_data=""):

    logger.info("[tv3.py] get_video_url(page_url='%s')" % page_url)

    video = []
    urlbase = "http://dinamics.ccma.cat/pvideo/media.jsp?media=video&version=0s&idint=%s&profile=tv"

    try:

        # Check whether the URL has the new or the old format and use the matching pattern to extract
        # the video number, which is the only thing needed to ultimately obtain the URL of the MP4 video.
        # Precondition: the only two kinds of URL that can arrive here are, for example:
        # http://www.ccma.cat/tv3/alacarta/telenoticies/telenoticies-vespre-17042015/video/5505723/
        # http://www.tv3.cat/videos/5495372/La-baldana
        #
        if page_url.startswith("http://www.ccma.cat"):
            patron = '/tv3/alacarta/.*?/.*?/video/(\d+)'
        else:
            patron = '/videos/(\d+)/.*?'

        matches = re.compile(patron,re.DOTALL).findall(page_url)
        data = scrapertools.cachePage(urlbase % matches[0])
        response = jsontools.load_json(data.decode('iso-8859-1').encode('utf8'))
        video.append([ "HTTP [mp4]", response['media']['url']])

    except:
        import traceback
        logger.info(traceback.format_exc())

    return video
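
A standalone check of the two URL patterns handled above, using the example URLs from the precondition comment:

import re

urls = ["http://www.ccma.cat/tv3/alacarta/telenoticies/telenoticies-vespre-17042015/video/5505723/",
        "http://www.tv3.cat/videos/5495372/La-baldana"]
for page_url in urls:
    if page_url.startswith("http://www.ccma.cat"):
        patron = r'/tv3/alacarta/.*?/.*?/video/(\d+)'
    else:
        patron = r'/videos/(\d+)/.*?'
    print(re.compile(patron, re.DOTALL).findall(page_url)[0])  # 5505723, then 5495372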
Example 11
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("[mailru.py] get_video_url(page_url='%s')" % (page_url))

    video_urls = []

    ## Load the page

    data = scrapertools.cache_page(page_url)

    ## New url: the page url with .json at the end
    url = page_url.replace("embed/","").replace(".html",".json")
    ## Load the data and the headers
    data, headers = scrapertools.read_body_and_headers(url)
    data = jsontools.load_json(data)

    ## The video_key cookie needed to play the video
    for cookie in headers:
        if 'set-cookie' in cookie: break
    cookie_video_key = scrapertools.get_match(cookie[1], '(video_key=[a-f0-9]+)')

    ## Build the video url + video_key cookie
    for videos in data['videos']:
        media_url = videos['url'] + "|Cookie=" + cookie_video_key
        quality = " "+videos['key']
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:] + quality +" [mail.ru]", media_url ] )

    for video_url in video_urls:
        logger.info("[mail.ru] %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example 12
def get_data(payload):
    """
    obtiene la información de la llamada JSON-RPC con la información pasada en payload
    @type payload: dict
    @param payload: data
    :return:
    """
    logger.info("pelisalacarta.platformcode.library get_data: payload %s" % payload)
    # Required header for XBMC JSON-RPC calls, otherwise you'll get a 415 HTTP response code - Unsupported media type
    headers = {'content-type': 'application/json'}

    if modo_cliente:
        try:
            req = urllib2.Request(xbmc_json_rpc_url, data=jsontools.dump_json(payload), headers=headers)
            f = urllib2.urlopen(req)
            response = f.read()
            f.close()

            logger.info("pelisalacarta.platformcode.library get_data: response %s" % response)
            data = jsontools.load_json(response)
        except Exception, ex:
            template = "An exception of type {0} occured. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            logger.info("pelisalacarta.platformcode.library get_data: error en xbmc_json_rpc_url: %s" % message)
            data = ["error"]
Example 13
    def get_tvshow_cast(self, _id, lang=DEFAULT_LANG):
        """
        obtiene el casting de una serie
        @param _id: codigo de la serie
        @type _id: str
        @param lang: codigo idioma para buscar
        @type lang: str
        @return: diccionario con los actores
        @rtype: dict
        """
        logger.info()

        url = HOST + "/series/%s/actors" % _id
        DEFAULT_HEADERS["Accept-Language"] = lang
        logger.debug("url: %s, \nheaders: %s" % (url, DEFAULT_HEADERS))

        req = urllib2.Request(url, headers=DEFAULT_HEADERS)
        response = urllib2.urlopen(req)
        html = response.read()
        response.close()

        dict_html = jsontools.load_json(html)

        dict_html["cast"] = dict_html.pop("data")
        self.result.update(dict_html)
Example 14
def episodios(item):
    logger.info("tvalacarta.channels.disneychannel episodios")
    itemlist = []

    try:
        data = scrapertools.cachePage(item.url)
        #logger.info("data="+repr(data))

        #data = scrapertools.find_single_match(data,'Grill.burger\=(.*?)\:\(function\(\)')
        # Clean up the malformed json
        #data = "{"+scrapertools.find_single_match(data,'("title"\:"Episodios completos","data"\:\[.*?)\,"config_options"')+"}"
        
        data = scrapertools.find_single_match(data,'(\{"view"\:"slider".*?\}),\{"view"')
        data_json = jsontools.load_json(data)
        #logger.info("data_json="+repr(data_json))

        for video in data_json["data"]:
            logger.info("video="+repr(video))

            title = video["title"]+" ("+video["duration"]+")"
            url = video["href"]
            thumbnail = video["thumb"]
            plot = video["description"]
            itemlist.append( Item(channel=CHANNELNAME, action="play", server="disneychannel", title=title, url=url, thumbnail=thumbnail, plot=plot, show=item.show, folder=False) )
    except:
        import traceback
        logger.info(traceback.format_exc())

    return itemlist
Example 15
def play(item):
    import time
    import requests
    logger.info("pelisalacarta.channels.tvvip play")
    itemlist = []

    cookie = get_cookie_value()
    headers_play = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0',
                    'Accept': 'application/json, text/javascript, */*; q=0.01',
                    'Accept-Language': 'es-ES,es;q=0.8,en-US;q=0.5,en;q=0.3',
                    'Accept-Encoding': 'gzip, deflate',
                    'Connection': 'keep-alive',
                    'DNT': '1',
                    'Referer': 'http://tv-vip.com/film/' + item.extra + '/',
                    'Cookie': cookie}

    head = "|User-Agent=" + headers_play['User-Agent'] + "&Referer=" + headers_play['Referer'] + "&Cookie=" + \
           headers_play['Cookie']
    uri = scrapertools.find_single_match(item.url, '(/transcoder[\w\W]+)')
    uri_request = "http://tv-vip.com/video-prod/s/uri?uri=%s&_=%s" % (uri, int(time.time()))

    data = requests.get(uri_request, headers=headers_play)
    data = jsontools.load_json(data.text)
    url = item.url.replace("/transcoder/", "/s/transcoder/") + "?tt=" + str(data['tt']) + \
          "&mm=" + data['mm'] + "&bb=" + data['bb'] + head
    itemlist.append(item.clone(action="play", server="directo", url=url, folder=False))
    return itemlist
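
The head string built above follows the Kodi convention of appending request headers to a media URL after a "|" separator. A minimal sketch of building and splitting such a URL (helper names are illustrative, not part of the channel):

def with_headers(url, headers):
    # join headers as Key=Value pairs after the '|' marker
    extra = "&".join("%s=%s" % (k, v) for k, v in headers.items())
    return url + "|" + extra

def split_headers(play_url):
    url, _, extra = play_url.partition("|")
    return url, dict(kv.split("=", 1) for kv in extra.split("&") if "=" in kv)

u = with_headers("http://example.com/v.mp4", {"Referer": "http://tv-vip.com/"})
print(split_headers(u))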
Example 16
def secciones(item):
    logger.info("tvalacarta.channels.a3media secciones")

    data = scrapertools.cachePage(item.url)
    #logger.info(data)
    lista = jsontools.load_json(data)
    if lista is None: lista = []

    itemlist = []

    for entrys in lista:
        try:
            entry = entrys['section']
        except:
            logger.info("tvalacarta.channels.a3media -----------------------")
            logger.info("tvalacarta.channels.a3media error en "+repr(entrys))
            continue
        extra = entry['idSection']
        scrapedtitle = entry['menuTitle']
        scrapedurl = item.url
        if 'storyline' in entry: scrapedplot = entry['storyline']
        else: scrapedplot = ""
        scrapedthumbnail = entry['urlImage'].replace('.jpg','03.jpg')
     
        if entry['drm'] == False: ## only adds sections whose playback is not protected
            # Add it to the list
            itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="temporadas" , url=scrapedurl, thumbnail=scrapedthumbnail, plot=scrapedplot , extra=str(extra), folder=True) )

    return itemlist
Example 17
def mainlist(item):
    logger.info("tvalacarta.channels.a3media mainlist")

    url="http://servicios.atresplayer.com/api/mainMenu"
    data = scrapertools.cachePage(url)
    #logger.info(data)
    lista = jsontools.load_json(data)[0]
    if lista is None: lista = []
  
    url2="http://servicios.atresplayer.com/api/categorySections/"
    itemlist = []

    itemlist.append( Item(channel=CHANNELNAME, title="Destacados" , action="episodios" , url="http://servicios.atresplayer.com/api/highlights", folder=True) )

    for entry in lista['menuItems']:
        eid = entry['idSection']
        scrapedtitle = entry['menuTitle']
        scrapedurl = url2 + str(eid)
    
        itemlist.append( Item(channel=CHANNELNAME, title=scrapedtitle , action="secciones" , url=scrapedurl, folder=True) )

    itemlist.append( Item(channel=CHANNELNAME, title="A.....Z" , action="secciones" , url="http://servicios.atresplayer.com/api/sortedCategorySections", folder=True) )


    return itemlist
Example 18
def cat(item):
    logger.info("pelisalacarta.channels.tvvip cat")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load_json(data)
    head = header_string + get_cookie_value()
    exception = ["peliculas-mas-vistas", "ultimas-peliculas"]
    for child in data["sortedPlaylistChilds"]:
        if child["id"] not in exception:
            url = "http://tv-vip.com/json/playlist/%s/index.json" % child["id"]
            # Fanart
            if child['hashBackground']:
                fanart = "http://tv-vip.com/json/playlist/%s/background.jpg" % child["id"]
            else:
                fanart = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child["id"]
            # Thumbnail
            thumbnail = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child["id"]
            thumbnail += head
            fanart += head
            title = child['id'].replace('-', ' ').capitalize().replace("Manga", "Animación/Cine Oriental")
            title += " ([COLOR gold]" + str(child['number']) + "[/COLOR])"
            itemlist.append(
                    Item(channel=item.channel, action="entradasconlistas", title=bbcode_kodi2html(title), url=url,
                         thumbnail=thumbnail, fanart=fanart, folder=True))

    return itemlist
Example 19
def search(item, texto):
    logger.info()
    itemlist = []
    item.url = urlparse.urljoin(HOST, "search_suggest")
    texto = texto.replace(" ", "+")
    post = "value=%s" % texto
    data = httptools.downloadpage(item.url, post=post).data

    dict_data = jsontools.load_json(data)

    for e in dict_data:
        title = clean_title(scrapertools.htmlclean(e["name"]))
        url = e["url"]
        plot = e["description"]
        thumbnail = HOST + e["thumb"]
        new_item = item.clone(action="episodios", title=title, url=url, plot=plot, thumbnail=thumbnail)

        if "Pelicula" in e["genre"]:
            new_item.contentType = "movie"
            new_item.contentTitle = title
        else:
            new_item.show = title
            new_item.context = renumbertools.context

        itemlist.append(new_item)

    return itemlist
Example 20
def extract_safe(item):
    logger.info("pelisalacarta.channels.puyasubs extract_safe")
    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
        from core import tmdb
        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")
    itemlist = list()
    
    hash = item.url.rsplit("/", 1)[1]
    headers = [['Content-Type', 'application/json;charset=utf-8']]
    post = jsontools.dump_json({"hash": hash})
    data = scrapertools.downloadpage("http://safelinking.net/v1/protected", post, headers)
    data = jsontools.load_json(data)

    for link in data.get("links"):
        enlace = link["url"]
        domain = link["domain"]
        title = "Ver por %s" % domain
        action = "play"
        if "mega" in domain:
            server = "mega"
            if "/#F!" in enlace:
                action = "carpeta"

        elif "1fichier" in domain:
            server = "onefichier"
            if "/dir/" in enlace:
                action = "carpeta"

        itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))
    
    return itemlist
Example 21
def get_video_url( page_url , premium = False , user="" , password="", video_password="" ):
    logger.info("pelisalacarta.servers.playwire url="+page_url)
    
    data = scrapertools.cachePage(page_url)
    data = jsontools.load_json(data)
    f4m = data['content']['media']['f4m']

    video_urls = []
    data = scrapertools.downloadpageGzip(f4m)

    xml = ET.fromstring(data)
    base_url = xml.find('{http://ns.adobe.com/f4m/1.0}baseURL').text
    for media in xml.findall('{http://ns.adobe.com/f4m/1.0}media'):
        if ".m3u8" in media.get('url'): continue
        media_url = base_url + "/" + media.get('url')
        try:
            height = media.get('height')
            width = media.get('width')
            label = "("+ width + "x" + height + ")"
        except:
            label = ""
        video_urls.append( [ scrapertools.get_filename_from_url(media_url)[-4:]+" "+label+" [playwire]",media_url])


    for video_url in video_urls:
        logger.info("pelisalacarta.servers.playwire %s - %s" % (video_url[0],video_url[1]))

    return video_urls
Example 22
def findvideos(item):
    logger.info("pelisalacarta.channels.tvvip findvideos")
    itemlist = []
    # In case the function is called from a library series
    if item.title.startswith("http"): item.url = item.title.split('%')[0]
    data = anti_cloudflare(item.url)
    data = jsontools.load_json(data)
    for child in data["profiles"].keys():
        videopath = data["profiles"][child]['videoPath']
        extension = videopath[-4:]
        head = header_string + get_cookie_value(extension)
        for i in range(0, len(data["profiles"][child]['servers'])):
            url = data["profiles"][child]['servers'][i]['url'] + videopath + head
            size = "  "+data["profiles"][child]["sizeHuman"]
            resolution = " ["+(data["profiles"][child]['videoResolution'])+"]"
            if i == 0: title = "Ver vídeo en " + resolution.replace('1920x1080','HD-1080p') + size + " [COLOR purple]Mirror "+str(i+1)+"[/COLOR]"
            else: title = "Ver vídeo en " + resolution.replace('1920x1080','HD-1080p') + size +  " [COLOR green]Mirror "+str(i+1)+"[/COLOR]"
            # Put higher-quality links at the top of the list
            if data["profiles"][child]["profileId"] == "default": itemlist.insert(i, Item(channel=__channel__, action='play', server='directo', title=title , url=url , thumbnail=item.thumbnail, fanart=item.fanart, fulltitle=item.fulltitle, plot=item.plot, folder=False) )
            else: itemlist.append( Item(channel=__channel__, action='play', server='directo', title=title , url=url , thumbnail=item.thumbnail, fanart=item.fanart, fulltitle=item.fulltitle, plot=item.plot, folder=False) )
    if len(itemlist) > 0 and item.category == "tvvip":
        if config.get_library_support():
            itemlist.append( Item(channel=__channel__, title="[COLOR green]Añadir enlaces a la biblioteca[/COLOR]", url=item.url, action="add_pelicula_to_library", fulltitle=item.fulltitle))
 
    return itemlist
Example 23
def obtenirpelis(item):
    itemlist = []
    data = scrapertools.cachePage(item.url)
    List = jsontools.load_json(data)

    for i in List:
        title = i['nom']
        try:
            if i['magnets']['M1080']['magnet'] is not None:
                url = i['magnets']['M1080']['magnet']
            else:
                url = i['magnets']['M720']['magnet']
        except:
            try:
                url = i['magnets']['M720']['magnet']
            except:
                return [Item(channel=__channel__, title='No hay enlace magnet disponible para esta pelicula')]
        try:
            thumbnail = 'http://image.tmdb.org/t/p/w342' + i['posterurl']
        except:
            thumbnail = 'No disponible'
        plot = i['info']
        itemlist.append( Item(channel=__channel__, action="play", title=title , url=url, server="torrent", thumbnail=thumbnail , plot=plot , folder=False) )
    if len(itemlist) == 0:
        itemlist.append( Item(channel=__channel__, action="obtenirpelis", title="Fin de lista", folder=False) )
    elif len(itemlist) == 50:
        url = re.sub(
            r'page=(\d+)',
            r'page=' + str( int( re.search('\d+', item.url).group() ) + 1 ),
            item.url
        )
        itemlist.append( Item(channel=__channel__, action="obtenirpelis", title=">> Página siguiente" , url=url) )

    return itemlist
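
The next-page URL is produced by bumping the first digit run found in the current URL; a standalone check of that substitution (note that re.search grabs the first digit run anywhere in the URL, as in the snippet):

import re

url = "http://example.com/api?page=3"
next_url = re.sub(r'page=(\d+)',
                  'page=' + str(int(re.search(r'\d+', url).group()) + 1),
                  url)
print(next_url)  # http://example.com/api?page=4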
Example 24
def extract_flashvars(data):
    assets = 0
    flashvars = {}
    found = False

    for line in data.split("\n"):
        if line.strip().find(";ytplayer.config = ") > 0:
            found = True
            p1 = line.find(";ytplayer.config = ") + len(";ytplayer.config = ") - 1
            p2 = line.rfind(";")
            if p1 <= 0 or p2 <= 0:
                continue
            data = line[p1 + 1:p2]
            break
    data = remove_additional_ending_delimiter(data)

    if found:
        data = json.load_json(data)
        if assets:
            flashvars = data["assets"]
        else:
            flashvars = data["args"]

    for k in ["html", "css", "js"]:
        if k in flashvars:
            flashvars[k] = normalize_url(flashvars[k])

    return flashvars
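
A minimal reproduction of the ";ytplayer.config = " line scan above, using the stdlib json module in place of the project's jsontools (the sample line is made up):

import json

line = 'x;ytplayer.config = {"args": {"html": "a"}, "assets": {"js": "b"}};'
marker = ";ytplayer.config = "
p1 = line.find(marker) + len(marker) - 1
p2 = line.rfind(";")
data = line[p1 + 1:p2]  # the JSON object between the marker and the last ';'
print(json.loads(data))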
Example 25
    def fromurl(self, url):
        """
        Genera un item a partir de una cadena de texto. La cadena puede ser creada por la funcion tourl() o tener
        el formato antiguo: plugin://plugin.video.pelisalacarta/?channel=... (+ otros parametros)
        Uso: item.fromurl("cadena")

        @param url: url
        @type url: str
        """
        if "?" in url:
            url = url.split("?")[1]
        decoded = False
        try:
            str_item = base64.b64decode(urllib.unquote(url))
            json_item = json.load_json(str_item, object_hook=self.toutf8)
            if json_item is not None and len(json_item) > 0:
                self.__dict__.update(json_item)
                decoded = True
        except:
            pass

        if not decoded:
            url = urllib.unquote_plus(url)
            dct = dict([[param.split("=")[0], param.split("=")[1]] for param in url.split("&") if "=" in param])
            self.__dict__.update(dct)
            self.__dict__ = self.toutf8(self.__dict__)

        if 'infoLabels' in self.__dict__ and not isinstance(self.__dict__['infoLabels'], InfoLabels):
            self.__dict__['infoLabels'] = InfoLabels(self.__dict__['infoLabels'])

        return self
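
The tourl() counterpart is not shown here; assuming it produces url-quoted base64 JSON, which is what fromurl() decodes, the round trip looks like this sketch:

import base64, json
try:
    from urllib import quote, unquote      # Python 2, as in the snippet
except ImportError:
    from urllib.parse import quote, unquote

item_dict = {"channel": "animeflv", "action": "play"}
url = quote(base64.b64encode(json.dumps(item_dict).encode("utf8")))
decoded = json.loads(base64.b64decode(unquote(url)))
print(decoded == item_dict)  # True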
Example 26
def series_buscar(item):
    logger.info("pelisalacarta.channels.cinehanwer series_buscar")

    # Descarga la pagina
    headers = DEFAULT_HEADERS[:]
    headers.append(["Referer",item.extra])
    headers.append(["X-Requested-With","XMLHttpRequest"])
    data = scrapertools.cache_page(item.url,headers=headers)
    logger.info("data="+data)

    # Extract the entries (folders)
    '''
    {"searchTerms":"yes","results":[{"ID":4501,"post_author":"1","post_date":"mayo 23, 2014","post_date_gmt":"2014-05-23 17:56:47","post_title":"4x06 - Leyes de dioses y hombres","post_excerpt":"<p>La historia de Canci\u00f3n de Hielo y Fuego se sit\u00faa en un mundo ficticio medieval. Hay tres l\u00edneas [...]<\/p>\n","post_status":"publish","comment_status":"open","ping_status":"open","post_password":"","post_name":"4x06-leyes-de-dioses-y-hombres","to_ping":"","pinged":"","post_modified":"2014-05-23 19:56:47","post_modified_gmt":"2014-05-23 17:56:47","post_content_filtered":"","post_parent":0,"guid":"http:\/\/series.cinehanwer.com\/?p=4501","menu_order":0,"post_type":"post","post_mime_type":"","comment_count":"0","filter":"raw","post_author_nicename":"admin","permalink":"http:\/\/series.cinehanwer.com\/4x06-leyes-de-dioses-y-hombres\/","attachment_thumbnail":"http:\/\/series.cinehanwer.com\/wp-content\/uploads\/2013\/04\/\u00edndice-150x150.jpg","show_more":true},{"ID":4424,"post_author":"1","post_date":"mayo 16, 2014","post_date_gmt":"2014-05-16 09:02:06","post_title":"1x20 - El hacedor de reyes","post_excerpt":"<p>El criminal m\u00e1s buscado del mundo, Thomas Raymond Reddington (James Spader, se entrega [...]<\/p>\n","post_status":"publish","comment_status":"open","ping_status":"open","post_password":"","post_name":"1x20-el-hacedor-de-reyes","to_ping":"","pinged":"","post_modified":"2014-05-16 11:02:06","post_modified_gmt":"2014-05-16 09:02:06","post_content_filtered":"","post_parent":0,"guid":"http:\/\/series.cinehanwer.com\/?p=4424","menu_order":0,"post_type":"post","post_mime_type":"","comment_count":"0","filter":"raw","post_author_nicename":"admin","permalink":"http:\/\/series.cinehanwer.com\/1x20-el-hacedor-de-reyes\/","attachment_thumbnail":"http:\/\/series.cinehanwer.com\/wp-content\/uploads\/2014\/01\/The-Blacklist-128x128.jpeg","show_more":true}],"displayPostMeta":true}
    '''
    json_object = jsontools.load_json(data)
    logger.info("results="+json_object["results"])
    data = json_object["results"]
    
    for entries in data:
        title = scrapertools.htmlclean(entries["post_title"])
        thumbnail = scrapertools.htmlclean(entries["attachment_thumbnail"])
        url = scrapertools.htmlclean(entries["permalink"])
        plot = ""
   
        itemlist.append( Item(channel=__channel__, action="findvideos_series" , title=title , url=url, thumbnail=thumbnail, plot=plot, fulltitle=title, viewmode="movie"))
 
    return itemlist    
Example 27
    def fromjson(self, json_item=None, path=""):
        """
        Genera un item a partir de un archivo JSON
        Si se especifica un path, lee directamente el archivo, si no, lee la cadena de texto pasada.
        Usos: item = Item().fromjson(path="ruta\archivo\json.json")
              item = Item().fromjson("Cadena de texto json")

        @param json_item: item
        @type json_item: json
        @param path: ruta
        @type path: str
        """
        if path:
            if os.path.exists(path):
                json_item = open(path, "rb").read()
            else:
                json_item = {}

        if json_item is None:
            json_item = {}

        item = json.load_json(json_item, object_hook=self.toutf8)
        self.__dict__.update(item)

        if 'infoLabels' in self.__dict__ and not isinstance(self.__dict__['infoLabels'], InfoLabels):
            self.__dict__['infoLabels'] = InfoLabels(self.__dict__['infoLabels'])

        return self
Example 28
def get_data(payload):
    """
    Gets the information from the JSON-RPC call with the information passed in payload
    @type payload: dict
    @param payload: data
    :return:
    """
    logger.info("payload: %s" % payload)
    # Required header for XBMC JSON-RPC calls, otherwise you'll get a 415 HTTP response code - Unsupported media type
    headers = {'content-type': 'application/json'}

    if config.get_setting("library_mode", "biblioteca"):
        try:
            try:
                xbmc_port = int(config.get_setting("xbmc_puerto", "biblioteca"))
            except:
                xbmc_port = 0

            xbmc_json_rpc_url = "http://" + config.get_setting("xbmc_host", "biblioteca") + ":" + str(
                xbmc_port) + "/jsonrpc"
            req = urllib2.Request(xbmc_json_rpc_url, data=jsontools.dump_json(payload), headers=headers)
            f = urllib2.urlopen(req)
            response = f.read()
            f.close()

            logger.info("get_data: response %s" % response)
            data = jsontools.load_json(response)
        except Exception, ex:
            template = "An exception of type {0} occured. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            logger.info("get_data: error en xbmc_json_rpc_url: %s" % message)
            data = ["error"]
Example 29
def findvideos(item):
    logger.info("pelisalacarta.channels.tvvip findvideos")
    itemlist = []

    # In case the function is called from a library series
    if item.extra.startswith("http"): item.url = item.extra
    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load_json(data)
    id = urllib.quote(data['id'])
    for child in data["profiles"].keys():
        videopath = urllib.quote(data["profiles"][child]['videoUri'])
        for i in range(0, len(data["profiles"][child]['servers'])):
            url = data["profiles"][child]['servers'][i]['url'] + videopath
            size = "  " + data["profiles"][child]["sizeHuman"]
            resolution = " [" + (data["profiles"][child]['videoResolution']) + "]"
            title = "Ver vídeo en " + resolution.replace('1920x1080', 'HD-1080p')
            if i == 0:
                title += size + " [COLOR purple]Mirror " + str(i + 1) + "[/COLOR]"
            else:
                title += size + " [COLOR green]Mirror " + str(i + 1) + "[/COLOR]"
            # Put higher-quality links at the top of the list
            if data["profiles"][child]["profileId"] == "default":
                itemlist.insert(i, item.clone(action="play", server="directo", title=bbcode_kodi2html(title), url=url,
                                              contentTitle=item.fulltitle, viewmode="list", extra=id, folder=False))
            else:
                itemlist.append(item.clone(action="play", server="directo", title=bbcode_kodi2html(title), url=url,
                                           contentTitle=item.fulltitle, viewmode="list", extra=id, folder=False))
    if len(itemlist) > 0 and item.category != "Cine" and item.category != "" and item.category != "Series":
        if config.get_library_support():
            itemlist.append(Item(channel=item.channel, title="Añadir enlaces a la biblioteca", text_color="green",
                                       contentTitle=item.fulltitle, url=item.url, action="add_pelicula_to_library",
                                       infoLabels={'title':item.fulltitle}, fulltitle=item.fulltitle))

    return itemlist
Example 30
def episodios(item):
    logger.info("tvalacarta.channels.clantv episodios")

    itemlist = []

    # Download the page
    url = item.url+"/videos.json"
    data = scrapertools.cache_page(url)
    json_object = jsontools.load_json(data)
    #logger.info("json_object="+json_object)
    json_items = json_object["page"]["items"]

    for json_item in json_items:
        title = json_item["longTitle"]
        url = json_item["uri"]
        thumbnail = item.thumbnail
        if json_item["description"] is not None:
            plot = json_item["description"]
        else:
            plot = ""
        fanart = item.fanart
        page = url
        if (DEBUG): logger.info(" title=["+repr(title)+"], url=["+repr(url)+"], thumbnail=["+repr(thumbnail)+"] plot=["+repr(plot)+"]")
        itemlist.append( Item(channel="rtve", title=title , action="play" , server="rtve", page=page, url=url, thumbnail=thumbnail, fanart=thumbnail, show=item.show , plot=plot , viewmode="movie_with_plot", folder=False) )

    from core import config
    if (config.get_platform().startswith("xbmc") or config.get_platform().startswith("boxee")) and len(itemlist)>0:
        itemlist.append( Item(channel=item.channel, title=">> Opciones para esta serie", url=item.url, action="serie_options##episodios", thumbnail=item.thumbnail, show=item.show, folder=False))

    return itemlist
Example 31
def findvideos(item):
    logger.info("pelisalacarta.channels.cinefox findvideos")
    itemlist = []

    if not "|" in item.extra and not __menu_info__:
        data = scrapertools.downloadpage(item.url, headers=headers.items())
        year = scrapertools.find_single_match(
            data, '<div class="media-summary">.*?release.*?>(\d+)<')
        if year != "" and not "tmdb_id" in item.infoLabels:
            try:
                from core import tmdb
                item.infoLabels["year"] = year
                tmdb.set_infoLabels_item(item, __modo_grafico__)
            except:
                pass

        if item.infoLabels["plot"] == "":
            sinopsis = scrapertools.find_single_match(
                data, '<p id="media-plot".*?>.*?\.\.\.(.*?)Si te parece')
            item.infoLabels["plot"] = scrapertools.htmlclean(sinopsis)

    id = scrapertools.find_single_match(item.url, '/(\d+)/')
    if "|" in item.extra or not __menu_info__:
        extra = item.extra
        if "|" in item.extra:
            extra = item.extra[:-1]
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (
            id, extra, "streaming")
        itemlist.extend(get_enlaces(item, url, "Online"))
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (
            id, extra, "download")
        itemlist.extend(get_enlaces(item, url, "de Descarga"))

        if extra == "media":
            data_trailer = scrapertools.downloadpage(
                "http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id,
                headers=headers.items())
            trailer_url = jsontools.load_json(data_trailer)["video"]["url"]
            if trailer_url != "":
                item.infoLabels["trailer"] = trailer_url

            title = "Ver enlaces %s - [" + item.contentTitle + "]"
            itemlist.append(
                item.clone(channel="trailertools",
                           action="buscartrailer",
                           title="Buscar Tráiler",
                           text_color="magenta",
                           context=""))

            if config.get_library_support() and "|" not in item.extra:
                itemlist.append(
                    Item(channel=item.channel,
                         action="add_pelicula_to_library",
                         text_color=color5,
                         title="Añadir película a la biblioteca",
                         url=item.url,
                         thumbnail=item.thumbnail,
                         fanart=item.fanart,
                         fulltitle=item.fulltitle,
                         extra="media|"))
    else:
        url = "http://www.cinefox.tv/sources/list?id=%s&type=%s&order=%s" % (
            id, item.extra, item.type)
        type = item.type.replace("streaming",
                                 "Online").replace("download", "de Descarga")
        itemlist.extend(get_enlaces(item, url, type))

    return itemlist
Example 32
def episodios(item):
    logger.info("pelisalacarta.channels.cinefox episodios")
    itemlist = []

    if item.extra == "ultimos":
        data = scrapertools.downloadpage(item.url, headers=headers.items())
        item.url = scrapertools.find_single_match(
            data, '<a href="([^"]+)" class="h1-like media-title"')
        item.url += "/episodios"

    data = scrapertools.downloadpage(item.url, headers=headers.items())

    data_season = data[:]
    headers["Referer"] = item.url

    if item.extra == "episodios" or not __menu_info__:
        action = "findvideos"
    else:
        action = "menu_info_episode"

    seasons = scrapertools.find_multiple_matches(
        data, '<a href="([^"]+)"[^>]+><span class="season-toggle')
    for i, url in enumerate(seasons):
        if i != 0:
            data_season = scrapertools.downloadpage(url,
                                                    headers=headers.items())
        patron = '<div class="ep-list-number">.*?href="([^"]+)">([^<]+)</a>.*?<span class="name">([^<]+)</span>'
        matches = scrapertools.find_multiple_matches(data_season, patron)
        for scrapedurl, episode, scrapedtitle in matches:
            item.contentSeason = episode.split("x")[0]
            item.contentEpisodeNumber = episode.split("x")[1]

            title = episode + " - " + scrapedtitle
            extra = "episode"
            if item.extra == "episodios":
                extra = "episode|"
            itemlist.append(
                item.clone(action=action,
                           title=title,
                           url=scrapedurl,
                           text_color=color2,
                           extra=extra,
                           contentType="episode"))

    if item.extra != "episodios":
        try:
            from core import tmdb
            tmdb.set_infoLabels_itemlist(itemlist, __modo_grafico__)
        except:
            pass

    itemlist.reverse()
    if item.extra != "episodios":
        id = scrapertools.find_single_match(item.url, '/(\d+)/')
        data_trailer = scrapertools.downloadpage(
            "http://www.cinefox.tv/media/trailer?idm=%s&mediaType=1" % id,
            headers=headers.items())
        item.infoLabels["trailer"] = jsontools.load_json(
            data_trailer)["video"]["url"]
        itemlist.append(
            item.clone(channel="trailertools",
                       action="buscartrailer",
                       title="Buscar Tráiler",
                       text_color="magenta"))
        if config.get_library_support():
            itemlist.append(
                Item(channel=item.channel,
                     action="add_serie_to_library",
                     text_color=color5,
                     title="Añadir serie a la biblioteca",
                     show=item.show,
                     thumbnail=item.thumbnail,
                     url=item.url,
                     fulltitle=item.fulltitle,
                     fanart=item.fanart,
                     extra="episodios"))

    return itemlist
Example 33
def findvideos(item):
    logger.info()
    if item.infoLabels["tmdb_id"] and not item.infoLabels["plot"]:
        from core import tmdb
        tmdb.set_infoLabels_item(item, True, idioma_busqueda="en")

    itemlist = list()

    data = httptools.downloadpage(item.url).data
    idiomas = scrapertools.find_single_match(data, 'Subtitulo:\s*(.*?)<br />')
    calidades = ['720p', '1080p']
    torrentes = scrapertools.find_multiple_matches(data, '<a href="(https://www.frozen-layer.com/descargas[^"]+)"')
    if torrentes:
        for i, enlace in enumerate(torrentes):
            title = "Ver por Torrent   %s" % idiomas
            if ">720p" in data and ">1080p" in data:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            itemlist.append(item.clone(title=title, action="play", url=enlace, server="torrent"))

    onefichier = scrapertools.find_multiple_matches(data, '<a href="(https://1fichier.com/[^"]+)"')
    if onefichier:
        for i, enlace in enumerate(onefichier):
            title = "Ver por 1fichier   %s" % idiomas
            if ">720p" in data and ">1080p" in data:
                try:
                    title = "[%s] %s" % (calidades[i], title)
                except:
                    pass
            itemlist.append(item.clone(title=title, action="play", url=enlace, server="onefichier"))

    safelink = scrapertools.find_multiple_matches(data, '<a href="(http(?:s|)://safelinking.net/[^"]+)"')
    if safelink:
        for i, safe in enumerate(safelink):
            headers = [['Content-Type', 'application/json;charset=utf-8']]
            hash = safe.rsplit("/", 1)[1]
            post = jsontools.dump_json({"hash": hash})
            data_sf = httptools.downloadpage("http://safelinking.net/v1/protected", post, headers).data
            data_sf = jsontools.load_json(data_sf)

            for link in data_sf.get("links"):
                enlace = link["url"]
                domain = link["domain"]
                title = "Ver por %s" % domain
                action = "play"
                if "mega" in domain:
                    server = "mega"
                    if "/#F!" in enlace:
                        action = "carpeta"

                elif "1fichier" in domain:
                    server = "onefichier"
                    if "/dir/" in enlace:
                        action = "carpeta"

                title += "   %s" % idiomas
                if ">720p" in data and ">1080p" in data:
                    try:
                        title = "[%s]  %s" % (calidades[i], title)
                    except:
                        pass
                itemlist.append(item.clone(title=title, action=action, url=enlace, server=server))

    return itemlist
Example 34
def programas(item, load_all_pages=False):
    logger.info("tvalacarta.channels.clantv programas")

    itemlist = []

    if item.url == "":
        item.url = MAIN_URL

    # Descarga la página
    data = scrapertools.cache_page(item.url)
    logger.info("data=" + data)
    json_object = jsontools.load_json(data)
    logger.info("json_object=" + repr(json_object))
    json_items = json_object["page"]["items"]

    for json_item in json_items:
        title = json_item["name"]
        url = json_item["uri"]
        thumbnail = json_item["logo"]
        if json_item["description"] is not None:
            plot = json_item["description"]
        else:
            plot = ""
        fanart = json_item["imgPortada"]
        page = json_item["htmlUrl"]
        if (DEBUG):
            logger.info(" title=[" + repr(title) + "], url=[" + repr(url) +
                        "], thumbnail=[" + repr(thumbnail) + "] plot=[" +
                        repr(plot) + "]")
        itemlist.append(
            Item(channel=__channel__,
                 title=title,
                 action="episodios",
                 url=url,
                 thumbnail=thumbnail,
                 plot=plot,
                 page=page,
                 show=title,
                 fanart=fanart,
                 folder=True,
                 view="videos"))

    # Añade el resto de páginas, siempre que haya al menos algún elemento
    if len(itemlist) > 0:
        current_page = scrapertools.find_single_match(item.url, 'page=(\d+)')
        next_page = str(int(current_page) + 1)
        next_page_url = item.url.replace("page=" + current_page,
                                         "page=" + next_page)

        if load_all_pages:
            item.url = next_page_url
            itemlist.extend(programas(item, load_all_pages))
        else:
            itemlist.append(
                Item(channel=__channel__,
                     title=">> Página siguiente",
                     url=next_page_url,
                     action="programas",
                     view="programs"))

    return itemlist
Example 35
def episodios(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load_json(data)

    capitulos = []
    if data.get("b"):
        for child in data["b"]:
            for child2 in child["a"]:
                capitulos.append([child["season"], child2, child["id"]])
    else:
        for child in data.get("a", []):
            capitulos.append(['', child, ''])

    for season, child, id_season in capitulos:
        infoLabels = item.infoLabels.copy()

        if child.get('runtime'):
            try:
                infoLabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if not season or not season.isdigit():
            season = scrapertools.find_single_match(child['name'], '(\d+)x\d+')
        try:
            infoLabels['season'] = int(season)
        except:
            infoLabels['season'] = 0

        if not child['episode']:
            episode = scrapertools.find_single_match(child['name'],
                                                     '\d+x(\d+)')
            if not episode:
                episode = "0"
            infoLabels['episode'] = int(episode)
        else:
            infoLabels['episode'] = int(child['episode'])
        infoLabels['mediatype'] = "episode"

        url = host % "movie/%s/movie.js" % child["id"]
        thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
        if id_season:
            fanart = host % "list/%s/background_1080.jpg" % id_season
        else:
            fanart = item.fanart

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        try:
            title = fulltitle = child['name'].rsplit(
                " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
        except:
            title = fulltitle = child['id'].replace("-", " ")
        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 contentTitle=fulltitle,
                 viewmode="movie",
                 show=item.show,
                 infoLabels=infoLabels,
                 video_urls=video_urls,
                 extra="episodios",
                 text_color=color3))

    itemlist.sort(key=lambda it:
                  (it.infoLabels["season"], it.infoLabels["episode"]),
                  reverse=True)
    if itemlist and config.get_library_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca",
                 text_color=color5,
                 url=item.url,
                 action="add_serie_to_library",
                 infoLabels=item.infoLabels,
                 show=item.show,
                 extra="episodios"))

    return itemlist
Example 36
def numbered_for_tratk(show, season, episode):
    """
    Devuelve la temporada y episodio convertido para que se marque correctamente en tratk.tv

    :param show: Nombre de la serie a comprobar
    :type show: str
    :param season: Temporada que devuelve el scrapper
    :type season: int
    :param episode: Episodio que devuelve el scrapper
    :type episode: int
    :return: season, episode
    :rtype: int, int
    """
    logger.info("pelisalacarta.channels.animeflv numbered_for_tratk")
    show = show.lower()

    new_season = season
    new_episode = episode
    dict_series = {}

    name_file = os.path.splitext(os.path.basename(__file__))[0]
    fname = os.path.join(config.get_data_path(), "settings_channels",
                         name_file + "_data.json")

    if os.path.isfile(fname):

        data = ""

        try:
            with open(fname, "r") as f:
                for line in f:
                    data += line
        except EnvironmentError:
            logger("ERROR al leer el archivo: {0}".format(fname))

        json_data = jsontools.load_json(data)

        if 'TVSHOW_RENUMBER' in json_data:
            dict_series = json_data['TVSHOW_RENUMBER']

        # lowercase the keys, since show was lowercased above.
        for key in dict_series.keys():
            new_key = key.lower()
            if new_key != key:
                dict_series[new_key] = dict_series[key]
                del dict_series[key]

    if show in dict_series:
        logger.info("ha encontrado algo: {0}".format(dict_series[show]))

        if len(dict_series[show]['season_episode']) > 1:
            for row in dict_series[show]['season_episode']:

                if new_episode > row[1]:
                    new_episode -= row[1]
                    new_season = row[0]
                    break

        else:
            new_season = dict_series[show]['season_episode'][0][0]
            new_episode += dict_series[show]['season_episode'][0][1]

    logger.info(
        "pelisalacarta.channels.animeflv numbered_for_tratk: {0}:{1}".format(
            new_season, new_episode))
    return new_season, new_episode
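
For reference, a minimal sketch (show names and offsets hypothetical) of the TVSHOW_RENUMBER block that numbered_for_tratk expects to find in the channel's _data.json file:

# Hypothetical renumbering data consumed by numbered_for_tratk.
# Keys are lowercase show names; "season_episode" holds [season, episode_offset] rows:
# a single row adds the offset, multiple rows subtract per-season episode counts.
import json

sample = {
    "TVSHOW_RENUMBER": {
        "one piece": {"season_episode": [[1, 0]]},
        "naruto shippuden": {"season_episode": [[2, 220], [1, 0]]}
    }
}
print(json.dumps(sample, indent=4))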
Esempio n. 37
0
            req = urllib2.Request(xbmc_json_rpc_url,
                                  data=jsontools.dump_json(payload),
                                  headers=headers)
            f = urllib2.urlopen(req)
            response = f.read()
            f.close()

            logger.info("get_data: response %s" % response)
            data = jsontools.load_json(response)
        except Exception, ex:
            template = "An exception of type {0} occured. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            logger.info("get_data: error en xbmc_json_rpc_url: %s" % message)
            data = ["error"]
    else:
        try:
            data = jsontools.load_json(
                xbmc.executeJSONRPC(jsontools.dump_json(payload)))
        except Exception, ex:
            template = "An exception of type {0} occured. Arguments:\n{1!r}"
            message = template.format(type(ex).__name__, ex.args)
            logger.info(
                "get_data:: error en xbmc.executeJSONRPC: {0}".format(message))
            data = ["error"]

    logger.info("get_data: data %s" % data)

    return data
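
A hedged usage sketch of get_data: the payload is a standard Kodi JSON-RPC request (VideoLibrary.Scan is a real Kodi JSON-RPC method; the directory path is illustrative):

# Standard Kodi JSON-RPC request shape consumed by get_data.
payload = {
    "jsonrpc": "2.0",
    "method": "VideoLibrary.Scan",
    "params": {"directory": "special://home/userdata/library/"},  # illustrative path
    "id": 1
}
# data = get_data(payload)  # decoded JSON-RPC response, or ["error"] on failure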


def update(content_type=FOLDER_TVSHOWS, folder=""):
    """
    Updates the library depending on the content type and the path passed in.
    """
Esempio n. 38
0
def items_usuario(item):
    logger.info("pelisalacarta.channels.hdfull items_usuario")

    itemlist = []

    ## Load statuses
    status = jsontools.load_json(
        scrapertools.cache_page(host + '/a/status/all'))

    ## User cards
    url = item.url.split("?")[0]
    post = item.url.split("?")[1]

    old_start = scrapertools.get_match(post, 'start=([^&]+)&')
    limit = scrapertools.get_match(post, 'limit=(\d+)')
    start = "%s" % (int(old_start) + int(limit))

    post = post.replace("start=" + old_start, "start=" + start)
    next_page = url + "?" + post

    ## Load the user's cards
    data = scrapertools.cache_page(url, post=post)
    fichas_usuario = jsontools.load_json(data)

    for ficha in fichas_usuario:

        try:
            title = ficha['title']['es'].strip()
        except:
            title = ficha['title']['en'].strip()

        try:
            title = title.encode('utf-8')
        except:
            pass

        show = title

        try:
            thumbnail = host + "/thumbs/" + ficha['thumbnail']
        except:
            thumbnail = host + "/thumbs/" + ficha['thumb']

        try:
            url = urlparse.urljoin(
                host,
                '/serie/' + ficha['permalink']) + "###" + ficha['id'] + ";1"
            action = "episodios"
            status_str = get_status(status, 'shows', ficha['id'])
            if "show_title" in ficha:
                action = "findvideos"
                try:
                    serie = ficha['show_title']['es'].strip()
                except:
                    serie = ficha['show_title']['en'].strip()
                temporada = ficha['season']
                episodio = ficha['episode']
                serie = bbcode_kodi2html("[COLOR whitesmoke][B]" + serie +
                                         "[/B][/COLOR]")
                if len(episodio) == 1: episodio = '0' + episodio
                try:
                    title = temporada + "x" + episodio + " - " + serie + ": " + title
                except:
                    title = temporada + "x" + episodio + " - " + serie.decode(
                        'iso-8859-1') + ": " + title.decode('iso-8859-1')
                url = urlparse.urljoin(
                    host, '/serie/' + ficha['permalink'] + '/temporada-' +
                    temporada + '/episodio-' +
                    episodio) + "###" + ficha['id'] + ";3"
        except:
            url = urlparse.urljoin(host, '/pelicula/' +
                                   ficha['perma']) + "###" + ficha['id'] + ";2"
            action = "findvideos"
            status_str = get_status(status, 'movies', ficha['id'])
        if status_str != "": title += status_str

        #try: title = title.encode('utf-8')
        #except: pass

        itemlist.append(
            Item(channel=__channel__,
                 action=action,
                 title=title,
                 fulltitle=title,
                 url=url,
                 thumbnail=thumbnail,
                 show=show,
                 folder=True))

    if len(itemlist) == int(limit):
        itemlist.append(
            Item(channel=__channel__,
                 action="items_usuario",
                 title=">> Página siguiente",
                 url=next_page,
                 folder=True))

    return itemlist
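
The paging above only rewrites the start field of the POST body carried in item.url; a standalone sketch of that increment:

import re

post = "start=0&limit=24&order=date"  # hypothetical POST body
old_start = re.search(r"start=([^&]+)&", post).group(1)
limit = re.search(r"limit=(\d+)", post).group(1)
post = post.replace("start=" + old_start, "start=%s" % (int(old_start) + int(limit)))
print(post)  # start=24&limit=24&order=date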
Esempio n. 39
0
def get_server_setting(name, server, default=None):
    """
        Retorna el valor de configuracion del parametro solicitado.

        Devuelve el valor del parametro 'name' en la configuracion propia del servidor 'server'.

        Busca en la ruta \addon_data\plugin.video.pelisalacarta\settings_servers el archivo server_data.json y lee 
        el valor del parametro 'name'. Si el archivo server_data.json no existe busca en la carpeta servers el archivo 
        server.xml y crea un archivo server_data.json antes de retornar el valor solicitado. Si el parametro 'name' 
        tampoco existe en el el archivo server.xml se devuelve el parametro default.


        @param name: nombre del parametro
        @type name: str
        @param server: nombre del servidor
        @type server: str
        @param default: valor devuelto en caso de que no exista el parametro name
        @type default: cualquiera

        @return: El valor del parametro 'name'
        @rtype: El tipo del valor del parametro 

        """
    # Create the folder if it does not exist
    if not os.path.exists(
            os.path.join(config.get_data_path(), "settings_servers")):
        os.mkdir(os.path.join(config.get_data_path(), "settings_servers"))

    file_settings = os.path.join(config.get_data_path(), "settings_servers",
                                 server + "_data.json")
    dict_settings = {}
    dict_file = {}
    if os.path.exists(file_settings):
        # Get the saved configuration from ../settings_servers/server_data.json
        try:
            dict_file = jsontools.load_json(open(file_settings, "rb").read())
            if isinstance(dict_file, dict) and 'settings' in dict_file:
                dict_settings = dict_file['settings']
        except EnvironmentError:
            logger.info("ERROR reading file: %s" % file_settings)

    if not dict_settings or name not in dict_settings:
        # Get the controls from the file ../servers/server.xml
        try:
            list_controls, default_settings = get_server_controls_settings(
                server)
        except:
            default_settings = {}
        if name in default_settings:  # If the parameter exists in server.xml, create server_data.json
            default_settings.update(dict_settings)
            dict_settings = default_settings
            dict_file['settings'] = dict_settings
            # Create the file ../settings_servers/server_data.json
            json_data = jsontools.dump_json(dict_file)
            try:
                open(file_settings, "wb").write(json_data)
            except EnvironmentError:
                logger.info("ERROR saving file: %s" % file_settings)

    # Return the value of the local parameter 'name' if it exists; otherwise return default
    return dict_settings.get(name, default)
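
The merge at the heart of get_server_setting is a plain dict update where saved values win over the XML defaults; a minimal illustration (names and values hypothetical):

default_settings = {"premium": False, "user": ""}  # hypothetical defaults from server.xml
dict_settings = {"user": "foo"}                    # hypothetical saved server_data.json values
default_settings.update(dict_settings)             # saved values override the defaults
dict_settings = default_settings
print(dict_settings.get("premium", None))          # -> False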
Esempio n. 40
0
def jayhap_search(item):
    logger.info("streamondemand.channels.trailertools jayhap_search")
    itemlist = []

    if item.extra != "jayhap":
        item.contentTitle += " trailer"
    texto = item.contentTitle
    post = urllib.urlencode({
        'q': texto,
        'yt': 'true',
        'vm': 'true',
        'dm': 'true',
        'v': 'all',
        'l': 'all',
        'd': 'all'
    })

    # Check whether this is a fresh search or comes from the "Next" option
    if item.page != "":
        post += "&" + urllib.urlencode(item.page)
        data = scrapertools.downloadpage(
            "https://www.jayhap.com/load_more.php", post=post)
    else:
        data = scrapertools.downloadpage(
            "https://www.jayhap.com/get_results.php", post=post)
    data = jsontools.load_json(data)
    for video in data['videos']:
        url = video['url']
        server = video['source'].lower()
        duration = " (" + video['duration'] + ")"
        title = video['title'].decode(
            "utf-8") + duration + "  [" + server.capitalize() + "]"
        thumbnail = video['thumbnail']
        if item.contextual:
            title = "[COLOR white]%s[/COLOR]" % title
        itemlist.append(
            item.clone(action="play",
                       server=server,
                       title=title,
                       url=url,
                       thumbnail=thumbnail,
                       text_color="white"))

    if not itemlist:
        itemlist.append(
            item.clone(title="Nessun risultato trovato per (%s)" %
                       item.contentTitle,
                       action="",
                       thumbnail="",
                       text_color=""))
    else:
        tokens = data['tokens']
        tokens['yt_token'] = tokens.pop('youtube')
        tokens['vm_token'] = tokens.pop('vimeo')
        tokens['dm_token'] = tokens.pop('dailymotion')
        itemlist.append(
            item.clone(title=">> Seguente",
                       page=tokens,
                       action="jayhap_search",
                       extra="jayhap",
                       thumbnail="",
                       text_color=""))

    if keyboard:
        if item.contextual:
            title = "[COLOR green]%s[/COLOR]"
        else:
            title = "%s"
        itemlist.append(
            item.clone(title=title % "Ricerca manuale su Jayhap",
                       action="manual_search",
                       text_color="green",
                       thumbnail="",
                       extra="jayhap"))

    return itemlist
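
The ">> Seguente" item works by handing the API's pagination tokens back on the next call, renamed to the field names load_more.php expects; the renaming is a plain dict.pop:

tokens = {"youtube": "abc", "vimeo": "def", "dailymotion": "ghi"}  # hypothetical values
tokens["yt_token"] = tokens.pop("youtube")
tokens["vm_token"] = tokens.pop("vimeo")
tokens["dm_token"] = tokens.pop("dailymotion")
print(sorted(tokens.keys()))  # ['dm_token', 'vm_token', 'yt_token']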
Esempio n. 41
0
def colecciones(item):
    logger.info()
    from core import jsontools
    itemlist = []

    usuario = False
    data = httptools.downloadpage(item.url).data
    if "Ver colecciones del usuario" not in item.title and not item.index:
        data = jsontools.load_json(data)["Data"]
        content = data["Content"]
        content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)
    else:
        usuario = True
        if item.follow:
            content = scrapertools.find_single_match(
                data,
                'id="followed_collections"(.*?)<div id="recommended_collections"'
            )
        else:
            content = scrapertools.find_single_match(
                data,
                '<div id="collections".*?<div class="collections_list(.*?)<div class="collections_list'
            )
        content = re.sub(r"\n|\r|\t|\s{2}|&nbsp;|<br>", "", content)

    patron = '<a class="name" href="([^"]+)".*?>([^<]+)<.*?src="([^"]+)".*?<p class="info">(.*?)</p>'
    matches = scrapertools.find_multiple_matches(content, patron)

    index = ""
    if item.index and item.index != "0":
        matches = matches[item.index:item.index + 20]
        if len(matches) > item.index + 20:
            index = item.index + 20
    elif len(matches) > 20:
        matches = matches[:20]
        index = 20

    folder = filetools.join(config.get_data_path(), 'thumbs_copiapop')
    for url, scrapedtitle, thumb, info in matches:
        url = item.extra + url + "/gallery,1,1?ref=pager"
        title = "%s  (%s)" % (scrapedtitle, scrapertools.htmlclean(info))
        try:
            scrapedthumbnail = filetools.join(
                folder, "%s.jpg" % thumb.split("e=", 1)[1][-20:])
        except:
            try:
                scrapedthumbnail = filetools.join(
                    folder, "%s.jpg" % thumb.split("/thumbnail/", 1)[1][-20:])
                thumb = thumb.replace("/thumbnail/", "/")
            except:
                scrapedthumbnail = ""
        if scrapedthumbnail:
            t = threading.Thread(target=download_thumb,
                                 args=[scrapedthumbnail, thumb])
            t.setDaemon(True)
            t.start()
        else:
            scrapedthumbnail = thumb

        itemlist.append(
            Item(channel=item.channel,
                 action="listado",
                 title=title,
                 url=url,
                 thumbnail=scrapedthumbnail,
                 text_color=color2,
                 extra=item.extra,
                 foldername=scrapedtitle))

    if not usuario and data.get("NextPageUrl"):
        url = item.extra + data["NextPageUrl"]
        itemlist.append(
            item.clone(title=">> Página Siguiente", url=url, text_color=""))
    elif index:
        itemlist.append(
            item.clone(title=">> Página Siguiente",
                       url=item.url,
                       index=index,
                       text_color=""))

    return itemlist
Esempio n. 42
0
def get_video_url(page_url, premium=False, video_password=""):
    logger.info(
        "pelisalacarta.servers.realdebrid get_video_url( page_url='%s' , video_password=%s)"
        % (page_url, video_password))

    # Check whether a saved token exists; otherwise run the authentication process
    token_auth = channeltools.get_channel_setting("realdebrid_token",
                                                  "realdebrid")
    if token_auth is None or token_auth == "":
        if config.is_xbmc():
            token_auth = authentication()
            if token_auth == "":
                return [[
                    "REAL-DEBRID: No se ha completado el proceso de autentificación",
                    ""
                ]]
        else:
            return [[
                "Es necesario activar la cuenta. Accede al menú de ayuda", ""
            ]]

    post_link = urllib.urlencode([("link", page_url),
                                  ("password", video_password)])
    headers["Authorization"] = "Bearer %s" % token_auth
    url = "https://api.real-debrid.com/rest/1.0/unrestrict/link"
    data = scrapertools.downloadpage(url,
                                     post=post_link,
                                     headers=headers.items())
    data = jsontools.load_json(data)

    # If the token is invalid or has expired, request a new one
    if "error" in data and data["error"] == "bad_token":
        debrid_id = channeltools.get_channel_setting("realdebrid_id",
                                                     "realdebrid")
        secret = channeltools.get_channel_setting("realdebrid_secret",
                                                  "realdebrid")
        refresh = channeltools.get_channel_setting("realdebrid_refresh",
                                                   "realdebrid")

        post_token = urllib.urlencode({
            "client_id": debrid_id,
            "client_secret": secret,
            "code": refresh,
            "grant_type": "http://oauth.net/grant_type/device/1.0"
        })
        renew_token = scrapertools.downloadpage(
            "https://api.real-debrid.com/oauth/v2/token",
            post=post_token,
            headers=headers.items())
        renew_token = jsontools.load_json(renew_token)
        if not "error" in renew_token:
            token_auth = renew_token["access_token"]
            channeltools.set_channel_setting("realdebrid_token", token_auth,
                                             "realdebrid")
            headers["Authorization"] = "Bearer %s" % token_auth
            data = scrapertools.downloadpage(url,
                                             post=post_link,
                                             headers=headers.items())
            data = jsontools.load_json(data)

    if "download" in data:
        return get_enlaces(data)
    else:
        if "error" in data:
            msg = data["error"].decode("utf-8", "ignore")
            msg = msg.replace("hoster_unavailable", "Servidor no disponible") \
                     .replace("unavailable_file", "Archivo no disponible") \
                     .replace("hoster_not_free", "Servidor no gratuito") \
                     .replace("bad_token", "Error en el token")
            return [["REAL-DEBRID: " + msg, ""]]
        else:
            return [["REAL-DEBRID: No se ha generado ningún enlace", ""]]
Esempio n. 43
0
def findvideos(item):
    logger.info()
    itemlist = []

    if not item.video_urls:
        data = httptools.downloadpage(item.url)
        if not data.sucess:
            itemlist.append(
                item.clone(title="Película no disponible", action=""))
            return itemlist
        data = jsontools.load_json(data.data)

        item.video_urls = []
        for k, v in data.get("video", {}).items():
            for vid in v:
                item.video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

    if item.video_urls:
        import random
        import base64
        item.video_urls.sort(key=lambda it: (it[1], random.random()),
                             reverse=True)
        i = 0
        calidad_actual = ""
        for vid, calidad in item.video_urls:
            title = "Ver vídeo en %sp" % calidad
            if calidad != calidad_actual:
                i = 0
                calidad_actual = calidad

            if i % 2 == 0:
                title += " [COLOR purple]Mirror %s[/COLOR] - %s" % (
                    str(i + 1), item.fulltitle)
            else:
                title += " [COLOR green]Mirror %s[/COLOR] - %s" % (
                    str(i + 1), item.fulltitle)
            url = vid % "%s" % base64.b64decode(
                "dHQ9MTQ4MDE5MDQ1MSZtbT1NRzZkclhFand6QmVzbmxSMHNZYXhBJmJiPUUwb1dVVVgx"
                "WTBCQTdhWENpeU9paUE=")
            itemlist.append(
                item.clone(title=title,
                           action="play",
                           url=url,
                           server="directo",
                           video_urls=""))
            i += 1

        if itemlist and item.extra == "" and config.get_library_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir enlaces a la biblioteca",
                     text_color=color5,
                     contentTitle=item.fulltitle,
                     url=item.url,
                     action="add_pelicula_to_library",
                     infoLabels={'title': item.fulltitle},
                     extra="findvideos",
                     fulltitle=item.fulltitle))

    return itemlist
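
The "?%s" placeholder left in each video_urls entry is filled with a base64-decoded query string right before playback; a standalone sketch using the (long expired) token embedded in the snippet above:

import base64

url_template = "http://s1.pelisipad.com/s/transcoder/some/video.mp4?%s"  # hypothetical URL
token = base64.b64decode("dHQ9MTQ4MDE5MDQ1MSZtbT1NRzZkclhFand6QmVzbmxSMHNZYXhBJmJiPUUwb1dVVVgx"
                         "WTBCQTdhWENpeU9paUE=")
print(url_template % token)  # ...video.mp4?tt=1480190451&mm=...&bb=...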
Esempio n. 44
0
def enlaces(item):
    logger.info("deportesalacarta.topbongda scraper")
    
    itemlist = []
    
    # Descarga la página
    
    if "sopcast" in item.url:
        data = get_page(item.url)
        try:
           eid = scrapertools.get_match(data,'http.get.*?eid=(.*?)"')
        
        
           url ="http://topbongda.com/xem-bong-da-truc-tuyen/api/link/?eid="+ eid
           
           data = get_page(url)
           data = jsontools.load_json(data)
           sop = data['sop']
          
           if sop:
              tipo = "[COLOR aquamarine][B]Sopcast[/B][/COLOR]"
              thumbnail= "http://s6.postimg.org/v9z5ggmfl/sopcast.jpg"
            
              itemlist.append( Item(channel=__channel__,title=tipo.strip(), url="",action="mainlist",thumbnail=thumbnail, fanart= "http://s6.postimg.org/6756rs973/topbongda.jpg",folder=False) )
              for sop in data["sop"]:
                  no_sop = "false"
                  url = sop['url']
                  bibrate = sop['bitrate']
                  languaje =sop['language']
                  if languaje == '':
                      languaje ="Desconocido"
            
                  title = languaje.strip()
                  title = "[COLOR darkolivegreen][B]"+title+"[/B][/COLOR]"
                  if str(bibrate) != "0":
                     title = title +"  "+ "[COLOR palegreen]"+"("+str(bibrate)+" Kbps"+")"+"[/COLOR]"
                  itemlist.append( Item(channel=__channel__, title="        "+title,action="play",url =url,thumbnail= thumbnail,fanart="http://s6.postimg.org/6756rs973/topbongda.jpg",fulltitle = item.fulltitle, folder=True))
           else :
                 no_sop = "true"
           ace = data['ace']
           if ace:
               no_ace= "false"
               tipo = "[COLOR yellow][B]Acestream[/B][/COLOR]"
               thumbnail= "http://s6.postimg.org/c2c0jv441/torrent_stream_logo_300x262.png"
               itemlist.append( Item(channel=__channel__,title=tipo.strip(), url="",action="mainlist",thumbnail=thumbnail, fanart= "http://s6.postimg.org/6756rs973/topbongda.jpg",folder=False) )
               
               for ace in data["ace"]:
            
                   url = ace['url']
                   bibrate = ace['bitrate']
                   languaje =ace['language']
                   if languaje == '':
                      languaje ="Desconocido"
                   title = languaje.strip()
                   title = "[COLOR darkolivegreen][B]"+title+"[/B][/COLOR]"
            
            
                   if str(bibrate) != "0":
                      title = title +"  "+ "[COLOR palegreen]"+"("+str(bibrate)+" Kbps"+")"+"[/COLOR]"
                
                   itemlist.append( Item(channel=__channel__, title="        "+title,action="play",url =url,thumbnail= thumbnail,fanart="http://s6.postimg.org/6756rs973/topbongda.jpg",fulltitle = item.fulltitle, folder=True) )
           else :
                no_ace = "true"

           if no_sop == "true" and no_ace =="true":
              title ="No hay ningun enlace Sopcast / Acestream".title()
              itemlist.append( Item(channel=__channel__,title="[COLOR limegreen][B]"+title+"[/B][/COLOR]", url="",action="mainlist",fanart="http://s6.postimg.org/unwjdqopd/topbongdafannolink.jpg",thumbnail="http://s6.postimg.org/m6x12tk0h/topbongdathumbnolink.png", folder=False) )
              
        except:
            xbmc.executebuiltin('Action(Back)')
            xbmc.sleep(100)
            xbmc.executebuiltin('Notification([COLOR palegreen][B]Partido[/B][/COLOR], [COLOR yellow][B]'+'sin enlaces'.upper()+'[/B][/COLOR],4000,"http://s6.postimg.org/ke8bfk7f5/topbongda.png")')
        
        
    else:

        check_hour = scrapertools.get_match(item.extra.split("|")[0], '(\d)\d:\d+')

        import time
        from time import strftime
        get_date = strftime("%Y-%m-%d %H:%M:%S", time.localtime())

        ok_date_hour = re.compile('(\d+)-(\d+)-(\d+) (\d+:\d+:\d+)', re.DOTALL).findall(get_date)
        for year, mes, day, hour in ok_date_hour:
            current_day = day + "/" + mes + "/" + year
            current_hour = hour

        today = scrapertools.get_match(current_day, '(\d+)/\d+/\d+')
        dia_match = scrapertools.get_match(item.extra.split("|")[1], '(\d+)/\d+/\d+')
        check_day = int(dia_match) - int(today)

        check_match_hour = scrapertools.get_match(item.extra.split("|")[0], '(\d+):\d+')
        check_today_hour = scrapertools.get_match(current_hour, '(\d+):\d+')

        if item.extra.split("|")[1] == current_day or check_day <= 2:

            now_parts = re.compile('(\d+):(\d+):(\d+)', re.DOTALL).findall(get_date)
            for h, m, s in now_parts:
                hora = h
                min_now = m

            time_match = re.compile('(\d+):(\d+)', re.DOTALL).findall(item.extra.split("|")[0])
            for h, m in time_match:
                check_time_match = scrapertools.get_match(h, '(\d)\d')
                if "0" in str(check_hour) and str(check_match_hour) <= str(check_today_hour) \
                        or str(check_match_hour) < str(check_today_hour):
                    h = 24 + int(h)

                hora_match = h
                min_match = m
                remaining = int(hora_match) - int(hora)

                if min_now != "00":
                    correct_min = (60 - int(min_now)) * 60
                    remaining = (int(remaining) - 1) * 3600
                    remaining = remaining + correct_min + (int(min_match) * 60)
                else:
                    remaining = (remaining * 3600) + int(min_match)

                num = int(remaining)
                hor = int(num / 3600)
                minu = int((num - (hor * 3600)) / 60)

                remaining = str(hor) + "h " + str(minu) + "m "
                if check_day == 0 and check_match_hour == check_today_hour:
                    remaining = str(minu) + "m "
                if check_day == 1 and check_match_hour >= check_today_hour \
                        or check_day == 2 and check_match_hour <= check_today_hour:
                    if check_match_hour == check_today_hour:
                        remaining = "23h " + str(minu) + "m "
                    else:
                        if "0h" in remaining:
                            remaining = re.sub(r"0h", "", remaining)
                        remaining = "1d " + remaining
                elif check_day == 2:
                    remaining = "2d " + remaining

        else:

            if check_day >= 3 and str(check_match_hour) >= str(check_today_hour):
                remaining = str(day) + " dias"
            else:
                now_parts = re.compile('(\d+):(\d+):(\d+)', re.DOTALL).findall(get_date)
                for h, m, s in now_parts:
                    hora = h
                    min_now = m
                time_match = re.compile('(\d+):(\d+)', re.DOTALL).findall(item.extra.split("|")[0])
                for h, m in time_match:
                    check_time_match = scrapertools.get_match(h, '(\d)\d')
                    h = 24 + int(h)
                    hora_match = h
                    min_match = m
                    remaining = int(hora_match) - int(hora)
                    if min_now != "00":
                        correct_min = (60 - int(min_now)) * 60
                        remaining = (int(remaining) - 1) * 3600
                        remaining = remaining + correct_min + (int(min_match) * 60)
                    else:
                        remaining = (remaining * 3600) + int(min_match) * 60

                    num = int(remaining)
                    hor = int(num / 3600)
                    minu = int((num - (hor * 3600)) / 60)
                    remaining = "2d" + str(hor) + "h " + str(minu) + "m "

        no_link = "Aun no hay enlaces".title()
        itemlist.append(Item(channel=__channel__,
                             title="               " + "[COLOR springgreen]" + no_link + "[/COLOR]",
                             url="", action="mainlist",
                             fanart="http://s6.postimg.org/mktb5axsh/topbongdafantime.jpg",
                             thumbnail="http://s6.postimg.org/ippx2qemp/topbongdathumbtime.png",
                             folder=False))
        itemlist.append(Item(channel=__channel__,
                             title="                                            "
                                   + "[COLOR lawngreen]Disponibles en[/COLOR]" + "  "
                                   + "[COLOR palegreen][B]" + str(remaining) + "[/B][/COLOR]",
                             url="", action="mainlist",
                             fanart="http://s6.postimg.org/mktb5axsh/topbongdafantime.jpg",
                             thumbnail="http://s6.postimg.org/ippx2qemp/topbongdathumbtime.png",
                             folder=False))
    return itemlist
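
The hand-rolled countdown arithmetic above can be expressed far more compactly with datetime; a hedged, behavior-approximate sketch for the HH:MM|DD/MM/YYYY format carried in item.extra:

from datetime import datetime

extra = "21:45|07/05/2016"  # hypothetical item.extra value
hour_s, date_s = extra.split("|")
kickoff = datetime.strptime(date_s + " " + hour_s, "%d/%m/%Y %H:%M")
delta = kickoff - datetime.now()
total_min = int(delta.total_seconds() // 60)
if total_min > 0:
    days, rest = divmod(total_min, 24 * 60)
    hours, minutes = divmod(rest, 60)
    print(("%dd " % days if days else "") + "%dh %dm" % (hours, minutes))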
Esempio n. 45
0
def nuevos_cap(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load_json(data)
    capitulos = []
    if "Nuevas" in item.title:
        for child in data["b"]:
            capitulos.append([child["season"], child])
    else:
        for child in data["a"]:
            capitulos.append(['', child])

    for season, child in capitulos:
        infoLabels = item.infoLabels
        if child.get('runtime'):
            try:
                infoLabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if not season:
            season = scrapertools.find_single_match(child['name'], '(\d+)x\d+')
        try:
            infoLabels['season'] = int(season)
        except:
            infoLabels['season'] = 0

        if "Nuevos" in item.title:
            if not child['episode']:
                episode = scrapertools.find_single_match(
                    child['name'], '\d+x(\d+)')
                if not episode:
                    episode = "0"
                infoLabels['episode'] = int(episode)
            else:
                infoLabels['episode'] = int(child['episode'])
            infoLabels['mediatype'] = "episode"

        if "Nuevos" in item.title:
            url = host % "movie/%s/movie.js" % child["id"]
            action = "findvideos"
            thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
            fanart = item.fanart
        else:
            url = host % "list/%s" % child["season"] + ext
            action = "episodios"
            thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
            fanart = host % "list/%s/background_1080.jpg" % child["id"]

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        if "Nuevos" in item.title:
            title = fulltitle = child['name'].rsplit(
                " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
        else:
            title = fulltitle = child['name']

        itemlist.append(
            Item(channel=item.channel,
                 action=action,
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 contentTitle=fulltitle,
                 viewmode="movie",
                 show=item.fulltitle,
                 infoLabels=infoLabels,
                 video_urls=video_urls,
                 extra="nuevos",
                 text_color=color3))

    return itemlist
Esempio n. 46
0
def authentication():
    logger.info("pelisalacarta.servers.realdebrid authentication")
    try:
        client_id = "YTWNFBIJEEBP6"

        # Request the URL and verification code to grant the app permission
        url = "http://api.real-debrid.com/oauth/v2/device/code?client_id=%s&new_credentials=yes" % (
            client_id)
        data = scrapertools.downloadpage(url, headers=headers.items())
        data = jsontools.load_json(data)
        verify_url = data["verification_url"]
        user_code = data["user_code"]
        device_code = data["device_code"]
        intervalo = data["interval"]

        dialog_auth = platformtools.dialog_progress(
            "Autentificación. No cierres esta ventana!!",
            "1. Entra en la siguiente url: %s" % verify_url,
            "2. Ingresa este código en la página y presiona Allow:  %s" %
            user_code, "3. Espera a que se cierre esta ventana")

        # Roughly every 5 seconds, check whether the user has entered the code
        while True:
            time.sleep(intervalo)
            try:
                if dialog_auth.iscanceled():
                    return ""

                url = "https://api.real-debrid.com/oauth/v2/device/credentials?client_id=%s&code=%s" \
                      % (client_id, device_code)
                data = scrapertools.downloadpage(url, headers=headers.items())
                data = jsontools.load_json(data)
                if "client_secret" in data:
                    # Code entered, exit the loop
                    break
            except:
                pass

        try:
            dialog_auth.close()
        except:
            pass

        debrid_id = data["client_id"]
        secret = data["client_secret"]

        # Request the access token and the refresh token for when the first one expires
        post = urllib.urlencode({
            "client_id": debrid_id,
            "client_secret": secret,
            "code": device_code,
            "grant_type": "http://oauth.net/grant_type/device/1.0"
        })
        data = scrapertools.downloadpage(
            "https://api.real-debrid.com/oauth/v2/token",
            post=post,
            headers=headers.items())
        data = jsontools.load_json(data)

        token = data["access_token"]
        refresh = data["refresh_token"]

        channeltools.set_channel_setting("realdebrid_id", debrid_id,
                                         "realdebrid")
        channeltools.set_channel_setting("realdebrid_secret", secret,
                                         "realdebrid")
        channeltools.set_channel_setting("realdebrid_token", token,
                                         "realdebrid")
        channeltools.set_channel_setting("realdebrid_refresh", refresh,
                                         "realdebrid")

        return token
    except:
        import traceback
        logger.error(traceback.format_exc())
        return ""
Esempio n. 47
0
def fichas(item):
    logger.info("pelisalacarta.channels.hdfull series")
    itemlist = []

    ## Load statuses
    status = jsontools.load_json(
        scrapertools.cache_page(host + '/a/status/all'))

    if item.title == "Buscar...":
        data = agrupa_datos(scrapertools.cache_page(item.url, post=item.extra))

        s_p = scrapertools.get_match(
            data,
            '<h3 class="section-title">(.*?)<div id="footer-wrapper">').split(
                '<h3 class="section-title">')

        if len(s_p) == 1:
            data = s_p[0]
            if 'Lo sentimos</h3>' in s_p[0]:
                return [
                    Item(
                        channel=__channel__,
                        title=bbcode_kodi2html(
                            "[COLOR gold][B]HDFull:[/B][/COLOR] [COLOR blue]" +
                            texto.replace('%20', ' ') +
                            "[/COLOR] sin resultados"))
                ]
        else:
            data = s_p[0] + s_p[1]
    else:
        data = agrupa_datos(scrapertools.cache_page(item.url))

    data = re.sub(
        r'<div class="span-6[^<]+<div class="item"[^<]+' + \
         '<a href="([^"]+)"[^<]+' + \
         '<img.*?src="([^"]+)".*?' + \
         '<div class="left"(.*?)</div>' + \
         '<div class="right"(.*?)</div>.*?' + \
         'title="([^"]+)".*?' + \
         'onclick="setFavorite.\d, (\d+),',
         r"'url':'\1';'image':'\2';'langs':'\3';'rating':'\4';'title':\5;'id':'\6';",
        data
    )

    patron = "'url':'([^']+)';'image':'([^']+)';'langs':'([^']+)';'rating':'([^']+)';'title':([^;]+);'id':'([^']+)';"

    matches = re.compile(patron, re.DOTALL).findall(data)

    for scrapedurl, scrapedthumbnail, scrapedlangs, scrapedrating, scrapedtitle, scrapedid in matches:

        thumbnail = scrapedthumbnail.replace("/tthumb/130x190/", "/thumbs/")

        title = scrapedtitle.strip()
        show = title

        if scrapedlangs != ">":
            textoidiomas = extrae_idiomas(scrapedlangs)
            title += bbcode_kodi2html(" ( [COLOR teal][B]" + textoidiomas +
                                      "[/B][/COLOR])")

        if scrapedrating != ">":
            valoracion = re.sub(r'><[^>]+>(\d+)<b class="dec">(\d+)</b>',
                                r'\1,\2', scrapedrating)
            title += bbcode_kodi2html(" ([COLOR orange]" + valoracion +
                                      "[/COLOR])")

        url = urlparse.urljoin(item.url, scrapedurl)

        if "/serie" in url or "/tags-tv" in url:
            action = "episodios"
            url += "###" + scrapedid + ";1"
            type = "shows"
        else:
            action = "findvideos"
            url += "###" + scrapedid + ";2"
            type = "movies"

        str = get_status(status, type, scrapedid)
        if str != "": title += str

        if item.title == "Buscar...":
            tag_type = scrapertools.get_match(url, 'l.tv/([^/]+)/')
            title += bbcode_kodi2html(" - [COLOR blue]" +
                                      tag_type.capitalize() + "[/COLOR]")

        itemlist.append(
            Item(channel=__channel__,
                 action=action,
                 title=title,
                 url=url,
                 fulltitle=title,
                 thumbnail=thumbnail,
                 show=show,
                 folder=True))

    ## Pagination
    next_page_url = scrapertools.find_single_match(
        data, '<a href="([^"]+)">.raquo;</a>')
    if next_page_url != "":
        itemlist.append(
            Item(channel=__channel__,
                 action="fichas",
                 title=">> Página siguiente",
                 url=urlparse.urljoin(item.url, next_page_url),
                 folder=True))

    return itemlist
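
A note on the parsing strategy in fichas: the large re.sub rewrites each HTML card into one normalized 'key':'value'; line so a single patron can pull every field out; the same trick in miniature:

import re

html = '<div class="item"><a href="/pelicula/x"><img src="/thumbs/x.jpg"></a></div>'
norm = re.sub(r'<a href="([^"]+)"[^<]*<img src="([^"]+)">',
              r"'url':'\1';'image':'\2';", html)
print(re.findall(r"'url':'([^']+)';'image':'([^']+)';", norm))
# [('/pelicula/x', '/thumbs/x.jpg')]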
Esempio n. 48
0
def listas(item):
    logger.info("pelisalacarta.channels.tvvip listas")
    # For adding lists to the library in the CINE folder
    itemlist = []
    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load_json(data)
    head = header_string + get_cookie_value()
    error = False
    for child in data["sortedRepoChilds"]:
        infolabels = {}

        # Fanart
        if child['hashBackground']:
            fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child[
                "id"]
        else:
            fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child[
                "id"]
        # Thumbnail
        if child['hasPoster']:
            thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child[
                "id"]
        else:
            thumbnail = fanart
        thumbnail += head
        fanart += head

        url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
        if child['name'] == "":
            title = scrapertools.slugify(child['id'].rsplit(".", 1)[0])
        else:
            title = scrapertools.slugify(child['name'])
        title = title.replace('-', ' ').replace('_', ' ').capitalize()
        infolabels['title'] = title
        try:
            from core import library
            new_item = item.clone(title=title,
                                  url=url,
                                  fulltitle=title,
                                  fanart=fanart,
                                  extra="findvideos",
                                  thumbnail=thumbnail,
                                  infoLabels=infolabels,
                                  category="Cine")
            library.add_pelicula_to_library(new_item)
            error = False
        except:
            error = True
            import traceback
            logger.info(traceback.format_exc())

    if not error:
        itemlist.append(
            Item(channel=item.channel,
                 title='Lista añadida correctamente a la biblioteca',
                 action="",
                 folder=False))
    else:
        itemlist.append(
            Item(
                channel=item.channel,
                title='ERROR. Han ocurrido uno o varios errores en el proceso',
                action="",
                folder=False))

    return itemlist
Esempio n. 49
0
def busqueda(item, texto):
    logger.info("pelisalacarta.channels.tvvip busqueda")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load_json(data)
    head = header_string + get_cookie_value()
    for child in data["objectList"]:
        infolabels = {}

        infolabels['year'] = child['year']
        if child['tags']:
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rate'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        if child['cast']: infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']

        if 'playListChilds' not in child:
            infolabels['plot'] = child['description']
            type = "repo"
            fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')) \
                                   .encode('ASCII', 'ignore').decode("utf-8")
            title = child['name']
            infolabels['duration'] = child['duration']
            if child['height'] < 720:
                quality = "[B]  [SD][/B]"
            elif child['height'] < 1080:
                quality = "[B]  [720p][/B]"
            elif child['height'] >= 1080:
                quality = "[B]  [1080p][/B]"
            if child['name'] == "":
                title = child['id'].rsplit(".", 1)[0]
            else:
                title = child['name']
            if child['year']:
                title += " (" + child['year'] + ")"
            title += quality
        else:
            type = "playlist"
            infolabels['plot'] = "Contiene:\n" + "\n".join(
                child['playListChilds']) + "\n".join(child['repoChilds'])
            fulltitle = child['id']
            title = "[COLOR red][LISTA][/COLOR] " + child['id'].replace('-', ' ').capitalize() + " ([COLOR gold]" + \
                    str(child['number']) + "[/COLOR])"

        # For a global search, filter the results
        if item.extra != "local":
            if "+" in texto: texto = "|".join(texto.split("+"))
            if not re.search(r'(?i)' + texto, title, flags=re.DOTALL): continue

        url = "http://tv-vip.com/json/%s/%s/index.json" % (type, child["id"])
        # Fanart
        if child['hashBackground']:
            fanart = "http://tv-vip.com/json/%s/%s/background.jpg" % (
                type, child["id"])
        else:
            fanart = "http://tv-vip.com/json/%s/%s/thumbnail.jpg" % (
                type, child["id"])
        # Thumbnail
        if child['hasPoster']:
            thumbnail = "http://tv-vip.com/json/%s/%s/poster.jpg" % (
                type, child["id"])
        else:
            thumbnail = fanart
        thumbnail += head
        fanart += head

        if type == 'playlist':
            itemlist.insert(
                0,
                Item(channel=item.channel,
                     action="entradasconlistas",
                     title=bbcode_kodi2html(title),
                     url=url,
                     thumbnail=thumbnail,
                     fanart=fanart,
                     fulltitle=fulltitle,
                     infoLabels=infolabels,
                     viewmode="movie_with_plot",
                     folder=True))
        else:
            itemlist.append(
                Item(channel=item.channel,
                     action="findvideos",
                     title=bbcode_kodi2html(title),
                     url=url,
                     thumbnail=thumbnail,
                     fanart=fanart,
                     fulltitle=fulltitle,
                     contentTitle=fulltitle,
                     context="05",
                     infoLabels=infolabels,
                     viewmode="movie_with_plot",
                     folder=True))

    return itemlist
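
The global-search filter above just rewrites a+b queries as the regex alternation a|b and greps the titles; in isolation:

import re

texto = "star+wars"  # hypothetical search text
if "+" in texto:
    texto = "|".join(texto.split("+"))
titles = ["Star Wars (1977)", "Alien"]
print([t for t in titles if re.search(r"(?i)" + texto, t, flags=re.DOTALL)])
# ['Star Wars (1977)']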
Esempio n. 50
0
def entradas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load_json(data)

    if "Destacados" in item.title:
        itemlist.append(
            item.clone(
                title=
                "Aviso: Si una película no tiene (imagen/carátula) NO va a funcionar",
                action="",
                text_color=color4))

    for child in data["a"]:
        infolabels = {}

        infolabels['originaltitle'] = child['originalTitle']
        infolabels['plot'] = child['description']
        infolabels['year'] = child['year']
        if child.get('tags'):
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rateHuman'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        if child.get('runtime'):
            try:
                infolabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = host % "movie/%s/movie.js" % child["id"]
        # Fanart
        fanart = host % "movie/%s/background_480.jpg" % child["id"]
        if child.get("episode"):
            thumbnail = host % "movie/%s/thumbnail_200x112.jpg" % child["id"]
        else:
            thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]

        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        elif child['height'] >= 1080:
            quality = "[B]  [1080p][/B]"
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
                               .decode("utf-8")
        if child['name'] == "":
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 server="",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 video_urls=video_urls,
                 text_color=color3))

    return itemlist
Esempio n. 51
0
def findvideos(item):
    logger.info("pelisalacarta.channels.tvvip findvideos")
    itemlist = []

    # If the function is called from a series in the library
    if item.extra.startswith("http"): item.url = item.extra
    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load_json(data)
    video_id = urllib.quote(data['id'])
    for child in data["profiles"].keys():
        videopath = urllib.quote(data["profiles"][child]['videoUri'])
        for i, server in enumerate(data["profiles"][child]['servers']):
            url = server['url'] + videopath
            size = "  " + data["profiles"][child]["sizeHuman"]
            resolution = " [" + data["profiles"][child]['videoResolution'] + "]"
            title = "Ver vídeo en " + resolution.replace('1920x1080', 'HD-1080p')
            if i == 0:
                title += size + " [COLOR purple]Mirror " + str(i + 1) + "[/COLOR]"
            else:
                title += size + " [COLOR green]Mirror " + str(i + 1) + "[/COLOR]"
            # Put higher-quality links at the start of the list
            if data["profiles"][child]["profileId"] == "default":
                itemlist.insert(
                    i,
                    item.clone(action="play",
                               server="directo",
                               title=bbcode_kodi2html(title),
                               url=url,
                               contentTitle=item.fulltitle,
                               viewmode="list",
                               extra=video_id,
                               folder=False))
            else:
                itemlist.append(
                    item.clone(action="play",
                               server="directo",
                               title=bbcode_kodi2html(title),
                               url=url,
                               contentTitle=item.fulltitle,
                               viewmode="list",
                               extra=video_id,
                               folder=False))

    itemlist.append(
        item.clone(channel="trailertools",
                   action="buscartrailer",
                   title="Buscar Tráiler",
                   text_color="magenta"))
    if len(itemlist) > 0 and item.extra == "":
        if config.get_library_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir enlaces a la biblioteca",
                     text_color="green",
                     contentTitle=item.fulltitle,
                     url=item.url,
                     action="add_pelicula_to_library",
                     infoLabels={'title': item.fulltitle},
                     extra="findvideos",
                     fulltitle=item.fulltitle))

    return itemlist
Esempio n. 52
0
def entradasconlistas(item):
    logger.info()
    itemlist = []

    data = httptools.downloadpage(item.url).data
    data = jsontools.load_json(data)

    # If there is any list
    contentSerie = False
    contentList = False
    if data.get('b'):
        for child in data['b']:
            infolabels = {}

            infolabels['originaltitle'] = child['originalTitle']
            infolabels['plot'] = child['description']
            infolabels['year'] = data['year']
            if child.get('tags'):
                infolabels['genre'] = ', '.join(
                    [x.strip() for x in child['tags']])
            infolabels['rating'] = child['rateHuman'].replace(',', '.')
            infolabels['votes'] = child['rateCount']
            if child.get('runtime'):
                try:
                    infolabels['duration'] = int(child['runtime'].replace(
                        " min.", "")) * 60
                except:
                    pass
            if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
            infolabels['director'] = child['director']
            season = child.get('season', '')
            if season.isdigit() and not contentList:
                contentSerie = True
                action = "episodios"
            else:
                contentSerie = False
                contentList = True
                action = "entradasconlistas"

            url = host % "list/%s" % child["id"] + ext
            title = re.sub(r"(\w)-(\w)", r'\g<1> \g<2>', child['name'])
            fulltitle = re.sub(r"(\w)-(\w)", r'\g<1> \g<2>', child['name'])
            if not title:
                title = re.sub(r"(\w)-(\w)", r'\g<1> \g<2>', child['id'])
                fulltitle = re.sub(r"(\w)-(\w)", r'\g<1> \g<2>', child['id'])
            title = unicode(title, "utf-8").capitalize().encode("utf-8")
            fulltitle = unicode(fulltitle,
                                "utf-8").capitalize().encode("utf-8")
            show = ""
            if contentSerie:
                title += " (Serie TV)"
                show = fulltitle
            thumbnail = host % "list/%s/thumbnail_167x250.jpg" % child["id"]
            fanart = host % "list/%s/background_1080.jpg" % child["id"]

            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=title,
                     url=url,
                     thumbnail=thumbnail,
                     fanart=fanart,
                     fulltitle=fulltitle,
                     show=show,
                     infoLabels=infolabels,
                     contentTitle=fulltitle,
                     viewmode="movie_with_plot",
                     text_color=color3))
    else:
        contentList = True

    if contentSerie and itemlist:
        itemlist.sort(key=lambda it: it.infoLabels['season'], reverse=True)

    if itemlist:
        itemlist.insert(
            0,
            Item(channel=item.channel,
                 title="**LISTAS**",
                 action="",
                 text_color=color4,
                 text_blod=True,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    if data.get("a") and itemlist:
        itemlist.append(
            Item(channel=item.channel,
                 title="**VÍDEOS**",
                 action="",
                 text_color=color6,
                 text_blod=True,
                 thumbnail=item.thumbnail,
                 fanart=item.fanart))

    for child in data.get("a", []):
        infolabels = {}

        infolabels['originaltitle'] = child['originalTitle']
        infolabels['plot'] = child['description']
        infolabels['year'] = data['year']
        if child.get('tags'):
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rateHuman'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        if child.get('runtime'):
            try:
                infolabels['duration'] = int(child['runtime'].replace(
                    " min.", "")) * 60
            except:
                pass
        if child.get('cast'): infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = host % "movie/%s/movie.js" % child["id"]
        # Fanart
        fanart = host % "movie/%s/background_1080.jpg" % child["id"]
        if child.get("episode"):
            thumbnail = host % "movie/%s/thumbnail.jpg" % child["id"]
        else:
            thumbnail = host % "movie/%s/poster_167x250.jpg" % child["id"]

        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        elif child['height'] >= 1080:
            quality = "[B]  [1080p][/B]"
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
                               .decode("utf-8")
        if not child['name']:
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        video_urls = []
        for k, v in child.get("video", {}).items():
            for vid in v:
                video_urls.append([
                    "http://%s.pelisipad.com/s/transcoder/%s" %
                    (vid["server"], vid["url"]) + "?%s", vid["height"]
                ])

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=title,
                 url=url,
                 video_urls=video_urls,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 viewmode="movie_with_plot",
                 text_color=color3))

    # Add an item to add the list of videos to the library
    if data.get('a') and itemlist and contentList and config.get_library_support():
        itemlist.append(
            Item(channel=item.channel,
                 text_color=color5,
                 title="Añadir esta lista a la biblioteca",
                 url=item.url,
                 action="listas"))
    elif contentSerie and config.get_library_support():
        itemlist.append(
            Item(channel=item.channel,
                 title="Añadir esta serie a la biblioteca",
                 text_color=color5,
                 url=item.url,
                 action="add_serie_to_library",
                 show=item.show,
                 fulltitle=item.fulltitle,
                 extra="episodios"))

    return itemlist
Esempio n. 53
0
def episodios(item):
    logger.info("pelisalacarta.channels.tvvip episodios")
    logger.info("categoriaaa es " + item.tostring())
    itemlist = []
    # Redirect for library updating
    if item.extra == "series_library":
        itemlist = series_library(item)
        return itemlist

    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load_json(data)
    head = header_string + get_cookie_value()
    # Try one method or the other, since some series are not listed properly
    if data["sortedRepoChilds"]:
        for child in data["sortedRepoChilds"]:
            if item.infoLabels:
                item.infoLabels['duration'] = str(child['duration'])
                item.infoLabels['season'] = str(data['seasonNumber'])
                item.infoLabels['episode'] = str(child['episode'])
                item.infoLabels['mediatype'] = "episode"
            contentTitle = item.fulltitle + "|" + str(
                data['seasonNumber']) + "|" + str(child['episode'])
            # If coming from the "Nuevos Capítulos" section, redirect to the series function to show the rest
            if item.title == "Nuevos Capítulos":
                url = "http://tv-vip.com/json/playlist/%s/index.json" % child[
                    "season"]
                action = "series"
                extra = "new"
            else:
                url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
                action = "findvideos"
                extra = ""
            if child['hasPoster']:
                thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child[
                    "id"]
            else:
                thumbnail = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child[
                    "id"]
            thumbnail += head
            try:
                title = fulltitle = child['name'].rsplit(
                    " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
            except:
                title = fulltitle = child['id']
            itemlist.append(
                item.clone(action=action,
                           server="",
                           title=title,
                           url=url,
                           thumbnail=thumbnail,
                           fanart=item.fanart,
                           fulltitle=fulltitle,
                           contentTitle=contentTitle,
                           context="35",
                           viewmode="movie",
                           extra=extra,
                           show=item.fulltitle,
                           folder=True))
    else:
        for child in data["repoChilds"]:
            url = "http://tv-vip.com/json/repo/%s/index.json" % child
            if data['hasPoster']:
                thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child
            else:
                thumbnail = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child
            thumbnail += head
            title = fulltitle = child.capitalize().replace('_', ' ')
            itemlist.append(
                item.clone(action="findvideos",
                           server="",
                           title=title,
                           url=url,
                           thumbnail=thumbnail,
                           fanart=item.fanart,
                           fulltitle=fulltitle,
                           contentTitle=item.fulltitle,
                           context="25",
                           show=item.fulltitle,
                           folder=True))

    # Option to add single-season series to the library
    if itemlist and "---" not in item.title and item.title != "Nuevos Capítulos":
        if config.get_library_support() and item.show == "":
            if "-" in item.title:
                show = item.title.split('-')[0]
            else:
                show = item.title.split('(')[0]
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir esta serie a la biblioteca",
                     text_color="green",
                     url=item.url,
                     action="add_serie_to_library",
                     show=show,
                     extra="series_library"))
    return itemlist
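# Sketch (names hypothetical): the `head` suffix appended to poster and
# thumbnail URLs above follows Kodi's convention of passing HTTP headers
# after a '|' in an art URL, so the site's cookie travels with the image
# request.
import urllib

def art_url_with_headers(url, cookie_value):
    # e.g. "http://host/poster.jpg|Cookie=cf_clearance%3D..."
    return url + "|" + urllib.urlencode({"Cookie": cookie_value})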
Esempio n. 54
0
        try:
            req = urllib2.Request(url,
                                  data=jsontools.dump_json(params),
                                  headers=DEFAULT_HEADERS)
            response = urllib2.urlopen(req)
            html = response.read()
            response.close()

        except Exception, ex:
            message = "An exception of type %s occurred. Arguments:\n%s" % (
                type(ex).__name__, repr(ex.args))
            logger.error("error in: %s" % message)

        else:
            dict_html = jsontools.load_json(html)
            # logger.debug("dict_html %s" % dict_html)

            if "token" in dict_html:
                token = dict_html["token"]
                DEFAULT_HEADERS["Authorization"] = "Bearer " + token

                TOKEN = config.set_setting("tvdb_token", token)

    @classmethod
    def __refresh_token(cls):
        # logger.info()
        global TOKEN
        is_success = False

        url = HOST + "/refresh_token"
Esempio n. 55
0
def convert_old_to_v4():
    logger.info()
    path_series_xml = filetools.join(config.get_data_path(), "series.xml")
    path_series_json = filetools.join(config.get_data_path(), "series.json")
    series_insertadas = 0
    series_fallidas = 0
    version = 'v?'

    # Rename the Series folder and create an empty one
    import time
    new_name = "SERIES_OLD_" + str(time.time())
    path_series_old = filetools.join(library.LIBRARY_PATH, new_name)
    if filetools.rename(library.TVSHOWS_PATH, new_name):
        if not filetools.mkdir(library.TVSHOWS_PATH):
            logger.error(
                "ERROR, no se ha podido crear la nueva carpeta de SERIES")
            return False
    else:
        logger.error(
            "ERROR, no se ha podido renombrar la antigua carpeta de SERIES")
        return False

    # Convert library from v1 (xml) to v4
    if filetools.exists(path_series_xml):
        try:
            data = filetools.read(path_series_xml)
            for line in data.splitlines():
                try:
                    aux = line.rstrip('\n').split(",")
                    tvshow = aux[0].strip()
                    url = aux[1].strip()
                    channel = aux[2].strip()

                    serie = Item(contentSerieName=tvshow,
                                 url=url,
                                 channel=channel,
                                 action="episodios",
                                 title=tvshow,
                                 active=True)

                    patron = r"^(.+)\s\((\d{4})\)$"
                    matches = re.compile(patron, re.DOTALL).findall(
                        serie.contentSerieName)

                    if matches:
                        serie.infoLabels['title'] = matches[0][0]
                        serie.infoLabels['year'] = matches[0][1]
                    else:
                        serie.infoLabels['title'] = tvshow

                    insertados, sobreescritos, fallidos = library.save_library_tvshow(
                        serie, list())
                    if fallidos == 0:
                        series_insertadas += 1
                        platformtools.dialog_notification(
                            "Serie actualizada", serie.infoLabels['title'])
                    else:
                        series_fallidas += 1
                except:
                    series_fallidas += 1

            filetools.rename(path_series_xml, "series.xml.old")
            version = 'v4'

        except EnvironmentError:
            logger.error("ERROR al leer el archivo: %s" % path_series_xml)
            return False

    # Convert library from v2 (json) to v4
    if filetools.exists(path_series_json):
        try:
            data = jsontools.load_json(filetools.read(path_series_json))
            for tvshow in data:
                for channel in data[tvshow]["channels"]:
                    try:
                        serie = Item(
                            contentSerieName=data[tvshow]["channels"][channel]
                            ["tvshow"],
                            url=data[tvshow]["channels"][channel]["url"],
                            channel=channel,
                            action="episodios",
                            title=data[tvshow]["name"],
                            active=True)
                        if not tvshow.startswith("t_"):
                            serie.infoLabels["tmdb_id"] = tvshow

                        insertados, sobreescritos, fallidos = library.save_library_tvshow(
                            serie, list())
                        if fallidos == 0:
                            series_insertadas += 1
                            platformtools.dialog_notification(
                                "Serie actualizada", serie.infoLabels['title'])
                        else:
                            series_fallidas += 1
                    except:
                        series_fallidas += 1

            filetools.rename(path_series_json, "series.json.old")
            version = 'v4'

        except EnvironmentError:
            logger.error("ERROR al leer el archivo: %s" % path_series_json)
            return False

    # Convert library from v3 to v4
    if version != 'v4':
        # Recursively collect every tvshow.json under the SERIES_OLD folder
        for raiz, subcarpetas, ficheros in filetools.walk(path_series_old):
            for f in ficheros:
                if f == "tvshow.json":
                    try:
                        serie = Item().fromjson(
                            filetools.read(filetools.join(raiz, f)))
                        insertados, sobreescritos, fallidos = library.save_library_tvshow(
                            serie, list())
                        if fallidos == 0:
                            series_insertadas += 1
                            platformtools.dialog_notification(
                                "Serie actualizada", serie.infoLabels['title'])
                        else:
                            series_fallidas += 1
                    except:
                        series_fallidas += 1

    config.set_setting("library_version", 'v4')

    platformtools.dialog_notification(
        "Biblioteca actualizada al nuevo formato",
        "%s series convertidas y %s series descartadas. A continuación se va a "
        "obtener la información de todos los episodios" %
        (series_insertadas, series_fallidas),
        time=12000)

    # Finally, clean the library, since the old paths no longer exist
    xbmc_library.clean()

    return True
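# For reference, the v1 series.xml parsed above is assumed to hold one
# "title, url, channel" triple per line; a minimal sketch of that parse
# and of the year-extraction regex (sample data hypothetical):
import re

line = "Breaking Bad (2008), http://example.com/serie/breaking-bad, somechannel"
tvshow, url, channel = [part.strip() for part in line.split(",")]

match = re.match(r"^(.+)\s\((\d{4})\)$", tvshow)
if match:
    title, year = match.groups()  # ("Breaking Bad", "2008")
else:
    title, year = tvshow, ""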
Esempio n. 56
0
def series_library(item):
    logger.info("pelisalacarta.channels.tvvip series_library")
    # Function used only to add/update series in the library
    lista_episodios = []
    show = item.show.strip()

    data_serie = scrapertools.anti_cloudflare(item.url,
                                              host=host,
                                              headers=headers)
    data_serie = jsontools.load_json(data_serie)
    # For series that the site lists split into seasons
    if data_serie["sortedPlaylistChilds"]:
        for season_name in data_serie["sortedPlaylistChilds"]:
            url_season = "http://tv-vip.com/json/playlist/%s/index.json" % season_name[
                'id']
            data = scrapertools.anti_cloudflare(url_season,
                                                host=host,
                                                headers=headers)
            data = jsontools.load_json(data)

            if data["sortedRepoChilds"]:
                for child in data["sortedRepoChilds"]:
                    url = "http://tv-vip.com/json/repo/%s/index.json" % child[
                        "id"]
                    fulltitle = child['name'].rsplit(
                        " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
                    try:
                        check_filename = scrapertools.get_season_and_episode(
                            fulltitle)
                    except:
                        fulltitle += " " + str(data['seasonNumber']) + "x00"
                    lista_episodios.append(
                        Item(channel=item.channel,
                             action="findvideos",
                             server="",
                             title=fulltitle,
                             extra=url,
                             url=item.url,
                             fulltitle=fulltitle,
                             contentTitle=fulltitle,
                             show=show))
            else:
                for child in data["repoChilds"]:
                    url = "http://tv-vip.com/json/repo/%s/index.json" % child
                    fulltitle = child.capitalize().replace('_', ' ')
                    try:
                        check_filename = scrapertools.get_season_and_episode(
                            fulltitle)
                    except:
                        fulltitle += " " + str(data['seasonNumber']) + "x00"
                    lista_episodios.append(
                        Item(channel=item.channel,
                             action="findvideos",
                             server="",
                             title=fulltitle,
                             extra=url,
                             url=item.url,
                             contentTitle=fulltitle,
                             fulltitle=fulltitle,
                             show=show))
    # For single-season series listed directly
    else:
        data = data_serie
        if data["sortedRepoChilds"]:
            for child in data["sortedRepoChilds"]:
                url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
                fulltitle = child['name'].rsplit(
                    " ", 1)[0] + " - " + child['name'].rsplit(" ", 1)[1]
                try:
                    check_filename = scrapertools.get_season_and_episode(
                        fulltitle)
                except:
                    fulltitle += " 1x00"
                lista_episodios.append(
                    Item(channel=item.channel,
                         action="findvideos",
                         server="",
                         title=fulltitle,
                         contentTitle=fulltitle,
                         url=item.url,
                         extra=url,
                         fulltitle=fulltitle,
                         show=show))
        else:
            for child in data["repoChilds"]:
                url = "http://tv-vip.com/json/repo/%s/index.json" % child
                fulltitle = child.capitalize().replace('_', ' ')
                try:
                    check_filename = scrapertools.get_season_and_episode(
                        fulltitle)
                except:
                    fulltitle += " 1x00"
                lista_episodios.append(
                    Item(channel=item.channel,
                         action="findvideos",
                         server="",
                         title=fulltitle,
                         contentTitle=fulltitle,
                         url=item.url,
                         extra=url,
                         fulltitle=fulltitle,
                         show=show))

    return lista_episodios
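# The scrapertools.get_season_and_episode calls above are used only for
# their exception: when a title carries no "NxM" marker, a default one is
# appended. A sketch of what such a check amounts to (the real helper may
# differ):
import re

def get_season_and_episode(title):
    # Return the "SxE" marker (e.g. "2x05"), or raise so callers can fall
    # back to appending a default such as " 1x00".
    match = re.search(r"(\d+)x(\d+)", title)
    if match is None:
        raise ValueError("no season/episode marker in %r" % title)
    return match.group(0)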
Esempio n. 57
0
def entradasconlistas(item):
    logger.info("pelisalacarta.channels.tvvip entradasconlistas")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load_json(data)
    head = header_string + get_cookie_value()
    # If there is any list
    contentSerie = False
    contentList = False
    if data['playListChilds']:
        itemlist.append(
            Item(channel=item.channel,
                 title="**LISTAS**",
                 action="",
                 text_color="red",
                 text_blod=True,
                 folder=False))
        for child in data['sortedPlaylistChilds']:
            infolabels = {}

            infolabels['plot'] = "Contiene:\n" + "\n".join(
                child['playListChilds']) + "\n".join(child['repoChilds'])
            if child['seasonNumber'] and not contentList and re.search(
                    r'(?i)temporada', child['id']):
                infolabels['season'] = child['seasonNumber']
                contentSerie = True
            else:
                contentSerie = False
                contentList = True
            title = child['id'].replace(
                '-', ' ').capitalize() + " ([COLOR gold]" + str(
                    child['number']) + "[/COLOR])"
            url = "http://tv-vip.com/json/playlist/%s/index.json" % child["id"]
            thumbnail = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child[
                "id"]
            if child['hashBackground']:
                fanart = "http://tv-vip.com/json/playlist/%s/background.jpg" % child[
                    "id"]
            else:
                fanart = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child[
                    "id"]

            thumbnail += head
            fanart += head
            itemlist.append(
                Item(channel=item.channel,
                     action="entradasconlistas",
                     title=bbcode_kodi2html(title),
                     url=url,
                     thumbnail=thumbnail,
                     fanart=fanart,
                     fulltitle=child['id'],
                     infoLabels=infolabels,
                     viewmode="movie_with_plot"))
    else:
        contentList = True
    if data["sortedRepoChilds"] and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="**VÍDEOS**",
                 action="",
                 text_color="blue",
                 text_blod=True,
                 folder=False))

    for child in data["sortedRepoChilds"]:
        infolabels = {}

        infolabels['plot'] = child['description']
        infolabels['year'] = data['year']
        if child['tags']:
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rate'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        infolabels['duration'] = child['duration']
        if child['cast']: infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
        # Fanart
        if child['hashBackground']:
            fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child[
                "id"]
        else:
            fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child[
                "id"]
        # Thumbnail
        if child['hasPoster']:
            thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child[
                "id"]
        else:
            thumbnail = fanart
        thumbnail += head
        fanart += head
        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        else:
            quality = "[B]  [1080p][/B]"
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
                               .decode("utf-8")
        if child['name'] == "":
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=bbcode_kodi2html(title),
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 context="05",
                 viewmode="movie_with_plot",
                 folder=True))

    # Add an item for sending this list of videos to the library
    if data['sortedRepoChilds'] and len(itemlist) > 0 and contentList:
        if config.get_library_support():
            itemlist.append(
                Item(channel=item.channel,
                     text_color="green",
                     title="Añadir esta lista a la biblioteca",
                     url=item.url,
                     action="listas"))
    elif contentSerie:
        if config.get_library_support():
            itemlist.append(
                Item(channel=item.channel,
                     title="Añadir esta serie a la biblioteca",
                     url=item.url,
                     action="series_library",
                     fulltitle=data['name'],
                     show=data['name'],
                     text_color="green"))

    return itemlist
Esempio n. 58
0
def episodios(item):
    logger.info("tvalacarta.channels.clantv episodios")

    itemlist = []

    # Download the page
    url = item.url + "/videos.json"
    data = scrapertools.cache_page(url)
    json_object = jsontools.load_json(data)
    #logger.info("json_object="+json_object)
    json_items = json_object["page"]["items"]

    for json_item in json_items:
        title = json_item["longTitle"]
        url = json_item["uri"]
        thumbnail = json_item["imageSEO"]
        if json_item["description"] is not None:
            plot = scrapertools.htmlclean(json_item["description"])
        else:
            plot = ""
        fanart = item.fanart
        page = json_item["htmlUrl"]
        aired_date = scrapertools.parse_date(json_item["publicationDate"])

        ms = json_item["duration"]
        if ms is None:
            duration = ""
        else:
            # e.g. ms=3725000 -> "1:02:05"
            total_seconds = int(ms) // 1000
            hours, remainder = divmod(total_seconds, 3600)
            minutes, seconds = divmod(remainder, 60)
            if hours > 0:
                duration = "%d:%02d:%02d" % (hours, minutes, seconds)
            else:
                duration = "%d:%02d" % (minutes, seconds)

        if DEBUG:
            logger.info(" title=[" + repr(title) + "], url=[" + repr(url) +
                        "], thumbnail=[" + repr(thumbnail) + "] plot=[" +
                        repr(plot) + "]")
        itemlist.append(
            Item(channel="rtve",
                 title=title,
                 action="play",
                 server="rtve",
                 page=page,
                 url=page,
                 thumbnail=thumbnail,
                 fanart=thumbnail,
                 show=item.show,
                 plot=plot,
                 duration=duration,
                 aired_date=aired_date,
                 viewmode="movie_with_plot",
                 folder=False))

    from core import config
    if config.is_xbmc() and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title=">> Opciones para esta serie",
                 url=item.url,
                 action="serie_options##episodios",
                 thumbnail=item.thumbnail,
                 show=item.show,
                 folder=False))

    return itemlist
Esempio n. 59
0
def entradas(item):
    logger.info("pelisalacarta.channels.tvvip entradas")
    itemlist = []
    if item.title == "Nuevos Capítulos":
        context = "5"
    else:
        context = "05"
    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load_json(data)
    head = header_string + get_cookie_value()
    for child in data["sortedRepoChilds"]:
        infolabels = {}

        infolabels['plot'] = child['description']
        infolabels['year'] = child['year']
        if child['tags']:
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rate'].replace(',', '.')
        infolabels['votes'] = child['rateCount']
        infolabels['duration'] = child['duration']
        if child['cast']: infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']
        url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
        # Fanart
        if child['hashBackground']:
            fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child[
                "id"]
        else:
            fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child[
                "id"]
        # Thumbnail
        if child['hasPoster']:
            thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child[
                "id"]
        else:
            thumbnail = fanart
        thumbnail += head
        fanart += head

        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        else:
            quality = "[B]  [1080p][/B]"
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
                               .decode("utf-8")
        if child['name'] == "":
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 server="",
                 title=title,
                 url=url,
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 context=context))

    return itemlist
Esempio n. 60
0
def series(item):
    logger.info("pelisalacarta.channels.tvvip series")
    itemlist = []

    data = scrapertools.anti_cloudflare(item.url, host=host, headers=headers)
    data = jsontools.load_json(data)
    head = header_string + get_cookie_value()
    exception = ["top-series", "nuevos-capitulos"]
    for child in data["sortedPlaylistChilds"]:
        if child["id"] not in exception:
            infolabels = {}

            infolabels['plot'] = child['description']
            infolabels['year'] = child['year']
            if child['tags']:
                infolabels['genre'] = ', '.join(
                    [x.strip() for x in child['tags']])
            infolabels['rating'] = child['rate'].replace(',', '.')
            infolabels['votes'] = child['rateCount']
            if child['cast']: infolabels['cast'] = child['cast'].split(",")
            infolabels['director'] = child['director']
            infolabels['mediatype'] = "episode"
            if child['seasonNumber']:
                infolabels['season'] = child['seasonNumber']
            url = "http://tv-vip.com/json/playlist/%s/index.json" % child["id"]
            # Fanart
            if child['hashBackground']:
                fanart = "http://tv-vip.com/json/playlist/%s/background.jpg" % child[
                    "id"]
            else:
                fanart = "http://tv-vip.com/json/playlist/%s/thumbnail.jpg" % child[
                    "id"]
            # Thumbnail
            if child['hasPoster']:
                thumbnail = "http://tv-vip.com/json/playlist/%s/poster.jpg" % child[
                    "id"]
            else:
                thumbnail = fanart
            thumbnail += head
            fanart += head

            if item.contentTitle == "Series":
                if child['name'] != "":
                    fulltitle = unicodedata.normalize('NFD', unicode(child['name'].split(" Temporada")[0], 'utf-8')) \
                                           .encode('ASCII', 'ignore').decode("utf-8")
                    fulltitle = fulltitle.replace('-', '')
                    title = child['name'] + " (" + child['year'] + ")"
                else:
                    title = fulltitle = child['id'].capitalize()
                if "Temporada" not in title:
                    title += "     [Temporadas: [COLOR gold]" + str(
                        child['numberOfSeasons']) + "[/COLOR]]"
                elif item.title == "Más Vistas":
                    title = title.replace("- Temporada", "--- Temporada")
            else:
                if data['name'] != "":
                    fulltitle = unicodedata.normalize('NFD', unicode(data['name'], 'utf-8')).encode('ASCII', 'ignore') \
                                           .decode("utf-8")
                    if child['seasonNumber']:
                        title = data['name'] + " --- Temporada " + child['seasonNumber'] + \
                                "  [COLOR gold](" + str(child['number']) + ")[/COLOR]"
                    else:
                        title = child['name'] + "  [COLOR gold](" + str(
                            child['number']) + ")[/COLOR]"
                else:
                    fulltitle = unicodedata.normalize('NFD', unicode(data['id'], 'utf-8')).encode('ASCII', 'ignore') \
                                           .decode("utf-8")
                    if child['seasonNumber']:
                        title = data['id'].capitalize() + " --- Temporada " + child['seasonNumber'] + \
                                "  [COLOR gold](" + str(child['number']) + ")[/COLOR]"
                    else:
                        title = data['id'].capitalize(
                        ) + "  [COLOR gold](" + str(
                            child['number']) + ")[/COLOR]"
            if not child['playListChilds']:
                action = "episodios"
            else:
                action = "series"
            itemlist.append(
                Item(channel=item.channel,
                     action=action,
                     title=bbcode_kodi2html(title),
                     url=url,
                     server="",
                     thumbnail=thumbnail,
                     fanart=fanart,
                     fulltitle=fulltitle,
                     infoLabels=infolabels,
                     contentTitle=fulltitle,
                     context="25",
                     viewmode="movie_with_plot",
                     folder=True))
            if len(itemlist) == len(data["sortedPlaylistChilds"]
                                    ) and item.contentTitle != "Series":

                itemlist.sort(key=lambda item: item.title, reverse=True)
                if config.get_library_support():
                    itemlist.append(
                        Item(channel=item.channel,
                             title="Añadir esta serie a la biblioteca",
                             url=item.url,
                             action="add_serie_to_library",
                             show=data['name'],
                             text_color="green",
                             extra="series_library"))

    if item.title == "Últimas Series": return itemlist
    if item.title == "Lista de Series A-Z":
        itemlist.sort(key=lambda it: it.fulltitle)

    if data["sortedRepoChilds"] and len(itemlist) > 0:
        itemlist.append(
            Item(channel=item.channel,
                 title="**VÍDEOS RELACIONADOS/MISMA TEMÁTICA**",
                 text_color="blue",
                 text_blod=True,
                 action="",
                 folder=False))
    for child in data["sortedRepoChilds"]:
        infolabels = {}

        if child['description']:
            infolabels['plot'] = child['description']
        else:
            infolabels['plot'] = data['description']
        infolabels['year'] = data['year']
        if not child['tags']:
            infolabels['genre'] = ', '.join([x.strip() for x in data['tags']])
        else:
            infolabels['genre'] = ', '.join([x.strip() for x in child['tags']])
        infolabels['rating'] = child['rate'].replace(',', '.')
        infolabels['duration'] = child['duration']
        if child['cast']: infolabels['cast'] = child['cast'].split(",")
        infolabels['director'] = child['director']

        url = "http://tv-vip.com/json/repo/%s/index.json" % child["id"]
        # Fanart
        if child['hashBackground']:
            fanart = "http://tv-vip.com/json/repo/%s/background.jpg" % child[
                "id"]
        else:
            fanart = "http://tv-vip.com/json/repo/%s/thumbnail.jpg" % child[
                "id"]
        # Thumbnail
        if child['hasPoster']:
            thumbnail = "http://tv-vip.com/json/repo/%s/poster.jpg" % child[
                "id"]
        else:
            thumbnail = fanart
        thumbnail += head
        fanart += head

        if child['height'] < 720:
            quality = "[B]  [SD][/B]"
        elif child['height'] < 1080:
            quality = "[B]  [720p][/B]"
        else:
            quality = "[B]  [1080p][/B]"
        fulltitle = unicodedata.normalize('NFD', unicode(child['name'], 'utf-8')).encode('ASCII', 'ignore') \
                               .decode("utf-8")

        if child['name'] == "":
            title = child['id'].rsplit(".", 1)[0]
        else:
            title = child['name']
        if child['year']:
            title += " (" + child['year'] + ")"
        title += quality

        itemlist.append(
            Item(channel=item.channel,
                 action="findvideos",
                 title=bbcode_kodi2html(title),
                 url=url,
                 server="",
                 thumbnail=thumbnail,
                 fanart=fanart,
                 fulltitle=fulltitle,
                 infoLabels=infolabels,
                 contentTitle=fulltitle,
                 context="25",
                 viewmode="movie_with_plot",
                 folder=True))
    if item.extra == "new":
        itemlist.sort(key=lambda it: it.title, reverse=True)

    return itemlist
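# Finally, the inline unicodedata.normalize('NFD', ...) chains that recur
# through these channels can be captured in one Python 2 helper (a sketch,
# not part of the original code):
import unicodedata

def ascii_fold(text, encoding="utf-8"):
    # Decompose accented characters (NFD), drop the combining marks, and
    # return a plain-ASCII string, mirroring the inline pattern above.
    if isinstance(text, str):
        text = unicode(text, encoding)
    return unicodedata.normalize("NFD", text).encode("ASCII", "ignore").decode("utf-8")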