Code example #1
def checkforupdates():
    logger.info("streamondemand.core.updater checkforupdates")

    # Lee la versión remota
    logger.info("streamondemand.core.updater Verificando actualizaciones...")
    logger.info("streamondemand.core.updater Version remota: " + REMOTE_VERSION_FILE)
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)

    numero_version_publicada = scrapertools.find_single_match(data, "<version>([^<]+)</version>").strip()
    tag_version_publicada = scrapertools.find_single_match(data, "<tag>([^<]+)</tag>").strip()
    logger.info("streamondemand.core.updater version remota=" + tag_version_publicada + " " + numero_version_publicada)

    try:
        numero_version_publicada = int(numero_version_publicada)
    except:
        numero_version_publicada = 0
        import traceback
        logger.info(traceback.format_exc())

    # Lee la versión local
    numero_version_local = get_current_plugin_version()
    logger.info("streamondemand.core.updater checkforupdates version local=" + str(numero_version_local))

    hayqueactualizar = numero_version_publicada > numero_version_local
    logger.info("streamondemand.core.updater checkforupdates -> hayqueactualizar=" + repr(hayqueactualizar))

    # Si hay actualización disponible, devuelve la Nueva versión para que cada plataforma se encargue de mostrar los avisos
    if hayqueactualizar:
        return tag_version_publicada
    else:
        return None
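Note: every example on this page passes a regular expression with a single capture group to scrapertools.find_single_match (and, for lists, scrapertools.find_multiple_matches). The addon's own scrapertools module is not shown here; the sketch below is only an assumption of how these two helpers behave, inferred from how the calls use their return values (an empty string or empty list when nothing matches), not the project's actual implementation.

import re

def find_single_match(data, pattern):
    # First capture of the first match, or "" when nothing matches.
    matches = re.findall(pattern, data, re.DOTALL)
    return matches[0] if matches else ""

def find_multiple_matches(data, pattern):
    # Every capture found in the data, in document order.
    return re.findall(pattern, data, re.DOTALL)

# e.g. find_single_match("<version>12</version>", "<version>([^<]+)</version>") -> "12"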
Code example #2
File: httptools.py Project: Reunion90/raiz
def channel_proxy_list(url, forced_proxy=None):
    import base64
    import ast
    import scrapertools

    try:
        proxy_channel_bloqued_str = base64.b64decode(
            config.get_setting('proxy_channel_bloqued')).decode('utf-8')
        proxy_channel_bloqued = dict()
        proxy_channel_bloqued = ast.literal_eval(proxy_channel_bloqued_str)
    except:
        logger.debug('Proxytools no inicializado correctamente')
        return False

    if not url.endswith('/'):
        url += '/'
    if scrapertools.find_single_match(
            url,
            '(?:http.*:\/\/)?([^\?|\/]+)(?:\?|\/)') in proxy_channel_bloqued:
        if forced_proxy:
            return True
        if 'ON' in proxy_channel_bloqued[scrapertools.find_single_match(
                url, '(?:http.*:\/\/)?([^\?|\/]+)(?:\?|\/)')]:
            return True

    return False
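The lookup above keys proxy_channel_bloqued by the host portion of the URL, which the regex captures as everything between the optional scheme and the first '/' or '?'. A standalone check of what that pattern extracts, mirroring the function's trailing-slash normalisation (the URL is an illustrative value, not from the project):

import re

url = "https://example.org/path?x=1"
if not url.endswith('/'):
    url += '/'
host = re.findall(r'(?:http.*:\/\/)?([^\?|\/]+)(?:\?|\/)', url)[0]
print(host)  # -> "example.org"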
Code example #3
File: httptools.py Project: martinbm76/addon
def channel_proxy_list(url, forced_proxy=None):
    import base64
    import ast

    try:
        proxy_channel_bloqued_str = base64.b64decode(
            config.get_setting('proxy_channel_bloqued')).decode('utf-8')
        proxy_channel_bloqued = dict()
        proxy_channel_bloqued = ast.literal_eval(proxy_channel_bloqued_str)
    except:
        logger.debug('Proxytools no inicializado correctamente')
        return False

    if not url.endswith('/'):
        url += '/'
    if scrapertools.find_single_match(url, '(?:http.*\:)?\/\/(?:www\.)?([^\?|\/]+)(?:\?|\/)') \
                in proxy_channel_bloqued:
        if forced_proxy and forced_proxy not in [
                'Total', 'ProxyDirect', 'ProxyCF', 'ProxyWeb'
        ]:
            if forced_proxy in proxy_channel_bloqued[
                    scrapertools.find_single_match(
                        url,
                        '(?:http.*\:)?\/\/(?:www\.)?([^\?|\/]+)(?:\?|\/)')]:
                return True
            else:
                return False
        if forced_proxy:
            return True
        if not 'OFF' in proxy_channel_bloqued[scrapertools.find_single_match(
                url, '(?:http.*\:)?\/\/(?:www\.)?([^\?|\/]+)(?:\?|\/)')]:
            return True

    return False
Code example #4
def checkforupdates():
    logger.info("streamondemand.core.updater checkforupdates")

    # Lee la versión remota
    logger.info("streamondemand.core.updater Verificando actualizaciones...")
    logger.info("streamondemand.core.updater Version remota: " + REMOTE_VERSION_FILE)
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)

    numero_version_publicada = scrapertools.find_single_match(data, "<version>([^<]+)</version>").strip()
    tag_version_publicada = scrapertools.find_single_match(data, "<tag>([^<]+)</tag>").strip()
    logger.info("streamondemand.core.updater version remota=" + tag_version_publicada + " " + numero_version_publicada)

    try:
        numero_version_publicada = int(numero_version_publicada)
    except:
        numero_version_publicada = 0
        import traceback
        logger.info(traceback.format_exc())

    # Lee la versión local
    numero_version_local = get_current_plugin_version()
    logger.info("streamondemand.core.updater checkforupdates version local=" + str(numero_version_local))

    hayqueactualizar = numero_version_publicada > numero_version_local
    logger.info("streamondemand.core.updater checkforupdates -> hayqueactualizar=" + repr(hayqueactualizar))

    # Si hay actualización disponible, devuelve la Nueva versión para que cada plataforma se encargue de mostrar los avisos
    if hayqueactualizar:
        return tag_version_publicada
    else:
        return None
Code example #5
def get_channel_parameters(channel_name):
    #logger.info("streamondemand.core.channeltools get_channel_parameters channel_name="+channel_name)

    channel_xml = os.path.join(config.get_runtime_path(), 'channels', channel_name+".xml")

    if os.path.exists(channel_xml):
        # logger.info("channel_name+".xml found")

        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        channel_parameters = dict()
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")
        # Imagenes: se admiten url y archivos locales dentro de "resources/images"
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")
        channel_parameters["update_url"] = scrapertools.find_single_match(data, "<update_url>([^<]*)</update_url>")

        if channel_parameters["thumbnail"] and "://" not in channel_parameters["thumbnail"]:
            channel_parameters["thumbnail"] = os.path.join(config.get_runtime_path(), "resources", "images", "squares",
                                                           channel_parameters["thumbnail"])
        if channel_parameters["bannermenu"] and "://" not in channel_parameters["bannermenu"]:
            channel_parameters["bannermenu"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                            "bannermenu", channel_parameters["bannermenu"])
        if channel_parameters["fanart"] and "://" not in channel_parameters["fanart"]:
            channel_parameters["fanart"] = os.path.join(config.get_runtime_path(), "resources", "images", "fanart",
                                                        channel_parameters["fanart"])

        if channel_parameters["update_url"]=="":
            channel_parameters["update_url"] = DEFAULT_UPDATE_URL

        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)

        channel_parameters["categories"] = category_list

        logger.info(channel_name+" -> "+repr(channel_parameters))

    else:
        logger.info(channel_name+".xml NOT found")

        channel_parameters = dict()
        channel_parameters["adult"] = "false"
        channel_parameters["update_url"] = DEFAULT_UPDATE_URL

    return channel_parameters
Code example #6
def checkforupdates():
    logger.info("streamondemand.core.updater checkforupdates")

    # Descarga el fichero con la versión en la web
    logger.info("streamondemand.core.updater Verificando actualizaciones...")
    logger.info("streamondemand.core.updater Version remota: " +
                REMOTE_VERSION_FILE)
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)

    version_publicada = scrapertools.find_single_match(
        data, "<version>([^<]+)</version>").strip()
    tag_publicada = scrapertools.find_single_match(
        data, "<tag>([^<]+)</tag>").strip()
    logger.info("streamondemand.core.updater version remota=" + tag_publicada +
                " " + version_publicada)

    # Lee el fichero con la versión instalada
    logger.info("streamondemand.core.updater fichero local version: " +
                LOCAL_VERSION_FILE)
    data = open(LOCAL_VERSION_FILE).read()

    version_local = scrapertools.find_single_match(
        data, "<version>([^<]+)</version>").strip()
    tag_local = scrapertools.find_single_match(data,
                                               "<tag>([^<]+)</tag>").strip()

    logger.info("streamondemand.core.updater version local=" + tag_local +
                " " + version_local)

    try:
        numero_version_publicada = int(version_publicada)
        numero_version_local = int(version_local)
    except:
        import traceback
        logger.info(traceback.format_exc())
        version_publicada = None
        version_local = None

    hayqueactualizar = False
    # Si no tenemos la versión, comprobamos el tag
    if version_publicada is None or version_local is None:
        logger.info("streamondemand.core.updater comprobando el tag")
        from distutils.version import StrictVersion
        hayqueactualizar = StrictVersion(tag_publicada) > StrictVersion(
            tag_local)

    else:
        logger.info("streamondemand.core.updater comprobando la version")
        hayqueactualizar = numero_version_publicada > numero_version_local

    # Si hay actualización disponible, devuelve la Nueva versión para que cada plataforma se encargue de mostrar los avisos
    if hayqueactualizar:
        return tag_publicada
    else:
        return None
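When the numeric <version> fields cannot be parsed, the function above falls back to comparing the <tag> strings with distutils' StrictVersion, which compares dotted versions component by component rather than character by character. A short standard-library illustration of why that matters:

from distutils.version import StrictVersion

# "1.10" is a later release than "1.9" when compared numerically,
# even though a plain string comparison says the opposite.
print(StrictVersion("1.10") > StrictVersion("1.9"))   # True
print("1.10" > "1.9")                                 # False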
Code example #7
def get_channel_parameters(channel_name):
    #logger.info("streamondemand.core.channeltools get_channel_parameters channel_name="+channel_name)

    channel_xml = os.path.join(config.get_runtime_path(), 'channels',
                               channel_name + ".xml")

    if os.path.exists(channel_xml):
        #logger.info("streamondemand.core.channeltools get_channel_parameters "+channel_name+".xml found")

        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        # TODO: Pendiente del json :)
        channel_parameters = {}
        channel_parameters["title"] = scrapertools.find_single_match(
            data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(
            data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(
            data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(
            data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(
            data, "<language>([^<]*)</language>")
        channel_parameters["thumbnail"] = scrapertools.find_single_match(
            data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(
            data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(
            data, "<fanart>([^<]*)</fanart>")
        channel_parameters[
            "include_in_global_search"] = scrapertools.find_single_match(
                data,
                "<include_in_global_search>([^<]*)</include_in_global_search>")

        category_list = []
        matches = scrapertools.find_multiple_matches(
            data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)

        channel_parameters["categories"] = category_list

        logger.info(
            "streamondemand.core.channeltools get_channel_parameters " +
            channel_name + " -> " + repr(channel_parameters))

    else:
        logger.info(
            "streamondemand.core.channeltools get_channel_parameters " +
            channel_name + ".xml NOT found")

        channel_parameters = dict()
        channel_parameters["adult"] = "false"

    return channel_parameters
Code example #8
def update_channel(channel_name):
    logger.info(channel_name)

    import channeltools
    remote_channel_url, remote_version_url = channeltools.get_channel_remote_url(
        channel_name)
    local_channel_path, local_version_path, local_compiled_path = channeltools.get_channel_local_path(
        channel_name)

    # Version remota
    try:
        data = scrapertools.cachePage(remote_version_url)
        logger.info("remote_data=" + data)
        remote_version = int(
            scrapertools.find_single_match(data, '<version>([^<]+)</version>'))
        addon_condition = int(
            scrapertools.find_single_match(
                data, "<addon_version>([^<]*)</addon_version>").replace(
                    ".", "").ljust(
                        len(str(versiontools.get_current_plugin_version())),
                        '0'))
    except:
        remote_version = 0
        addon_condition = 0

    logger.info("remote_version=%d" % remote_version)

    # Version local
    if os.path.exists(local_version_path):
        infile = open(local_version_path)
        data = infile.read()
        infile.close()
        #logger.info("pelisalacarta.core.updater local_data="+data)

        local_version = int(
            scrapertools.find_single_match(data, '<version>([^<]+)</version>'))
    else:
        local_version = 0

    logger.info("local_version=%d" % local_version)

    # Comprueba si ha cambiado
    updated = (remote_version > local_version) and (
        versiontools.get_current_plugin_version() >= addon_condition)

    if updated:
        logger.info("downloading...")
        download_channel(channel_name)

    return updated
Code example #9
def check():
    logger.info("deportesalacarta.channels.update_sports Comprobando versión")
    try:
        # Lee el fichero con la versión instalada
        global bin
        fichero = open(LOCAL_XML_FILE, "r")
        data = fichero.read()
        fichero.close()
        version_local = scrapertools.find_single_match(
            data, "<version>([^<]+)</version>").strip()

        url_repo = ""
        server = ""
        if float(version_local) > 1.15:
            for i in range(3):
                bin = base64.b64decode(bin)

            data = eval(httptools.downloadpage(bin, hide=True).data)
            version_publicada = data["version"]
            message = data["changes"]
            url_repo = data["link"]
            server = data["server"]
        else:
            data = scrapertools.downloadpage(REMOTE_VERSION_FILE)
            version_publicada = scrapertools.find_single_match(
                data, "<version>([^<]+)</version>").strip()
            message = scrapertools.find_single_match(
                data, "<changes>([^<]+)</changes>").strip()
            logger.info(
                "deportesalacarta.channels.update_sports Versión en el repositorio: %s"
                % version_publicada)

        logger.info(
            "deportesalacarta.channels.update_sports Versión local: %s" %
            version_local)
        if float(version_publicada) > float(version_local):
            logger.info(
                "deportesalacarta.channels.update_sports Nueva versión encontrada"
            )
            return True, version_publicada, message, url_repo, server
        else:
            logger.info(
                "deportesalacarta.channels.update_sports No existe versión actualizada"
            )
            return False, "", "", "", ""
    except:
        import traceback
        logger.error("deportesalacarta.platformcode.launcher " +
                     traceback.format_exc())
        return False, "", "", "", ""
Code example #10
File: updater.py Project: Dreancala/Prova
def update_channel(channel_name):
    logger.info("streamondemand.core.updater update_channel " + channel_name)

    import channeltools
    remote_channel_url, remote_version_url = channeltools.get_channel_remote_url(
        channel_name)
    local_channel_path, local_version_path, local_compiled_path = channeltools.get_channel_local_path(
        channel_name)

    # Version remota
    try:
        data = scrapertools.cachePage(remote_version_url)
        logger.info("streamondemand.core.updater update_channel remote_data=" +
                    data)
        remote_version = int(
            scrapertools.find_single_match(data, '<version>([^<]+)</version>'))
    except:
        remote_version = 0

    logger.info(
        "streamondemand.core.updater update_channel remote_version=%d" %
        remote_version)

    # Version local
    local_version = 0
    if os.path.exists(local_version_path):
        try:
            infile = open(local_version_path)
            data = infile.read()
            infile.close()

            local_version = int(
                scrapertools.find_single_match(data,
                                               '<version>([^<]+)</version>'))
        except:
            pass

    logger.info("streamondemand.core.updater local_version=%d" % local_version)

    # Comprueba si ha cambiado
    updated = remote_version > local_version

    if updated:
        logger.info(
            "streamondemand.core.updater update_channel downloading...")
        download_channel(channel_name)

    return updated
Code example #11
def list_remote_servers():
    '''
        Obtiene un diccionario de los servers remotos y su fecha de la ultima actualizacion, analizando la web del repositorio GitHub.
        Cuando se porte pelisalacarta a la GitHub oficial hay q modificar la url.
    '''
    remote_files_url = "https://github.com/superberny70/plugin.video.pelisalacarta/tree/master/servers"

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  scrapertools.cache_page(remote_files_url))
    last_commit = scrapertools.find_single_match(
        data, '<time class="updated" datetime="([^"]+)"')

    patron = '<td class="content">.*?title="([a-zA-Z0-9]+\.py)".*?'  # name_server
    patron += '<time datetime="([^"]+)"'  # date_time

    matches = re.compile(patron, re.DOTALL).findall(data)

    d = {}
    d['__ultima_actualizacion__'] = last_commit.replace('T',
                                                        ' ').replace('Z', '')
    for name_server, date_time in matches:
        d[name_server] = date_time.replace('T', ' ').replace('Z', '')

    logger.info("updater.list_remote_servers :" + str(d))
    return d
Code example #12
File: versiontools.py Project: enursha101/xbmc-addon
def get_current_servers_version():

    f = open( os.path.join( config.get_runtime_path() , "servers" , "version.xml" ) )
    data = f.read()
    f.close()

    return int(scrapertools.find_single_match(data,"<version>([^<]+)</version>"))
Code example #13
def updatechannel(channel_name):
    '''
    Funcion experimental para actualizar el canal desde github basandose en la fecha de modificacion de los archivos.
    '''
    if channel_name == "channelselector":
        remote_files_url = "https://github.com/superberny70/plugin.video.pelisalacarta"
        local_files_path=os.path.join( config.get_runtime_path() , channel_name+'.py')
    else:
        remote_files_url = "https://github.com/superberny70/plugin.video.pelisalacarta/tree/master/pelisalacarta/channels"
        local_files_path=os.path.join( config.get_runtime_path(), PLUGIN_NAME , 'channels' , channel_name + '.py')
     
    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)","",scrapertools.cache_page(remote_files_url))
    #last_commit= scrapertools.find_single_match(data,'<time class="updated" datetime="([^"]+)"')
    
    patron = '<td class="content">.*?title="'+ channel_name +'\.py".*?' 
    patron += '<time datetime="([^"]+)"' # date_time
    
    date= scrapertools.find_single_match(data,patron).replace('T',' ').replace('Z','')
    
    if date =='': # El canal no esta en el repositorio remoto
        return False
        
    struct= time.strptime(date,'%Y-%m-%d %H:%M:%S')    
    dt_remote = datetime.datetime.fromtimestamp(time.mktime(struct))
    
    if os.path.exists(local_files_path):
        dt_local =datetime.datetime.fromtimestamp(os.path.getmtime (local_files_path))
    
    #logger.info("[updater.py] remote_data= "+str(dt_remote) + " local_data= " + str(dt_local ))
    if dt_remote > dt_local:
        dialogo('Actualizando canal', 'Actualizando canal ' + channel_name )
        return download_channel(channel_name)
        
    return False
Code example #14
def get_channel_parameters(channel_name):
    # logger.info("channel_name="+channel_name)

    channel_xml = os.path.join(config.get_runtime_path(), 'channels', channel_name+".xml")

    if os.path.exists(channel_xml):
        # logger.info("channel_name+".xml found")

        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        channel_parameters = dict()
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")
        # Imagenes: se admiten url y archivos locales dentro de "resources/images"
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")

        if channel_parameters["thumbnail"] and "://" not in channel_parameters["thumbnail"]:
            channel_parameters["thumbnail"] = os.path.join(config.get_runtime_path(), "resources", "images", "squares",
                                                           channel_parameters["thumbnail"])
        if channel_parameters["bannermenu"] and "://" not in channel_parameters["bannermenu"]:
            channel_parameters["bannermenu"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                            "bannermenu", channel_parameters["bannermenu"])
        if channel_parameters["fanart"] and "://" not in channel_parameters["fanart"]:
            channel_parameters["fanart"] = os.path.join(config.get_runtime_path(), "resources", "images", "fanart",
                                                        channel_parameters["fanart"])
        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)

        channel_parameters["categories"] = category_list

        logger.info(channel_name+" -> "+repr(channel_parameters))

    else:
        logger.info(channel_name+".xml NOT found")

        channel_parameters = dict()
        channel_parameters["adult"] = "false"

    return channel_parameters
Code example #15
def checkforupdates():
    logger.info("fusionse.core.updater checkforupdates")

    # Descarga el fichero con la versión en la web
    logger.info("fusionse.core.updater Verificando actualizaciones...")
    logger.info("fusionse.core.updater Version remota: " + REMOTE_VERSION_FILE)
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)

    version_publicada = scrapertools.find_single_match(data, "<version>([^<]+)</version>").strip()
    tag_publicada = scrapertools.find_single_match(data, "<tag>([^<]+)</tag>").strip()
    logger.info("fusionse.core.updater version remota=" + tag_publicada + " " + version_publicada)

    # Lee el fichero con la versión instalada
    logger.info("fusionse.core.updater fichero local version: " + LOCAL_VERSION_FILE)
    data = open(LOCAL_VERSION_FILE).read()

    version_local = scrapertools.find_single_match(data, "<version>([^<]+)</version>").strip()
    tag_local = scrapertools.find_single_match(data, "<tag>([^<]+)</tag>").strip()

    logger.info("fusionse.core.updater version local=" + tag_local + " " + version_local)

    try:
        numero_version_publicada = int(version_publicada)
        numero_version_local = int(version_local)
    except:
        import traceback
        logger.info(traceback.format_exc())
        version_publicada = None
        version_local = None

    hayqueactualizar = False
    # Si no tenemos la versión, comprobamos el tag
    if version_publicada is None or version_local is None:
        logger.info("fusionse.core.updater comprobando el tag")
        from distutils.version import StrictVersion
        hayqueactualizar = StrictVersion(tag_publicada) > StrictVersion(tag_local)

    else:
        logger.info("fusionse.core.updater comprobando la version")
        hayqueactualizar = numero_version_publicada > numero_version_local

    # Si hay actualización disponible, devuelve la Nueva versión para que cada plataforma se encargue de mostrar los avisos
    if hayqueactualizar:
        return tag_publicada
    else:
        return None
Code example #16
def get_channel_parameters(channel_name):
    logger.info("pelisalacarta.core.channeltools get_channel_parameters channel_name="+channel_name)

    channel_xml = os.path.join( config.get_runtime_path() , 'channels' , channel_name+".xml" )

    if os.path.exists(channel_xml):
        logger.info("pelisalacarta.core.channeltools get_channel_parameters "+channel_name+".xml found")

        infile = open( channel_xml , "rb" )
        data = infile.read()
        infile.close()

        # TODO: Pendiente del json :)
        channel_parameters = {}
        channel_parameters["category"] = re.compile("<category>([^<]*)</category>",re.DOTALL).findall(data)
        channel_parameters["title"] = scrapertools.find_single_match(data,"<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data,"<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data,"<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data,"<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data,"<language>([^<]*)</language>")
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data,"<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data,"<fanart>([^<]*)</fanart>")
        channel_parameters["type"] = "generic"

        logger.info("pelisalacarta.core.channeltools get_channel_parameters channel_parameters="+repr(channel_parameters) )

    else:
        logger.info("pelisalacarta.core.channeltools get_channel_parameters "+channel_name+".xml NOT found")

        channel_parameters = {}
        channel_parameters["adult"] = "false"

    return channel_parameters
Code example #17
def scraper_channel_py(data_channel_py):
    '''
    Analiza el parametro 'data_channel_py' y retorna un diccionario con las siguientes claves:
    title, channel, language, category, type, adult, thumbnail y version.
    '''
    data_channel_py = data_channel_py.replace(' ', '')
    title = scrapertools.find_single_match(
        data_channel_py, '__title__="([^"]+)"').decode('utf-8')
    channel = scrapertools.find_single_match(
        data_channel_py, '__channel__="([^"]+)"').decode('utf-8')
    language = scrapertools.find_single_match(
        data_channel_py, '__language__="([^"]+)"').decode('utf-8')
    category = scrapertools.find_single_match(
        data_channel_py, '__category__="([^"]+)"').decode('utf-8')
    type = scrapertools.find_single_match(data_channel_py,
                                          '__type__="([^"]+)"').decode('utf-8')
    adult = scrapertools.find_single_match(
        data_channel_py, '__adult__="([^"]+)"').decode('utf-8')
    thumbnail = scrapertools.find_single_match(
        data_channel_py, '__thumbnail__="([^"]+)"').decode('utf-8')
    '''
    version= scrapertools.find_single_match(data_channel_py,'__version__="([^"]+)"').decode('utf-8')
    if version=='': version=u'0'
    return {"title": title, "channel":channel, "language":language, "category":category, "type":type, "adult":adult, "thumbnail":thumbnail, "version":version}
    '''
    return {
        "title": title,
        "channel": channel,
        "language": language,
        "category": category,
        "type": type,
        "adult": adult,
        "thumbnail": thumbnail
    }
Code example #18
def scraper_channel_py(data_channel_py):
    """
    Analiza el parametro 'data_channel_py' y retorna un diccionario con las siguientes claves:
    title, channel, language, category, type, adult, thumbnail y version.
    """
    data_channel_py = data_channel_py.replace(" ", "")
    title = scrapertools.find_single_match(data_channel_py, '__title__="([^"]+)"').decode("utf-8")
    channel = scrapertools.find_single_match(data_channel_py, '__channel__="([^"]+)"').decode("utf-8")
    language = scrapertools.find_single_match(data_channel_py, '__language__="([^"]+)"').decode("utf-8")
    category = scrapertools.find_single_match(data_channel_py, '__category__="([^"]+)"').decode("utf-8")
    type = scrapertools.find_single_match(data_channel_py, '__type__="([^"]+)"').decode("utf-8")
    adult = scrapertools.find_single_match(data_channel_py, '__adult__="([^"]+)"').decode("utf-8")
    thumbnail = scrapertools.find_single_match(data_channel_py, '__thumbnail__="([^"]+)"').decode("utf-8")
    """
    version= scrapertools.find_single_match(data_channel_py,'__version__="([^"]+)"').decode('utf-8')
    if version=='': version=u'0'
    return {"title": title, "channel":channel, "language":language, "category":category, "type":type, "adult":adult, "thumbnail":thumbnail, "version":version}
    """
    return {
        "title": title,
        "channel": channel,
        "language": language,
        "category": category,
        "type": type,
        "adult": adult,
        "thumbnail": thumbnail,
    }
Code example #19
def update_server(server_name):
    logger.info("Stefano.core.updater updateserver('" + server_name + "')")

    import servertools
    remote_server_url, remote_version_url = servertools.get_server_remote_url(
        server_name)
    local_server_path, local_version_path, local_compiled_path = servertools.get_server_local_path(
        server_name)

    # Version remota
    try:
        data = scrapertools.cachePage(remote_version_url)
        logger.info("Stefano.core.updater remote_data=" + data)
        remote_version = int(
            scrapertools.find_single_match(data, '<version>([^<]+)</version>'))
    except:
        remote_version = 0

    logger.info("Stefano.core.updater remote_version=%d" % remote_version)

    # Version local
    local_version = 0
    if os.path.exists(local_version_path):
        try:
            infile = open(local_version_path)
            data = infile.read()
            infile.close()
            logger.info("Stefano.core.updater local_data=" + data)
            local_version = int(
                scrapertools.find_single_match(data,
                                               '<version>([^<]+)</version>'))
        except:
            pass

    logger.info("Stefano.core.updater local_version=%d" % local_version)

    # Comprueba si ha cambiado
    updated = remote_version > local_version

    if updated:
        logger.info("Stefano.core.updater updated")
        download_server(server_name)

    return updated
Code example #20
def download_channel(channel_name):
    logger.info("[updater.py] download_channel('" + channel_name + "')")
    ret = True
    remote_files_url, local_files_path = get_path_url_channel(channel_name)

    # Descarga el canal
    for ext in ['.xml', '.py']:
        try:
            updated_data = scrapertools.cachePage(remote_files_url + ext)
            if scrapertools.find_single_match(updated_data,
                                              '<title>Page not found') != "":
                continue

            outfile = open(local_files_path + ext, "w")
            outfile.write(updated_data)
            outfile.flush()
            outfile.close()
            logger.info("[updater.py] Grabado a " + local_files_path + ext)

            if ext == '.py' and channel_name != "channelselector":
                # Actualizar listado de canales
                if os.path.exists(
                        PATH_LIST_CHANNELS_JSON
                ):  # Si existe list_channels.json lo abrimos...
                    with codecs.open(PATH_LIST_CHANNELS_JSON, 'r',
                                     'utf-8') as input_file:
                        indice_canales = json.load(input_file)

                    # ... actualizamos los atributos del canal...
                    indice_canales[channel_name +
                                   '.py'] = scraper_channel_py(updated_data)

                    #...y lo volvemos a guardar
                    with codecs.open(PATH_LIST_CHANNELS_JSON, 'w',
                                     'utf-8') as outfile:
                        json.dump(indice_canales,
                                  outfile,
                                  sort_keys=True,
                                  indent=4,
                                  ensure_ascii=False,
                                  encoding="utf8")

                else:  # Si no existe list_channels.json lo creamos
                    ini_list_channels_json()
        except:
            logger.info("[updater.py] Error al grabar " + local_files_path)
            ret = False
            for line in sys.exc_info():
                logger.error("%s" % line)
            break

    if os.path.exists(local_files_path + '.pyo'):
        os.remove(local_files_path + '.pyo')

    return ret
Code example #21
def episodio_ya_descargado(show_title, episode_title):
    import scrapertools
    ficheros = os.listdir(".")

    for fichero in ficheros:
        # logger.info("fichero="+fichero)
        if fichero.lower().startswith(show_title.lower()) and \
                        scrapertools.find_single_match(fichero, "(\d+x\d+)") == episode_title:
            logger.info("encontrado!")
            return True

    return False
Code example #22
def _read_suscription_file():
    logger.info("suscription._read_suscription_file")

    # Read file
    if os.path.exists(SUSCRIPTIONS_FILE):
        f = open(SUSCRIPTIONS_FILE,"r")
        data = f.read()
        f.close()
    else:
        data = ""

    # Parse suscriptions
    suscriptions = []

    matches = scrapertools.find_multiple_matches(data,"<suscription>(.*?)</suscription>")
    for match in matches:
        channel = scrapertools.find_single_match(match,"<channel>([^<]+)</channel>")
        url = scrapertools.find_single_match(match,"<url>([^<]+)</url>")
        extra = scrapertools.find_single_match(match,"<extra>([^<]+)</extra>")
        action = scrapertools.find_single_match(match,"<action>([^<]+)</action>")
        show_name = scrapertools.find_single_match(match,"<show_name>([^<]+)</show_name>")
        thumbnail = scrapertools.find_single_match(match,"<thumbnail>([^<]+)</thumbnail>")

        suscriptions.append( Item( channel=channel, url=url, action=action, title=show_name, show=show_name, thumbnail=thumbnail ) )

    return suscriptions
Code example #23
File: channeltools.py Project: chelopr/pelisalacarta
def get_channel_parameters(channel_name):
    logger.info("pelisalacarta.core.channeltools get_channel_parameters channel_name=" + channel_name)

    channel_xml = os.path.join(config.get_runtime_path(), "channels", channel_name + ".xml")

    if os.path.exists(channel_xml):
        logger.info("pelisalacarta.core.channeltools get_channel_parameters " + channel_name + ".xml found")

        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        # TODO: Pendiente del json :)
        channel_parameters = {}
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")
        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>"
        )
        channel_parameters["type"] = "generic"

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)

        channel_parameters["categories"] = category_list

        logger.info(
            "pelisalacarta.core.channeltools get_channel_parameters channel_parameters=" + repr(channel_parameters)
        )

    else:
        logger.info("pelisalacarta.core.channeltools get_channel_parameters " + channel_name + ".xml NOT found")

        channel_parameters = {}
        channel_parameters["adult"] = "false"

    return channel_parameters
Code example #24
def updatechannel2(channel_name):
    '''
    Esta funcion no se usa actualmente. 
    Actualizacion desde el repositorio oficial basandose en el numero de version del xml
    '''
    logger.info("[updater.py] updatechannel('" + channel_name + "')")

    remote_files_url, local_files_path = get_path_url_channel(channel_name)

    # Version remota
    try:
        data = scrapertools.cachePage(remote_files_url + '.xml')
        #logger.info("[updater.py] remote_data="+data)
        remote_version = scrapertools.find_single_match(
            data, '<tag>([^<]+)</tag>')
        if '.' not in remote_version: remote_version += '.0'
    except:
        remote_version = '0.0'
    logger.info("[updater.py] remote_version=%s" % remote_version)

    # Version local
    if os.path.exists(local_files_path + '.xml'):
        infile = open(local_files_path + '.xml')
        data = infile.read()
        infile.close()
        #logger.info("[updater.py] local_data="+data)
        local_version = scrapertools.find_single_match(data,
                                                       '<tag>([^<]+)</tag>')
        if '.' not in local_version: local_version += '.0'
    else:
        local_version = '0.0'
    logger.info("[updater.py] local_version=%s" % local_version)

    # Comprueba si ha cambiado
    if StrictVersion(remote_version) > StrictVersion(local_version):
        logger.info("[updater.py] updated")
        return download_channel(channel_name)

    return False
Code example #25
def update_server(server_name):
    logger.info("streamondemand.core.updater updateserver('" + server_name + "')")

    import servertools
    remote_server_url, remote_version_url = servertools.get_server_remote_url(server_name)
    local_server_path, local_version_path, local_compiled_path = servertools.get_server_local_path(server_name)

    # Version remota
    try:
        data = scrapertools.cachePage(remote_version_url)
        logger.info("streamondemand.core.updater remote_data=" + data)
        remote_version = int(scrapertools.find_single_match(data, '<version>([^<]+)</version>'))
    except:
        remote_version = 0

    logger.info("streamondemand.core.updater remote_version=%d" % remote_version)

    # Version local
    local_version = 0
    if os.path.exists(local_version_path):
        try:
            infile = open(local_version_path)
            data = infile.read()
            infile.close()
            logger.info("streamondemand.core.updater local_data=" + data)
            local_version = int(scrapertools.find_single_match(data, '<version>([^<]+)</version>'))
        except:
            pass

    logger.info("streamondemand.core.updater local_version=%d" % local_version)

    # Comprueba si ha cambiado
    updated = remote_version > local_version

    if updated:
        logger.info("streamondemand.core.updater updated")
        download_server(server_name)

    return updated
Code example #26
def updatechannel2(channel_name):
    """
    Esta funcion no se usa actualmente. 
    Actualizacion desde el repositorio oficial basandose en el numero de version del xml
    """
    logger.info("[updater.py] updatechannel('" + channel_name + "')")

    remote_files_url, local_files_path = get_path_url_channel(channel_name)

    # Version remota
    try:
        data = scrapertools.cachePage(remote_files_url + ".xml")
        # logger.info("[updater.py] remote_data="+data)
        remote_version = scrapertools.find_single_match(data, "<tag>([^<]+)</tag>")
        if "." not in remote_version:
            remote_version += ".0"
    except:
        remote_version = "0.0"
    logger.info("[updater.py] remote_version=%s" % remote_version)

    # Version local
    if os.path.exists(local_files_path + ".xml"):
        infile = open(local_files_path + ".xml")
        data = infile.read()
        infile.close()
        # logger.info("[updater.py] local_data="+data)
        local_version = scrapertools.find_single_match(data, "<tag>([^<]+)</tag>")
        if "." not in local_version:
            local_version += ".0"
    else:
        local_version = "0.0"
    logger.info("[updater.py] local_version=%s" % local_version)

    # Comprueba si ha cambiado
    if StrictVersion(remote_version) > StrictVersion(local_version):
        logger.info("[updater.py] updated")
        return download_channel(channel_name)

    return False
Code example #27
File: updater.py Project: CYBERxNUKE/xbmc-addon
def update_channel(channel_name):
    logger.info("pelisalacarta.core.updater update_channel "+channel_name)
    
    import channeltools
    remote_channel_url , remote_version_url = channeltools.get_channel_remote_url(channel_name)
    local_channel_path , local_version_path , local_compiled_path = channeltools.get_channel_local_path(channel_name)
    
    # Version remota
    try:
        data = scrapertools.cachePage( remote_version_url )
        logger.info("pelisalacarta.core.updater update_channel remote_data="+data)
        remote_version = int( scrapertools.find_single_match(data,'<version>([^<]+)</version>') )
    except:
        remote_version = 0

    logger.info("pelisalacarta.core.updater update_channel remote_version=%d" % remote_version)

    # Version local
    if os.path.exists( local_version_path ):
        infile = open( local_version_path )
        data = infile.read()
        infile.close()
        #logger.info("pelisalacarta.core.updater local_data="+data)

        local_version = int( scrapertools.find_single_match(data,'<version>([^<]+)</version>') )
    else:
        local_version = 0

    logger.info("pelisalacarta.core.updater local_version=%d" % local_version)

    # Comprueba si ha cambiado
    updated = remote_version > local_version

    if updated:
        logger.info("pelisalacarta.core.updater update_channel downloading...")
        download_channel(channel_name)

    return updated
Code example #28
def check():
    logger.info("deportesalacarta.channels.update_sports Comprobando versión")
    try:
        data = scrapertools.downloadpage(REMOTE_VERSION_FILE)
        version_publicada = scrapertools.find_single_match(
            data, "<version>([^<]+)</version>").strip()
        message = scrapertools.find_single_match(
            data, "<changes>([^<]+)</changes>").strip()
        logger.info(
            "deportesalacarta.channels.update_sports Versión en el repositorio: %s"
            % version_publicada)

        # Lee el fichero con la versión instalada
        fichero = open(LOCAL_XML_FILE, "r")
        data = fichero.read()
        fichero.close()
        version_local = scrapertools.find_single_match(
            data, "<version>([^<]+)</version>").strip()

        logger.info(
            "deportesalacarta.channels.update_sports Versión local: %s" %
            version_local)
        if float(version_publicada) > float(version_local):
            logger.info(
                "deportesalacarta.channels.update_sports Nueva versión encontrada"
            )
            return True, version_publicada, message
        else:
            logger.info(
                "deportesalacarta.channels.update_sports No existe versión actualizada"
            )
            return False, "", ""
    except:
        import traceback
        logger.error("deportesalacarta.platformcode.launcher " +
                     traceback.format_exc())
        return False, "", ""
Code example #29
def update(item):
    logger.info("Stefano.core.updater update")

    # Lee la versión remota
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)
    numero_version_publicada = scrapertools.find_single_match(
        data, "<version>([^<]+)</version>").strip()
    tag_version_publicada = scrapertools.find_single_match(
        data, "<tag>([^<]+)</tag>").strip()

    remotefilename = REMOTE_FILE % (tag_version_publicada,
                                    tag_version_publicada)
    localfilename = LOCAL_FILE + item.version + ".zip"

    download_and_install(remotefilename, localfilename)

    try:
        numero_version_publicada = int(numero_version_publicada)
    except:
        numero_version_publicada = 0
        import traceback
        logger.info(traceback.format_exc())

    set_current_plugin_version(numero_version_publicada)
Code example #30
File: seriesretro.py Project: wapitores/addon
def seriesretro(params):
    plugintools.log('[%s %s] seriecatcher %s' % (addonName, addonVersion, repr(params)))

    url = params.get("url")
    referer = 'http://youanimehd.com/video/'
    data = gethttp_referer_headers(url,referer)
    plugintools.log("data= "+data)
    matches = plugintools.find_multiple_matches(data, '<ul class="sc_menu">(.*?)</ul></div>')
    for entry in matches:
        title = scrapertools.find_single_match(entry, '" alt="(.*?)" width="140" height="200" />')
        cover = plugintools.find_single_match(entry, '<img src="([^"]+)')
        url = plugintools.find_single_match(entry, '<a href="([^"]+)')
        plugintools.log("url= "+url)
        plugintools.add_item(action="pelisya", title = title , thumbnail = cover , url = url , fanart = fanart , folder = True , isPlayable = False)
    
    next_page = scrapertools.find_single_match(data,"href='http://www.yaske.to/es/peliculas/page/(.*?)'>")
    next_page = next_page.replace('("', "").replace('")', "")
    next_page = 'http://www.yaske.to/es/peliculas/page/'+next_page
    plugintools.log("next_page= "+next_page)
    if next_page!="":
        plugintools.add_item(action="seriecatcher", title =">> siguiente" , thumbnail = "", url = next_page, fanart = fanart , folder = True)

    return plugintools.add_item
Code example #31
def list_remote_channels():
    """
    Obtiene una lista de los canales remotos, analizando la web del repositorio.
    """
    remote_files_url = "https://github.com/superberny70/plugin.video.pelisalacarta/tree/master/pelisalacarta/channels"

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", scrapertools.cache_page(remote_files_url))
    last_commit = scrapertools.find_single_match(data, '<time class="updated" datetime="([^"]+)"')
    files_remotos = []
    if last_commit != "":
        patron = '<td class="content">.*?title="([a-zA-Z0-9]+\.py)">'
        files_remotos = re.compile(patron, re.DOTALL).findall(data)

    # logger.info("updater.list_remote_channels :"  + str(files_remotos))
    return files_remotos
Code example #32
File: httptools.py Project: j3tanga/addon
def get_url_headers(url, forced=False):
    domain = urlparse.urlparse(url)[1]
    sub_dom = scrapertools.find_single_match(domain, '\.(.*?\.\w+)')
    if sub_dom and not 'google' in url:
        domain = sub_dom
    domain_cookies = cj._cookies.get("." + domain, {}).get("/", {})

    if "|" in url or not "cf_clearance" in domain_cookies:
        if not forced:
            return url

    headers = dict()
    headers["User-Agent"] = default_headers["User-Agent"]
    headers["Cookie"] = "; ".join(["%s=%s" % (c.name, c.value) for c in domain_cookies.values()])

    return url + "|" + "&".join(["%s=%s" % (h, urllib.quote(headers[h])) for h in headers])
Code example #33
File: filetools.py Project: staycanuca/BUILDONLY
def remove_tags(title):
    """
    devuelve el titulo sin tags como color
    @type title: str
    @param title: title
    @rtype: str
    @return: cadena sin tags
    """
    logger.info("deportesalacarta.core.filetools remove_tags")

    title_without_tags = scrapertools.find_single_match(
        title, '\[color .+?\](.+)\[\/color\]')

    if title_without_tags:
        return title_without_tags
    else:
        return title
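For reference, the regex only matches titles wrapped in a lowercase [color ...] tag; when it does not match, find_single_match returns an empty string and the original title is passed through unchanged. A self-contained check of the pattern itself (input strings are made up):

import re

pattern = r'\[color .+?\](.+)\[\/color\]'
print(re.findall(pattern, "[color red]Final Match[/color]"))  # ['Final Match']
print(re.findall(pattern, "Plain title"))                     # []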
Code example #34
def list_remote_channels():
    '''
    Obtiene una lista de los canales remotos, analizando la web del repositorio.
    '''
    remote_files_url = "https://github.com/superberny70/plugin.video.pelisalacarta/tree/master/pelisalacarta/channels"

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  scrapertools.cache_page(remote_files_url))
    last_commit = scrapertools.find_single_match(
        data, '<time class="updated" datetime="([^"]+)"')
    files_remotos = []
    if last_commit != '':
        patron = '<td class="content">.*?title="([a-zA-Z0-9]+\.py)">'
        files_remotos = re.compile(patron, re.DOTALL).findall(data)

    #logger.info("updater.list_remote_channels :"  + str(files_remotos))
    return files_remotos
Code example #35
def download_channel(channel_name):
    logger.info("[updater.py] download_channel('" + channel_name + "')")
    ret = True
    remote_files_url, local_files_path = get_path_url_channel(channel_name)

    # Descarga el canal
    for ext in [".xml", ".py"]:
        try:
            updated_data = scrapertools.cachePage(remote_files_url + ext)
            if scrapertools.find_single_match(updated_data, "<title>Page not found") != "":
                continue

            outfile = open(local_files_path + ext, "w")
            outfile.write(updated_data)
            outfile.flush()
            outfile.close()
            logger.info("[updater.py] Grabado a " + local_files_path + ext)

            if ext == ".py" and channel_name != "channelselector":
                # Actualizar listado de canales
                if os.path.exists(PATH_LIST_CHANNELS_JSON):  # Si existe list_channels.json lo abrimos...
                    with codecs.open(PATH_LIST_CHANNELS_JSON, "r", "utf-8") as input_file:
                        indice_canales = json.load(input_file)

                    # ... actualizamos los atributos del canal...
                    indice_canales[channel_name + ".py"] = scraper_channel_py(updated_data)

                    # ...y lo volvemos a guardar
                    with codecs.open(PATH_LIST_CHANNELS_JSON, "w", "utf-8") as outfile:
                        json.dump(
                            indice_canales, outfile, sort_keys=True, indent=4, ensure_ascii=False, encoding="utf8"
                        )

                else:  # Si no existe list_channels.json lo creamos
                    ini_list_channels_json()
        except:
            logger.info("[updater.py] Error al grabar " + local_files_path)
            ret = False
            for line in sys.exc_info():
                logger.error("%s" % line)
            break

    if os.path.exists(local_files_path + ".pyo"):
        os.remove(local_files_path + ".pyo")

    return ret
Code example #36
def update(item):
    logger.info("streamondemand.core.updater update")

    remotefilename = REMOTE_FILE
    localfilename = LOCAL_FILE + item.version + ".zip"

    download_and_install(remotefilename, localfilename)

    # Lee la versión remota
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)
    numero_version_publicada = scrapertools.find_single_match(data, "<version>([^<]+)</version>").strip()
    try:
        numero_version_publicada = int(numero_version_publicada)
    except:
        numero_version_publicada = 0
        import traceback
        logger.info(traceback.format_exc())

    set_current_plugin_version(numero_version_publicada)
Code example #37
def jhexdecode(t):
    r = re.sub(r'_\d+x\w+x(\d+)', 'var_' + r'\1', t)
    r = re.sub(r'_\d+x\w+', 'var_0', r)
    def to_hx(c):
        h = int("%s" % c.groups(0), 16)
        if 19 < h < 160:
            return chr(h)
        else:
            return ""
    r = re.sub(r'(?:\\|)x(\w{2})', to_hx, r).replace('var ', '')
    f = eval(scrapertools.find_single_match(r, '\s*var_0\s*=\s*([^;]+);'))
    for i, v in enumerate(f):
        r = r.replace('[[var_0[%s]]' % i, "." + f[i])
        r = r.replace(':var_0[%s]' % i, ":\"" + f[i] + "\"")
        r = r.replace(' var_0[%s]' % i, " \"" + f[i] + "\"")
        r = r.replace('(var_0[%s]' % i, "(\"" + f[i] + "\"")
        r = r.replace('[var_0[%s]]' % i, "." + f[i])
        if v == "": r = r.replace('var_0[%s]' % i, '""')
    r = re.sub(r':(function.*?\})', r":'\g<1>'", r)
    r = re.sub(r':(var[^,]+),', r":'\g<1>',", r)
    return r
Code example #38
def updatechannel(channel_name):
    '''
    Funcion experimental para actualizar el canal desde github basandose en la fecha de modificacion de los archivos.
    '''
    if channel_name == "channelselector":
        remote_files_url = "https://github.com/superberny70/plugin.video.pelisalacarta"
        local_files_path = os.path.join(config.get_runtime_path(),
                                        channel_name + '.py')
    else:
        remote_files_url = "https://github.com/superberny70/plugin.video.pelisalacarta/tree/master/pelisalacarta/channels"
        local_files_path = os.path.join(config.get_runtime_path(), PLUGIN_NAME,
                                        'channels', channel_name + '.py')

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "",
                  scrapertools.cache_page(remote_files_url))
    #last_commit= scrapertools.find_single_match(data,'<time class="updated" datetime="([^"]+)"')

    patron = '<td class="content">.*?title="' + channel_name + '\.py".*?'
    patron += '<time datetime="([^"]+)"'  # date_time

    date = scrapertools.find_single_match(data, patron).replace('T',
                                                                ' ').replace(
                                                                    'Z', '')

    if date == '':  # El canal no esta en el repositorio remoto
        return False

    struct = time.strptime(date, '%Y-%m-%d %H:%M:%S')
    dt_remote = datetime.datetime.fromtimestamp(time.mktime(struct))

    if os.path.exists(local_files_path):
        dt_local = datetime.datetime.fromtimestamp(
            os.path.getmtime(local_files_path))

    #logger.info("[updater.py] remote_data= "+str(dt_remote) + " local_data= " + str(dt_local ))
    if dt_remote > dt_local:
        dialogo('Actualizando canal', 'Actualizando canal ' + channel_name)
        return download_channel(channel_name)

    return False
Code example #39
def list_remote_servers():
    """
        Obtiene un diccionario de los servers remotos y su fecha de la ultima actualizacion, analizando la web del repositorio GitHub.
        Cuando se porte pelisalacarta a la GitHub oficial hay q modificar la url.
    """
    remote_files_url = "https://github.com/superberny70/plugin.video.pelisalacarta/tree/master/servers"

    data = re.sub(r"\n|\r|\t|\s{2}|(<!--.*?-->)", "", scrapertools.cache_page(remote_files_url))
    last_commit = scrapertools.find_single_match(data, '<time class="updated" datetime="([^"]+)"')

    patron = '<td class="content">.*?title="([a-zA-Z0-9]+\.py)".*?'  # name_server
    patron += '<time datetime="([^"]+)"'  # date_time

    matches = re.compile(patron, re.DOTALL).findall(data)

    d = {}
    d["__ultima_actualizacion__"] = last_commit.replace("T", " ").replace("Z", "")
    for name_server, date_time in matches:
        d[name_server] = date_time.replace("T", " ").replace("Z", "")

    logger.info("updater.list_remote_servers :" + str(d))
    return d
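The result maps each remote server script to its last-modified timestamp, plus the special "__ultima_actualizacion__" key holding the repository's last commit date. A purely illustrative example of the returned shape (file names and dates invented):

d = {
    "__ultima_actualizacion__": "2015-03-20 10:11:12",
    "streamcloud.py": "2015-03-18 09:00:00",
    "vk.py": "2015-02-01 17:30:00",
}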
コード例 #40
0
ファイル: config.py プロジェクト: llaurator/addon
def verify_directories_created():
    from core import logger
    from core import filetools
    from platformcode import xbmc_videolibrary

    config_paths = [["videolibrarypath", "videolibrary"],
                    ["downloadpath", "downloads"],
                    ["downloadlistpath", "downloads/list"],
                    ["settings_path", "settings_channels"]]

    for path, default in config_paths:
        saved_path = get_setting(path)

        # video library
        if path == "videolibrarypath":
            if not saved_path:
                saved_path = xbmc_videolibrary.search_library_path()
                if saved_path:
                    set_setting(path, saved_path)

        if not saved_path:
            saved_path = "special://profile/addon_data/plugin.video." + PLUGIN_NAME + "/" + default
            set_setting(path, saved_path)

        saved_path = xbmc.translatePath(saved_path)
        if not filetools.exists(saved_path):
            logger.debug("Creating %s: %s" % (path, saved_path))
            filetools.mkdir(saved_path)

    config_paths = [["folder_movies", "CINE"], ["folder_tvshows", "SERIES"]]

    flag_call = True
    for path, default in config_paths:
        saved_path = get_setting(path)

        if not saved_path:
            saved_path = default
            set_setting(path, saved_path)

        content_path = filetools.join(get_videolibrary_path(), saved_path)
        if not filetools.exists(content_path):
            logger.debug("Creating %s: %s" % (path, content_path))

            # if the directory was created successfully
            if filetools.mkdir(content_path):
                if flag_call:
                    # pass the value so we know we went through directory creation
                    xbmc_videolibrary.ask_set_content(1)
                    flag_call = False

    try:
        from core import scrapertools
        # Look for the addon.xml file of the active skin
        skindir = filetools.join(xbmc.translatePath("special://home"),
                                 'addons', xbmc.getSkinDir(), 'addon.xml')
        # Extract the name of the default resolution folder
        folder = ""
        data = filetools.read(skindir)
        res = scrapertools.find_multiple_matches(data, '(<res .*?>)')
        for r in res:
            if 'default="true"' in r:
                folder = scrapertools.find_single_match(r, 'folder="([^"]+)"')
                break

        # Check whether it exists in the addon and, if not, create it
        default = filetools.join(get_runtime_path(), 'resources', 'skins',
                                 'Default')
        if folder and not filetools.exists(filetools.join(default, folder)):
            filetools.mkdir(filetools.join(default, folder))

        # Copy the file into that folder from the 720p one if it does not exist or its size differs
        if folder and folder != '720p':
            for root, folders, files in filetools.walk(
                    filetools.join(default, '720p')):
                for f in files:
                    if not filetools.exists(filetools.join(default, folder, f)) or \
                            (filetools.getsize(filetools.join(default, folder, f)) !=
                                 filetools.getsize(filetools.join(default, '720p', f))):
                        filetools.copy(filetools.join(default, '720p', f),
                                       filetools.join(default, folder, f),
                                       True)
    except:
        import traceback
        logger.error("Al comprobar o crear la carpeta de resolución")
        logger.error(traceback.format_exc())
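To illustrate the skin-resolution lookup above: the active skin's addon.xml declares one or more <res ...> nodes and the code keeps the folder of the node marked default="true". A hedged sketch using plain re instead of the scrapertools helpers, with an invented addon.xml fragment:

import re

data = '<res width="1920" height="1080" aspect="16:9" default="true" folder="1080i" />'
for r in re.findall(r'(<res .*?>)', data):
    if 'default="true"' in r:
        print(re.search(r'folder="([^"]+)"', r).group(1))  # -> 1080i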
コード例 #41
0
def checkforupdates(plugin_mode=True):
    logger.info("streamondemand.core.updater checkforupdates")

    # Download the file with the version from the web
    logger.info("streamondemand.core.updater Verificando actualizaciones...")
    logger.info("streamondemand.core.updater Version remota: " +
                REMOTE_VERSION_FILE)
    data = scrapertools.cachePage(REMOTE_VERSION_FILE)
    '''    
    <?xml version="1.0" encoding="utf-8" standalone="yes"?>
    <version>
            <name>streamondemand</name>
            <tag>4.0     </tag>
            <version>4000</version>
            <date>20/03/2015</date>
            <changes>New release</changes>
    </version>
    '''

    version_publicada = scrapertools.find_single_match(
        data, "<version>([^<]+)</version>").strip()
    tag_publicada = scrapertools.find_single_match(
        data, "<tag>([^<]+)</tag>").strip()
    logger.info("streamondemand.core.updater version remota=" + tag_publicada +
                " " + version_publicada)

    # Read the file with the installed version
    localFileName = LOCAL_VERSION_FILE
    logger.info("streamondemand.core.updater fichero local version: " +
                localFileName)
    infile = open(localFileName)
    data = infile.read()
    infile.close()
    #logger.info("xml local="+data)

    version_local = scrapertools.find_single_match(
        data, "<version>([^<]+)</version>").strip()
    tag_local = scrapertools.find_single_match(data,
                                               "<tag>([^<]+)</tag>").strip()
    logger.info("streamondemand.core.updater version local=" + tag_local +
                " " + version_local)

    try:
        numero_version_publicada = int(version_publicada)
        numero_version_local = int(version_local)
    except:
        import traceback
        logger.info(traceback.format_exc())
        version_publicada = ""
        version_local = ""

    if version_publicada == "" or version_local == "":
        arraydescargada = tag_publicada.split(".")
        arraylocal = tag_local.split(".")

        # local 2.8.0 - remote 2.8.0 -> do not download
        # local 2.9.0 - remote 2.8.0 -> do not download
        # local 2.8.0 - remote 2.9.0 -> download
        if len(arraylocal) == len(arraydescargada):
            logger.info("caso 1")
            hayqueactualizar = False
            for i in range(0, len(arraylocal)):
                print arraylocal[i], arraydescargada[i], int(
                    arraydescargada[i]) > int(arraylocal[i])
                if int(arraydescargada[i]) > int(arraylocal[i]):
                    hayqueactualizar = True
        # local 2.8.0 - remote 2.8 -> do not download
        # local 2.9.0 - remote 2.8 -> do not download
        # local 2.8.0 - remote 2.9 -> download
        if len(arraylocal) > len(arraydescargada):
            logger.info("caso 2")
            hayqueactualizar = False
            for i in range(0, len(arraydescargada)):
                #print arraylocal[i], arraydescargada[i], int(arraydescargada[i]) > int(arraylocal[i])
                if int(arraydescargada[i]) > int(arraylocal[i]):
                    hayqueactualizar = True
        # local 2.8 - remote 2.8.8 -> download
        # local 2.9 - remote 2.8.8 -> do not download
        # local 2.10 - remote 2.9.9 -> do not download
        # local 2.5 - remote 3.0.0
        if len(arraylocal) < len(arraydescargada):
            logger.info("caso 3")
            hayqueactualizar = True
            for i in range(0, len(arraylocal)):
                #print arraylocal[i], arraydescargada[i], int(arraylocal[i])>int(arraydescargada[i])
                if int(arraylocal[i]) > int(arraydescargada[i]):
                    hayqueactualizar = False
                elif int(arraylocal[i]) < int(arraydescargada[i]):
                    hayqueactualizar = True
                    break
    else:
        hayqueactualizar = (numero_version_publicada > numero_version_local)

    if hayqueactualizar:

        if plugin_mode:

            logger.info("streamondemand.core.updater actualizacion disponible")

            # Add the entry to the XBMC listing
            import xbmcgui
            #thumbnail = IMAGES_PATH+"Crystal_Clear_action_info.png"
            thumbnail = os.path.join(config.get_runtime_path(), "resources",
                                     "images", "service_update.png")
            logger.info("thumbnail=" + thumbnail)
            listitem = xbmcgui.ListItem("Scarica la versione " + tag_publicada,
                                        thumbnailImage=thumbnail)
            itemurl = '%s?action=update&version=%s' % (sys.argv[0],
                                                       tag_publicada)
            import xbmcplugin
            xbmcplugin.addDirectoryItem(handle=int(sys.argv[1]),
                                        url=itemurl,
                                        listitem=listitem,
                                        isFolder=True)

            # Notify with a popup
            dialog = xbmcgui.Dialog()
            dialog.ok(
                "Versione " + tag_publicada + " disponibile",
                "E' possibile scaricare la nuova versione del plugin\nattraverso l'opzione nel menù principale."
            )

        else:

            import xbmcgui
            yes_pressed = xbmcgui.Dialog().yesno(
                "Versione " + tag_publicada + " disponibile", "Installarla?")

            if yes_pressed:
                params = {"version": tag_publicada}
                update(params)
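A simplified, hedged sketch of the tag-comparison fallback above (used when the numeric <version> values cannot be parsed): the dot-separated components are compared left to right, so, for example, a local 2.8.0 against a published 2.9 triggers an update. The version strings below are invented:

local, published = "2.8.0".split("."), "2.9".split(".")
hayqueactualizar = False
for l, p in zip(local, published):
    if int(p) > int(l):
        hayqueactualizar = True
        break
    elif int(p) < int(l):
        break
print(hayqueactualizar)  # True -> version 2.9 would be offered for download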
コード例 #42
0
def get_channel_parameters(channel_name):
    #logger.info("streamondemand.core.channeltools get_channel_parameters channel_name="+channel_name)

    channel_xml = os.path.join(config.get_runtime_path(), 'channels', channel_name+".xml")

    if os.path.exists(channel_xml):
        # logger.info(channel_name + ".xml found")

        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        channel_parameters = dict()
        channel_parameters["title"] = scrapertools.find_single_match(data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(data, "<id>([^<]*)</id>")
        channel_parameters["active"] = scrapertools.find_single_match(data, "<active>([^<]*)</active>")
        channel_parameters["adult"] = scrapertools.find_single_match(data, "<adult>([^<]*)</adult>")
        channel_parameters["language"] = scrapertools.find_single_match(data, "<language>([^<]*)</language>")
        # Images: both urls and local files inside "resources/images" are accepted
        channel_parameters["thumbnail"] = scrapertools.find_single_match(data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(data, "<fanart>([^<]*)</fanart>")
        channel_parameters["update_url"] = scrapertools.find_single_match(data, "<update_url>([^<]*)</update_url>")

        if channel_parameters["thumbnail"] and "://" not in channel_parameters["thumbnail"]:
            channel_parameters["thumbnail"] = os.path.join(config.get_runtime_path(), "resources", "images", "squares",
                                                           channel_parameters["thumbnail"])
        if channel_parameters["bannermenu"] and "://" not in channel_parameters["bannermenu"]:
            channel_parameters["bannermenu"] = os.path.join(config.get_runtime_path(), "resources", "images",
                                                            "bannermenu", channel_parameters["bannermenu"])
        if channel_parameters["fanart"] and "://" not in channel_parameters["fanart"]:
            channel_parameters["fanart"] = os.path.join(config.get_runtime_path(), "resources", "images", "fanart",
                                                        channel_parameters["fanart"])

        if channel_parameters["update_url"] == "":
            channel_parameters["update_url"] = DEFAULT_UPDATE_URL

        channel_parameters["include_in_global_search"] = scrapertools.find_single_match(
            data, "<include_in_global_search>([^<]*)</include_in_global_search>")

        category_list = []
        matches = scrapertools.find_multiple_matches(data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)

        channel_parameters["categories"] = category_list
        # Determine whether the channel has configuration options
        channel_parameters["has_settings"] = False
        # this regex returns 2 values per <settings> element (the node content and a \t),
        # so only the value at index 0 is used afterwards.
        matches = scrapertools.find_multiple_matches(data, "<settings>((.|\n)*?)<\/settings>")
        for match in matches:
            _id = scrapertools.find_single_match(match[0], "<id>([^<]*)</id>")
            if _id and "include_in_" not in _id:
                channel_parameters["has_settings"] = True
                break

        logger.info(channel_name+" -> "+repr(channel_parameters))

    else:
        logger.info(channel_name+".xml NOT found")

        channel_parameters = dict()
        channel_parameters["adult"] = "false"
        channel_parameters["update_url"] = DEFAULT_UPDATE_URL

    return channel_parameters
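For context, a hedged sketch of the kind of channel XML this function expects; the element names come from the regular expressions above, while every value here is invented for illustration:

<channel>
    <name>Example Channel</name>
    <id>examplechannel</id>
    <active>true</active>
    <adult>false</adult>
    <language>it</language>
    <thumbnail>examplechannel.png</thumbnail>
    <bannermenu>examplechannel_banner.png</bannermenu>
    <fanart>examplechannel_fanart.jpg</fanart>
    <update_url></update_url>
    <include_in_global_search>true</include_in_global_search>
    <category>movie</category>
    <settings>
        <id>filter_languages</id>
    </settings>
</channel>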
コード例 #43
0
def get_channel_parameters(channel_name):
    # logger.info("streamondemand.core.channeltools get_channel_parameters channel_name="+channel_name)
    channel_xml = os.path.join(config.get_runtime_path(), 'channels',
                               channel_name + ".xml")

    if os.path.exists(channel_xml):
        # logger.info(channel_name + ".xml found")

        infile = open(channel_xml, "rb")
        data = infile.read()
        infile.close()

        channel_parameters = dict()
        channel_parameters["title"] = scrapertools.find_single_match(
            data, "<name>([^<]*)</name>")
        channel_parameters["channel"] = scrapertools.find_single_match(
            data, "<id>([^<]*)</id>")
        channel_parameters["active"] = str_to_bool(
            scrapertools.find_single_match(data, "<active>([^<]*)</active>"))
        channel_parameters["adult"] = str_to_bool(
            scrapertools.find_single_match(data, "<adult>([^<]*)</adult>"))
        channel_parameters["language"] = scrapertools.find_single_match(
            data, "<language>([^<]*)</language>")
        channel_parameters["version"] = scrapertools.find_single_match(
            data, "<version>([^<]*)</version>")

        # Images: both urls and local files inside "resources/images" are accepted
        channel_parameters["thumbnail"] = scrapertools.find_single_match(
            data, "<thumbnail>([^<]*)</thumbnail>")
        channel_parameters["bannermenu"] = scrapertools.find_single_match(
            data, "<bannermenu>([^<]*)</bannermenu>")
        channel_parameters["fanart"] = scrapertools.find_single_match(
            data, "<fanart>([^<]*)</fanart>")

        if channel_parameters["thumbnail"] and "://" not in channel_parameters[
                "thumbnail"]:
            channel_parameters["thumbnail"] = os.path.join(
                config.get_runtime_path(), "resources", "images", "squares",
                channel_parameters["thumbnail"])
        if channel_parameters[
                "bannermenu"] and "://" not in channel_parameters["bannermenu"]:
            channel_parameters["bannermenu"] = os.path.join(
                config.get_runtime_path(), "resources", "images", "bannermenu",
                channel_parameters["bannermenu"])
        if channel_parameters["fanart"] and "://" not in channel_parameters[
                "fanart"]:
            channel_parameters["fanart"] = os.path.join(
                config.get_runtime_path(), "resources", "images", "fanart",
                channel_parameters["fanart"])

        channel_parameters["update_url"] = scrapertools.find_single_match(
            data, "<update_url>([^<]*)</update_url>")
        if channel_parameters["update_url"] == "":
            channel_parameters["update_url"] = DEFAULT_UPDATE_URL

        include_in_global_search = scrapertools.find_single_match(
            data,
            "<include_in_global_search>([^<]*)</include_in_global_search>")

        if include_in_global_search in ["", "true"]:
            channel_parameters["include_in_global_search"] = True
        else:
            channel_parameters["include_in_global_search"] = False

        category_list = []
        matches = scrapertools.find_multiple_matches(
            data, "<category>([^<]*)</category>")
        for match in matches:
            category_list.append(match)

        channel_parameters["categories"] = category_list

        # Determine whether the channel has configuration options
        channel_parameters["has_settings"] = False
        # this regex returns 2 values per <settings> element (the node content and a \t),
        # so only the value at index 0 is used afterwards.
        matches = scrapertools.find_multiple_matches(
            data, "<settings>((.|\n)*?)<\/settings>")
        for match in matches:
            _id = scrapertools.find_single_match(match[0], "<id>([^<]*)</id>")
            if _id and "include_in_" not in _id:
                channel_parameters["has_settings"] = True
                break

        # Start - conditions for marking the channel as compatible
        python_condition = scrapertools.find_single_match(
            data, "<python>([^<]*)</python>")
        if python_condition:
            import sys

            def versiontuple(v):
                return tuple(map(int, (v.split("."))))

            if sys.version_info < versiontuple(python_condition):
                python_compatible = False
            else:
                python_compatible = True
        else:
            python_compatible = True

        addon_version_condition = scrapertools.find_single_match(
            data, "<addon_version>([^<]*)</addon_version>")
        if addon_version_condition:
            import versiontools
            addon_version = int(
                addon_version_condition.replace(".", "").ljust(
                    len(str(versiontools.get_current_plugin_version())), '0'))
            if versiontools.get_current_plugin_version() < addon_version:
                addon_version_compatible = False
            else:
                addon_version_compatible = True
        else:
            addon_version_compatible = True

        channel_parameters["compatible"] = True

        if not python_compatible or not addon_version_compatible:
            channel_parameters["compatible"] = False
        # End - conditions for marking the channel as compatible

        logger.info(channel_name + " -> " + repr(channel_parameters))

    else:
        logger.info(channel_name + ".xml NOT found")

        channel_parameters = dict()
        channel_parameters["adult"] = False
        channel_parameters["update_url"] = DEFAULT_UPDATE_URL

    return channel_parameters
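A worked, hedged example of the <addon_version> check above: the dotted requirement is flattened and right-padded with zeros to the same number of digits as the installed version number before the integer comparison. The numbers below are invented; 2150 stands in for versiontools.get_current_plugin_version():

required = "2.1.5"
current = 2150
needed = int(required.replace(".", "").ljust(len(str(current)), '0'))
print(needed)            # 2150
print(current < needed)  # False -> the channel is considered compatible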
コード例 #44
0
def checkforupdates(plugin_mode=True):
    logger.info("pelisalacarta.core.updater checkforupdates")

    # Download the file with the version from the web
    logger.info("pelisalacarta.core.updater Verificando actualizaciones...")
    logger.info("pelisalacarta.core.updater Version remota: "+REMOTE_VERSION_FILE)
    data = scrapertools.cachePage( REMOTE_VERSION_FILE )

    '''    
    <?xml version="1.0" encoding="utf-8" standalone="yes"?>
    <version>
            <name>pelisalacarta</name>
            <tag>4.0     </tag>
            <version>4000</version>
            <date>20/03/2015</date>
            <changes>New release</changes>
    </version>
    '''

    version_publicada = scrapertools.find_single_match(data,"<version>([^<]+)</version>").strip()
    tag_publicada = scrapertools.find_single_match(data,"<tag>([^<]+)</tag>").strip()
    logger.info("pelisalacarta.core.updater version remota="+tag_publicada+" "+version_publicada)
    
    # Read the file with the installed version
    localFileName = LOCAL_VERSION_FILE
    logger.info("pelisalacarta.core.updater fichero local version: "+localFileName)
    infile = open( localFileName )
    data = infile.read()
    infile.close();
    #logger.info("xml local="+data)

    version_local = scrapertools.find_single_match(data,"<version>([^<]+)</version>").strip()
    tag_local = scrapertools.find_single_match(data,"<tag>([^<]+)</tag>").strip()
    logger.info("pelisalacarta.core.updater version local="+tag_local+" "+version_local)

    try:
        numero_version_publicada = int(version_publicada)
        numero_version_local = int(version_local)
    except:
        import traceback
        logger.info(traceback.format_exc())
        version_publicada = ""
        version_local = ""

    if version_publicada=="" or version_local=="":
        arraydescargada = tag_publicada.split(".")
        arraylocal = tag_local.split(".")

        # local 2.8.0 - remote 2.8.0 -> do not download
        # local 2.9.0 - remote 2.8.0 -> do not download
        # local 2.8.0 - remote 2.9.0 -> download
        if len(arraylocal) == len(arraydescargada):
            logger.info("caso 1")
            hayqueactualizar = False
            for i in range(0, len(arraylocal)):
                print arraylocal[i], arraydescargada[i], int(arraydescargada[i]) > int(arraylocal[i])
                if int(arraydescargada[i]) > int(arraylocal[i]):
                    hayqueactualizar = True
        # local 2.8.0 - remote 2.8 -> do not download
        # local 2.9.0 - remote 2.8 -> do not download
        # local 2.8.0 - remote 2.9 -> download
        if len(arraylocal) > len(arraydescargada):
            logger.info("caso 2")
            hayqueactualizar = False
            for i in range(0, len(arraydescargada)):
                #print arraylocal[i], arraydescargada[i], int(arraydescargada[i]) > int(arraylocal[i])
                if int(arraydescargada[i]) > int(arraylocal[i]):
                    hayqueactualizar = True
        # local 2.8 - remote 2.8.8 -> download
        # local 2.9 - remote 2.8.8 -> do not download
        # local 2.10 - remote 2.9.9 -> do not download
        # local 2.5 - remote 3.0.0
        if len(arraylocal) < len(arraydescargada):
            logger.info("caso 3")
            hayqueactualizar = True
            for i in range(0, len(arraylocal)):
                #print arraylocal[i], arraydescargada[i], int(arraylocal[i])>int(arraydescargada[i])
                if int(arraylocal[i]) > int(arraydescargada[i]):
                    hayqueactualizar =  False
                elif int(arraylocal[i]) < int(arraydescargada[i]):
                    hayqueactualizar =  True
                    break
    else:
        hayqueactualizar = (numero_version_publicada > numero_version_local)

    if hayqueactualizar:
    
        if plugin_mode:
    
            logger.info("pelisalacarta.core.updater actualizacion disponible")
            
            # Add the entry to the XBMC listing
            import xbmcgui
            thumbnail = IMAGES_PATH+"Crystal_Clear_action_info.png"
            logger.info("thumbnail="+thumbnail)
            listitem = xbmcgui.ListItem( "Descargar version "+tag_publicada, thumbnailImage=thumbnail )
            itemurl = '%s?action=update&version=%s' % ( sys.argv[ 0 ] , tag_publicada )
            import xbmcplugin
            xbmcplugin.addDirectoryItem( handle = int(sys.argv[ 1 ]), url = itemurl , listitem=listitem, isFolder=True)
            
            # Notify with a popup
            dialog = xbmcgui.Dialog()
            dialog.ok("Versión "+tag_publicada+" disponible","Ya puedes descargar la nueva versión del plugin\ndesde el listado principal")

        else:

            import xbmcgui
            yes_pressed = xbmcgui.Dialog().yesno( "Versión "+tag_publicada+" disponible" , "¿Quieres instalarla?" )

            if yes_pressed:
                params = {"version":tag_publicada}
                update(params)

コード例 #45
0
ファイル: pordede.py プロジェクト: franbetis/actualizarkodi
def pdd_findvideos(params):
    plugintools.log("[%s %s] Pordede: Buscando enlaces... %s " % (addonName, addonVersion, repr(params)))

    # Initialise defaults so the checks further down do not fail if the "regex" branch is skipped
    fanart = ""
    thumbnail = ""
    if params.get("extra") == "regex":
        try: shutil.rmtree(temp + 'pordede.com', ignore_errors=False, onerror=None)
        except: pass
        params["regex"]='regex'
        login_pordede()
        url_peli = params.get("page")

    # Download the page
        headers = DEFAULT_HEADERS[:]
        headers.append(["X-Requested-With","XMLHttpRequest"])
        data = scrapertools.cache_page(url_peli,headers=headers)
        #plugintools.log("data= "+data)

        fanart = plugintools.find_single_match(data, 'src=(.*?)>').replace("\\", "").replace('"', "").replace(".png/", ".png").strip()
        thumbnail = fanart.replace("big", "").strip()
        plugintools.log("fanart= "+fanart)
        plugintools.log("thumbnail= "+thumbnail)
        
    url = params.get("url")
    if thumbnail == "":  # Safety check in case the regex branch was not run or did not capture the thumbnail correctly
        thumbnail = params.get("thumbnail")

    # Download the page
    headers = DEFAULT_HEADERS[:]
    data = scrapertools.cache_page(url,headers=headers)
    #plugintools.log("data="+data)

    sesion = plugintools.find_single_match(data,'SESS = "([^"]+)";')
    #plugintools.log("sesion="+sesion)

    patron  = '<a target="_blank" class="a aporteLink(.*?)</a>'
    matches = re.compile(patron,re.DOTALL).findall(data)
    itemlist = []

    i = 1
    plugintools.add_item(action="", title='[COLOR lightyellow][B]'+params.get("title")+'[/B][/COLOR]', url="", thumbnail = thumbnail, fanart=fanart, folder=False, isPlayable=False)
    for match in matches:
        #plugintools.log("match= "+match)        
        jdown = scrapertools.find_single_match(match,'<div class="jdownloader">[^<]+</div>')
        if jdown != '':  # Discard watch-online/download links
            continue

        idiomas = re.compile('<div class="flag([^"]+)">([^<]+)</div>',re.DOTALL).findall(match)
        idioma_0 = (idiomas[0][0].replace("&nbsp;","").strip() + " " + idiomas[0][1].replace("&nbsp;","").strip()).strip()
        if len(idiomas) > 1:
            idioma_1 = (idiomas[1][0].replace("&nbsp;","").strip() + " " + idiomas[1][1].replace("&nbsp;","").strip()).strip()
            idioma = idioma_0 + ", " + idioma_1
        else:
            idioma_1 = ''
            idioma = idioma_0

        idioma=idioma.replace("spanish", "ESP").replace("english", "ENG").replace("spanish SUB", "SUB-ESP").replace("english SUB", "SUB-ENG")

        calidad_video = plugintools.find_single_match(match,'<div class="linkInfo quality"><i class="icon-facetime-video"></i>([^<]+)</div>').strip()
        #plugintools.log("calidad_video="+calidad_video)
        calidad_audio = plugintools.find_single_match(match,'<div class="linkInfo qualityaudio"><i class="icon-headphones"></i>([^<]+)</div>').strip()
        #plugintools.log("calidad_audio="+calidad_audio)
        thumb_servidor = plugintools.find_single_match(match,'<div class="hostimage"[^<]+<img\s*src="([^"]+)">').strip()
        #plugintools.log("thumb_servidor="+thumb_servidor)
        nombre_servidor = plugintools.find_single_match(thumb_servidor,"popup_([^\.]+)\.png").strip()
        #plugintools.log("nombre_servidor="+nombre_servidor)
              
        title = "[COLOR white]Op. "+str(i)+'. [/COLOR][COLOR lightgreen][I]['+nombre_servidor+"] [/I][/COLOR][COLOR gold] ("+idioma+") [/COLOR][COLOR lightyellow][I][Video: "+calidad_video.strip()+", Audio: "+calidad_audio.strip()+"][/COLOR][/I] "
        i = i + 1

        cuenta = []
        valoracion = 0
        for idx, val in enumerate(['1', '2', 'report']):
            nn = plugintools.find_single_match(match,'<span\s+data-num="([^"]+)"\s+class="defaultPopup"\s+href="/likes/popup/value/'+val+'/')
            if nn != '0' and nn != '':
                cuenta.append(nn + ' ' + ['[COLOR green]OK[/COLOR]', '[COLOR red]KO[/COLOR]', 'rep'][idx])
                valoracion += int(nn) if val == '1' else -int(nn)

        if len(cuenta) > 0:
            title += ' [COLOR white](' + ', '.join(cuenta) + ')[/COLOR]'

        item_url = plugintools.find_single_match(match,'href="([^"]+)"')
        item_url = 'http://www.pordede.com'+item_url
        #thumbnail = thumb_servidor
        #plugintools.log("title=["+title+"], url=["+url+"], thumbnail=["+thumbnail+"]")
        plugintools.add_item(action="pordede_play", title=title, url=item_url, thumbnail=thumbnail, fanart=fanart, extra=sesion+"|"+item_url, folder=False, isPlayable=True)
コード例 #46
0
def actualiza(item):
    logger.info("deportesalacarta.channels.update_sports actualiza")

    local_folder = os.path.join(xbmc.translatePath("special://home"), "addons")
    error = False
    if not item.url:
        url = "https://github.com/CmosGit/Mod_pelisalacarta_deportes/raw/addon/plugin.video.deportesalacarta-%s.zip" % item.version
    else:
        import servertools
        urls, puede, msg = servertools.resolve_video_urls_for_playing(
            item.server, item.url, "", False, True)
        if puede:
            data_ = httptools.downloadpage(urls[0], hide=True).data
            url = scrapertools.find_single_match(
                data_, '"downloadUrl"\s*:\s*"([^"]+)"')
            if not url:
                url = scrapertools.find_single_match(
                    data_, '<a id="download_button".*?href="([^"]+)"')
            if not item.server and not url:
                try:
                    name, value = scrapertools.find_single_match(
                        data_,
                        'method="post">.*?name="([^"]+)" value="([^"]+)"')
                    post = "%s=%s" % (name, value)
                    data_ = httptools.downloadpage(urls[0], post,
                                                   hide=True).data
                    url = scrapertools.find_single_match(
                        data_, '"downloadUrl"\s*:\s*"([^"]+)"')
                except:
                    pass

            if not url:
                urls, puede, msg = servertools.resolve_video_urls_for_playing(
                    item.server, base64.b64decode(item.url))
                url = urls[0][1]

    progreso = platformtools.dialog_progress("Progreso de la actualización",
                                             "Descargando...")
    filename = 'deportesalacarta-%s.zip' % item.version
    localfilename = filetools.join(config.get_data_path(), filename)
    try:
        result = downloadtools.downloadfile(url, localfilename, [], False,
                                            True, False)
        progreso.update(50, "Descargando archivo", "Descargando...")
        # Unzip it
        logger.info(
            "deportesalacarta.channels.configuracion descomprime fichero...")
        from core import ziptools
        unzipper = ziptools.ziptools()
        logger.info("deportesalacarta.channels.configuracion destpathname=%s" %
                    local_folder)
        unzipper.extract(localfilename, local_folder, update=True)
        progreso.close()
    except:
        import traceback
        logger.info("Detalle del error: %s" % traceback.format_exc())
        # Delete the downloaded zip
        try:
            filetools.remove(localfilename)
        except:
            pass
        progreso.close()
        platformtools.dialog_ok(
            "Error", "Se ha producido un error extrayendo el archivo")
        return

    # Delete the downloaded zip
    logger.info("deportesalacarta.channels.configuracion borra fichero...")
    try:
        filetools.remove(localfilename)
    except:
        pass
    logger.info("deportesalacarta.channels.configuracion ...fichero borrado")

    platformtools.dialog_notification(
        "Actualizado correctamente",
        "Versión %s instalada con éxito" % item.version)

    xbmc.executebuiltin("Container.Refresh")
コード例 #47
0
ファイル: httptools.py プロジェクト: 32Dexter/KOD-Addon
def downloadpage(url, post=None, headers=None, timeout=None, follow_redirects=True, cookies=True, replace_headers=False,
                 add_referer=False, only_headers=False, bypass_cloudflare=True, count_retries=0, count_retries_tot=1,
                 random_headers=False, ignore_response_code=False, alfa_s=False, proxy=True, proxy_web=False,
                 proxy_addr_forced=None, forced_proxy=None, proxy_retries=1):
    """
    Abre una url y retorna los datos obtenidos

    @param url: url que abrir.
    @type url: str
    @param post: Si contiene algun valor este es enviado mediante POST.
    @type post: str
    @param headers: Headers para la petición, si no contiene nada se usara los headers por defecto.
    @type headers: dict, list
    @param timeout: Timeout para la petición.
    @type timeout: int
    @param follow_redirects: Indica si se han de seguir las redirecciones.
    @type follow_redirects: bool
    @param cookies: Indica si se han de usar las cookies.
    @type cookies: bool
    @param replace_headers: Si True, los headers pasados por el parametro "headers" sustituiran por completo los headers por defecto.
                            Si False, los headers pasados por el parametro "headers" modificaran los headers por defecto.
    @type replace_headers: bool
    @param add_referer: Indica si se ha de añadir el header "Referer" usando el dominio de la url como valor.
    @type add_referer: bool
    @param only_headers: Si True, solo se descargarán los headers, omitiendo el contenido de la url.
    @type only_headers: bool
    @param random_headers: Si True, utiliza el método de seleccionar headers aleatorios.
    @type random_headers: bool
    @param ignore_response_code: Si es True, ignora el método para WebErrorException para error como el error 404 en veseriesonline, pero es un data funcional
    @type ignore_response_code: bool
    @return: Resultado de la petición
    @rtype: HTTPResponse

            Parametro               Tipo    Descripción
            ----------------------------------------------------------------------------------------------------------------
            HTTPResponse.sucess:    bool   True: Peticion realizada correctamente | False: Error al realizar la petición
            HTTPResponse.code:      int    Código de respuesta del servidor o código de error en caso de producirse un error
            HTTPResponse.error:     str    Descripción del error en caso de producirse un error
            HTTPResponse.headers:   dict   Diccionario con los headers de respuesta del servidor
            HTTPResponse.data:      str    Respuesta obtenida del servidor
            HTTPResponse.time:      float  Tiempo empleado para realizar la petición

    """

    response = {}

    # Default headers, if nothing is specified
    request_headers = default_headers.copy()

    # Headers passed as parameters
    if headers is not None:
        if not replace_headers:
            request_headers.update(dict(headers))
        else:
            request_headers = dict(headers)

    if add_referer:
        request_headers["Referer"] = "/".join(url.split("/")[:3])
        
    if random_headers or HTTPTOOLS_DEFAULT_RANDOM_HEADERS:
        request_headers['User-Agent'] = random_useragent()

    url = urllib.quote(url, safe="%/:=&?~#+!$,;'@()*[]")

    # If the download has to go through a Proxy or ProxyWeb service, prepare the url
    proxy_retries_counter = 0
    url_save = url
    post_save = post
    while proxy_retries_counter <= proxy_retries:
        # Handlers init
        handlers = [urllib2.HTTPHandler(debuglevel=False)]
        
        proxy_retries_counter += 1
        proxy_stat = ''
        proxy_addr = ''
        proxy_CF_addr = ''
        proxy_web_name = ''
        proxy_log = ''
        
        try:
            if (proxy or proxy_web) and (forced_proxy or proxy_addr_forced or channel_proxy_list(url, forced_proxy=forced_proxy)):
                import proxytools
                proxy_addr, proxy_CF_addr, proxy_web_name, proxy_log = proxytools.get_proxy_addr(url, post=post, forced_proxy=forced_proxy)
                if proxy_addr_forced and proxy_log:
                    import scrapertools
                    proxy_log = scrapertools.find_single_match(str(proxy_addr_forced), "{'http.*':\s*'(.*?)'}")
            
                if proxy and proxy_addr:
                    if proxy_addr_forced: proxy_addr = proxy_addr_forced
                    handlers.append(urllib2.ProxyHandler(proxy_addr))
                    proxy_stat = ', Proxy Direct ' + proxy_log
                elif proxy and proxy_CF_addr:
                    if proxy_addr_forced: proxy_CF_addr = proxy_addr_forced
                    handlers.append(urllib2.ProxyHandler(proxy_CF_addr))
                    proxy_stat = ', Proxy CF ' + proxy_log
                elif proxy and proxy_addr_forced:
                    proxy_addr = proxy_addr_forced
                    handlers.append(urllib2.ProxyHandler(proxy_addr))
                    proxy_stat = ', Proxy Direct ' + proxy_log
                elif proxy and not proxy_addr and not proxy_CF_addr and not proxy_addr_forced:
                    proxy = False
                    if not proxy_web_name:
                        proxy_addr, proxy_CF_addr, proxy_web_name, proxy_log = proxytools.get_proxy_addr(url, forced_proxy='Total')
                    if proxy_web_name:
                        proxy_web = True
                    else:
                        proxy_web = False
                        if proxy_addr:
                            proxy = True
                            handlers.append(urllib2.ProxyHandler(proxy_addr))
                            proxy_stat = ', Proxy Direct ' + proxy_log

                if proxy_web and proxy_web_name:
                    if post: proxy_log = '(POST) ' + proxy_log
                    url, post, headers_proxy, proxy_web_name = proxytools.set_proxy_web(url, proxy_web_name, post=post)
                    if proxy_web_name:
                        proxy_stat = ', Proxy Web ' + proxy_log
                        if headers_proxy:
                            request_headers.update(dict(headers_proxy))
                if proxy_web and not proxy_web_name:
                    proxy_web = False
                    proxy_addr, proxy_CF_addr, proxy_web_name, proxy_log = proxytools.get_proxy_addr(url, forced_proxy='Total')
                    if proxy_CF_addr:
                        proxy = True
                        handlers.append(urllib2.ProxyHandler(proxy_CF_addr))
                        proxy_stat = ', Proxy CF ' + proxy_log
                    elif proxy_addr:
                        proxy = True
                        handlers.append(urllib2.ProxyHandler(proxy_addr))
                        proxy_stat = ', Proxy Direct ' + proxy_log
        except:
            import traceback
            logger.error(traceback.format_exc())
            proxy = ''
            proxy_web = ''
            proxy_stat = ''
            proxy_addr = ''
            proxy_CF_addr = ''
            proxy_web_name = ''
            proxy_log = ''
            url = url_save
            
        # Limit the download time if no timeout was passed and a value is set in the global variable
        if timeout is None and HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT is not None: timeout = HTTPTOOLS_DEFAULT_DOWNLOAD_TIMEOUT
        if timeout == 0: timeout = None

        if not alfa_s:
            logger.info("----------------------------------------------")
            logger.info("downloadpage KOD: %s" %__version)
            logger.info("----------------------------------------------")
            logger.info("Timeout: %s" % timeout)
            logger.info("URL: " + url)
            logger.info("Domain: " + urlparse.urlparse(url)[1])
            if post:
                logger.info("Request: POST" + proxy_stat)
            else:
                logger.info("Request: GET" + proxy_stat)
                logger.info("Use  Cookies: %s" % cookies)
                logger.info("Download Page: %s" % (not only_headers))
                logger.info("Cookie File: " + ficherocookies)
            logger.info("Headers:")
            for header in request_headers:
                logger.info("- %s: %s" % (header, request_headers[header]))

        # Handlers
        if not follow_redirects:
            handlers.append(NoRedirectHandler())

        if cookies:
            handlers.append(urllib2.HTTPCookieProcessor(cj))

        opener = urllib2.build_opener(*handlers)

        if not alfa_s:
            logger.info("Making Request")

        # Timer
        inicio = time.time()

        req = urllib2.Request(url, post, request_headers)

        try:
            if urllib2.__version__ == "2.4":
                import socket
                deftimeout = socket.getdefaulttimeout()
                if timeout is not None:
                    socket.setdefaulttimeout(timeout)
                handle = opener.open(req)
                socket.setdefaulttimeout(deftimeout)
            else:
                handle = opener.open(req, timeout=timeout)

        except urllib2.HTTPError, handle:
            response["sucess"] = False
            response["code"] = handle.code
            response["error"] = handle.__dict__.get("reason", str(handle))
            response["headers"] = handle.headers.dict
            if not only_headers:
                response["data"] = handle.read()
            else:
                response["data"] = ""
            response["time"] = time.time() - inicio
            response["url"] = handle.geturl()

        except Exception, e:
            response["sucess"] = False
            response["code"] = e.__dict__.get("errno", e.__dict__.get("code", str(e)))
            response["error"] = e.__dict__.get("reason", str(e))
            response["headers"] = {}
            response["data"] = ""
            response["time"] = time.time() - inicio
            response["url"] = url