Code Example #1
def salvar_busquedas(params, url, category):
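    # Keep the newest search URL first in the pipe-separated "presets_buscados" setting,
    # removing any previous occurrence and capping the list at the configured limit.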
    channel = params.get("channel")
    limite_busquedas = (
        10,
        20,
        30,
        40,
    )[int(config.get_setting("limite_busquedas"))]
    matches = []
    try:
        presets = config.get_setting("presets_buscados")
        if "|" in presets:
            presets = matches = presets.split("|")
            for count, preset in enumerate(presets):
                if url in preset:
                    del presets[count]
                    break

        if len(presets) >= limite_busquedas:
            presets = presets[:limite_busquedas - 1]
    except:
        presets = ""
    presets2 = ""
    if len(matches) > 0:
        for preset in presets:
            presets2 = presets2 + "|" + preset
        presets = url + presets2
    elif presets != "":
        presets = url + "|" + presets
    else:
        presets = url
    config.setSetting("presets_buscados", presets)
Code Example #2
def set_color(title, category):
    #logger.info()

    color_scheme = {'otro': 'white', 'dual': 'white'}

    #logger.debug('category before remove: %s' % category)
    category = remove_format(category).lower()
    #logger.debug('category after remove: %s' % category)
    # List of possible elements in the title
    color_list = ['movie', 'tvshow', 'year', 'rating_1', 'rating_2', 'rating_3', 'quality', 'cast', 'lat', 'vose',
                  'vos', 'vo', 'server', 'library', 'update', 'no_update']

    # Check whether the custom colors option is enabled
    custom_colors = config.get_setting('title_color')

    # Build the color dictionary for each element: if the option is enabled, use the
    # user's configuration; otherwise leave the title white.
    if title not in ['', ' ']:

        for element in color_list:
            if custom_colors:
                color_scheme[element] = remove_format(config.get_setting('%s_color' % element))
            else:
                color_scheme[element] = 'white'
        if category in ['update', 'no_update']:
            #logger.debug('title before updates: %s' % title)
            title = re.sub(r'\[COLOR .*?\]', '[COLOR %s]' % color_scheme[category], title)
        else:
            if category not in ['movie', 'tvshow', 'library', 'otro']:
                title = "[COLOR %s][%s][/COLOR]" % (color_scheme[category], title)
            else:
                title = "[COLOR %s]%s[/COLOR]" % (color_scheme[category], title)
    return title
Code Example #3
    def get_config(self, session):
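        # Read the optional 'limit' and 'wall_time' query arguments; 0 means
        # "use the configured maximum", and out-of-range values are rejected.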
        try:
            limit = float(self.get_argument('limit', '0'))
            wall_time = float(self.get_argument('wall_time', '0'))
        except ValueError as e:
            raise errors.ModelError(str(e))

        max_wall_time = config.get_setting(session, 'custom_timeout') * 1000
        if wall_time == 0:
            wall_time = max_wall_time
        elif not 0 <= wall_time <= max_wall_time:
            raise errors.ModelError('Query wall time is out of bounds')

        max_limit = config.get_setting(session, 'custom_max_limit')
        if limit == 0:
            limit = max_limit
        elif not 0 <= limit <= max_limit:
            raise errors.ModelError('Query row limit is out of bounds')

        return DefaultMunch(
            undefined, {
                'wall_time': int(wall_time * 1000),
                'limit': int(limit),
                'base_url': config.get_setting(session, 'app_base_url'),
            })
Code Example #4
def auth():
    login = cfg.get_setting(path, 'Authentication', 'Login')
    if login == "":
        login = input("Enter username:"******"":
        password = input("Enter password:"******"username": login,
        "password": password,
        "remember": "true"
    }
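    # POST the credentials; the follow-up GET returns the signed-in user's profile as JSON.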
    with requests.Session() as s:
        p = s.post(url + "/account/login", data=cred)
        r = s.get(url + "/account/login")
        if p.status_code == 204:
            print("Success. Session created...")
            # print(p.json()['message'])
            wlcm_msg = "Hi! " + str(r.json()['firstName']) + " " + str(r.json()['lastName'])
            print(wlcm_msg)
            return s
        else:
            print(p.status_code)
            if p.status_code == 400:
                print("Username/password incorrect. Try again")
            print(p.json()['message'])
            login = cfg.update_setting(path, 'Authentication', 'Login', "")
            password = cfg.update_setting(path, 'Authentication', 'Password', "")
            return auth()
Code Example #5
File: buscador.py Project: hmemar/xbmc-tvalacarta
def salvar_busquedas(params,url,category):
	channel = params.get("channel")
	limite_busquedas = ( 10, 20, 30, 40, )[ int( config.get_setting( "limite_busquedas" ) ) ]
	matches = []
	try:
		presets = config.get_setting("presets_buscados")
		if "|" in presets:
			presets = matches = presets.split("|")			
			for count, preset in enumerate( presets ):
				if url in preset:
					del presets[ count ]
					break
		
		if len( presets ) >= limite_busquedas:
			presets = presets[ : limite_busquedas - 1 ]
	except:
		presets = ""
	presets2 = ""
	if len(matches)>0:
		for preset in presets:
			presets2 = presets2 + "|" + preset 
		presets = url + presets2
	elif presets != "":
		presets = url + "|" + presets
	else:
		presets = url
	config.setSetting("presets_buscados",presets)
Code Example #6
def main():
    ssid = config.get_setting("ssid")
    password = config.get_setting("password")
    station = config.get_setting("station")

    # Turn AP off
    ap = network.WLAN(network.AP_IF)
    ap.active(False)

    connect_wlan(ssid, password)

    # set time to UTC time
    ntptime.settime()
    last_ntp_sync = utime.time()

    # Initialize I2C pins
    i2c = I2C(scl=Pin(22), sda=Pin(21), freq=10000)
    bme = bme280.BME280(i2c=i2c)

    while True:
        # dirty hack to make sure the local clock stays in sync with the ntp server pool.ntp.org.
        # Resync every 10min with the upstream ntp server in order to mitigate time shift.
        # Resetting the time is no problem, since time shift should never be larger than delay of sensor readings.
        if abs(utime.time() - last_ntp_sync) > 60 * 10:
            ntptime.settime()
            last_ntp_sync = utime.time()
            print("Local time has been synced with pool.ntp.org")
        transmit_data(read_sensor(bme, station))
        time.sleep(60)
Code Example #7
def login(force=False):
    utils.log("User: "******"; Logged in: " + str(config.get_setting_bool(
        constants.LOGGED_IN)) + "; Token: " + config.get_setting(constants.TOKEN))

    if force is False and not utils.isEmpty(config.get_setting(constants.TOKEN)) and config.get_setting_bool(constants.LOGGED_IN):
        utils.log("Already logged in")
        return

    opener = get_url_opener()

    values = {'username': config.get_setting(constants.USERNAME),
              'uid': config.get_unique_id(),
              'password': config.get_setting(constants.PASSWORD)}

    response = opener.open(API_ENDPOINT + '/api/v1.4/post/user/login', urllib.urlencode(values))

    response_code = response.getcode()
    response_text = response.read()

    if response_code != 200:
        raise ApiError(
            "Got incorrect response code during login. Reponse code: " + response_code + "; Text: " + response_text)

    json_object = None
    try:
        json_object = json.loads(response_text)
    except ValueError, e:
        config.set_setting_bool(constants.LOGGED_IN, False)
        config.set_setting(constants.TOKEN, "")
        utils.log("Did not receive json, something wrong: " + response_text)
        raise ApiError("Failed to log in, API error")
Code Example #8
File: server.py Project: MayborodaPavel/simpleSite
 def __init__(self, host=None, port=None, handler=HTTPRequestHandler):
     self.host = host or config.get_setting(config.PATH, 'Connection',
                                            'host')
     self.port = port or int(
         config.get_setting(config.PATH, 'Connection', 'port'))
     self.handler = handler
     self.httpd = HTTPServer((self.host, self.port), self.handler)
Code Example #9
def v1_minecraft_whitelist():
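    # Validate the optional 'add'/'remove' query parameters, then issue the
    # matching whitelist commands to the Minecraft server over RCON.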
    sample = {
        "add": {
            "required": False,
            "allowed_types": [str]
        },
        "remove": {
            "required": False,
            "allowed_types": [str]
        }
    }
    query = request.args.to_dict(flat=True)
    succ, errors = check(sample, query)
    if not succ:
        return make_response(jsonify(errors), 400)

    host = config.get_setting("minecraft-server-host", "127.0.0.1")
    port = config.get_setting("minecraft-server-rcon-port", 25575)
    password = config.get_setting("minecraft-server-password", "password")
    responses = list()
    with MCRcon(host, password, port=port) as mcr:
        if "add" in query:
            responses.append(mcr.command(f"whitelist add {query['add']}"))
        if "remove" in query:
            responses.append(
                mcr.command(f"whitelist remove {query['remove']}"))
    return make_response(jsonify(responses), 200)
Code Example #10
def process_once(config):
    rate_limit_interval = 1.0 / config.get('MAX_EMAILS_PER_SECOND', 10)
    n_sent = 0
    with model.session_scope() as session:
        interval = extract('epoch', func.now() - model.AppUser.email_time)
        user_list = (session.query(
            model.AppUser, func.now()).join(model.Organisation).filter(
                model.AppUser.deleted != True,
                model.Organisation.deleted != True,
                model.AppUser.email_interval != 0,
                (model.AppUser.email_time == None) |
                (interval > model.AppUser.email_interval)).all())

        app_name_short = app_config.get_setting(session, 'app_name_short')
        app_base_url = app_config.get_setting(session, 'app_base_url')

        for user, now in user_list:
            messages = []
            activities = get_activities(session, user, now, messages,
                                        config['MAX_ACTIVITIES'],
                                        config['DATE_FORMAT'])
            if len(activities) > 0:
                send_email(config, user, activities, messages, app_name_short,
                           app_base_url)
                n_sent += 1
            user.email_time = now
            # Commit after each email to avoid multiple emails in case a
            # later iteration fails
            session.commit()

            log.debug("Sleeping for %ds", rate_limit_interval)
            time.sleep(rate_limit_interval)

    log.info("Job finished. %d notification emails sent.", n_sent)
    return n_sent
Code Example #11
def get_stream_url(data_url):
    utils.log("Getting URL for channel: " + data_url)
    config.login_check()

    streamurl = None

    url = API_ENDPOINT + "/get/content/live-streams/" + data_url + "?include=quality"
    opener = get_url_opener()
    opener.addheaders.append(
        ('Authorization', "Bearer " + config.get_setting(constants.TOKEN)))
    response = None
    try:
        response = opener.open(url)
    except urllib.error.HTTPError as e:
        config.set_setting(constants.LOGGED_IN, "False")
        raise ApiError("Something wrong: " + e.code)

    response_text = response.read()
    response_code = response.getcode()

    if response_code != 200:
        config.set_setting_bool(constants.LOGGED_IN, "False")
        raise ApiError(
            "Got incorrect response code while requesting stream info. Reponse code: "
            + response_code + ";\nText: " + response_text)

    json_object = None
    try:
        json_object = json.loads(response_text)
    except ValueError:
        config.set_setting(constants.LOGGED_IN, "False")
        raise ApiError("Did not receive json, something wrong: " +
                       response_text)

    stream_links = {}
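    # Quality keys are prefixed (0-hd .. 3-lq) so that sorted() yields the best available stream first.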

    for stream in json_object["data"]:

        if stream["type"] != "live-streams":
            continue

        url = stream["attributes"][
            "stream-url"] + "&auth_token=app_" + config.get_setting(
                constants.TOKEN)

        if "_lq.stream" in stream["id"]:
            stream_links["3-lq"] = url
        elif "_mq.stream" in stream["id"]:
            stream_links["2-mq"] = url
        elif "_hq.stream" in stream["id"]:
            stream_links["1-hq"] = url
        elif "_hd.stream" in stream["id"]:
            stream_links["0-hd"] = url

    for key in sorted(stream_links.keys()):
        streamurl = stream_links[key]
        break

    return streamurl
Code Example #12
File: megavideo.py Project: vdeku/xbmc-tvalacarta
def getlowurl(code):
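	# Low quality: query videolink.php anonymously in normal mode, or with the account cookie in premium mode.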
	xbmc.output("[megavideo.py] Baja calidad")
	
	code=getcode(code)

	modoPremium = config.get_setting("megavideopremium")
	xbmc.output("[megavideo.py] modoPremium="+modoPremium)
	if modoPremium == "false":
		xbmc.output("[megavideo.py] usando modo normal para baja calidad")
		req = urllib2.Request("http://www.megavideo.com/xml/videolink.php?v="+code)
		req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14')
		req.add_header('Referer', 'http://www.megavideo.com/')
		page = urllib2.urlopen(req)
		response = page.read()
		page.close()
		errort = re.compile(' errortext="(.+?)"').findall(response)
		movielink = ""
		if len(errort) <= 0:
			s = re.compile(' s="(.+?)"').findall(response)
			k1 = re.compile(' k1="(.+?)"').findall(response)
			k2 = re.compile(' k2="(.+?)"').findall(response)
			un = re.compile(' un="(.+?)"').findall(response)
			movielink = "http://www" + s[0] + ".megavideo.com/files/" + decrypt(un[0], k1[0], k2[0]) + "/?.flv"
			#addLink(name, movielink+'?.flv','')
	else:
		xbmc.output("[megavideo.py] usando modo premium para baja calidad")
		megavideocookie = config.get_setting("megavideocookie")
		if DEBUG: xbmc.output("[megavideo.py] megavideocookie=#"+megavideocookie+"#")

		xbmc.output("[megavideo.py] Averiguando cookie...")
		megavideologin = config.get_setting("megavideouser")
		if DEBUG: xbmc.output("[megavideo.py] megavideouser=#"+megavideologin+"#")

		megavideopassword = config.get_setting("megavideopassword")
		if DEBUG: xbmc.output("[megavideo.py] megavideopassword=#"+megavideopassword+"#")

		megavideocookie = GetMegavideoUser(megavideologin, megavideopassword)
		if DEBUG: xbmc.output("[megavideo.py] megavideocookie=#"+megavideocookie+"#")

		if len(megavideocookie) == 0:
			advertencia = xbmcgui.Dialog()
			resultado = advertencia.ok('Cuenta de Megavideo errónea' , 'La cuenta de Megavideo que usas no es válida' , 'Comprueba el login y password en la configuración')
			return ""

		req = urllib2.Request("http://www.megavideo.com/xml/videolink.php?v="+code+"&u="+megavideocookie)
		req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.8.1.14) Gecko/20080404 Firefox/2.0.0.14')
		req.add_header('Referer', 'http://www.megavideo.com/')
		page = urllib2.urlopen(req)
		response = page.read()
		page.close()
		errort = re.compile(' errortext="(.+?)"').findall(response)
		movielink = ""
		if len(errort) <= 0:
			s = re.compile(' s="(.+?)"').findall(response)
			k1 = re.compile(' k1="(.+?)"').findall(response)
			k2 = re.compile(' k2="(.+?)"').findall(response)
			un = re.compile(' un="(.+?)"').findall(response)
			movielink = "http://www" + s[0] + ".megavideo.com/files/" + decrypt(un[0], k1[0], k2[0]) + "/?.flv"
			#addLink(name, movielink+'?.flv','')
	
	return movielink
Code Example #13
File: template.py Project: Radhika-Envision/Upmark
 def uses_old_url(self, session):
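     # True when app_redirect is enabled and the request URL does not match the configured base URL.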
     if truthy(tornado.options.options.dev):
         return False
     if not config.get_setting(session, 'app_redirect'):
         return False
     base_url = config.get_setting(session, 'app_base_url')
     default_url = config.get_setting(session, 'app_base_url', True)
     if base_url == default_url:
         return False
     return not self.request.full_url().startswith(base_url)
Code Example #14
def v1_minecraft_command():
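    # Validate the required 'cmd' query parameter, then forward it verbatim to the server over RCON.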
    sample = {"cmd": {"required": True, "allowed_types": [str]}}
    query = request.args.to_dict(flat=True)
    succ, errors = check(sample, query)
    if not succ:
        return make_response(jsonify(errors), 400)
    host = config.get_setting("minecraft-server-host", "127.0.0.1")
    port = config.get_setting("minecraft-server-rcon-port", 25575)
    password = config.get_setting("minecraft-server-password", "password")
    with MCRcon(host, password, port=port) as mcr:
        command = query["cmd"]
        resp = mcr.command(command)
        return make_response(jsonify(resp), 200)
Code Example #15
File: strava.py Project: sharjeelaziz/human-miles
    def __init__(self):
        self.client_id = c.get_setting(c.SECTION_SETTINGS, c.CLIENT_ID)
        self.client_secret = c.get_setting(c.SECTION_SETTINGS, c.CLIENT_SECRET)

        self.access_token = c.get_setting(c.SECTION_SETTINGS, c.ACCESS_TOKEN)
        self.refresh_token = c.get_setting(c.SECTION_SETTINGS, c.REFRESH_TOKEN)
        self.expires_at = float(c.get_setting(c.SECTION_SETTINGS,
                                              c.EXPIRES_AT))
        self.total_meters = 0
        self.last_activity_date = None

        self.sc = stravalib.Client(self.access_token)
        self.start_time = time.time()
Code Example #16
File: unify.py Project: userColaborator/addon
def set_color(title, category):
    #logger.info()
    from core import jsontools

    styles_path = os.path.join(config.get_runtime_path(), 'resources',
                               'color_styles.json')
    preset = config.get_setting("preset_style", default="Estilo 1")
    logger.debug(preset)
    color_setting = jsontools.load((open(styles_path, "r").read()))[preset]

    color_scheme = {'otro': 'white', 'dual': 'white'}

    #logger.debug('category before remove: %s' % category)
    category = remove_format(category).lower()
    #logger.debug('category after remove: %s' % category)
    # List of possible elements in the title
    color_list = [
        'movie', 'tvshow', 'year', 'rating_1', 'rating_2', 'rating_3',
        'quality', 'cast', 'lat', 'vose', 'vos', 'vo', 'server', 'library',
        'update', 'no_update'
    ]

    # Check whether the custom colors option is enabled
    custom_colors = config.get_setting('title_color')

    # Build the color dictionary for each element: if the option is enabled, use the
    # user's configuration; otherwise fall back to the preset style.
    if title not in ['', ' ']:

        for element in color_list:
            if custom_colors:
                color_scheme[element] = remove_format(
                    config.get_setting('%s_color' % element))
            else:
                color_scheme[element] = remove_format(
                    color_setting.get(element, 'white'))
                #color_scheme[element] = 'white'

        if category in ['update', 'no_update']:
            #logger.debug('title before updates: %s' % title)
            title = re.sub(r'\[COLOR .*?\]',
                           '[COLOR %s]' % color_scheme[category], title)
        else:
            if category not in ['movie', 'tvshow', 'library', 'otro']:
                title = "[COLOR %s][%s][/COLOR]" % (color_scheme[category],
                                                    title)
            else:
                title = "[COLOR %s]%s[/COLOR]" % (color_scheme[category],
                                                  title)
    return title
Code Example #17
    def get(self):
        with model.session_scope() as session:
            user_session = self.get_user_session(session)
            policy = user_session.policy.derive({})
            policy.verify('custom_query_view')

            to_son = ToSon(r'.*')
            self.set_header("Content-Type", "application/json")

            wall_time = config.get_setting(session, 'custom_timeout') * 1000
            max_limit = config.get_setting(session, 'custom_max_limit')
            conf = {'wall_time': wall_time, 'max_limit': max_limit}

        self.write(json_encode(to_son(conf)))
        self.finish()
Code Example #18
File: downloadtools.py Project: tvalacarta/descargar
def getfilefromtitle(url, title, folder=""):
    logger.info("[downloadtools.py] getfilefromtitle: title=" + title + " url=" + url + " folder=" + folder)

    # logger.info("[downloadtools.py] downloadtitle: title="+urllib.quote_plus( title ))
    plataforma = config.get_system_platform()
    logger.info("[downloadtools.py] getfilefromtitle: plataforma=" + plataforma)

    # nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
    import scrapertools

    if plataforma == "xbox":
        nombrefichero = title[:38] + scrapertools.get_filename_from_url(url)[-4:]
        nombrefichero = limpia_nombre_excepto_1(nombrefichero)
    else:
        nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:]
        logger.info("[downloadtools.py] getfilefromtitle: nombrefichero=%s" % nombrefichero)
        if "videobb" in url or "videozer" in url or "putlocker" in url:
            nombrefichero = title + ".flv"
        if "videobam" in url:
            nombrefichero = title + "." + url.rsplit(".", 1)[1][0:3]
        if "dibujos.tv" in url:
            nombrefichero = title + ".mp4"
        if "filenium" in url:
            # Content-Disposition	filename="filenium_El.Gato.con.Botas.TSScreener.Latino.avi"
            content_disposition_header = scrapertools.get_header_from_response(url, header_to_get="Content-Disposition")
            logger.info("content_disposition=" + content_disposition_header)
            partes = content_disposition_header.split("=")
            if len(partes) <= 1:
                raise Exception("filenium", "no existe")

            extension = partes[1][-5:-1]
            nombrefichero = title + extension
        logger.info("[downloadtools.py] getfilefromtitle: nombrefichero=%s" % nombrefichero)

        nombrefichero = limpia_nombre_caracteres_especiales(nombrefichero)

    logger.info("[downloadtools.py] getfilefromtitle: nombrefichero=%s" % nombrefichero)

    if folder == "":
        fullpath = os.path.join(config.get_setting("downloadpath"), nombrefichero)
    else:
        fullpath = os.path.join(config.get_setting("downloadpath"), folder, nombrefichero)

    logger.info("[downloadtools.py] getfilefromtitle: fullpath=%s" % fullpath)

    return fullpath
Code Example #19
        def wrapper(*args, **kwargs):
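            # Enforce token-based authorization when enabled; otherwise fall through to the wrapped handler.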
            if privilege and config.get_setting("authorization-enabled", True):
                author = request.headers.get("Authorization")
                if author:
                    succ, username = user_client.get_username_from_token(
                        author)
                    succ, tokens = user_client.validate_token_for_user(
                        username, author)
                    has_priv = user_client.token_has_privilege(
                        author, privilege)

                    if has_priv and succ:
                        return func(*args, **kwargs)
                    elif not has_priv:
                        return {
                            "error":
                            f"Insufficient privileges, resource requires '{privilege.lower()}' for access."
                        }, 403
                    elif not succ:
                        return {"error": f"Invalid token."}, 401
                else:
                    return {
                        "error":
                        f"Must specify 'Authorization' header with token."
                    }, 401
            return func(*args, **kwargs)
Code Example #20
def AddItem(item, title, thumbnail, mode): #----------------------------------OK
    contextCommands=[]   
    if "," in item.context:
      for menuitem in item.context.split("|"):
        if "," in menuitem:
          from copy import deepcopy
          Menu = deepcopy(item)
          if len(menuitem.split(",")) == 2:
            Titulo = menuitem.split(",")[0]
            Menu.action = menuitem.split(",")[1]
          elif len(menuitem.split(",")) == 3:
            Titulo = menuitem.split(",")[0]
            Menu.channel = menuitem.split(",")[1]
            Menu.action =menuitem.split(",")[2]
          Menu.refered_action = item.action
          contextCommands.append([Titulo,ConstruirURL(Menu)])
    import xbmcgui
    import xbmcplugin
    listitem = xbmcgui.ListItem( title, iconImage="DefaultFolder.png", thumbnailImage=thumbnail)
    listitem.setInfo( "video", { "Title" : item.title, "Plot" : item.plot, "Studio" : item.channel} )
    if item.fanart!="":
      listitem.setProperty('fanart_image',item.fanart) 
      xbmcplugin.setPluginFanart(int(sys.argv[1]), item.fanart)
    listitem.addContextMenuItems (contextCommands, replaceItems=True)
    
    if item.folder:
      xbmcplugin.addDirectoryItem( handle = int(sys.argv[1]), url = sys.argv[ 0 ] + "?" + item.serialize() , listitem=listitem, isFolder=True)
    else:
      if config.get_setting("player_mode")=="1": # SetResolvedUrl debe ser siempre "isPlayable = true"
        listitem.setProperty('IsPlayable', 'true')
      xbmcplugin.addDirectoryItem( handle = int(sys.argv[1]), url = sys.argv[ 0 ] + "?" + item.serialize() , listitem=listitem, isFolder=False)
Code Example #21
File: trial.py Project: AutumnSun1996/ML
def check(estimator, data, tune=True, fit=True):
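    # Benchmark an estimator with its default settings and, optionally, after hyperparameter tuning.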
    log(0x25, '~Default Setting~', estimator.__class__.__name__)
    if fit:
        tick = time.time()
        estimator.fit(**data['train'])
        log(0x25, 'Fit in:', time.time() - tick)
    if estimator.__class__.__name__ == 'Ensemble':
        log(
            0x25, 'Base Estimators:', ', '.join([
                '%s' % e.__class__.__name__ for e in estimator.base_estimators
            ]))
        log(0x25, 'Coef:', estimator.estimator.coef_, 'intercept:',
            estimator.estimator.intercept_)
    tick = time.time()
    prediction = estimator.predict_proba(data['test']['X'])
    log(0x25, 'Predict in:', time.time() - tick)
    score = check_result(data['test']['y'], prediction)
    log(0x25, 'Score:', score)

    if not tune:
        return
    log(0x25, '~Tuned~', estimator.__class__.__name__)
    tick = time.time()
    tuning(estimator, **data['train'],
           **get_setting(estimator.__class__.__name__))
    # estimator.fit(**data['train'])
    score = check_result(data['test']['y'],
                         estimator.predict_proba(data['test']['X']))
    log(0x25, 'Params:', estimator.get_params())
    log(0x25, 'Time:', time.time() - tick)
    log(0x25, 'Score:', score)
Code Example #22
File: compare.py Project: AutumnSun1996/ML
def check(estimator_class, data):
    if estimator_class.__name__ == 'CatBoostClassifier':
        estimator = estimator_class(loss_function='MultiClass',
                                    classes_count=len(set(data['train']['y'])))
    else:
        estimator = estimator_class()
    log('~Fit With Default Setting~', estimator_class.__name__)
    tick1 = time.time()
    estimator.fit(**data['train'])
    score = error_func(data['test']['y'], estimator.predict(data['test']['X']))
    tick2 = time.time()
    log('Score:', score)
    log('Time Usage:', tick2 - tick1)

    if estimator_class.__name__ == 'CatBoostClassifier':
        estimator = estimator_class(loss_function='MultiClass',
                                    classes_count=len(set(data['train']['y'])))
    else:
        estimator = estimator_class()
    log('~Tuning~', estimator_class.__name__)
    tick1 = time.time()
    tuning(estimator, **data['train'], **get_setting(estimator_class.__name__))
    score = error_func(data['test']['y'], estimator.predict(data['test']['X']))
    tick2 = time.time()
    log('Score:', score)
    log('Time Usage:', tick2 - tick1)
Code Example #23
File: epg.py Project: winc0235/xbmc-lattelecom.tv
def should_update():
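    # The EPG is refreshed only when the stored timestamp is older than the interval (6 hours).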
    t1 = utils.dateFromString(config.get_setting(constants.LAST_EPG))
    t2 = datetime.datetime.now()
    interval = 6
    update = abs(t2 - t1) > datetime.timedelta(hours=interval)
    return update
Code Example #24
File: buscador.py Project: hmemar/xbmc-tvalacarta
def listar_busquedas(params,url,category):
	print "listar_busquedas()"
	channel2 = ""
	# Display the previously saved searches
	try:
		presets = config.get_setting("presets_buscados")
		channel_preset  = params.get("channel")
		if channel_preset != CHANNELNAME:
			channel2 = channel_preset
		print "channel_preset :%s" %channel_preset
		accion = params.get("action")
		matches = ""
		if "|" in presets:
			matches = presets.split("|")
			addfolder( "buscador"   , config.get_localized_string(30103)+"..." , matches[0] , "por_teclado", channel2 ) # Buscar
		else:
			addfolder( "buscador"   , config.get_localized_string(30103)+"..." , "" , "por_teclado", channel2 )
		if len(matches)>0:	
			for match in matches:
				
				title=scrapedurl = match
		
				addfolder( channel_preset , title , scrapedurl , "searchresults" )
		elif presets != "":
		
			title = scrapedurl = presets
			addfolder( channel_preset , title , scrapedurl , "searchresults" )
	except:
		addfolder( "buscador"   , config.get_localized_string(30103)+"..." , "" , "por_teclado" , channel2 )
		
	# Close the directory
	xbmcplugin.setPluginCategory( handle=int( sys.argv[ 1 ] ), category=category )
	xbmcplugin.addSortMethod( handle=int( sys.argv[ 1 ] ), sortMethod=xbmcplugin.SORT_METHOD_NONE )
	xbmcplugin.endOfDirectory( handle=int( sys.argv[ 1 ] ), succeeded=True )
Code Example #25
File: buscador.py Project: hmemar/xbmc-tvalacarta
def borrar_busqueda(params,url,category):
	channel = params.get("channel")
	matches = []
	try:
		presets = config.get_setting("presets_buscados")
		if "|" in presets:
			presets = matches = presets.split("|")
			for count, preset in enumerate( presets ):
				if url in preset:
					del presets[ count ]
					break
		elif presets == url:
			presets = ""
			
	except:
		presets = ""
	if len(matches)>1:
		presets2 = ""
		c = 0
		barra = ""
		for preset in presets:
			if c>0:
				barra = "|"
			presets2 =  presets2 + barra + preset 
			c +=1
		presets = presets2
	elif len(matches) == 1:
		presets = presets[0]
	config.setSetting("presets_buscados",presets)
	# refresh container so item is removed
	xbmc.executebuiltin( "Container.Refresh" )
Code Example #26
def getfilefromtitle(url,title):
    # Log what is about to be downloaded
    logger.info("[downloadtools.py] getfilefromtitle: url="+url )
    #logger.info("[downloadtools.py] downloadtitle: title="+urllib.quote_plus( title ))
    plataforma = config.get_system_platform()
    logger.info("[downloadtools.py] getfilefromtitle: plataforma="+plataforma)
    
    #nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
    if plataforma=="xbox":
        nombrefichero = title[:38] + url[-4:]
        nombrefichero = limpia_nombre_excepto_1(nombrefichero)
    else:
        nombrefichero = title + url[-4:]
        if "videobb" in url or "videozer" in url:
            nombrefichero = title + ".flv"
        if "videobam" in url:
            nombrefichero = title+"."+url.rsplit(".",1)[1][0:3]
        if "filenium" in url:
            # Content-Disposition	filename="filenium_El.Gato.con.Botas.TSScreener.Latino.avi"
            import scrapertools
            content_disposition_header = scrapertools.get_header_from_response(url,header_to_get="Content-Disposition")
            logger.info("content_disposition="+content_disposition_header)
            partes=content_disposition_header.split("=")
            nombrefichero = title + partes[1][-5:-1]

        nombrefichero = limpia_nombre_caracteres_especiales(nombrefichero)

    logger.info("[downloadtools.py] getfilefromtitle: nombrefichero=%s" % nombrefichero)

    fullpath = os.path.join( config.get_setting("downloadpath") , nombrefichero )
    logger.info("[downloadtools.py] getfilefromtitle: fullpath=%s" % fullpath)
    
    return fullpath
Code Example #27
File: backtest.py Project: mainyaa/bitmech
    def load_bitflyer_server():
        import config
        import pybitflyer
        setting = config.get_setting()
        api = pybitflyer.API(api_key=setting["bitflyer"]["api_key"],
                             api_secret=setting["bitflyer"]["api_secret"])
        executions = api.executions(product_code="BTC_JPY", count=500)
        executions = pd.DataFrame.from_dict(executions)
        executions["exec_date"] = pd.to_datetime(executions["exec_date"])
        executions.index = executions["exec_date"]
        print(executions)

        # fetch bitFlyer executions back to the target date (today - 365 days)
        target_date = datetime.datetime.today() - datetime.timedelta(days=365)
        end_date = executions.iloc[-1, 1]
        while target_date < end_date:
            end_date = executions.iloc[-1, 1]
            print(end_date)
            before = executions.iloc[-1, 2]
            e1 = api.executions(product_code="BTC_JPY",
                                count=500,
                                before=before)
            e1 = pd.DataFrame.from_dict(e1)
            e1["exec_date"] = pd.to_datetime(e1["exec_date"])
            e1.index = e1["exec_date"]
            executions = executions.append(e1)
            time.sleep(1)
        executions.to_csv("data/bitflyerBTC_JPY_executions.csv", index=None)
        return executions
Code Example #28
File: backtest.py Project: mainyaa/bitmech
    def load_bitflyer_server_365():
        import config
        import pybitflyer
        setting = config.get_setting()
        api = pybitflyer.API(api_key=setting["bitflyer"]["api_key"],
                             api_secret=setting["bitflyer"]["api_secret"])
        i = 12354
        executions = pd.read_csv("data/bitflyerBTC_JPY_executions_%d.csv" % i)

        # fetch bitFlyer executions back to the target date (today - 365 days)
        target_date = datetime.datetime.today() - datetime.timedelta(days=365)

        end_date = datetime.datetime.strptime(executions.iloc[-1, 1],
                                              '%Y-%m-%d %H:%M:%S.%f')
        before = executions.iloc[-1, 2]
        while target_date < end_date:
            print(end_date)
            e1 = api.executions(product_code="BTC_JPY",
                                count=500,
                                before=before)
            e1 = pd.DataFrame.from_dict(e1)
            e1["exec_date"] = pd.to_datetime(e1["exec_date"])
            e1.index = e1["exec_date"]
            end_date = e1.iloc[-1, 1]
            before = e1.iloc[-1, 2]
            time.sleep(1)
            e1.to_csv("data/bitflyerBTC_JPY_executions_%s.csv" % i,
                      index=None,
                      header=None)
            i += 1
        executions.to_csv("data/bitflyerBTC_JPY_executions.csv", index=None)
        return executions
Code Example #29
def get_stream_url(data_url):
    utils.log("Getting URL for channel: " + data_url)
    config.login_check()

    streamurl = None

    url = API_ENDPOINT + "/api/v1.7/get/content/live-streams/" + data_url + "?include=quality"
    opener = get_url_opener()
    opener.addheaders.append(('Authorization', "Bearer " + config.get_setting(constants.TOKEN)))
    response = opener.open(url)

    response_text = response.read()
    response_code = response.getcode()

    if response_code != 200:
        config.set_setting_bool(constants.LOGGED_IN, False)
        raise ApiError(
            "Got incorrect response code while requesting stream info. Reponse code: " + response_code + ";\nText: " + response_text)

    json_object = None
    try:
        json_object = json.loads(response_text)
    except ValueError, e:
        config.set_setting(constants.LOGGED_IN, False)
        raise ApiError("Did not receive json, something wrong: " + response_text)
Code Example #30
File: backtest.py Project: mainyaa/bitmech
def run_optimize(params):
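    # Hyperopt objective: average the score of num_rounds randomized backtests,
    # skipping parameter sets that trade too little or perform poorly.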
    global optimize_report
    result = []
    setting = config.get_setting()
    num_rounds = setting["hyperopt"]["num_rounds"]
    skip = 0
    for i in tqdm.tqdm(range(num_rounds)):
        backtest = Backtest()
        backtest.strategy = "RSI"
        res = backtest.run_single(params, deltaDays=-1, random=True)
        result.append(res)
        if skip > 1 and skip > num_rounds / 10 and i < num_rounds / 5:
            print("%f: num_rounds/10:" % np.mean(result), params)
            return np.mean(result)
        if backtest.trades < 2:
            skip += 1
            continue
        if len(backtest.result) > 1:
            if backtest.result["alpha"][-1] < 0 or backtest.result[
                    "relative_profit"][-1] < 50 or backtest.result[
                        "treynor_ratio"][-1] < 1:
                skip += 1
                continue
        optimize_report.append(pd.concat(backtest.result))
    r = np.mean(result)
    print(r, params)
    return r
Code Example #31
def get_stream_url(data_url):
    utils.log("Getting URL for channel: " + data_url)
    config.login_check()

    streamurl = None

    url = API_ENDPOINT + "/api/v1.4/get/content/live-streams/" + data_url + "?include=quality"
    opener = get_url_opener()
    opener.addheaders.append(
        ('Authorization', "Bearer " + config.get_setting(constants.TOKEN)))
    response = opener.open(url)

    response_text = response.read()
    response_code = response.getcode()

    if response_code != 200:
        config.set_setting_bool(constants.LOGGED_IN, False)
        raise ApiError(
            "Got incorrect response code while requesting stream info. Reponse code: "
            + response_code + ";\nText: " + response_text)

    json_object = None
    try:
        json_object = json.loads(response_text)
    except ValueError, e:
        config.set_setting(constants.LOGGED_IN, False)
        raise ApiError("Did not receive json, something wrong: " +
                       response_text)
Code Example #32
def borrar_busqueda(params, url, category):
    channel = params.get("channel")
    matches = []
    try:
        presets = config.get_setting("presets_buscados")
        if "|" in presets:
            presets = matches = presets.split("|")
            for count, preset in enumerate(presets):
                if url in preset:
                    del presets[count]
                    break
        elif presets == url:
            presets = ""

    except:
        presets = ""
    if len(matches) > 1:
        presets2 = ""
        c = 0
        barra = ""
        for preset in presets:
            if c > 0:
                barra = "|"
            presets2 = presets2 + barra + preset
            c += 1
        presets = presets2
    elif len(matches) == 1:
        presets = presets[0]
    config.setSetting("presets_buscados", presets)
    # refresh container so item is removed
    xbmc.executebuiltin("Container.Refresh")
Code Example #33
File: unify.py Project: Jpocas3212/salva59sg
def thumbnail_type(item):
    #logger.info()
    # Decide which thumbnail type to use in findvideos:
    # the poster or the server's logo

    thumb_type = config.get_setting('video_thumbnail_type')
    info = item.infoLabels
    if not item.contentThumbnail:
        item.contentThumbnail = item.thumbnail

    if info:
        if info['thumbnail'] != '':
            item.contentThumbnail = info['thumbnail']

        if item.action == 'play':
            if thumb_type == 0:
                if info['thumbnail'] != '':
                    item.thumbnail = info['thumbnail']
            elif thumb_type == 1:
                from core.servertools import get_server_parameters
                #logger.debug('item.server: %s'%item.server)
                server_parameters = get_server_parameters(item.server.lower())
                item.thumbnail = server_parameters.get("thumbnail",
                                                       item.contentThumbnail)

    return item.thumbnail
Code Example #34
def getfilefromtitle(url,title,folder=""):
    logger.info("[downloadtools.py] getfilefromtitle: title="+title+" url="+url+" folder="+folder )

    #logger.info("[downloadtools.py] downloadtitle: title="+urllib.quote_plus( title ))
    plataforma = config.get_system_platform()
    logger.info("[downloadtools.py] getfilefromtitle: plataforma="+plataforma)
    
    #nombrefichero = xbmc.makeLegalFilename(title + url[-4:])
    import scrapertools
    if plataforma=="xbox":
        nombrefichero = title[:38] + scrapertools.get_filename_from_url(url)[-4:]
        nombrefichero = limpia_nombre_excepto_1(nombrefichero)
    else:
        nombrefichero = title + scrapertools.get_filename_from_url(url)[-4:]
        logger.info("[downloadtools.py] getfilefromtitle: nombrefichero=%s" % nombrefichero)
        if "videobb" in url or "videozer" in url or "putlocker" in url:
            nombrefichero = title + ".flv"
        if "videobam" in url:
            nombrefichero = title+"."+url.rsplit(".",1)[1][0:3]
        if "dibujos.tv" in url:
            nombrefichero = title + ".mp4"
        if "filenium" in url:
            # Content-Disposition	filename="filenium_El.Gato.con.Botas.TSScreener.Latino.avi"
            content_disposition_header = scrapertools.get_header_from_response(url,header_to_get="Content-Disposition")
            logger.info("content_disposition="+content_disposition_header)
            partes=content_disposition_header.split("=")
            if len(partes)<=1:
                raise Exception('filenium', 'no existe')
                
            extension = partes[1][-5:-1]
            nombrefichero = title + extension
        logger.info("[downloadtools.py] getfilefromtitle: nombrefichero=%s" % nombrefichero)

        nombrefichero = limpia_nombre_caracteres_especiales(nombrefichero)

    logger.info("[downloadtools.py] getfilefromtitle: nombrefichero=%s" % nombrefichero)

    if folder=="":
        fullpath = os.path.join( config.get_setting("downloadpath") , nombrefichero )
    else:
        fullpath = os.path.join( config.get_setting("downloadpath") , folder , nombrefichero )

    logger.info("[downloadtools.py] getfilefromtitle: fullpath=%s" % fullpath)
    
    return fullpath
Code Example #35
def get_typecode_by_lication(link, login, Password, location):

    with sert.pfx_to_pem(
            config.get_setting("Network", "Certificate_name"),
            config.get_setting("Network", "Certificate_pass")) as cert:
        r = requests.get(link, cert=cert, auth=(login, Password))
    if r.status_code == 200:
        j = json.dumps(r.json())
        resp = json.loads(j)
        temp_data = [location, resp['typeCode'], resp['title']]
        working_data.append(temp_data)
        print('\n Успешно! \n')
        return working_data        
    else:
        print('\nОтвет сервера отрицательный\n')
        temp_data = [location, 'Error', 'Сервер ответил отрицательно']
        working_data.append(temp_data)
        return working_data
Code Example #36
 def __init__(self):
     self.setting = config.get_setting()
     self.result = []
     self.exchangename = self.setting["realtime"]["exchange"]
     self.pair = self.setting["realtime"]["pair"]
     self.currency = self.pair.split("_")[0]
     self.asset = self.pair.split("_")[1]
     self.strategy = self.setting["realtime"]["strategy"]
     self.exchange = getattr(exchange, self.exchangename)(self.setting)
Code Example #37
 def __init__(self):
     self.setting = config.get_setting()
     indsize = self.get_indicator_size(self.setting["indicator"])
     maxnum = np.array([100.0] * indsize)
     #self.observation_space = spaces.Box(-maxnum, maxnum)
     self.observation_space = spaces.Box(-maxnum, maxnum)
     self.action_space = spaces.Discrete(3)
     self.init_candle(candleSize=10, deltaDays=30, random=False)
     super(BacktestEnv, self).__init__()
Code Example #38
def play(item, ItemVideo):
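    # Play the selected video; the playback path depends on the "player_mode"
    # setting (direct, setResolvedUrl, built-in, or download-and-play).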
    import xbmc
    import xbmcgui
    import xbmcplugin
    
    if ItemVideo is not None:
      mediaurl = ItemVideo.url[1]
      if len(ItemVideo.url)>2:
          wait_time = ItemVideo.url[2]
      else:
          wait_time = 0

      if wait_time>0:
        handle_wait(wait_time,server,"Cargando vídeo...")
        
      xlistitem = xbmcgui.ListItem( item.title, iconImage="DefaultVideo.png", thumbnailImage=item.thumbnail, path=mediaurl)
      xlistitem.setInfo( "video", { "Title": item.title, "Plot" : item.plot , "Studio" : item.channel , "Genre" : item.category } )

      if item.subtitle!="":
          import os
          ficherosubtitulo = os.path.join( config.get_data_path(), 'subtitulo.srt' )
          if os.path.exists(ficherosubtitulo):
                os.remove(ficherosubtitulo)
      
          from core import scrapertools
          data = scrapertools.cache_page(item.subtitle)
          fichero = open(ficherosubtitulo,"w")
          fichero.write(data)
          fichero.close()
          

      if config.get_setting("player_mode")=="3": #download_and_play
        import download_and_play
        download_and_play.download_and_play( mediaurl , "download_and_play.tmp" , config.get_setting("downloadpath"))
        
      elif config.get_setting("player_mode")=="0" or (config.get_setting("player_mode")=="3" and mediaurl.startswith("rtmp")): #Direct
      
        playlist = xbmc.PlayList(xbmc.PLAYLIST_VIDEO)
        playlist.clear()
        playlist.add(mediaurl, xlistitem)
        playersettings = config.get_setting('player_type')
        player_type = xbmc.PLAYER_CORE_AUTO
        if playersettings == "0":
            player_type = xbmc.PLAYER_CORE_AUTO
            logger.info("[xbmctools.py] PLAYER_CORE_AUTO")
        elif playersettings == "1":
            player_type = xbmc.PLAYER_CORE_MPLAYER
            logger.info("[xbmctools.py] PLAYER_CORE_MPLAYER")
        elif playersettings == "2":
            player_type = xbmc.PLAYER_CORE_DVDPLAYER
            logger.info("[xbmctools.py] PLAYER_CORE_DVDPLAYER")
        xbmcPlayer = xbmc.Player(player_type)
        xbmcPlayer.play(playlist)
        
      elif config.get_setting("player_mode")=="1": #setResolvedUrl
        pass
    
      elif config.get_setting("player_mode")=="2": #Built-in
        xbmc.executebuiltin( "PlayMedia("+mediaurl+")" )
Code Example #39
File: youtube.py Project: hmemar/xbmc-tvalacarta
def geturl( id ):
	print '[pelisalacarta] youtube.py Modulo: geturl(%s)' %id
	quality = int(config.get_setting("quality_youtube"))
	if id != "":
		url = "http://www.youtube.com/watch?v=%s" % id
		print 'esta es la url: %s'%url
		req = urllib2.Request(url)
		req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
		response=urllib2.urlopen(req)
		data = response.read()
		response.close()
		if data != "":
			print "Calidad encontrada es :%s" %quality
			if quality == 8:
				videourl = geturls(id,data)
				return videourl
			
			regexp = re.compile(r'fmt_stream_map=([^\&]+)\&')
			match = regexp.search(data)
			print 'match : %s'%str(match)
			videourl = ""
			if match is not None:
				fmt_stream_map = urllib.unquote_plus(match.group(1))
				print "fmt_stream_map :%s" %fmt_stream_map
				
				videourls = dict (nvp.replace("|http","*http").split("*") for nvp in fmt_stream_map.split(","))
				print videourls
				
				while True:
					Tquality = AVAILABLE_FORMATS[quality]
					print "AVAILABLE FORMAT :%s %s" %(Tquality,AVAILABLE_FORMATS2[Tquality])
					#videourl1 = "http://www.youtube.com/get_video?t=%s&video_id=%s&fmt=%s" % (  tParam ,id,Tquality)
					try:
						#videourl = verify_url( videourl1.encode( 'utf-8' ) ).decode( 'utf-8' )
						videourl = videourls[Tquality]
						break
					except:
						
						quality -= 1
						if quality == -1:
							break
				try:
					print "Quality Found: (%s) %s " % (AVAILABLE_FORMATS[quality],AVAILABLE_FORMATS2[AVAILABLE_FORMATS[quality]])
				except:
					print "Quality not available, result : -1"
				if videourl == "":
					alertaCalidad()
					return "Esc" 
				return videourl
			else:
				alertaNone()
		else:
			alertaNone()
		
	
	return "Esc"
Code Example #40
def play_video(item, strm=False):
    logger.info()
    # logger.debug(item.tostring('\n'))

    if item.channel == 'downloads':
        logger.info("Reproducir video local: %s [%s]" % (item.title, item.url))
        xlistitem = xbmcgui.ListItem(path=item.url,
                                     thumbnailImage=item.thumbnail)
        set_infolabels(xlistitem, item, True)
        xbmc.Player().play(item.url, xlistitem)
        return

    default_action = config.get_setting("default_action")
    logger.info("default_action=%s" % default_action)

    # Open the selection dialog to show the available options
    opciones, video_urls, seleccion, salir = get_dialogo_opciones(
        item, default_action, strm)
    if salir:
        return

    # Get the default option from the addon settings
    seleccion = get_seleccion(default_action, opciones, seleccion, video_urls)
    if seleccion < 0:  # Cuadro cancelado
        return

    logger.info("seleccion=%d" % seleccion)
    logger.info("seleccion=%s" % opciones[seleccion])

    # Run the chosen option (jdownloader, download, favorites, add to the library...) IF IT IS NOT PLAY
    salir = set_opcion(item, seleccion, opciones, video_urls)
    if salir:
        return

    # Get the selected video
    mediaurl, view, mpd = get_video_seleccionado(item, seleccion, video_urls)
    if mediaurl == "":
        return

    # Get the video information.
    if not item.contentThumbnail:
        xlistitem = xbmcgui.ListItem(path=mediaurl,
                                     thumbnailImage=item.thumbnail)
    else:
        xlistitem = xbmcgui.ListItem(path=mediaurl,
                                     thumbnailImage=item.contentThumbnail)
    set_infolabels(xlistitem, item, True)

    # If the video is in MPD format, configure the listitem to play it
    # with the InputStream Adaptive addon introduced in Kodi 17
    if mpd:
        xlistitem.setProperty('inputstreamaddon', 'inputstream.adaptive')
        xlistitem.setProperty('inputstream.adaptive.manifest_type', 'mpd')

    # Launch the player
    set_player(item, xlistitem, mediaurl, view, strm)
Code Example #41
def login(username,password):
    logger.info("pyload_client.login")

    #url = config.get_setting("pyload")+"/api/login"
    api_url = urlparse.urljoin(config.get_setting("pyload"),"/api/login")
    logger.info("pyload_client.login api_url="+api_url)

    data = scrapertools.cache_page( api_url , post=urllib.urlencode( {"username":username,"password":password} ) )
    logger.info("pyload_client.login data="+data)
    return data
Code Example #42
def CloseDirectory(refereditem): #----------------------------------OK
    import xbmc
    import xbmcplugin
    xbmcplugin.endOfDirectory( handle=int(sys.argv[1]), succeeded=True )
    if config.get_setting("forceview")=="true":
      if refereditem.viewmode=="list":
          xbmc.executebuiltin("Container.SetViewMode(50)")
      elif refereditem.viewmode=="movie_with_plot":
          xbmc.executebuiltin("Container.SetViewMode(503)")
      elif refereditem.viewmode=="movie":
          xbmc.executebuiltin("Container.SetViewMode(500)")    
Code Example #43
def download(url,package_name):
    logger.info("pyload_client.download url="+url+", package_name="+package_name)

    session = login(config.get_setting("pyload_user"),config.get_setting("pyload_password"))

    package_id = find_package_id(package_name)

    if package_id is None:
        api_url = urlparse.urljoin(config.get_setting("pyload"),"/api/addPackage")
        logger.info("pyload_client.download api_url="+api_url)

        data = scrapertools.cache_page( api_url , post=urllib.urlencode( {"name":"'"+package_name+"'","links":str([url])} ) )
        logger.info("pyload_client.download data="+data)
    else:
        api_url = urlparse.urljoin(config.get_setting("pyload"),"/api/addFiles")
        logger.info("pyload_client.download api_url="+api_url)

        data = scrapertools.cache_page( api_url , post=urllib.urlencode( {"pid":str(package_id),"links":str([url])} ) )
        logger.info("pyload_client.download data="+data)

    return
Code Example #44
File: megaupload.py Project: hmemar/xbmc-tvalacarta
def gethighurl(code):
    megavideologin = config.get_setting("megavideouser")
    if DEBUG:
        xbmc.output("[megaupload.py] megavideouser=#" + megavideologin + "#")
    megavideopassword = config.get_setting("megavideopassword")
    if DEBUG:
        xbmc.output("[megaupload.py] megavideopassword=#" + megavideopassword + "#")
    cookie = getmegauploaduser(megavideologin, megavideopassword)
    if DEBUG:
        xbmc.output("[megaupload.py] cookie=#" + cookie + "#")

    if len(cookie) == 0:
        advertencia = xbmcgui.Dialog()
        resultado = advertencia.ok(
            "Cuenta de Megaupload errónea",
            "La cuenta de Megaupload que usas no es válida",
            "Comprueba el login y password en la configuración",
        )
        return ""

    return getmegauploadvideo(code, cookie)
Code Example #45
File: megavideo.py Project: hmemar/xbmc-tvalacarta
def gethighurl(code):
	xbmc.output("[megavideo.py] Usa modo premium")
	
	code = getcode(code)

	megavideocookie = config.get_setting("megavideocookie")
	if DEBUG:
		xbmc.output("[megavideo.py] megavideocookie=#"+megavideocookie+"#")
	#if megavideocookie=="":
	xbmc.output("[megavideo.py] Averiguando cookie...")
	megavideologin = config.get_setting("megavideouser")
	if DEBUG: xbmc.output("[megavideo.py] megavideouser=#"+megavideologin+"#")
	megavideopassword = config.get_setting("megavideopassword")
	if DEBUG: xbmc.output("[megavideo.py] megavideopassword=#"+megavideopassword+"#")
	megavideocookie = GetMegavideoUser(megavideologin, megavideopassword)
	if DEBUG: xbmc.output("[megavideo.py] megavideocookie=#"+megavideocookie+"#")

	if len(megavideocookie) == 0:
		advertencia = xbmcgui.Dialog()
		resultado = advertencia.ok('Cuenta de Megavideo errónea' , 'La cuenta de Megavideo que usas no es válida' , 'Comprueba el login y password en la configuración')
		return ""

	req = urllib2.Request("http://www.megavideo.com/xml/player_login.php?u="+megavideocookie+"&v="+code)
	req.add_header('User-Agent', 'Mozilla/5.0 (Windows; U; Windows NT 5.1; en-GB; rv:1.9.0.3) Gecko/2008092417 Firefox/3.0.3')
	response = urllib2.urlopen(req)
	data=response.read()
	response.close()
	
	# extract the links
	patronvideos  = 'downloadurl="([^"]+)"'
	matches = re.compile(patronvideos,re.DOTALL).findall(data)
	movielink = matches[0]
	movielink = movielink.replace("%3A",":")
	movielink = movielink.replace("%2F","/")
	movielink = movielink.replace("%20"," ")
	
	return movielink
Code Example #46
def find_package_id(package_name):
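    # Search the pyLoad queue for a package with the given name and return its pid, or None if not found.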
    logger.info("pyload_client.find_package_id package_name="+package_name)

    api_url = urlparse.urljoin(config.get_setting("pyload"),"/api/getQueue")
    logger.info("pyload_client.find_package_id api_url="+api_url)

    data = scrapertools.cache_page( api_url )
    logger.info("pyload_client.find_package_id data="+data)

    try:
        package_id = scrapertools.get_match(data, r'"name"\s*:\s*"' + package_name + r'".*?"pid"\s*:\s*(\d+)')
    except:
        package_id = None

    return package_id
Code Example #47
File: megavideo.py Project: hmemar/xbmc-tvalacarta
def Megavideo(mega):

	mega = getcode(mega)

	xbmc.output("[megavideo.py] Megavideo")
	modoPremium = config.get_setting("megavideopremium")
	xbmc.output("[megavideo.py] modoPremium="+modoPremium)
	
	if modoPremium == "false":
		movielink = getlowurl(mega)
	else:
		movielink = gethighurl(mega)

	xbmc.output("[megavideo.py] movielink="+movielink)
		
	return movielink
Code Example #48
def getDownloadListPath():

    # The download list path is a setting
    downloadpath = config.get_setting("downloadlistpath")

    # It is not set yet; try to force it
    try:
        if downloadpath == "":
            logger.info("[downloadtools.py] downloadpath está vacio")

            # Look for a skin setting (Telebision)
            try:
                import xbmc

                downloadpath = xbmc.getInfoLabel("Skin.String(downloadpath)")
                logger.info("[downloadtools.py] downloadpath en el skin es " + downloadpath)
            except:
                pass

                # It is not Telebision: force the XBMC home directory
            if downloadpath == "":
                downloadpath = os.path.join(config.DATA_PATH, "downloads", "list")
                logger.info("[downloadtools.py] getDownloadPath: downloadpath=%s" % downloadpath)
                if not os.path.exists(downloadpath):
                    logger.info("[downliadtools.py] download path doesn't exist:" + downloadpath)
                    os.mkdir(downloadpath)
                config.setSetting("downloadlistpath", downloadpath)

                # It is Telebision: store it in the skin
            else:
                # save the skin setting as a plugin setting
                downloadpath = os.path.join(downloadpath, "list")
                downloadpath = xbmc.translatePath(downloadpath)
                logger.info("[downloadtools.py] downloadpath nativo es " + downloadpath)
                config.setSetting("downloadlistpath", downloadpath)
    except:
        pass

    logger.info("[downloadtools.py] downloadlistpath=" + downloadpath)

    try:
        os.mkdir(downloadpath)
    except:
        pass

    return downloadpath
コード例 #49
0
def CloseDirectory(refereditem): #----------------------------------OK

  if "xbmc" in PLATFORM_NAME:
    import xbmc
    import xbmcplugin
    xbmcplugin.endOfDirectory( handle=int(sys.argv[1]), succeeded=True )
    if config.get_setting("forceview")=="true":
      if refereditem.viewmode=="list":
          xbmc.executebuiltin("Container.SetViewMode(50)")
      elif refereditem.viewmode=="movie_with_plot":
          xbmc.executebuiltin("Container.SetViewMode(503)")
      elif refereditem.viewmode=="movie":
          xbmc.executebuiltin("Container.SetViewMode(500)")
  
  elif "mediaserver" in PLATFORM_NAME:
    from platformcode.mediaserver import cliente
    cliente.Acciones().EndItems()
コード例 #50
0
def get_epg(date):
    utils.log("Getting EPG for date: " + date)
    config.login_check()

    url = API_ENDPOINT + "/api/v1.4/get/tv/epg/?daynight=" + date
    opener = get_url_opener()
    opener.addheaders.append(('Authorization', "Bearer " + config.get_setting(constants.TOKEN)))
    response = opener.open(url)

    response_text = response.read()
    response_code = response.getcode()

    if response_code != 200:
        config.set_setting_bool(constants.LOGGED_IN, False)
        raise ApiError(
            "Got bad response from EPG service. Response code: " + str(response_code))

    json_object = None
    try:
        json_object = json.loads(response_text)
    except ValueError, e:
        config.set_setting(constants.LOGGED_IN, False)
        raise ApiError("Did not receive json, something wrong: " + response_text)

    return json_object
コード例 #51
0
ファイル: scrapertools.py プロジェクト: vphuc81/MyRepository
def downloadpage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],follow_redirects=True, timeout=socket.getdefaulttimeout()):
    if DEBUG: logger.info("[scrapertools.py] downloadpage")
    if DEBUG: logger.info("[scrapertools.py] url="+url)

    if post is not None:
        if DEBUG: logger.info("[scrapertools.py] post="+post)
    else:
        if DEBUG: logger.info("[scrapertools.py] post=None")

    # ---------------------------------
    # Set up the cookies
    # ---------------------------------

    # Initialize the cookie store
    ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' )
    if DEBUG: logger.info("[scrapertools.py] ficherocookies="+ficherocookies)

    cj = None
    ClientCookie = None
    cookielib = None

    # Let's see if cookielib is available
    try:
        if DEBUG: logger.info("[scrapertools.py] Importing cookielib")
        import cookielib
    except ImportError:
        if DEBUG: logger.info("[scrapertools.py] cookielib not available")
        # If importing cookielib fails
        # let's try ClientCookie
        try:
            if DEBUG: logger.info("[scrapertools.py] Importing ClientCookie")
            import ClientCookie
        except ImportError:
            if DEBUG: logger.info("[scrapertools.py] ClientCookie not available")
            # ClientCookie isn't available either
            urlopen = urllib2.urlopen
            Request = urllib2.Request
        else:
            if DEBUG: logger.info("[scrapertools.py] ClientCookie available")
            # imported ClientCookie
            urlopen = ClientCookie.urlopen
            Request = ClientCookie.Request
            cj = ClientCookie.MozillaCookieJar()

    else:
        if DEBUG: logger.info("[scrapertools.py] cookielib available")
        # importing cookielib worked
        urlopen = urllib2.urlopen
        Request = urllib2.Request
        cj = cookielib.MozillaCookieJar()
        # This is a subclass of FileCookieJar
        # that has useful load and save methods

    if cj is not None:
        # we successfully imported
        # one of the two cookie handling modules
        if DEBUG: logger.info("[scrapertools.py] Cookie support enabled")

        if os.path.isfile(ficherocookies):
            if DEBUG: logger.info("[scrapertools.py] Reading cookie file")
            # if we have a cookie file already saved
            # then load the cookies into the Cookie Jar
            try:
                cj.load(ficherocookies)
            except:
                if DEBUG: logger.info("[scrapertools.py] Cookie file exists but is unreadable, deleting it")
                os.remove(ficherocookies)

        # Now we need to get our Cookie Jar
        # installed in the opener;
        # for fetching URLs
        if cookielib is not None:
            if DEBUG: logger.info("[scrapertools.py] opener using urllib2 (cookielib)")
            # if we use cookielib
            # then we get the HTTPCookieProcessor
            # and install the opener in urllib2
            if not follow_redirects:
                opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj),NoRedirectHandler())
            else:
                opener = urllib2.build_opener(urllib2.HTTPHandler(debuglevel=DEBUG_LEVEL),urllib2.HTTPCookieProcessor(cj))
            urllib2.install_opener(opener)

        else:
            if DEBUG: logger.info("[scrapertools.py] opener using ClientCookie")
            # if we use ClientCookie
            # then we get the HTTPCookieProcessor
            # and install the opener in ClientCookie
            opener = ClientCookie.build_opener(ClientCookie.HTTPCookieProcessor(cj))
            ClientCookie.install_opener(opener)

    # -------------------------------------------------
    # Cookies installed, send the request
    # -------------------------------------------------

    # Timer
    inicio = time.clock()

    # Dictionary for the headers
    txheaders = {}

    # Trace the request type
    if post is None:
        if DEBUG: logger.info("[scrapertools.py] GET request")
    else:
        if DEBUG: logger.info("[scrapertools.py] POST request")

    # Add the headers
    if DEBUG: logger.info("[scrapertools.py] ---------------------------")
    for header in headers:
        if DEBUG: logger.info("[scrapertools.py] header %s=%s" % (str(header[0]),str(header[1])) )
        txheaders[header[0]]=header[1]
    if DEBUG: logger.info("[scrapertools.py] ---------------------------")

    # Build the request
    req = Request(url, post, txheaders)
    if timeout is None:
        handle=urlopen(req)
    else:
        # Available from python 2.6 onwards --> handle = urlopen(req, timeout=timeout)
        # For all versions:
        deftimeout = socket.getdefaulttimeout()
        try:
            socket.setdefaulttimeout(timeout)
            handle=urlopen(req)
        except:
            import sys
            for line in sys.exc_info():
                logger.error( "%s" % line )
            # Re-raise: without a handle there is nothing to read below
            raise
        finally:
            # Restore the default timeout whatever happens
            socket.setdefaulttimeout(deftimeout)

    # Update the cookie store
    if cj is not None:
        cj.save(ficherocookies)

    # Read the data and close
    data=handle.read()
    info = handle.info()
    if DEBUG: logger.info("[scrapertools.py] Response")
    if DEBUG: logger.info("[scrapertools.py] ---------------------------")
    for header in info:
        if DEBUG: logger.info("[scrapertools.py] "+header+"="+info[header])
    handle.close()
    if DEBUG: logger.info("[scrapertools.py] ---------------------------")

    '''
    # Send the request
    try:
        response = urllib2.urlopen(req)
    # If it fails, retry with special characters replaced
    except:
        req = urllib2.Request(url.replace(" ","%20"))

        # Add the headers
        for header in headers:
            req.add_header(header[0],header[1])

        response = urllib2.urlopen(req)
    '''

    # Elapsed time
    fin = time.clock()
    if DEBUG: logger.info("[scrapertools.py] Downloaded in %d seconds " % (fin-inicio+1))

    return data
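
For reference, a sketch of how this helper is typically driven; the URL and form fields are placeholders:

import urllib

# GET request with the default User-Agent
data = downloadpage("http://www.example.com/")

# POST request (urllib2 switches to POST when the data argument is a string),
# with an extra header and a 10 second timeout
post = urllib.urlencode({"user": "name", "password": "secret"})
data = downloadpage("http://www.example.com/login", post=post,
                    headers=[['User-Agent', 'Mozilla/5.0'], ['Referer', 'http://www.example.com/']],
                    timeout=10)
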
コード例 #52
0
ファイル: scrapertools.py プロジェクト: vphuc81/MyRepository
def cachePage(url,post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']],modoCache=CACHE_ACTIVA, timeout=socket.getdefaulttimeout()):
    if DEBUG: logger.info("[scrapertools.py] cachePage url="+url)
    # Note: the modoCache parameter is always overridden by the global setting
    modoCache = config.get_setting("cache.mode")

    '''
    if config.get_platform()=="plex":
        from PMS import HTTP
        try:
            if DEBUG: logger.info("url="+url)
            data = HTTP.Request(url)
            if DEBUG: logger.info("downloaded")
        except:
            data = ""
            logger.error("Error downloading "+url)
            import sys
            for line in sys.exc_info():
                logger.error( "%s" % line )

        return data
    '''
    # CACHE_NUNCA: always goes to the URL to download,
    # mandatory for POST requests
    if modoCache == CACHE_NUNCA or post is not None:
        if DEBUG: logger.info("[scrapertools.py] MODO_CACHE=2 (no caching)")

        try:
            data = downloadpage(url,post,headers, timeout=timeout)
        except:
            data=""

    # CACHE_SIEMPRE: always reads from the cache without checking dates, except when the page is not there
    elif modoCache == CACHE_SIEMPRE:
        if DEBUG: logger.info("[scrapertools.py] MODO_CACHE=1 (cache everything)")

        # Get the cache file names for this URL
        cachedFile, newFile = getCacheFileNames(url)

        # If there is none, download
        if cachedFile == "":
            logger.debug("[scrapertools.py] Not in cache")

            # Download it
            data = downloadpage(url,post,headers)

            # Save it to the cache
            outfile = open(newFile,"w")
            outfile.write(data)
            outfile.flush()
            outfile.close()
            if DEBUG: logger.info("[scrapertools.py] Saved to " + newFile)
        else:
            if DEBUG: logger.info("[scrapertools.py] Reading from cache " + cachedFile)
            infile = open( cachedFile )
            data = infile.read()
            infile.close()

    # CACHE_ACTIVA: reads from the cache if the page has not changed
    else:
        if DEBUG: logger.info("[scrapertools.py] MODO_CACHE=0 (automatic)")

        # Downloaded data
        data = ""

        # Get the cache file names for this URL
        cachedFile, newFile = getCacheFileNames(url)

        # If there is none, download
        if cachedFile == "":
            logger.debug("[scrapertools.py] Not in cache")

            # Download it
            data = downloadpage(url,post,headers)

            # Save it to the cache
            outfile = open(newFile,"w")
            outfile.write(data)
            outfile.flush()
            outfile.close()
            if DEBUG: logger.info("[scrapertools.py] Saved to " + newFile)

        # If there is one, check the timestamp (makes an if-modified-since request)
        else:
            # Extract the old timestamp from the file name
            oldtimestamp = time.mktime( time.strptime(cachedFile[-20:-6], "%Y%m%d%H%M%S") )

            if DEBUG: logger.info("[scrapertools.py] oldtimestamp="+cachedFile[-20:-6])
            if DEBUG: logger.info("[scrapertools.py] oldtimestamp="+time.ctime(oldtimestamp))

            # Make the request
            updated,data = downloadtools.downloadIfNotModifiedSince(url,oldtimestamp)

            # If it has changed
            if updated:
                # Delete the old one
                logger.debug("[scrapertools.py] Deleting "+cachedFile)
                os.remove(cachedFile)

                # Save the new one to the cache
                outfile = open(newFile,"w")
                outfile.write(data)
                outfile.flush()
                outfile.close()
                if DEBUG: logger.info("[scrapertools.py] Saved to " + newFile)
            # Return the content of the cached file
            else:
                if DEBUG: logger.info("[scrapertools.py] Reading from cache " + cachedFile)
                infile = open( cachedFile )
                data = infile.read()
                infile.close()

    return data
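
The three modes compared against "cache.mode" above are module-level constants that do not appear in the snippet; a sketch of how they would need to be defined for the branches to match the log messages, with the concrete values being an assumption:

# Assumed values, inferred from the MODO_CACHE log messages
CACHE_NUNCA = "2"    # never cache: always download (mandatory for POST)
CACHE_SIEMPRE = "1"  # always cache: serve from disk whenever a copy exists
CACHE_ACTIVA = "0"   # active cache: revalidate with If-Modified-Since
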
コード例 #53
0
# streamondemand-pureita-master - XBMC Plugin
# Favorite videos list
# http://blog.tvalacarta.info/plugin-xbmc/streamondemand-pureita-master/
#------------------------------------------------------------
import urllib
import os
import sys
import downloadtools
import config
import logger
import samba
from item import Item

CHANNELNAME = "favoritos"
DEBUG = True
BOOKMARK_PATH = config.get_setting( "bookmarkpath" )

if not BOOKMARK_PATH.upper().startswith("SMB://"):
    if BOOKMARK_PATH=="":
        BOOKMARK_PATH = os.path.join( config.get_data_path() , "bookmarks" )
    if not os.path.exists(BOOKMARK_PATH):
        logger.debug("[favoritos.py] Path de bookmarks no existe, se crea: "+BOOKMARK_PATH)
        os.mkdir(BOOKMARK_PATH)

logger.info("[favoritos.py] path="+BOOKMARK_PATH)

def isGeneric():
    return True

def mainlist(item):
    logger.info("[favoritos.py] mainlist")
コード例 #54
0
ファイル: scrapertools.py プロジェクト: vphuc81/MyRepository
def get_header_from_response(url,header_to_get="",post=None,headers=[['User-Agent', 'Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; es-ES; rv:1.9.2.12) Gecko/20101026 Firefox/3.6.12']]):
    header_to_get = header_to_get.lower()
    if DEBUG: logger.info("[scrapertools.py] get_header_from_response url="+url+", header_to_get="+header_to_get)

    if post is not None:
        if DEBUG: logger.info("[scrapertools.py] post="+post)
    else:
        if DEBUG: logger.info("[scrapertools.py] post=None")

    # Initialize the cookie store
    ficherocookies = os.path.join( config.get_setting("cookies.dir"), 'cookies.dat' )
    if DEBUG: logger.info("[scrapertools.py] ficherocookies="+ficherocookies)

    import cookielib
    urlopen = urllib2.urlopen
    Request = urllib2.Request
    cj = cookielib.MozillaCookieJar()
    # This is a subclass of FileCookieJar
    # that has useful load and save methods

    if os.path.isfile(ficherocookies):
        if DEBUG: logger.info("[scrapertools.py] Reading cookie file")
        # if we have a cookie file already saved
        # then load the cookies into the Cookie Jar
        try:
            cj.load(ficherocookies)
        except:
            if DEBUG: logger.info("[scrapertools.py] Cookie file exists but is unreadable, deleting it")
            os.remove(ficherocookies)

    # When asking for the Location header, redirects must not be followed
    if header_to_get=="location":
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj),NoRedirectHandler())
    else:
        opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cj))
    urllib2.install_opener(opener)

    # Timer
    inicio = time.clock()

    # Dictionary for the headers
    txheaders = {}

    # Trace the request type
    if post is None:
        if DEBUG: logger.info("[scrapertools.py] GET request")
    else:
        if DEBUG: logger.info("[scrapertools.py] POST request")

    # Filenium login and password
    # http://abcd%40gmail.com:[email protected]/get/Oi8vd3d3/LmZpbGVz/ZXJ2ZS5j/b20vZmls/ZS9kTnBL/dm11/b0/?.zip
    if "filenium" in url:
        from servers import filenium
        url , authorization_header = filenium.extract_authorization_header(url)
        headers.append( [ "Authorization",authorization_header ] )

    # Header array
    if DEBUG: logger.info("[scrapertools.py] ---------------------------")
    for header in headers:
        if DEBUG: logger.info("[scrapertools.py] header=%s" % str(header[0]))
        txheaders[header[0]]=header[1]
    if DEBUG: logger.info("[scrapertools.py] ---------------------------")

    # Build the request
    req = Request(url, post, txheaders)
    handle = urlopen(req)

    # Update the cookie store
    cj.save(ficherocookies)

    # Read the headers and close (the body itself is not needed)
    #data=handle.read()
    info = handle.info()
    if DEBUG: logger.info("[scrapertools.py] Response")
    if DEBUG: logger.info("[scrapertools.py] ---------------------------")
    location_header=""
    for header in info:
        if DEBUG: logger.info("[scrapertools.py] "+header+"="+info[header])
        if header==header_to_get:
            location_header=info[header]
    handle.close()
    if DEBUG: logger.info("[scrapertools.py] ---------------------------")

    # Elapsed time
    fin = time.clock()
    if DEBUG: logger.info("[scrapertools.py] Downloaded in %d seconds " % (fin-inicio+1))

    return location_header
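
A typical use of this helper is resolving a redirect without following it; a sketch with a placeholder URL:

# Because header_to_get is "location", NoRedirectHandler is installed and
# the redirect target is returned instead of being followed
final_url = get_header_from_response("http://www.example.com/short", header_to_get="location")
if final_url != "":
    logger.info("redirects to "+final_url)
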
コード例 #55
0
ファイル: wiideoteca.py プロジェクト: Bycacha/BYCACHA
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta/
#------------------------------------------------------------
import re,urllib,urllib2,sys
import os
import downloadtools
import config
import logger
import samba
import scrapertools
from item import Item
from xml.dom import minidom

CHANNELNAME = "wiideoteca"
DEBUG = True
XML = os.path.join( config.get_setting("bookmarkpath"),"series.xml")
if not os.path.exists(XML) and os.path.exists(config.get_setting("bookmarkpath")):
    import shutil
    shutil.copyfile( os.path.join(config.get_runtime_path(),"resources","wiideoteca.xml") , XML )

XML2 = XML.replace("series","pruebas")
title = []
fulltitle = []
thumbnail = []
channel = []
directory = []
idioma = []
plot = []
solonuevos = []
ultimo = []
url = []
コード例 #56
0
def get_current_plugin_version():
    return int(config.get_setting("plugin_version_number"))
コード例 #57
0
#------------------------------------------------------------
# pelisalacarta
# http://blog.tvalacarta.info/plugin-xbmc/pelisalacarta
# XBMC Plugin
#------------------------------------------------------------

import urlparse,urllib2,urllib,re
import os
import sys
import scrapertools
import time
import config
import logger

# FIXME: This is duplicated in channelselector; it should go into config
thumbnail_type = config.get_setting("thumbnail_type")
if thumbnail_type=="":
    thumbnail_type="2"
logger.info("thumbnail_type="+thumbnail_type)
if thumbnail_type=="0":
    IMAGES_PATH = 'http://media.tvalacarta.info/pelisalacarta/posters/'
elif thumbnail_type=="1":
    IMAGES_PATH = 'http://media.tvalacarta.info/pelisalacarta/banners/'
elif thumbnail_type=="2":
    IMAGES_PATH = 'http://media.tvalacarta.info/pelisalacarta/squares/'

ROOT_DIR = config.get_runtime_path()

REMOTE_VERSION_FILE = "http://descargas.tvalacarta.info/"+config.PLUGIN_NAME+"-version.xml"
LOCAL_VERSION_FILE = os.path.join( ROOT_DIR , "version.xml" )
LOCAL_FILE = os.path.join( ROOT_DIR , config.PLUGIN_NAME+"-" )
コード例 #58
0
def get_current_servers_version():
    return int(config.get_setting("servers_version_number"))
コード例 #59
0
def get_current_channels_version():
    return int(config.get_setting("channels_version_number"))