Example #1
 def SetUserInfo(self, userinfo):
     try:
         #utils.Log("SetUserInfo %s" % userinfo, Type="DEBUG")
         if ("headimgurl" in userinfo.keys()):
             self.wxUserData.Avatar = userinfo["headimgurl"]
         if ("avatarUrl" in userinfo.keys()):
             self.wxUserData.Avatar = userinfo["avatarUrl"]
         if ("sex" in userinfo.keys()):
             self.wxUserData.Gender = userinfo["sex"]
         if ("gender" in userinfo.keys()):
             self.wxUserData.Gender = userinfo["gender"]
         if ("User" in userinfo.keys()):
             self.wxUserData.Role = "User"
         if ("country" in userinfo.keys()):
             self.wxUserData.Country = userinfo["country"]
         if ("province" in userinfo.keys()):
             self.wxUserData.Province = userinfo["province"]
         if ("city" in userinfo.keys()):
             self.wxUserData.City = userinfo["city"]
         if ("unionid" in userinfo.keys()):
             self.wxUserData.unionID = userinfo["unionid"]
         if ("subscribe" in userinfo.keys()):
             self.wxUserData.Subscribed = userinfo["subscribe"]
         if ("nickname" in userinfo.keys()):
             utils.Log(userinfo["nickname"].encode('utf-8', 'ignore'))
             self.wxUserData.Name = userinfo["nickname"].encode(
                 'utf-8', 'ignore')
         if ("nickName" in userinfo.keys()):
             utils.Log(userinfo["nickName"].encode('utf-8', 'ignore'))
             self.wxUserData.Name = userinfo["nickName"].encode(
                 'utf-8', 'ignore')
         self.wxUserData.Updated = datetime.datetime.now()
         self.wxUserData.save()
     except Exception as e:
         utils.Log("SetUserInfo Error: %s" % e, Type="DEBUG")
Example #2
def main(addonID, param=None):
    try:
        import application
        app = application.Application(addonID)
        app.run(param)
        del app
    except Exception as e:
        utils.Log('******************* ERROR IN MAIN *******************')
        utils.Log(str(e))
        raise
Example #3
 def __init__(self, mysqlConfig=None):
     """
     :param mysqlConfig: dict with custom database connection settings
     """
     if mysqlConfig is None:
         self.log = utils.Log()
         self.db = connector.connect(**utils.getMysqlConfig())
     else:
         self.log = utils.Log(db=mysqlConfig)
         self.db = connector.connect(**mysqlConfig)
     self.logger = self.Logger(self.log.log)
     self.cursor = self.db.cursor()
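The mysqlConfig parameter above can be any dict of keyword arguments accepted by mysql.connector.connect. A usage sketch with placeholder values:

# Placeholder settings; these are standard mysql.connector.connect keyword arguments.
custom_config = {
    'host': '127.0.0.1',
    'port': 3306,
    'user': 'app',
    'password': 'secret',
    'database': 'app_db',
}
# passed as the mysqlConfig argument of the constructor in Example #3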
Example #4
 def __init__(self, appid, userid):
     #self.tsaiObj = tsaiPlatform.tsaiPlatform()
     #Log(userinfo)
     try:
         obj = wxUser.objects.filter(openID=userid)
         if obj:
             self.wxUserData = obj[0]
             utils.Log("User found! %s" % userid)
         else:
             self.wxUserData = wxUser(openID=userid, SourceAccount=appid)
             utils.Log("User created! %s" % userid)
             #self.wxUserData.save()
     except Exception as e:
         utils.Log("Couldn't do wxUserClass __init__: %s" % e)
def anime_log(args):
    log_args = dict()
    if len(args) == 0:
        pass
    elif args[0].isnumeric():
        log_args["number"] = int(args[0])
    else:
        log_args["pattern"] = re.compile(args[0])
        if len(args) == 2:
            log_args["number"] = int(args[1])
    logs = utils.read_log(**log_args)
    ongoing = utils.read_log(logfile=config.ongoingfile)
    if len(logs) == 0:
        if len(args) == 0:
            outputs.warning_info("No log entries found.")
        else:
            outputs.prompt_val("Log entries not found for arguments", args[0],
                               "error")
        return
    outputs.bold_info("Watched\t\tAnime Name")
    for k, log in logs.items():
        outputs.normal_info(utils.Log(log).show(), end=" ")
        if k in ongoing:
            outputs.warning_tag("TRACKED", end="")
        outputs.normal_info()
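The anime-tracker snippets (this one and Examples #14, #15, #18, #20 and #21) treat utils.Log as a thin wrapper around a single log entry, exposing eps, last_updated_fmt and show(). A minimal compatible sketch, assuming a tab-separated name/timestamp/episodes entry format; the real on-disk format may differ.

import datetime


class Log:
    """Hypothetical wrapper over one log entry: 'name\ttimestamp\teps'."""

    def __init__(self, entry):
        parts = (entry or '').split('\t')
        self.name = parts[0]
        self._timestamp = int(parts[1]) if len(parts) > 1 else 0
        self.eps = parts[2] if len(parts) > 2 else ''
        self._eps = self.eps  # Example #15 reads this private alias

    @property
    def last_updated_fmt(self):
        return datetime.datetime.fromtimestamp(
            self._timestamp).strftime('%Y-%m-%d %H:%M')

    def show(self):
        # matches the "Watched\t\tAnime Name" header printed above
        return '%s\t\t%s' % (self.eps, self.name)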
Example #6
    def __init__(self, config, sess):
        self.config = config
        self.sess = sess
        self.log = utils.Log()

        self.action_dim = int(self.config['META']['ACTION_DIM'])
        self.statistic_dim = int(self.config['META']['STATISTIC_DIM'])
        self.reward_dim = int(self.config['META']['REWARD_DIM'])
        self.batch_size = int(self.config['TPGR']['PRE_TRAINING_BATCH_SIZE'])
        self.log_step = int(self.config['TPGR']['PRE_TRAINING_LOG_STEP'])
        self.learning_rate = float(self.config['TPGR']['PRE_TRAINING_LEARNING_RATE'])
        self.l2_factor = float(self.config['TPGR']['PRE_TRAINING_L2_FACTOR'])
        self.pre_train_truncated_length = int(self.config['TPGR']['PRE_TRAINING_RNN_TRUNCATED_LENGTH'])
        self.max_item_num = int(self.config['TPGR']['PRE_TRAINING_MAX_ITEM_NUM'])
        self.pre_train_seq_length = min(int(self.config['TPGR']['PRE_TRAINING_SEQ_LENGTH']), self.max_item_num)
        self.pre_train_mask_length = min(int(self.config['TPGR']['PRE_TRAINING_MASK_LENGTH']), self.pre_train_seq_length)
        self.rnn_file_path = '../data/run_time/%s_rnn_model_%s' % (self.config['ENV']['RATING_FILE'], self.config['TPGR']['RNN_MODEL_VS'].split('s')[0])

        self.rnn_input_dim = self.action_dim + self.reward_dim + self.statistic_dim
        self.rnn_output_dim = self.rnn_input_dim

        self.forward_env = Env(self.config)
        self.boundry_user_id = self.forward_env.boundry_user_id
        self.user_num, self.item_num, self.r_matrix, self.user_to_rele_num = self.forward_env.get_init_data()
        self.env = [Env(self.config, self.user_num, self.item_num, self.r_matrix, self.user_to_rele_num) for i in range(max(self.user_num, self.batch_size))]

        self.pre_training_steps = 0
        self.make_graph()
        self.sess.run(tf.global_variables_initializer())

        self.log.log('graph constructed', True)
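Examples #6, #7, #10, #16 and #27 only ever call log.log(message) or log.log(message, True), i.e. a timestamped logger with an optional echo-to-console flag. A minimal stand-in with that shape (an assumption for illustration, not the project's actual implementation):

import time


class Log:
    def __init__(self, path='run.log'):
        self.path = path

    def log(self, message, to_console=False):
        # append a timestamped line, optionally echoing it to stdout
        line = '%s %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), message)
        with open(self.path, 'a') as f:
            f.write(line + '\n')
        if to_console:
            print(line)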
Example #7
    def run(self):
        pre_training_step = int(self.config['TPGR']['PRE_TRAINING_STEP'])
        max_training_step = int(self.config['META']['MAX_TRAINING_STEP'])
        log_step = int(self.config['META']['LOG_STEP'])
        log = utils.Log()

        # pre-training
        if self.config['TPGR']['PRE_TRAINING'] == 'T':
            log.log('start pre-training', True)
            pre_train = PRE_TRAIN(self.config, self.sess)
            for i in range(pre_training_step):
                pre_train.train()
            log.log('end pre-training', True)

        # constructing tree
        if self.config['TPGR']['CONSTRUCT_TREE'] == 'T':
            log.log('start constructing tree', True)
            tree = Tree(self.config, self.sess)
            tree.construct_tree()
            log.log('end constructing tree', True)

        # training model
        log.log('start training tpgr', True)
        tpgr = TPGR(self.config, self.sess)
        for i in range(0, max_training_step):
            if i % log_step == 0:
                tpgr.evaluate()
                log.log('evaluated\n', True)
            tpgr.train()
        log.log('end training tpgr')
Example #8
def run_experiment(differential_op,
                   max_gen,
                   pop_size,
                   exp_id=EXP_ID,
                   silent=False):

    # use `functools.partial` to fix some arguments of the functions
    # and create functions with the required signatures

    for fit_gen, fit_name in zip(fit_generators, fit_names):
        fit = fit_gen(DIMENSION)

        for run in range(REPEATS):
            pub_vars.fit_func = fit
            # initialize the log structure
            log = utils.Log(OUT_DIR,
                            exp_id + '.' + fit_name,
                            run,
                            write_immediately=True,
                            print_frequency=5,
                            silent=silent)
            # create population
            pop = create_pop(pop_size, cr_ind)
            pop = differential_evolution(pop,
                                         max_gen,
                                         differential_op,
                                         log=log)

        # write summary logs for the whole experiment
        utils.summarize_experiment(OUT_DIR, exp_id + '.' + fit_name)
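Examples #8, #11, #23 and #26 share one logging pattern: a fresh utils.Log per run, keyed by output directory, experiment id and run index, then utils.summarize_experiment over all runs. A rough sketch of a logger that would satisfy those constructor calls; the per-generation recording method (add_gen here) and the file layout are assumptions, since the snippets never show them.

import os


class Log:
    def __init__(self, out_dir, exp_id, run,
                 write_immediately=True, print_frequency=5, silent=False):
        os.makedirs(out_dir, exist_ok=True)
        self.path = os.path.join(out_dir, '%s_%d.log' % (exp_id, run))
        self.write_immediately = write_immediately
        self.print_frequency = print_frequency
        self.silent = silent
        self.rows = []

    def add_gen(self, gen, best_fitness):  # assumed method name
        self.rows.append((gen, best_fitness))
        if not self.silent and gen % self.print_frequency == 0:
            print('gen %d: best = %s' % (gen, best_fitness))
        if self.write_immediately:
            self.write_files()

    def write_files(self):
        with open(self.path, 'w') as f:
            for gen, best in self.rows:
                f.write('%d\t%s\n' % (gen, best))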
Example #9
 def _init_logger(self):
     X_test = self.X_test.reshape(self.X_test.shape[0], -1)
     loggers = [
         utils.GlobalStepLogger(),
         utils.AccuracyLogger(X_test, self.Y_test),
     ]
     self.log = utils.Log(self.flags.log_dir, self.flags.name, loggers)
     self.log.write_flags(self.flags)
Example #10
    def __init__(self, config, sess):
        self.config = config
        self.sess = sess
        self.log = utils.Log()

        self.episode_length = int(self.config['META']['EPISODE_LENGTH'])
        self.action_dim = int(self.config['META']['ACTION_DIM'])
        self.statistic_dim = int(self.config['META']['STATISTIC_DIM'])
        self.reward_dim = int(self.config['META']['REWARD_DIM'])
        self.discount_factor = float(self.config['META']['DISCOUNT_FACTOR'])
        self.log_step = int(self.config['META']['LOG_STEP'])
        self.sample_episodes_per_batch = int(self.config['TPGR']['SAMPLE_EPISODES_PER_BATCH'])
        self.sample_users_per_batch = int(self.config['TPGR']['SAMPLE_USERS_PER_BATCH'])
        self.learning_rate = float(self.config['TPGR']['LEARNING_RATE'])
        self.l2_factor = float(self.config['TPGR']['L2_FACTOR'])
        self.entropy_factor = float(self.config['TPGR']['ENTROPY_FACTOR'])
        self.child_num = int(self.config['TPGR']['CHILD_NUM'])
        self.boundary_rating = float(self.config['ENV']['BOUNDARY_RATING'])
        self.eval_batch_size = int(self.config['TPGR']['EVAL_BATCH_SIZE'])
        self.train_batch_size = self.sample_episodes_per_batch * self.sample_users_per_batch

        self.result_file_path = '../data/result/result_log/' + time.strftime('%Y%m%d%H%M%S') + '_' + self.config['ENV']['ALPHA'] + '_' + self.config['ENV']['RATING_FILE']
        self.rnn_file_path = '../data/run_time/%s_rnn_model_%s' % (self.config['ENV']['RATING_FILE'], self.config['TPGR']['RNN_MODEL_VS'])
        self.load_model = self.config['TPGR']['LOAD_MODEL'] == 'T'
        self.load_model_path = '../data/model/%s_tpgr_model_%s' % (self.config['ENV']['RATING_FILE'], self.config['TPGR']['MODEL_LOAD_VS'])
        self.save_model_path = '../data/model/%s_tpgr_model_%s' % (self.config['ENV']['RATING_FILE'], self.config['TPGR']['MODEL_SAVE_VS'].split('s')[0])
        self.tree_file_path = '../data/run_time/%s_tree_model_%s_%s_c%d_%s' % (self.config['ENV']['RATING_FILE'], self.config['TPGR']['CLUSTERING_VECTOR_TYPE'].lower(), self.config['TPGR']['CLUSTERING_TYPE'].lower(), self.child_num, self.config['TPGR']['TREE_VS'])
        self.hidden_units = [int(item) for item in self.config['TPGR']['HIDDEN_UNITS'].split(',')] if self.config['TPGR']['HIDDEN_UNITS'].lower() != 'none' else []

        self.forward_env = Env(self.config)
        self.user_num, self.item_num, self.r_matrix, self.user_to_rele_num = self.forward_env.get_init_data()

        self.boundry_user_id = self.forward_env.boundry_user_id
        self.test_user_num = int(self.user_num/self.eval_batch_size)*self.eval_batch_size-self.boundry_user_id
        self.bc_dim = int(math.ceil(math.log(self.item_num, self.child_num)))

        self.env = [Env(self.config, self.user_num, self.item_num, self.r_matrix, self.user_to_rele_num) for i in range(max(self.train_batch_size, self.eval_batch_size * int(math.ceil(self.user_num / self.eval_batch_size))))]

        ###
        self.rnn_input_dim = self.action_dim + self.reward_dim + self.statistic_dim
        self.rnn_output_dim = self.rnn_input_dim
        self.layer_units = [self.statistic_dim + self.rnn_output_dim] + self.hidden_units + [self.child_num]

        self.is_eval = False
        self.qs_mean_list = []
        self.storage = []

        self.training_steps = 0
        if self.load_model:
            self.training_steps = int(self.config['TPGR']['MODEL_LOAD_VS'].split('s')[-1])

        tree_model = utils.pickle_load(self.tree_file_path)
        self.id_to_code, self.code_to_id = (tree_model['id_to_code'], tree_model['code_to_id'])
        self.aval_val = self.get_aval()
        self.log.log('making graph')
        self.make_graph()
        self.sess.run(tf.global_variables_initializer())
        self.log.log('graph made')
Example #11
def run_experiment(exp_id='default', data_input='inputs/tsp_std.in', repeats=10,
                   mut_max_len=10, mut_prob=0.2, cx_prob=0.8, max_gen=500,
                   pop_size=100, fitness=fitness, create_ind=None,
                   cross=order_cross, mutate=swap_mutate, print_frequency=5,
                   progress_callback=None):
    # read the locations from input
    locations = read_locations(data_input)

    # use `functools.partial` to fix some arguments of the functions
    # and create functions with the required signatures
    cr_ind = functools.partial(create_ind, ind_len=len(locations))


    globals()['cities'] = locations
    fit = functools.partial(fitness, cities=locations)
    xover = functools.partial(crossover, cross=cross, cx_prob=cx_prob)
    mut = functools.partial(mutation, mut_prob=mut_prob, 
                            mutate=functools.partial(mutate, max_len=mut_max_len))


    # run the algorithm `REPEATS` times and remember the best solutions from 
    # last generations
    best_inds = []
    best_objective = 100000000
    for run in range(repeats):
        # initialize the log structure
        log = utils.Log(OUT_DIR, exp_id, run, 
                        write_immediately=True, print_frequency=print_frequency)
        # create population
        pop = create_pop(pop_size, cr_ind)
        # run evolution - notice we use the pool.map as the map_fn
        pop = evolutionary_algorithm(pop, pop_size, max_gen, fit, [xover, mut], tournament_selection, map_fn=map, log=log, progress_callback=progress_callback)
        # remember the best individual from last generation, save it to file
        bi = max(pop, key=fit)
        best_objective = min(best_objective, fit(bi).objective)
        best_inds.append(bi)

        best_template = '{individual}'
        with open('resources/kmltemplate.kml') as f:
            best_template = f.read()

        with open(f'{OUT_DIR}/{exp_id}_{run}.best', 'w') as f:
            f.write(str(bi))

        with open(f'{OUT_DIR}/{exp_id}_{run}.best.kml', 'w') as f:
            bi_kml = [f'{locations[i][1]},{locations[i][0]},5000' for i in bi]
            bi_kml.append(f'{locations[bi[0]][1]},{locations[bi[0]][0]},5000')
            f.write(best_template.format(individual='\n'.join(bi_kml)))
        
        # if we used write_immediately = False, we would need to save the 
        # files now
        # log.write_files()

    # print an overview of the best individuals from each run
    for i, bi in enumerate(best_inds):
        print(f'Run {i}: difference = {fit(bi).objective}')

    # write summary logs for the whole experiment
    utils.summarize_experiment(OUT_DIR, exp_id)

    return best_objective
Example #12
 def __init__(self,
              xml_filename,
              script_path,
              default_skin="Default",
              default_res="720p",
              *args,
              **kwargs):
     self.log = utils.Log()
     WindowXMLDialog.__init__(self)
Example #13
 def __init__(self,
              xml_filename,
              script_path,
              default_skin="Default",
              default_res="720p",
              *args,
              **kwargs):
     self.log = utils.Log()
     self.caption = utils.translate(32603)
     xbmcgui.WindowXMLDialog.__init__(self)
Example #14
 def complete_play(self, text, line, *ignored):
     m = re.match(r'[a-z-]+ +([0-9a-z-]+) +', line)
     if m:
         name = m.group(1)
         log = utils.read_log(name)
         if not log:
             return ["1"]
         ep = utils.Log(log).eps.split('-')[-1]
         return [str(int(ep) + 1)]
     return self.completedefault(text, line, *ignored)
Example #15
def continue_play(args, play_func=play_anime):
    name, _ = read_args(args, episodes=False)
    log = utils.Log(utils.read_log().get(name))
    watch_later = utils.read_log(name, logfile=config.watchlaterfile)
    if watch_later:
        episodes = utils.extract_range(utils.Log(watch_later).eps)
    else:
        _, episodes = read_args(args)
    outputs.prompt_val("Watched",
                       log._eps, "success", end='\t')
    outputs.normal_info(log.last_updated_fmt)
    if not log.eps:
        last = 0
    else:
        last = int(re.split('-|,', log.eps)[-1])
    to_play = utils.compress_range(filter(lambda e: e > last, episodes))
    if to_play.strip():
        play_func([name, to_play])
    else:
        unsave_anime(name)
Example #16
    def __init__(self, config, sess):
        self.config = config
        self.sess = sess
        self.log = utils.Log()

        self.child_num = int(self.config['TPGR']['CHILD_NUM'])
        self.clustering_type = self.config['TPGR']['CLUSTERING_TYPE']
        self.clustering_vector_file_path = '../data/run_time/%s_%s_vector_v%s' % (config['ENV']['RATING_FILE'], self.config['TPGR']['CLUSTERING_VECTOR_TYPE'].lower(), self.config['TPGR']['CLUSTERING_VECTOR_VERSION'])
        self.tree_file_path = '../data/run_time/%s_tree_model_%s_%s_c%d_%s' % (self.config['ENV']['RATING_FILE'], self.config['TPGR']['CLUSTERING_VECTOR_TYPE'].lower(), self.config['TPGR']['CLUSTERING_TYPE'].lower(), self.child_num, self.config['TPGR']['TREE_VS'])

        self.env = Env(self.config)
        self.bc_dim = int(math.ceil(math.log(self.env.item_num, self.child_num)))
Example #17
def test(net, test_loader, config, data_config):
    device = config['device']
    task_path = config['save_path']
    num_classes = data_config['total_classes']

    classes = data_config['classes']

    log = utils.Log(task_path)

    net.eval()
    total, single_total_correct, test_loss = 0.0, 0.0, 0.0

    class_per_sample = np.zeros([num_classes])

    single_class_correct = np.zeros([num_classes])
    single_class_accuracy = np.zeros([num_classes])

    criterion = nn.CrossEntropyLoss()

    with torch.no_grad():
        for data in test_loader:
            images, labels = data[0].to(device), data[1].to(device)
            cur_batch_size = images.size(0)
            outputs = net(images)

            loss = criterion(outputs, labels)
            test_loss += loss.item() * cur_batch_size

            _, predicted = torch.max(outputs, 1)
            c = (predicted == labels)
            for idx in range(c.size()[0]):
                label = labels[idx]
                single_class_correct[label] += c[idx].item()
                class_per_sample[label] += 1

    range_class = range(num_classes)
    for idx in range_class:
        total += class_per_sample[idx]

        single_total_correct += single_class_correct[idx]
        single_class_accuracy[
            idx] = 100.0 * single_class_correct[idx] / class_per_sample[idx]

        log.info('Accuracy of %s - single-head: %.3lf%%' %
                 (classes[idx], single_class_accuracy[idx]))

    test_loss /= total
    single_total_accuracy = 100.0 * single_total_correct / total
    log.info(
        "single-head test accuracy: %.3lf%% test_loss: %.3lf test_sample: %d" %
        (single_total_accuracy, test_loss, total))
    return test_loss, single_total_accuracy, single_class_accuracy
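Examples #17, #28, #29 and #30 use yet another shape: utils.Log(task_path) with an info() method, i.e. a per-task logger that writes into the task's save directory. A minimal stand-in (the file name and line format are assumptions):

import os
import time


class Log:
    def __init__(self, task_path, filename='train.log'):
        os.makedirs(task_path, exist_ok=True)
        self.path = os.path.join(task_path, filename)

    def info(self, message):
        # print and append a timestamped line under the task directory
        line = '[%s] %s' % (time.strftime('%Y-%m-%d %H:%M:%S'), message)
        print(line)
        with open(self.path, 'a') as f:
            f.write(line + '\n')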
Example #18
def list_episodes(args):
    name, _ = read_args(args, episodes=False)
    available_rng = anime_source_module.get_episodes_range(anime_source_module.get_anime_url(name))
    if len(args) == 2:
        _, episodes = read_args(args)
        eps = set(episodes)
        avl_eps = set(utils.extract_range(available_rng))
        res = eps.intersection(avl_eps)
        available_rng = utils.compress_range(res)
    outputs.prompt_val("Available episodes", available_rng)
    log = utils.Log(utils.read_log(name))
    outputs.prompt_val("Watched episodes", log.eps, "success", end=' ')
    outputs.normal_info(log.last_updated_fmt)
    utils.write_cache(name)
Example #19
    def _onSettingsChanged(self, init=False):
        relaunch = False

        for key in self.settings:
            value = utils.getSetting(key)
            if value != self.settings[key]:
                relaunch           = True
                self.settings[key] = value

        if init:
            return

        if relaunch:
            utils.Log('Settings changed - relaunching')
            self.relaunch()
Example #20
def save_anime(args):
    """Put the anime into watch later list."""
    anime_name, eps = read_args(args)
    watched = utils.read_log(anime_name)
    if watched:
        watched_eps = utils.extract_range(utils.Log(watched).eps)
    else:
        watched_eps = []
    save_eps = set(eps).difference(set(watched_eps))
    if not save_eps:
        outputs.warning_info('Already watched the provided episodes.')
        return
    utils.write_log(anime_name,
                    utils.compress_range(save_eps),
                    append=True,
                    logfile=config.watchlaterfile)
Example #21
def track_anime(args):
    """Put an anime into the track list"""
    anime_name, episodes = read_args(args, episodes=False)
    log = utils.read_log(anime_name)
    if log is None:
        outputs.warning_info(
            "Log entry not found.")
        if not episodes:
            _, episodes = read_args(args)
        episodes = utils.compress_range(episodes)
    else:
        episodes = utils.Log(log).eps
        outputs.prompt_val("Watched", episodes, "success")
    utils.write_log(anime_name,
                    episodes,
                    append=False,
                    logfile=config.ongoingfile)
Example #22
    def onFocus(self, controlId):
        utils.Log('onFocus %d' % controlId)

        if controlId == SETTINGS:
            return xbmcgui.Window(10000).setProperty(
                'GVAX_DESC', 'Edit your GVAx settings')

        if controlId == VPN:
            return xbmcgui.Window(10000).setProperty(
                'GVAX_DESC', 'Use the VPN to connect to different countries')

        if controlId == TVGUIDE:
            return xbmcgui.Window(10000).setProperty(
                'GVAX_DESC', 'Open the TV Guide to show what is on TV')

        if controlId == MOVIES:
            return xbmcgui.Window(10000).setProperty(
                'GVAX_DESC', 'Open your Movie Library')

        if controlId == TVSHOWS:
            return xbmcgui.Window(10000).setProperty(
                'GVAX_DESC', 'Open your TV Show Library')

        if controlId == NETFLIX:
            return xbmcgui.Window(10000).setProperty(
                'GVAX_DESC', 'Watch content from Netflix')

        if controlId == MOVIEANDTV:
            return xbmcgui.Window(10000).setProperty(
                'GVAX_DESC', 'Browse and search for TV Shows and Movies')

        if controlId == WORLDTV:
            return xbmcgui.Window(10000).setProperty(
                'GVAX_DESC', 'Browse and search for TV Shows and Movies')

        if controlId == ANDROID:
            return xbmcgui.Window(10000).setProperty('GVAX_DESC',
                                                     'Open your Android Apps')

        if controlId == ADULT:
            return xbmcgui.Window(10000).setProperty('GVAX_DESC',
                                                     'Adult Section')

        xbmcgui.Window(10000).setProperty('GVAX_DESC', '')
Example #23
def run_experiment(max_gen=MAX_GEN, mutation_op=Mutation(step_size=MUT_STEP),
                   cross=one_pt_cross, cx_prob=CX_PROB, exp_id=EXP_ID,
                   silent=True):

    # use `functools.partial` to fix some arguments of the functions
    # and create functions with the required signatures

    for fit_gen, fit_name in zip(fit_generators, fit_names):
        fit = fit_gen(DIMENSION)
        xover = functools.partial(crossover, cross=cross, cx_prob=cx_prob)
        mut = functools.partial(
            mutation, mut_prob=MUT_PROB, mutate=mutation_op)

        # run the algorithm `REPEATS` times and remember the best solutions from
        # last generations

        best_inds = []
        for run in range(REPEATS):
            # initialize the log structure
            log = utils.Log(OUT_DIR, exp_id + '.' + fit_name, run,
                            write_immediately=True, print_frequency=5, silent=silent)
            # create population
            pop = create_pop(POP_SIZE, cr_ind)
            # run evolution - notice we use the pool.map as the map_fn
            pop = evolutionary_algorithm(pop, max_gen, fit, [
                                         xover, mut], tournament_selection, mutation_op, map_fn=map, log=log)
            # remember the best individual from last generation, save it to file
            bi = max(pop, key=fit)
            best_inds.append(bi)

            # if we used write_immediately = False, we would need to save the
            # files now
            # log.write_files()

        # print an overview of the best individuals from each run
        for i, bi in enumerate(best_inds):
            if not silent:
                print(f'Run {i}: objective = {fit(bi).objective}')  

        # write summary logs for the whole experiment
        utils.summarize_experiment(OUT_DIR, exp_id + '.' + fit_name)
Example #24
    def show(self):

        for f in self.files:
            utils.Log(f)
Example #25
#!/usr/bin/env python
__author__ = 'Felix'
import time
import os, sys
sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import os.path
import utils

logger = utils.Log('VPLAY')
#logging.basicConfig(level=logging.INFO,format='[%(levelname)s]:%(message)s')


def task():
    print "task ..."


def timer(n):
    while True:
        logger.info(time.strftime('%Y-%m-%d %X', time.localtime()))
        task()
        time.sleep(n)


url_dict = {
    'video1-jygmobile-zh':
    'http://221.181.100.24:8088/wd_r1/jygmobile/zh/200/index.m3u8?is_ec=1',
    'video2-jygmobile-gg':
    'http://221.181.100.24:8088/wd_r1/jygmobile/gg/200/index.m3u8?is_ec=1',
    'video6-gesee-chcdzdy':
    'http://221.181.100.24:8088/wd_r1/gesee/chcdzdy/200/index.m3u8?is_ec=1',
    'video19-nfmedia-zjdy':
Example #26
    # we can use multiprocessing to evaluate fitness in parallel
    import multiprocessing

    pool = multiprocessing.Pool()

    import matplotlib.pyplot as plt

    # run the algorithm `REPEATS` times and remember the best solutions from
    # last generations
    best_inds = []
    for run in range(REPEATS):
        # initialize the log structure
        log = utils.Log(OUT_DIR,
                        EXP_ID,
                        run,
                        write_immediately=True,
                        print_frequency=5)
        # create population
        pop = create_pop(POP_SIZE, cr_ind)
        # run evolution - notice we use the pool.map as the map_fn
        pop = evolutionary_algorithm(pop,
                                     MAX_GEN,
                                     fit, [xover, mut],
                                     roulette_wheel_selection,
                                     map_fn=pool.map,
                                     log=log)
        # remember the best individual from last generation, save it to file
        bi = max(pop, key=fit)
        best_inds.append(bi)
Example #27
    def __init__(self, config, user_num=None, item_num=None,
                 r_matrix=None, user_to_rele_num=None):
        self.config = config
        self.action_dim = int(self.config['META']['ACTION_DIM'])
        self.episode_length = int(self.config['META']['EPISODE_LENGTH'])
        self.alpha = float(self.config['ENV']['ALPHA'])
        self.boundary_rating = float(self.config['ENV']['BOUNDARY_RATING'])
        self.log = utils.Log()
        # normalize the reward: reward = self.a * rating + self.b maps [MIN_RATING, MAX_RATING] onto [-1, 1]
        self.a = 2.0 / (float(self.config['ENV']['MAX_RATING']) -
                        float(self.config['ENV']['MIN_RATING']))
        self.b = - (float(self.config['ENV']['MAX_RATING']) +
                    float(self.config['ENV']['MIN_RATING'])) /      \
            (float(self.config['ENV']['MAX_RATING']) -
             float(self.config['ENV']['MIN_RATING']))

        # calculate boredom
        self.beta = float(self.config['ENV']['BETA'])
        self.boredom_len = int(self.config['ENV']['BOREDOM_LENGTH'])
        self.boredom_order = int(self.config['ENV']['BOREDOM_ORDER'])
        self.genre_cnt = int(self.config['GENRE']['GENRE_COUNT'])
        self.genre_paras = [[] for i in range(self.genre_cnt)]   # dict to list
        genres = [self.config['GENRE']['GENRE_'+str(i)] 
                    for i in range(self.genre_cnt)]     # names only used for visualization and for reading genre_paras
        for i in range(self.genre_cnt):
            for j in range(self.boredom_order + 1):
                self.genre_paras[i].append(float(self.config['GENRE'][genres[i] + '_' + str(j)]))

        # read movies' genres file
        genre_file_path = '../data/rating/' + self.config['ENV']['GENRE_FILE']
        self.item_genre, self.item_subId, self.genre_items = \
            utils.read_genre_file(genre_file_path, self.genre_cnt)
        self.genre_item_nums = [len(items) for items in self.genre_items]

        # read rating file
        if user_num is not None:
            self.user_num = user_num
            self.item_num = item_num
            self.r_matrix = r_matrix
            self.user_to_rele_num = user_to_rele_num
            self.boundry_user_id = int(self.user_num * 0.8)
            self.test_user_num = self.user_num - self.boundry_user_id
        else:
            rating_file_path = '../data/rating/' + \
                self.config['ENV']['RATING_FILE']
            rating = np.loadtxt(fname=rating_file_path, delimiter='\t')

            self.user = set()
            self.item = set()
            for i, j, k in rating:
                self.user.add(int(i))
                self.item.add(int(j))

            self.user_num = len(self.user)
            self.item_num = len(self.item)
            self.boundry_user_id = int(self.user_num * 0.8)
            self.test_user_num = self.user_num - self.boundry_user_id

            # if you replace the rating file without renaming, you should delete
            # the old env object file before you run the code
            env_object_path = '../data/run_time/%s_env_objects' %                   \
                self.config['ENV']['RATING_FILE']
            if os.path.exists(env_object_path):
                objects = utils.pickle_load(env_object_path)
                self.r_matrix = objects['r_matrix']
                self.user_to_rele_num = objects['user_to_rele_num']
            else:
                self.r_matrix = coo_matrix((rating[:, 2], (rating[:, 0].astype(int),
                                                           rating[:, 1].astype(int)))).todok()

                self.log.log('construct relevant item number for each user')
                self.user_to_rele_num = {}
                for u in tqdm(range(self.user_num)):
                    rele_num = 0
                    for i in range(self.item_num):
                        if self.r_matrix[u, i] >= self.boundary_rating:
                            rele_num += 1
                    self.user_to_rele_num[u] = rele_num

                # dump the env object
                utils.pickle_save({'r_matrix': self.r_matrix,
                                   'user_to_rele_num': self.user_to_rele_num},
                                  env_object_path)

            item_embedding_file_path = '../data/run_time/' +                        \
                self.config['ENV']['RATING_FILE'] +         \
                '_item_embedding_dim%d' % self.action_dim
            if not os.path.exists(item_embedding_file_path):
                run_time_tools.mf_with_bias(self.config)
            self.item_embedding = np.loadtxt(item_embedding_file_path,
                                             dtype=float, delimiter='\t')
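The normalization in Example #27 is a linear map of the configured rating range onto [-1, 1]: with a = 2 / (MAX - MIN) and b = -(MAX + MIN) / (MAX - MIN), the minimum rating lands on -1 and the maximum on +1. A quick check with example bounds (5-star ratings assumed here, not taken from the project's config):

# verify that reward = a * rating + b maps [min_rating, max_rating] onto [-1, 1]
max_rating, min_rating = 5.0, 1.0  # example values only
a = 2.0 / (max_rating - min_rating)
b = -(max_rating + min_rating) / (max_rating - min_rating)
assert abs(a * min_rating + b + 1.0) < 1e-9  # min rating -> -1
assert abs(a * max_rating + b - 1.0) < 1e-9  # max rating -> +1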
Example #28
def _train(task_idx, net, train_taskset, sample_memory, test_taskset, config,
           method_config, data_config, logger):
    """
    Args:
        config (dict): config file dictionary.
            task_path (str): directory for save. (taskwise, save_path + task**)
            cml_classes (int): size of cumulative taskset
    """
    batch_size = config['batch_size']
    classes_per_task = config['classes_per_task']
    task_path = config['task_path']
    num_workers = config['num_workers']
    device = config['device']
    cml_classes = config['cml_classes']

    process_list = method_config['process_list']

    log = utils.Log(task_path)
    epoch = 0
    single_best_accuracy, multi_best_accuracy = 0.0, 0.0
    for process in process_list:
        epochs = process['epochs']
        balance_finetune = process['balance_finetune']
        optimizer = process['optimizer'](net.parameters())
        scheduler = process['scheduler'](optimizer)
        criterion = nn.CrossEntropyLoss()

        if balance_finetune and cml_classes != classes_per_task:
            train_set = copy.deepcopy(sample_memory)
            train_set.update(BF=True)
        else:
            train_set = torch.utils.data.ConcatDataset(
                (train_taskset, sample_memory))

        train_loader = torch.utils.data.DataLoader(train_set,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=num_workers)
        test_loader = torch.utils.data.DataLoader(test_taskset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=num_workers)

        log.info("Start Training")
        for ep in range(epochs):
            log.info("%d Epoch Started" % epoch)
            net.train()
            epoch_loss = 0.0
            total = 0

            for i, data in enumerate(train_loader):
                utils.printProgressBar(i + 1,
                                       len(train_loader),
                                       prefix='train')
                images, labels = data[0].to(device), data[1].to(device)
                cur_batch_size = images.size(0)

                optimizer.zero_grad()

                outputs = net(images)

                loss = criterion(outputs, labels)
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item() * cur_batch_size
                total += cur_batch_size

            epoch_loss /= total
            selector.scheduler.step(scheduler, epoch_loss)

            log.info("epoch: %d  train_loss: %.3lf  train_sample: %d" %
                     (epoch, epoch_loss, total))
            if ep == (epochs - 1):
                test_loss, single_total_accuracy, single_class_accuracy = \
                    test(net, test_loader, config, data_config, task_idx, True)
            logger.epoch_step()
            epoch += 1

        make_heatmap(net, test_loader, config)
        log.info("Finish Training")

    return net
Example #29
def consolidation(task_idx, old_net, cur_net, taskset, test_taskset, config,
                  method_config, data_config, logger):

    model = config['model']
    cml_classes = config['cml_classes']
    batch_size = config['batch_size']
    classes_per_task = config['classes_per_task']
    task_path = config['task_path']
    num_workers = config['num_workers']
    device = config['device']

    print(model, cml_classes, batch_size, classes_per_task)

    process_list = method_config['consolidation_process_list']

    log = utils.Log(task_path)
    epoch = 0
    single_best_accuracy, multi_best_accuracy = 0.0, 0.0
    net = selector.model(model, device, cml_classes)

    for param in old_net.parameters():
        param.requires_grad = False

    for param in cur_net.parameters():
        param.requires_grad = False

    for process in process_list:
        epochs = process['epochs']
        optimizer = process['optimizer'](net.parameters())
        scheduler = process['scheduler'](optimizer)
        criterion = nn.MSELoss()

        train_loader = torch.utils.data.DataLoader(taskset,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   num_workers=num_workers)
        test_loader = torch.utils.data.DataLoader(test_taskset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=num_workers)

        log.info("Start Consolidation")
        for ep in range(epochs):
            log.info("%d Epoch Started" % epoch)
            net.train()
            old_net.eval()
            cur_net.eval()
            epoch_loss = 0.0
            total = 0

            for i, data in enumerate(train_loader):
                utils.printProgressBar(i + 1,
                                       len(train_loader),
                                       prefix='train')
                images = data[0].to(device)
                cur_batch_size = images.size(0)

                optimizer.zero_grad()

                outputs_old = old_net(images)
                outputs_cur = cur_net(images)
                outputs_old -= outputs_old.mean(dim=1).reshape(
                    cur_batch_size, -1)
                outputs_cur -= outputs_cur.mean(dim=1).reshape(
                    cur_batch_size, -1)

                outputs_tot = torch.cat((outputs_old, outputs_cur), dim=1)
                outputs = net(images)
                loss = criterion(outputs, outputs_tot)
                loss.backward()
                optimizer.step()

                epoch_loss += loss.item() * cur_batch_size
                total += cur_batch_size

            epoch_loss /= total
            selector.scheduler.step(scheduler, epoch_loss)

            log.info("epoch: %d  train_loss: %.3lf  train_sample: %d" %
                     (epoch, epoch_loss, total))

            if ep == (epochs - 1):
                test_loss, single_total_accuracy, single_class_accuracy, multi_total_accuracy, multi_class_accuracy = \
                    test(net, test_loader, config, data_config, task_idx, False)
                torch.save(net, os.path.join(task_path, 'consolidated_model'))
            logger.epoch_step()
            epoch += 1

        log.info("Finish Consolidation")

    return net
Example #30
def _train(task_idx, model, ewc, importance, train_taskset, sample_memory, test_taskset, config, method_config, data_config, logger):
    """
    Args:
        config (dict): config file dictionary.
            task_path (str): directory for save. (taskwise, save_path + task**)
            cml_classes (int): size of cumulative taskset
    """
    batch_size = config['batch_size']
    classes_per_task = config['classes_per_task']
    task_path = config['task_path']
    num_workers = config['num_workers']
    device = config['device']
    cml_classes = config['cml_classes']

    process_list = method_config['process_list']

    log = utils.Log(task_path)
    epoch = 0
    single_best_accuracy, multi_best_accuracy = 0.0, 0.0

    for process in process_list:
        epochs = process['epochs']
        balance_finetune = process['balance_finetune']
        optimizer = process['optimizer'](model.parameters())
        scheduler = process['scheduler'](optimizer)

        if balance_finetune and cml_classes != classes_per_task:
            train_set = copy.deepcopy(sample_memory)
            train_set.update(BF=True)
        else:
            train_set = torch.utils.data.ConcatDataset((train_taskset, sample_memory))

        train_loader = torch.utils.data.DataLoader(train_set, batch_size=batch_size,
                                                   shuffle=True, num_workers=num_workers)
        
        test_loader = torch.utils.data.DataLoader(test_taskset, batch_size=batch_size,
                                                  shuffle=False, num_workers=num_workers)
        log.info("Start Training")
        for ep in range(epochs):
            log.info("%d Epoch Started" % epoch)
            model.train()
            epoch_loss = 0.0
            total = 0
            ce_loss = 0.0
            con_loss = 0.0

            for i, data in enumerate(train_loader):
                utils.printProgressBar(i + 1, len(train_loader), prefix='train')
                images, labels = data[0].to(device), data[1].to(device)
                cur_batch_size = images.size(0)
                optimizer.zero_grad()
                outputs = model(images, task_idx)
                loss = F.cross_entropy(outputs, labels)
                epoch_loss += loss.item() * cur_batch_size
                """if type(loss) == tuple:
                    ce_loss += loss[0].item() * cur_batch_size
                    con_loss += loss[1].item() * cur_batch_size
                    loss = 0.1*loss[0] + loss[1]
                else:
                    ce_loss += loss.item() * cur_batch_size"""
                loss.backward()
                ewc.update()
                loss_ewc = importance * ewc.penalty()
                if loss_ewc != 0:
                    loss_ewc.backward()
                optimizer.step()
                total += cur_batch_size

            epoch_loss /= total
            """ce_loss /= total
            con_loss /= total"""
            selector.scheduler.step(scheduler, epoch_loss)
            """
            log.info("[CE_LOSS] : %.3lf" % ce_loss)
            if con_loss != 0:
                log.info("[Consolidate_LOSS] : %.3lf" % con_loss)"""
            log.info("epoch: %d  train_loss: %.3lf  train_sample: %d" % (epoch, epoch_loss, total))
            test_loss, single_total_accuracy, single_class_accuracy, multi_total_accuracy, multi_class_accuracy = \
                test(model, test_loader, config, data_config)
            single_best_accuracy, multi_best_accuracy = \
                utils.save_model(model, single_best_accuracy, multi_best_accuracy,
                                 single_total_accuracy, single_class_accuracy,
                                 multi_total_accuracy, multi_class_accuracy, task_path, ep, epochs)
            logger.printStatics(epoch_loss, test_loss, single_total_accuracy)
            logger.epoch_step()
            epoch += 1

        make_heatmap(model, test_loader, config)
        log.info("Finish Training")
        return model, ewc
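Example #30 delegates regularization to an ewc helper: ewc.update() is called after the task-loss backward pass and importance * ewc.penalty() is backpropagated as an extra quadratic term. As a reference for what such a penalty typically computes, here is a standard Elastic Weight Consolidation sketch (a sum of Fisher-weighted squared parameter drift); this is the textbook formulation, not necessarily how this project's ewc object is implemented.

import torch


class EWC:
    """Sketch: penalty = sum_i F_i * (theta_i - theta_star_i)^2."""

    def __init__(self, model):
        self.model = model
        # snapshot of the parameters after the previous task
        self.star = {n: p.detach().clone() for n, p in model.named_parameters()}
        # running Fisher estimate, accumulated from squared gradients
        self.fisher = {n: torch.zeros_like(p) for n, p in model.named_parameters()}

    def update(self):
        # call after loss.backward(): accumulate squared gradients as a Fisher proxy
        for n, p in self.model.named_parameters():
            if p.grad is not None:
                self.fisher[n] += p.grad.detach() ** 2

    def penalty(self):
        loss = 0.0
        for n, p in self.model.named_parameters():
            loss = loss + (self.fisher[n] * (p - self.star[n]) ** 2).sum()
        return loss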