Example #1
# data_trn and data_val are assumed to be loaded analogously earlier in
# the script; this snippet starts mid-file.
data_tst = pd.read_pickle(config.DATA_TEST_FILE)
meta = pd.read_pickle(config.META_FILE)
hpc = pd.read_pickle(config.HPC_FILE)

feats = meta.index
costs = meta[config.META_COSTS]

data_trn[feats] = (data_trn[feats] -
                   meta[config.META_AVG]) / meta[config.META_STD]  # normalize
data_val[feats] = (data_val[feats] -
                   meta[config.META_AVG]) / meta[config.META_STD]  # normalize
data_tst[feats] = (data_tst[feats] -
                   meta[config.META_AVG]) / meta[config.META_STD]  # normalize

#==============================
print("Evaluating dataset:", args.dataset)

brain = Brain(None)
brain._load(file='model_best')

print("Performance on the best model:")
log_trn = Log(data_trn, hpc['train'], costs, brain, "trn_best")
log_trn.log_perf()

log_val = Log(data_val, hpc['validation'], costs, brain, "val_best")
log_val.log_perf()

log_tst = Log(data_tst, hpc['test'], costs, brain, "tst_best")
log_tst.log_perf()
# TODO: optionally log histograms as well:
# log_tst.log_perf(histogram=True)
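
The three normalization lines above are a per-feature z-score: pandas aligns the meta frame's index with the data frame's columns, so the subtraction and division broadcast column-wise. A minimal self-contained sketch of the same pattern, with 'avg' and 'std' as hypothetical stand-ins for the actual config.META_AVG / config.META_STD column names:

import pandas as pd

# Toy data: two features, three samples.
data = pd.DataFrame({'f1': [1.0, 2.0, 3.0], 'f2': [10.0, 20.0, 30.0]})

# Meta frame indexed by feature name; 'avg'/'std' are hypothetical
# stand-ins for config.META_AVG / config.META_STD.
meta = pd.DataFrame({'avg': [2.0, 20.0], 'std': [1.0, 10.0]},
                    index=['f1', 'f2'])

feats = meta.index
# The Series returned by meta['avg'] aligns with data's columns, so this
# normalizes each feature column independently.
data[feats] = (data[feats] - meta['avg']) / meta['std']
print(data)  # both columns become [-1.0, 0.0, 1.0]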
Example #2
# As in Example #1, data_trn/data_val/data_tst, meta and hpc are assumed
# to be loaded earlier in the script.
feats = meta.index
costs = meta[config.META_COSTS]

data_trn[feats] = (data_trn[feats] -
                   meta[config.META_AVG]) / meta[config.META_STD]  # normalize
data_val[feats] = (data_val[feats] -
                   meta[config.META_AVG]) / meta[config.META_STD]  # normalize
data_tst[feats] = (data_tst[feats] -
                   meta[config.META_AVG]) / meta[config.META_STD]  # normalize
# optional per-feature groups; None when the meta frame has no such column
groups = (meta[config.META_GROUPS].values
          if config.META_GROUPS in meta.columns else None)

#==============================
print("Evaluating dataset:", config.dataset)

brain = Brain(None)
brain._load(file='model')

print("Performance on the last model:")

print("Performance on the best model:")
log_trn = Log(data_trn, hpc['train'], costs, brain, groups, "trn_best")
log_trn.log_perf(save_probs=True)

log_val = Log(data_val, hpc['validation'], costs, brain, groups, "val_best")
log_val.log_perf(save_probs=True)

log_tst = Log(data_tst, hpc['test'], costs, brain, groups, "tst_best")
log_tst.log_perf(histogram=True, save_probs=True)
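
Example #2 extends #1 with an optional groups array and evaluates the last checkpoint ('model') rather than the best one. The groups lookup is a common guard for an optional DataFrame column; a small sketch, with 'group' as a hypothetical stand-in for config.META_GROUPS:

import pandas as pd

META_GROUPS = 'group'  # hypothetical stand-in for config.META_GROUPS

meta = pd.DataFrame({'avg': [0.0, 1.0], 'std': [1.0, 2.0]},
                    index=['f1', 'f2'])

# .values yields a plain numpy array; None tells downstream code that
# no grouping information is available.
groups = meta[META_GROUPS].values if META_GROUPS in meta.columns else None
print(groups)  # None, since the toy meta frame has no 'group' column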
Example #3
#==============================
epoch_start = 0
lr_start = config.OPT_LR

# running / averaged Lagrangian (d_lagrangian) statistics for train and val
avg_l = types.SimpleNamespace()
avg_l.trn_avg = []
avg_l.trn_run = []
avg_l.val_avg = []
avg_l.val_run = []
avg_l.trn_lst = 0.
avg_l.val_lst = 0.

if not config.BLANK_INIT:
    print("Loading progress..")
    brain._load()

    with open('run.state', 'r') as file:
        save_data = json.load(file)

    epoch_start = save_data['epoch']
    lr_start = save_data['lr']
    avg_l = types.SimpleNamespace(**save_data['avg_l'])

    if args.target_type == 'cost':
        config.FEATURE_FACTOR = save_data['lmb']
else:
    # truncate files
    open('run_trn_lagrangian.dat', 'w').close()
    open('run_val_lagrangian.dat', 'w').close()
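
The resume logic above round-trips avg_l through JSON: a SimpleNamespace serializes via vars() and is rebuilt with keyword expansion. A minimal sketch of both directions, assuming run.state is written with the same field names the snippet reads:

import json
import types

avg_l = types.SimpleNamespace(trn_avg=[], trn_run=[], val_avg=[],
                              val_run=[], trn_lst=0., val_lst=0.)

# Save: vars() turns the namespace into a JSON-serializable dict.
state = {'epoch': 42, 'lr': 1e-4, 'avg_l': vars(avg_l)}
with open('run.state', 'w') as file:
    json.dump(state, file)

# Restore: mirror of the loading code in the snippet above.
with open('run.state', 'r') as file:
    save_data = json.load(file)
epoch_start = save_data['epoch']
lr_start = save_data['lr']
avg_l = types.SimpleNamespace(**save_data['avg_l'])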
Example #4
def main_(Load=True):
    # NOTE: the 'Load' parameter is unused; SEED, META_COSTS, COLUMN_DROP and
    # the other upper-case names are module-level config constants.

    # Seed every RNG (numpy, python, torch CPU/GPU) for reproducibility.
    np.set_printoptions(threshold=np.inf)
    np.random.seed(SEED)
    random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)

    DATA_FILE = './data/mb-train'
    DATA_VAL_FILE = './data/mb-val'
    META_FILE = './data/mb-meta'
    data = pd.read_pickle(DATA_FILE)

    data_val = pd.read_pickle(DATA_VAL_FILE)
    meta = pd.read_pickle(META_FILE)
    feats = meta.index
    costs = meta[META_COSTS]

    for col in COLUMN_DROP:
        if col in data.columns:
            data.drop(col, axis=1, inplace=True)
            data_val.drop(col, axis=1, inplace=True)

    #data[feats] = (data[feats] - meta[META_AVG]) / meta[META_STD]            # normalize
    #data_val[feats] = (data_val[feats] - meta[META_AVG]) / meta[META_STD]    # normalize
    print("Using", DATA_FILE, "with", len(data), "samples.")
    pool = Pool(POOL_SIZE)
    env = Environment(data, costs, FEATURE_FACTOR)
    brain = Brain(pool)
    print(" brain : ")
    agent = Agent(env, pool, brain)
    #log   = Log(data_val, costs, FEATURE_FACTOR, brain)
    epoch_start = 0

    if not BLANK_INIT:
        print("Loading progress..")
        brain._load()

        with open('run.state', 'r') as file:
            save_data = json.load(file)

        epoch_start = save_data['epoch']

    brain.update_lr(epoch_start)
    agent.update_epsilon(epoch_start)

    #==============================
    print("Initializing pool..")
    for i in range(POOL_SIZE // AGENTS):
        utils.print_progress(i, POOL_SIZE // AGENTS)
        agent.step()

    pool.cuda()
    print("Starting..")
    for epoch in range(epoch_start + 1, TRAINING_EPOCHS + 1):
        # SAVE
        if is_time(epoch, SAVE_EPOCHS):
            brain._save()

            save_data = {'epoch': epoch}

            with open('run.state', 'w') as file:
                json.dump(save_data, file)
            eval_.test_action()  # eval_ is assumed to be defined at module level

        # SET VALUES
        if is_time(epoch, EPSILON_UPDATE_EPOCHS):
            agent.update_epsilon(epoch)

        if is_time(epoch, LR_SC_EPOCHS):
            brain.update_lr(epoch)

        # LOG
        if is_time(epoch, LOG_EPOCHS):
            print("Epoch: {}/{}".format(epoch, TRAINING_EPOCHS))

            #log.log()
            #log.print_speed()

        if is_time(epoch, LOG_PERF_EPOCHS):
            pass  # log.log_perf() once the commented-out Log above is enabled

        # TRAIN
        brain.train()

        for i in range(EPOCH_STEPS):
            agent.step()
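
Every periodic action in the training loop (saving, epsilon and learning-rate updates, logging) is gated by is_time(epoch, period). Its implementation is not shown in these snippets; a plausible minimal version, assuming periods are positive epoch counts:

def is_time(epoch, period):
    # Fires every `period` epochs; a guess at the helper used above.
    return period > 0 and epoch % period == 0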