Example #1
        test_err_vec[i_task], test_loss_vec[i_task], test_comp_vec[i_task] = \
            meta_test_Bayes.run_learning(task_data,
                                         prior_model,
                                         prm,
                                         init_from_prior=init_from_prior,
                                         verbose=0)

    write_to_log('----- Epoch {} Results: '.format(i_epoch), prm)
    write_to_log(
        '----- Meta-Testing - Avg test err: {:.3}%, STD: {:.3}%, Avg test loss: {:.3}, STD: {:.3}, Avg test comp: {:.3}, STD: {:.3}'
        .format(100 * test_err_vec.mean(), 100 * test_err_vec.std(),
                test_loss_vec.mean(), test_loss_vec.std(),
                test_comp_vec.mean(), test_comp_vec.std()), prm)

#

# save result
save_run_data(prm, {'test_err_vec': test_err_vec})

# -------------------------------------------------------------------------------------------
#  Print results
# -------------------------------------------------------------------------------------------
#  Print prior analysis
#run_prior_analysis(prior_model)

stop_time = timeit.default_timer()
write_to_log(
    'Total runtime: ' + time.strftime("%H hours, %M minutes and %S seconds",
                                      time.gmtime(stop_time - start_time)),
    prm)

#  Print results
write_to_log('----- Final Results: ', prm)
Example #2
write_to_log('Standard learning of task #1 ({} samples),'
             ' average test error: {:.3}%, STD: {:.3}%'.
             format(n_samples_orig, 100*test_err_orig.mean(), 100*test_err_orig.std()), prm)

write_to_log('Standard learning of task #2 (at most {} samples)'
             ' from scratch, average test error: {:.3}%, STD: {:.3}%'.
             format(limit_train_samples, 100*test_err_scratch.mean(), 100*test_err_scratch.std()), prm)

write_to_log('Bayesian learning of task #2 (at most {} samples)'
             ' from scratch, average test error: {:.3}%, STD: {:.3}%'.
             format(limit_train_samples, 100*test_err_scratch_bayes.mean(), 100*test_err_scratch_bayes.std()), prm)


write_to_log('Standard learning of task #2 (at most {} samples) '
             'from scratch with L2 regularizer, average test error: {:.3}%, STD: {:.3}%'.
             format(limit_train_samples, 100*test_err_scratch_reg.mean(), 100*test_err_scratch_reg.std()), prm_reg)

write_to_log('Standard learning of task #2 (at most {} samples)'
             ' using transferred weights as initial point, average test error: {:.3}%, STD: {:.3}%'.
             format(limit_train_samples, 100*test_err_transfer.mean(), 100*test_err_transfer.std()), prm)

write_to_log('Standard learning of task #2 (at most {} samples) using transferred weights as initial point'
             ' + {}, average test error: {:.3}%, STD: {:.3}%'.
             format(limit_train_samples, freeze_description, 100*test_err_freeze.mean(), 100*test_err_freeze.std()), prm_freeze)


# -------------------------------------------------------------------------------------------
#  Save Results
# -------------------------------------------------------------------------------------------

save_run_data(prm, {'test_err_orig': test_err_orig,
                    'test_err_scratch': test_err_scratch,
                    'test_err_scratch_bayes': test_err_scratch_bayes,
                    'test_err_transfer': test_err_transfer,
                    'test_err_freeze': test_err_freeze,
                    'test_err_scratch_reg': test_err_scratch_reg})
Example #3
create_result_dir(prm)

#  Define optimizer:
prm.optim_func, prm.optim_args = optim.Adam, {'lr': prm.lr}  # optionally add 'weight_decay': 5e-4
# prm.optim_func, prm.optim_args = optim.SGD, {'lr': prm.lr, 'momentum': 0.9, 'weight_decay':5e-4}

# Learning rate decay schedule:
# prm.lr_schedule = {'decay_factor': 0.1, 'decay_epochs': [150, 225]}
prm.lr_schedule = {}  # No decay
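
# A minimal sketch of how a schedule dict like the one above could be applied
# at the start of each epoch (hypothetical helper, not part of this repo):
def _apply_lr_schedule(optimizer, lr_schedule, i_epoch):
    # Multiply every param group's learning rate by 'decay_factor' whenever
    # the current epoch appears in 'decay_epochs'; an empty dict means no decay.
    if lr_schedule and i_epoch in lr_schedule.get('decay_epochs', []):
        for param_group in optimizer.param_groups:
            param_group['lr'] *= lr_schedule['decay_factor']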

# Generate task data set:
task_generator = data_gen.Task_Generator(prm)
data_loader = task_generator.get_data_loader(
    prm, limit_train_samples=prm.limit_train_samples)

# -------------------------------------------------------------------------------------------
#  Run learning
# -------------------------------------------------------------------------------------------

test_err, model = learn_single_standard.run_learning(data_loader, prm)

# save final learned weights
f_name = 'final_weights.pt'
f_path = os.path.join(prm.result_dir, f_name)
f_path = save_model_state(model, f_path)
print('Trained model saved in ' + f_path)
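
# The saved weights can be reloaded later, assuming save_model_state() stores
# the model's state_dict via torch.save (check the repo's Utils for a matching loader):
# reloaded_model = get_model(prm)
# reloaded_model.load_state_dict(torch.load(f_path))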

save_run_data(prm, {'test_err': test_err})
Example #4
    # Generate task data set:
    task_generator = data_gen.Task_Generator(prm)
    data_loader = task_generator.get_data_loader(
        prm, limit_train_samples=prm.limit_train_samples)

    # Create the prior: a stochastic model whose weight distributions are set
    # to the fixed mean and log-variance specified in prm:
    prior_model = get_model(prm)
    set_model_values(prior_model, prm.prior_mean, prm.prior_log_var)

    # Learn a posterior which minimizes some bound with some loss function
    post_model, test_err, test_loss, log_mat = learn_single_Bayes.run_learning(
        data_loader, prm, prior_model, init_from_prior=prm.init_from_prior)
    save_run_data(
        prm, {
            'test_err': test_err,
            'test_loss': test_loss,
            'log_mat': log_mat,
            'post_model': post_model
        })

else:
    loaded_prm, loaded_dict = load_saved_vars(prm.result_dir)
    prm = loaded_prm
    # get learned posterior and results
    test_err, test_loss, log_mat, post_model = loaded_dict.values()
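    # Note: unpacking loaded_dict.values() relies on the dict preserving the
    # insertion order used by save_run_data() (guaranteed in Python 3.7+);
    # keyed access, e.g. loaded_dict['test_err'], would be more robust.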

    set_random_seed(prm.seed)

    # Generate task data set  as in saved file:
    task_generator = data_gen.Task_Generator(prm)
    data_loader = task_generator.get_data_loader(
        prm, limit_train_samples=prm.limit_train_samples)
Example #5
test_err_vec = np.zeros(n_test_tasks)
test_loss_vec = np.zeros(n_test_tasks)
test_bound_vec = np.zeros(n_test_tasks)
for i_task in range(n_test_tasks):
    print('Meta-Testing task {} out of {}...'.format(1 + i_task, n_test_tasks))
    task_data = test_tasks_data[i_task]
    test_err_vec[i_task], test_loss_vec[i_task], test_bound_vec[i_task], _ = \
        meta_test_Bayes.run_learning(task_data, prior_model, prm,
                                     init_from_prior, verbose=0)

# save result
save_run_data(
    prm, {
        'test_err_vec': test_err_vec,
        "test_loss_vec": test_loss_vec,
        'test_bound_vec': test_bound_vec
    })
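
# Log a summary of the meta-test vectors (mirrors the logging in Example #1,
# with the bound replacing the complexity term):
write_to_log(
    '----- Meta-Testing - Avg test err: {:.3}%, STD: {:.3}%, Avg test loss: {:.3}, Avg test bound: {:.3}'
    .format(100 * test_err_vec.mean(), 100 * test_err_vec.std(),
            test_loss_vec.mean(), test_bound_vec.mean()), prm)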

# -------------------------------------------------------------------------------------------
#  Print results
# -------------------------------------------------------------------------------------------
#  Print prior analysis
# Plot all layers' statistics together in one figure:
mean_list, std_list = run_prior_analysis(prior_model, showPlt=True)
# run_prior_analysis(prior_model, showPlt=False)
write_to_log(
    '----- Meta-learner prior model parameters: mean_list: {}, std_list: {}'.
    format(mean_list, std_list), prm)
Example #6
                    prm_eval.divergence_type = val_type[1]
                    val = get_net_densities_divergence(prior_model, post_model,
                                                       prm_eval)
                else:
                    raise ValueError('Invalid val_types')

                val_mat[i_val_type, i_grid, i_rep] = val
            # end val_types loop
        # end reps loop
    # end grid loop

    # Saving the analysis:
    save_run_data(
        prm, {
            'val_mat': val_mat,
            'loss_type_eval': loss_type_eval,
            'train_samples_vec': train_samples_vec,
            'val_types': val_types
        })

else:
    loaded_prm, loaded_dict = load_saved_vars(prm.result_dir)
    prm = loaded_prm
    set_random_seed(prm.seed)
    # get learned posterior and results
    val_mat, loss_type_eval, train_samples_vec, val_types = loaded_dict.values()

# end if run_experiments

val_types_for_show = [['train_loss'], ['test_loss'],
Example #7
prm.n_MC = 1

# prm.use_randomness_schedeule = True # False / True
# prm.randomness_init_epoch = 0
# prm.randomness_full_epoch = 500000000

#  Define optimizer:
prm.optim_func, prm.optim_args = optim.Adam, {'lr': prm.lr}
# prm.optim_func, prm.optim_args = optim.SGD, {'lr': prm.lr, 'momentum': 0.9}

# Learning rate decay schedule:
# prm.lr_schedule = {'decay_factor': 0.1, 'decay_epochs': [10, 30]}
prm.lr_schedule = {}  # No decay

# Test type:
prm.test_type = 'MaxPosterior'  # 'MaxPosterior' / 'MajorityVote'
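# Rough meaning of the two modes (a sketch based on the names; see the repo's
# Bayesian evaluation code for the exact definitions):
#   'MaxPosterior' - predict with a single network, e.g. the posterior-mean weights.
#   'MajorityVote' - sample several weight realizations from the posterior and
#                    vote over their predictions, roughly:
# def majority_vote_predict(post_model, x, n_votes=5):  # hypothetical helper,
#     # assuming each forward pass draws fresh weights from the posterior
#     votes = torch.stack([post_model(x).argmax(dim=1) for _ in range(n_votes)])
#     return votes.mode(dim=0).values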

# Generate task data set:
task_generator = data_gen.Task_Generator(prm)
data_loader = task_generator.get_data_loader(
    prm, limit_train_samples=prm.limit_train_samples)

# -------------------------------------------------------------------------------------------
#  Run learning
# -------------------------------------------------------------------------------------------

post_model, test_err, test_loss, log_mat = learn_single_Bayes.run_learning(
    data_loader, prm)

save_run_data(prm, {'test_err': test_err, 'test_loss': test_loss})