Example #1
    #  Run learning of task 1
    write_to_log('--- Standard learning of task #1', prm)
    test_err_orig[i], transferred_model = learn_single_standard.run_learning(task1_data, prm)

    # Generate the task 2 data set:

    write_to_log('--- Generating task #2 with at most {} samples'.format(limit_train_samples), prm)
    task2_data = task_generator.get_data_loader(prm, limit_train_samples = limit_train_samples)

    #  Run learning of task 2 from scratch:
    write_to_log('--- Standard learning of task #2 from scratch', prm)
    test_err_scratch[i], _ = learn_single_standard.run_learning(task2_data, prm, verbose=0)

    #  Run Bayesian-learning of task 2 from scratch:
    write_to_log('--- Bayesian learning of task #2 from scratch', prm)
    _, test_err_scratch_bayes[i], _, _ = learn_single_Bayes.run_learning(task2_data, prm, verbose=0)

    #  Run learning of task 2 using transferred initial point:
    write_to_log('--- Standard learning of task #2 using transferred weights as initial point', prm)
    test_err_transfer[i], _ = learn_single_standard.run_learning(task2_data, prm, initial_model=transferred_model, verbose=0)

    #  Run learning of task 2 using transferred initial point + freeze some layers:
    write_to_log('--- Standard learning of task #2 using transferred weights as initial point + ' + freeze_description, prm_freeze)
    test_err_freeze[i], _ = learn_single_standard.run_learning(task2_data, prm_freeze, initial_model=transferred_model, verbose=0)
    #
    # #  Run learning of task 2 from scratch + weight regularization:
    # write_to_log('--- Standard learning of task #2 from scratch + weight regularization', prm_reg)
    # test_err_scratch_reg[i], _ = learn_single_standard.run_learning(task2_data, prm_reg, verbose=0)
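
Example #1 is clearly the body of a repetition loop: it indexes pre-allocated error arrays with i and uses names (task1_data, transferred_model, limit_train_samples, prm_freeze, freeze_description) that are defined outside the excerpt. A minimal sketch of what that surrounding setup might look like, assuming prm and the imports used in the other examples; n_experiments and the freeze settings are hypothetical:

import numpy as np
from copy import deepcopy

n_experiments = 10              # hypothetical number of repetitions
limit_train_samples = 2000      # hypothetical cap on task-2 training samples

# Pre-allocate one test-error slot per repetition:
test_err_orig = np.zeros(n_experiments)
test_err_scratch = np.zeros(n_experiments)
test_err_scratch_bayes = np.zeros(n_experiments)
test_err_transfer = np.zeros(n_experiments)
test_err_freeze = np.zeros(n_experiments)

# Assumed: a copy of prm configured to freeze some of the transferred layers
prm_freeze = deepcopy(prm)
freeze_description = 'freezing the lower layers'   # hypothetical description string

task_generator = data_gen.Task_Generator(prm)

for i in range(n_experiments):
    # Generate the task 1 data set:
    task1_data = task_generator.get_data_loader(prm)
    # ... the excerpt above runs here ...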


# -------------------------------------------------------------------------------------------
Example #2
if run_experiments:

    set_random_seed(prm.seed)

    # Generate task data set:
    task_generator = data_gen.Task_Generator(prm)
    data_loader = task_generator.get_data_loader(
        prm, limit_train_samples=prm.limit_train_samples)

    # Create the prior model:
    prior_model = get_model(prm)
    set_model_values(prior_model, prm.prior_mean, prm.prior_log_var)

    # Learn a posterior that minimizes the chosen bound with the chosen loss function
    post_model, test_err, test_loss, log_mat = learn_single_Bayes.run_learning(
        data_loader, prm, prior_model, init_from_prior=prm.init_from_prior)
    save_run_data(
        prm, {
            'test_err': test_err,
            'test_loss': test_loss,
            'log_mat': log_mat,
            'post_model': post_model
        })

else:
    loaded_prm, loaded_dict = load_saved_vars(prm.result_dir)
    prm = loaded_prm
    # Get the learned posterior and results
    # (the unpacking order must match the key order used in save_run_data above)
    test_err, test_loss, log_mat, post_model = loaded_dict.values()

    set_random_seed(prm.seed)
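
In the else branch, loaded_dict.values() is unpacked positionally, which silently relies on the load helper returning the keys in exactly the order they were passed to save_run_data. A sketch of an order-independent alternative using the same keys:

    # Unpack by key instead of relying on dict ordering:
    test_err = loaded_dict['test_err']
    test_loss = loaded_dict['test_loss']
    log_mat = loaded_dict['log_mat']
    post_model = loaded_dict['post_model']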
Example #3
# Number of Monte-Carlo iterations (for re-parametrization trick):
prm.n_MC = 1

# prm.use_randomness_schedeule = True # False / True
# prm.randomness_init_epoch = 0
# prm.randomness_full_epoch = 500000000

#  Define optimizer:
prm.optim_func, prm.optim_args = optim.Adam, {'lr': prm.lr}
# prm.optim_func, prm.optim_args = optim.SGD, {'lr': prm.lr, 'momentum': 0.9}

# Learning rate decay schedule:
# prm.lr_schedule = {'decay_factor': 0.1, 'decay_epochs': [10, 30]}
prm.lr_schedule = {}  # No decay

# Test type:
prm.test_type = 'MaxPosterior'  # 'MaxPosterior' / 'MajorityVote'

# Generate task data set:
task_generator = data_gen.Task_Generator(prm)
data_loader = task_generator.get_data_loader(
    prm, limit_train_samples=prm.limit_train_samples)

# -------------------------------------------------------------------------------------------
#  Run learning
# -------------------------------------------------------------------------------------------

test_err, _ = learn_single_Bayes.run_learning(data_loader, prm)

save_run_data(prm, {'test_err': test_err})
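
prm.n_MC sets how many Monte-Carlo weight samples are averaged per forward pass via the re-parametrization trick. A minimal, self-contained sketch of that estimator in generic PyTorch (not the repo's learn_single_Bayes internals; mu, log_var and net_forward are illustrative placeholders):

import torch

def mc_expected_loss(mu, log_var, net_forward, loss_fn, x, y, n_MC=1):
    """Average the loss over n_MC re-parametrized weight samples.

    mu, log_var : mean and log-variance tensors of the weight posterior (placeholders)
    net_forward : callable that runs the network with a given weight sample (placeholder)
    """
    total = 0.0
    for _ in range(n_MC):
        eps = torch.randn_like(mu)                 # standard normal noise
        w = mu + torch.exp(0.5 * log_var) * eps    # re-parametrized weight sample
        total = total + loss_fn(net_forward(w, x), y)
    return total / n_MC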
Example #4
# Number of Monte-Carlo iterations (for re-parametrization trick):
prm.n_MC = 1

# prm.use_randomness_schedeule = True # False / True
# prm.randomness_init_epoch = 0
# prm.randomness_full_epoch = 500000000

#  Define optimizer:
prm.optim_func, prm.optim_args = optim.Adam, {'lr': prm.lr}
# prm.optim_func, prm.optim_args = optim.SGD, {'lr': prm.lr, 'momentum': 0.9}

# Learning rate decay schedule:
# prm.lr_schedule = {'decay_factor': 0.1, 'decay_epochs': [10, 30]}
prm.lr_schedule = {}  # No decay

# Test type:
prm.test_type = 'MaxPosterior'  # 'MaxPosterior' / 'MajorityVote'

# Generate task data set:
task_generator = data_gen.Task_Generator(prm)
data_loader = task_generator.get_data_loader(
    prm, limit_train_samples=prm.limit_train_samples)

# -------------------------------------------------------------------------------------------
#  Run learning
# -------------------------------------------------------------------------------------------

post_model, test_err, test_loss, log_mat = learn_single_Bayes.run_learning(
    data_loader, prm)

save_run_data(prm, {'test_err': test_err, 'test_loss': test_loss})
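
prm.test_type chooses how the stochastic network is evaluated at test time. A hedged sketch of the usual distinction (illustrative placeholder API, not the repo's code): 'MaxPosterior' runs one deterministic pass with the posterior means, while 'MajorityVote' samples several weight draws and takes a majority vote over the predicted classes.

import torch

def predict(model, x, test_type='MaxPosterior', n_votes=5):
    """Illustrative test-time prediction for a stochastic network (placeholder API)."""
    if test_type == 'MaxPosterior':
        # Single forward pass using the posterior mean weights (no sampling).
        return model(x, sample_weights=False).argmax(dim=1)
    elif test_type == 'MajorityVote':
        # Sample several networks and take a majority vote over class predictions.
        votes = torch.stack([model(x, sample_weights=True).argmax(dim=1)
                             for _ in range(n_votes)])
        return votes.mode(dim=0).values
    raise ValueError(test_type)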