Example #1
if prm.mode == 'MetaTrain':

    n_train_tasks = prm.n_train_tasks
    if n_train_tasks:
        # In this case we generate a finite set of training (observed) tasks before meta-training.
        # Generate the data sets of the training tasks:
        write_to_log('--- Generating {} training-tasks'.format(n_train_tasks),
                     prm)
        train_data_loaders = task_generator.create_meta_batch(
            prm, n_train_tasks, meta_split='meta_train')
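        # (train_data_loaders is presumably a list with one loader-dict per task,
        # the structure consumed by run_meta_learning in Example #3 below, which
        # indexes each entry as data_loader['train'] / data_loader['test'].)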

        # Meta-training to learn prior:
        prior_model = meta_train_Bayes_finite_tasks.run_meta_learning(
            train_data_loaders, prm)
        # save learned prior:
        save_model_state(prior_model, save_path)
        write_to_log('Trained prior saved in ' + save_path, prm)
    else:
        # In this case we observe new tasks generated from the task-distribution in each meta-iteration.
        write_to_log(
            '---- Infinite train tasks - New training tasks are '
            'drawn from tasks distribution in each iteration...', prm)

        # Meta-training to learn meta-prior (theta params):
        prior_model = meta_train_Bayes_infinite_tasks.run_meta_learning(
            task_generator, prm)

elif prm.mode == 'LoadMetaModel':

    # Load a previously trained prior.
    # First, create the model:
Example #2

set_random_seed(prm.seed)
create_result_dir(prm)

# Define optimizer:
prm.optim_func, prm.optim_args = optim.Adam, {'lr': prm.lr}  # optionally add 'weight_decay': 5e-4
# prm.optim_func, prm.optim_args = optim.SGD, {'lr': prm.lr, 'momentum': 0.9, 'weight_decay':5e-4}

# Learning rate decay schedule:
# prm.lr_schedule = {'decay_factor': 0.1, 'decay_epochs': [150, 225]}
prm.lr_schedule = {}  # No decay

# Generate task data set:
task_generator = data_gen.Task_Generator(prm)
data_loader = task_generator.get_data_loader(
    prm, limit_train_samples=prm.limit_train_samples)
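# (data_loader is presumably the same dict-of-loaders structure used in
# Example #3, with 'train' and 'test' DataLoaders.)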

# -------------------------------------------------------------------------------------------
#  Run learning
# -------------------------------------------------------------------------------------------

test_err, model = learn_single_standard.run_learning(data_loader, prm)

# save final learned weights
f_name = 'final_weights.pt'
f_path = os.path.join(prm.result_dir, f_name)
f_path = save_model_state(model, f_path)
print('Trained model saved in ' + f_path)

save_run_data(prm, {'test_err': test_err})
Example #3
def run_meta_learning(data_loaders, prm):

    # -------------------------------------------------------------------------------------------
    #  Setting-up
    # -------------------------------------------------------------------------------------------
    # Unpack parameters:
    optim_func, optim_args, lr_schedule =\
        prm.optim_func, prm.optim_args, prm.lr_schedule

    # Loss criterion
    loss_criterion = get_loss_criterion(prm.loss_type)

    n_train_tasks = len(data_loaders)

    # Create posterior models for each task:
    posteriors_models = [get_model(prm) for _ in range(n_train_tasks)]
    # Create a 'dummy' model to generate the set of parameters of the shared prior:
    prior_model = get_model(prm)
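    # (The prior and the posteriors share the same architecture, since all are
    # created by get_model(prm).)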
    if prm.from_pretrain:
        if prm.data_source == "CIFAR100":
            pretrain_path = "pretrained_cifar100/epoch-46-acc0.478.pth"
        elif prm.data_source == 'Caltech256':
            pretrain_path = "pretrained_caltech256/epoch-7-acc0.303.pth"
        else:
            raise ValueError('No pretrained weights available for data source: ' + prm.data_source)
        for posterior_model in posteriors_models:
            load_model_state(posterior_model, pretrain_path, pop_softmax=True)

        load_model_state(prior_model, pretrain_path, pop_softmax=True)
        write_to_log('Loaded pretrained weights from ' + pretrain_path, prm)

    # Gather all tasks posterior params:
    all_post_param = sum([list(posterior_model.parameters()) for posterior_model in posteriors_models], [])
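    # (The sum(..., []) idiom concatenates the per-task parameter lists into one flat list.)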

    # Create optimizer for all parameters (posteriors + prior)
    prior_params = list(prior_model.parameters())
    all_params = all_post_param + prior_params
    all_optimizer = optim_func(all_params, **optim_args)

    # number of sample-batches in each task:
    n_batch_list = [len(data_loader['train']) for data_loader in data_loaders]

    n_batches_per_task = np.max(n_batch_list)
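    # (Taking the max rather than the min makes an epoch long enough to cover every
    # batch of the largest task; tasks with fewer batches get re-sampled, as the
    # note inside run_train_epoch below points out.)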

    # -------------------------------------------------------------------------------------------
    #  Training epoch  function
    # -------------------------------------------------------------------------------------------
    def run_train_epoch(i_epoch):

        # For each task, prepare an iterator to generate training batches:
        train_iterators = [iter(data_loaders[ii]['train']) for ii in range(n_train_tasks)]

        # The task order to take batches from:
        # The meta-batch will be balanced - i.e., each task will appear roughly the same number of times.
        # Note: if some tasks have fewer batches than others, their samples may be drawn more than once in an epoch.
        task_order = []
        task_ids_list = list(range(n_train_tasks))
        for i_batch in range(n_batches_per_task):
            random.shuffle(task_ids_list)
            task_order += task_ids_list
        # Note: this method ensures each training sample in each task is drawn in each epoch.
        # If all the tasks have the same number of samples, then each sample is drawn exactly once per epoch.
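        # (For illustration: with n_train_tasks=3 and n_batches_per_task=2, task_order
        # could be [2, 0, 1, 1, 0, 2] - each task id appears exactly n_batches_per_task
        # times, in a freshly shuffled order per round.)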

        # ----------- meta-batches loop (batches of tasks) -----------------------------------#
        # each meta-batch includes several tasks
        # we take a grad step with theta after each meta-batch
        meta_batch_starts = list(range(0, len(task_order), prm.meta_batch_size))
        n_meta_batches = len(meta_batch_starts)
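        # (len(task_order) == n_batches_per_task * n_train_tasks, so there are
        # ceil(len(task_order) / prm.meta_batch_size) meta-batches per epoch.)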

        for i_meta_batch in range(n_meta_batches):

            meta_batch_start = meta_batch_starts[i_meta_batch]
            task_ids_in_meta_batch = task_order[meta_batch_start: (meta_batch_start + prm.meta_batch_size)]
            # The last meta-batch may contain fewer than prm.meta_batch_size tasks.
            # Note: it is OK if some tasks appear several times in the meta-batch.

            mb_data_loaders = [data_loaders[task_id] for task_id in task_ids_in_meta_batch]
            mb_iterators = [train_iterators[task_id] for task_id in task_ids_in_meta_batch]
            mb_posteriors_models = [posteriors_models[task_id] for task_id in task_ids_in_meta_batch]

            # Get objective based on tasks in meta-batch:
            total_objective, info = get_objective(prior_model, prm, mb_data_loaders,
                                                  mb_iterators, mb_posteriors_models, loss_criterion, n_train_tasks)

            # Take gradient step with the shared prior and all tasks' posteriors:
            grad_step(total_objective, all_optimizer, lr_schedule, prm.lr, i_epoch)
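            # (grad_step is assumed to zero gradients, backpropagate total_objective,
            # and step all_optimizer, applying lr_schedule to prm.lr as configured.)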

            # Print training status of current batch:
            log_interval = 200
            if i_meta_batch % log_interval == 0:
                batch_acc = info['correct_count'] / info['sample_count']
                write_to_log(
                    cmn.status_string(i_epoch, prm.n_meta_train_epochs, i_meta_batch,
                                      n_meta_batches, batch_acc, get_value(total_objective)) +
                    ' Empiric-Loss: {:.4}\t Task-Comp.: {:.4}\t Meta-Comp.: {:.4}, w_kld: {:.4}, b_kld: {:.4}'.format(
                        info['avg_empirical_loss'], info['avg_intra_task_comp'],
                        info['meta_comp'], info['w_kld'], info['b_kld']), prm)
        # end  meta-batches loop

    # end run_epoch()

    # -------------------------------------------------------------------------------------------
    #  Test evaluation function -
    # Evaluate the mean accuracy on samples from the test sets of the training tasks
    # --------------------------------------------------------------------------------------------
    def run_test():
        test_acc_avg = 0.0
        n_tests = 0
        for i_task in range(n_train_tasks):
            model = posteriors_models[i_task]
            test_loader = data_loaders[i_task]['test']
            if len(test_loader) > 0:
                test_acc, test_loss = run_test_Bayes(model, test_loader, loss_criterion, prm, verbose=0)
                n_tests += 1
                test_acc_avg += test_acc

                # n_test_samples = len(test_loader.dataset)
                # write_to_log('Train Task {}, Test set: {} - Average loss: {:.4}, Accuracy: {:.3} (of {} samples)\n'.format(
                #     i_task, prm.test_type, test_loss, test_acc, n_test_samples), prm)
            else:
                print('Train Task {}, Test set: {} - No test data'.format(i_task, prm.test_type))

        if n_tests > 0:
            test_acc_avg /= n_tests
        return test_acc_avg

    # -----------------------------------------------------------------------------------------------------------#
    # Main script
    # -----------------------------------------------------------------------------------------------------------#

    # Update Log file

    write_to_log(cmn.get_model_string(prior_model), prm)
    write_to_log('---- Meta-Training set: {0} tasks'.format(len(data_loaders)), prm)

    # -------------------------------------------------------------------------------------------
    #  Run epochs
    # -------------------------------------------------------------------------------------------
    start_time = timeit.default_timer()

    # Training loop:
    for i_epoch in range(prm.n_meta_train_epochs):
        run_train_epoch(i_epoch)
        # Save a checkpoint of the prior and evaluate after every epoch:
        save_path = os.path.join(prm.result_dir, 'Epoch_{}_model.pth'.format(i_epoch))
        save_model_state(prior_model, save_path)
        test_acc_avg = run_test()
        print('Epoch {}: test_acc is {}'.format(i_epoch, test_acc_avg))

    stop_time = timeit.default_timer()

    # Test:
    test_acc_avg = run_test()

    # Update Log file:
    cmn.write_final_result(test_acc_avg, stop_time - start_time, prm, result_name=prm.test_type)

    # Return learned prior:
    return prior_model
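
For context, here is a minimal invocation sketch for run_meta_learning. It mirrors the setup in Example #1; the prm fields, task_generator, and save_path are assumed to be prepared as shown there, and nothing beyond these examples is implied.

# Sketch (assumptions noted above): meta-train a prior on a finite task set, then save it.
prm.optim_func, prm.optim_args = optim.Adam, {'lr': prm.lr}
prm.lr_schedule = {}  # No decay
train_data_loaders = task_generator.create_meta_batch(
    prm, prm.n_train_tasks, meta_split='meta_train')
prior_model = run_meta_learning(train_data_loaders, prm)
save_model_state(prior_model, save_path)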
Example #4
        # Meta-training to learn meta-model (theta params):
        meta_model = meta_train_MAML_finite_tasks.run_meta_learning(
            train_data_loaders, prm)
    else:
        # In this case we observe new tasks generated from the task-distribution in each meta-iteration.
        write_to_log(
            '---- Infinite train tasks - New training tasks '
            'are drawn from tasks distribution in each iteration...', prm)

        # Meta-training to learn meta-model (theta params):
        meta_model = meta_train_MAML_infinite_tasks.run_meta_learning(
            prm, task_generator)

    # save learned meta-model:
    save_model_state(meta_model, save_path)
    write_to_log('Trained meta-model saved in ' + save_path, prm)

elif prm.mode == 'LoadMetaModel':

    # Load a previously trained meta-model.
    # First, create the model:
    meta_model = get_model(prm)
    # Then load the weights:
    load_model_state(meta_model, prm.load_model_path)
    write_to_log('Pre-trained meta-model loaded from ' + prm.load_model_path,
                 prm)
else:
    raise ValueError('Invalid mode: {}'.format(prm.mode))
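
# (The modes handled in these examples are 'MetaTrain' and 'LoadMetaModel'.)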

# -------------------------------------------------------------------------------------------
Example #5
            .format(mean_list, std_list), prm)

        # Meta-training to learn prior:
        trained_prior_model = DP_meta_train_Bayes_finite_tasks.run_meta_learning(
            train_data_loaders, prm, data_prior_model)
        write_to_log(
            '----- Meta-Training: analyzing prior model parameters after training',
            prm)
        mean_list, std_list = run_prior_analysis(trained_prior_model,
                                                 showPlt=False)
        write_to_log(
            '----- Meta-Training: after training, prior model: mean_list: {}, std_list: {}'
            .format(mean_list, std_list), prm)

        # save learned prior:
        save_model_state(trained_prior_model, save_path)
        write_to_log('Trained prior saved in ' + save_path, prm)
    else:
        # In this case we observe new tasks generated from the task-distribution in each meta-iteration.
        write_to_log(
            '---- Infinite train tasks - New training tasks are '
            'drawn from tasks distribution in each iteration...', prm)

        # Meta-training to learn meta-prior (theta params):
        trained_prior_model = DP_meta_train_Bayes_infinite_tasks.run_meta_learning(
            task_generator, prm)

elif prm.mode == 'LoadMetaModel':

    # Load a previously trained prior.
    # First, create the model: