Code example #1
def run_test(mb_data_loaders, mb_posteriors_models, loss_criterion, prm):
    n_tasks = len(mb_data_loaders)
    test_acc_avg = 0.0
    n_tests = 0
    for i_task in range(n_tasks):
        model = mb_posteriors_models[i_task]
        test_loader = mb_data_loaders[i_task]['test']
        if len(test_loader) > 0:
            test_acc, test_loss = run_test_Bayes(model,
                                                 test_loader,
                                                 loss_criterion,
                                                 prm,
                                                 verbose=0)
            n_tests += 1
            test_acc_avg += test_acc

            n_test_samples = len(test_loader.dataset)

            # write_result(
            #     'Train Task {}, Test set: {} -  Average loss: {:.4}, Accuracy: {:.3} of {} samples\n'.format(
            #         i_task, prm.test_type, test_loss, test_acc, n_test_samples), prm)
        else:
            print('Train Task {}, Test set: {} - No test data'.format(
                i_task, prm.test_type))

    if n_tests > 0:
        test_acc_avg /= n_tests
    return test_acc_avg
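
All of these examples index per-task loader dicts with 'train'/'test' keys (and sometimes 'n_train_samples'). Below is a minimal, self-contained sketch of that structure; the toy tensors and batch sizes are assumptions for illustration, not the repo's actual data pipeline.

import torch
from torch.utils.data import TensorDataset, DataLoader

x_train, y_train = torch.randn(64, 10), torch.randint(0, 2, (64,))
x_test, y_test = torch.randn(32, 10), torch.randint(0, 2, (32,))

task_data = {
    'train': DataLoader(TensorDataset(x_train, y_train), batch_size=16, shuffle=True),
    'test': DataLoader(TensorDataset(x_test, y_test), batch_size=16),
}
mb_data_loaders = [task_data]                    # one dict per task in the meta-batch
print(len(mb_data_loaders[0]['test'].dataset))   # 32 test samples for task 0
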
Code example #2
    def run_test():
        test_acc_avg = 0.0
        n_tests = 0
        for i_task in range(n_train_tasks):
            model = posteriors_models[i_task]
            test_loader = data_loaders[i_task]['test']
            if len(test_loader) > 0:
                test_acc, test_loss = run_test_Bayes(model, test_loader,
                                                     loss_criterion, prm)
                n_tests += 1
                test_acc_avg += test_acc

                n_test_samples = len(test_loader.dataset)

                write_to_log(
                    'Train Task {}, Test set: {} -  Average loss: {:.4}, Accuracy: {:.3} (of {} samples)\n'
                    .format(i_task, prm.test_type, test_loss, test_acc,
                            n_test_samples), prm)
            else:
                print('Train Task {}, Test set: {} - No test data'.format(
                    i_task, prm.test_type))

        if n_tests > 0:
            test_acc_avg /= n_tests
        return test_acc_avg
Code example #3
def run_learning(task_data, prior_model, prm, init_from_prior=True, verbose=1):

    # -------------------------------------------------------------------------------------------
    #  Setting-up
    # -------------------------------------------------------------------------------------------
    # Unpack parameters:
    optim_func, optim_args, lr_schedule =\
        prm.optim_func, prm.optim_args, prm.lr_schedule

    # Loss criterion
    loss_criterion = get_loss_criterion(prm.loss_type)

    # Create posterior model for the new task:
    post_model = get_model(prm)

    if init_from_prior:
        post_model.load_state_dict(prior_model.state_dict())

        # prior_model_dict = prior_model.state_dict()
        # post_model_dict = post_model.state_dict()
        #
        # # filter out unnecessary keys:
        # prior_model_dict = {k: v for k, v in prior_model_dict.items() if '_log_var' in k or '_mu' in k}
        # # overwrite entries in the existing state dict:
        # post_model_dict.update(prior_model_dict)
        #
        # # #  load the new state dict
        # post_model.load_state_dict(post_model_dict)

        # add_noise_to_model(post_model, prm.kappa_factor)

    # The data-sets of the new task:
    train_loader = task_data['train']
    test_loader = task_data['test']
    n_train_samples = len(train_loader.dataset)
    n_batches = len(train_loader)

    #  Get optimizer:
    optimizer = optim_func(post_model.parameters(), **optim_args)

    # -------------------------------------------------------------------------------------------
    #  Training epoch  function
    # -------------------------------------------------------------------------------------------

    def run_train_epoch(i_epoch):
        log_interval = 500

        post_model.train()

        train_info = {}
        train_info["task_comp"] = 0.0
        train_info["total_loss"] = 0.0

        cnt = 0
        for batch_idx, batch_data in enumerate(train_loader):
            cnt += 1

            correct_count = 0
            sample_count = 0

            # Monte-Carlo iterations:
            n_MC = prm.n_MC
            task_empirical_loss = 0
            task_complexity = 0
            for i_MC in range(n_MC):
                # get batch:
                inputs, targets = data_gen.get_batch_vars(batch_data, prm)

                # Calculate empirical loss:
                outputs = post_model(inputs)
                curr_empirical_loss = loss_criterion(outputs, targets)

                # hyper_kl = 0 when testing
                curr_empirical_loss, curr_complexity, task_info = get_bayes_task_objective(
                    prm,
                    prior_model,
                    post_model,
                    n_train_samples,
                    curr_empirical_loss,
                    noised_prior=False)

                task_empirical_loss += (1 / n_MC) * curr_empirical_loss
                task_complexity += (1 / n_MC) * curr_complexity

                correct_count += count_correct(outputs, targets)
                sample_count += inputs.size(0)

            # Total objective:

            total_objective = task_empirical_loss + prm.task_complex_w * task_complexity

            train_info["task_comp"] += task_complexity.data[0]
            train_info["total_loss"] += total_objective.data[0]

            # Take gradient step with the posterior:
            grad_step(total_objective, optimizer, lr_schedule, prm.lr, i_epoch)

            # Print status:
            if batch_idx % log_interval == 0:
                batch_acc = correct_count / sample_count
                write_to_log(
                    cmn.status_string(i_epoch, prm.n_meta_test_epochs,
                                      batch_idx, n_batches, batch_acc,
                                      total_objective.item()) +
                    ' Empiric Loss: {:.4}\t Intra-Comp. {:.4}, w_kld {:.4}, b_kld {:.4}'
                    .format(task_empirical_loss.item(),
                            task_complexity.item(), task_info["w_kld"],
                            task_info["b_kld"]), prm)

        train_info["task_comp"] /= cnt
        train_info["total_loss"] /= cnt
        return train_info

    # -----------------------------------------------------------------------------------------------------------#
    # Update Log file
    if verbose == 1:
        write_to_log(
            'Total number of steps: {}'.format(n_batches *
                                               prm.n_meta_test_epochs), prm)

    # -------------------------------------------------------------------------------------------
    #  Run epochs
    # -------------------------------------------------------------------------------------------
    start_time = timeit.default_timer()

    # Training loop:
    best_acc = -1
    best_acc_loss = -1
    best_acc_comp = -1

    for i_epoch in range(prm.n_meta_test_epochs):
        train_info = run_train_epoch(i_epoch)
        test_acc, test_loss = run_test_Bayes(post_model, test_loader,
                                             loss_criterion, prm)
        if test_acc > best_acc:
            best_acc = test_acc
            best_acc_loss = test_loss
            best_acc_comp = train_info["task_comp"]

    # Test:
    test_acc, test_loss = run_test_Bayes(post_model, test_loader,
                                         loss_criterion, prm)

    stop_time = timeit.default_timer()
    cmn.write_final_result(best_acc,
                           stop_time - start_time,
                           prm,
                           result_name=prm.test_type,
                           verbose=verbose)

    test_err = 1 - best_acc
    return test_err, best_acc_comp, best_acc_loss, post_model
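
The per-batch objective in this example is task_empirical_loss + prm.task_complex_w * task_complexity, with both terms averaged over n_MC Monte-Carlo forward passes of the stochastic posterior. A self-contained sketch of that averaging pattern follows; the scalar stand-ins replace the real loss_criterion and get_bayes_task_objective outputs.

import torch

n_MC = 3
task_complex_w = 0.01                                       # stand-in for prm.task_complex_w
task_empirical_loss = torch.tensor(0.0)
task_complexity = torch.tensor(0.0)

for i_MC in range(n_MC):
    curr_empirical_loss = torch.tensor(1.0 + 0.1 * i_MC)    # stand-in for loss_criterion(outputs, targets)
    curr_complexity = torch.tensor(0.5)                     # stand-in for the KL-based complexity term
    task_empirical_loss = task_empirical_loss + (1 / n_MC) * curr_empirical_loss
    task_complexity = task_complexity + (1 / n_MC) * curr_complexity

total_objective = task_empirical_loss + task_complex_w * task_complexity
print(total_objective.item())                               # the scalar passed to grad_step
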
Code example #4
def run_learning(data_loader,
                 prm,
                 prior_model=None,
                 init_from_prior=True,
                 verbose=1):

    # -------------------------------------------------------------------------------------------
    #  Setting-up
    # -------------------------------------------------------------------------------------------

    # Unpack parameters:
    optim_func, optim_args, lr_schedule = \
        prm.optim_func, prm.optim_args, prm.lr_schedule

    # Loss criterion
    loss_criterion = get_loss_criterion(prm.loss_type)

    train_loader = data_loader['train']
    test_loader = data_loader['test']
    n_batches = len(train_loader)
    n_train_samples = data_loader['n_train_samples']

    # get model:
    if prior_model and init_from_prior:
        # init from prior model:
        post_model = deepcopy(prior_model)
    else:
        post_model = get_model(prm)

    # post_model.set_eps_std(0.0) # DEBUG: turn off randomness

    #  Get optimizer:
    optimizer = optim_func(post_model.parameters(), **optim_args)

    # -------------------------------------------------------------------------------------------
    #  Training epoch  function
    # -------------------------------------------------------------------------------------------

    def run_train_epoch(i_epoch):

        # # Adjust randomness (eps_std)
        # if hasattr(prm, 'use_randomness_schedeule') and prm.use_randomness_schedeule:
        #     if i_epoch > prm.randomness_full_epoch:
        #         eps_std = 1.0
        #     elif i_epoch > prm.randomness_init_epoch:
        #         eps_std = (i_epoch - prm.randomness_init_epoch) / (prm.randomness_full_epoch - prm.randomness_init_epoch)
        #     else:
        #         eps_std = 0.0  #  turn off randomness
        #     post_model.set_eps_std(eps_std)

        # post_model.set_eps_std(0.00) # debug

        complexity_term = 0

        post_model.train()

        for batch_idx, batch_data in enumerate(train_loader):

            # Monte-Carlo iterations:
            empirical_loss = 0
            n_MC = prm.n_MC
            for i_MC in range(n_MC):
                # get batch:
                inputs, targets = data_gen.get_batch_vars(batch_data, prm)

                # calculate objective:
                outputs = post_model(inputs)
                empirical_loss_c = loss_criterion(outputs, targets)
                empirical_loss += (1 / n_MC) * empirical_loss_c

            #  complexity/prior term:
            if prior_model:
                empirical_loss, complexity_term = get_bayes_task_objective(
                    prm, prior_model, post_model, n_train_samples,
                    empirical_loss)
            else:
                complexity_term = 0.0

            # Total objective:
            objective = empirical_loss + complexity_term

            # Take gradient step:
            grad_step(objective, optimizer, lr_schedule, prm.lr, i_epoch)

            # Print status:
            log_interval = 500
            if batch_idx % log_interval == 0:
                batch_acc = correct_rate(outputs, targets)
                print(
                    cmn.status_string(i_epoch, prm.num_epochs, batch_idx,
                                      n_batches, batch_acc,
                                      get_value(objective))
                    + ' Loss: {:.4}\t Comp.: {:.4}'.format(
                        get_value(empirical_loss), get_value(complexity_term)))

    # -------------------------------------------------------------------------------------------
    #  Main Script
    # -------------------------------------------------------------------------------------------

    #  Update Log file
    update_file = (verbose != 0)
    cmn.write_to_log(cmn.get_model_string(post_model),
                     prm,
                     update_file=update_file)
    cmn.write_to_log('Total number of steps: {}'.format(n_batches *
                                                        prm.num_epochs),
                     prm,
                     update_file=update_file)
    cmn.write_to_log('Number of training samples: {}'.format(
        data_loader['n_train_samples']),
                     prm,
                     update_file=update_file)

    start_time = timeit.default_timer()

    # Run training epochs:
    for i_epoch in range(prm.num_epochs):
        run_train_epoch(i_epoch)

    # Test:
    test_acc, test_loss = run_test_Bayes(post_model, test_loader,
                                         loss_criterion, prm)

    stop_time = timeit.default_timer()
    cmn.write_final_result(test_acc,
                           stop_time - start_time,
                           prm,
                           result_name=prm.test_type)

    test_err = 1 - test_acc
    return test_err, post_model
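
In example #4 the prior is optional: when prior_model is given and init_from_prior=True, the posterior is a deep copy of the prior and a KL-based complexity term is added; otherwise a fresh model is trained with complexity_term = 0, so the objective reduces to the plain empirical loss. A small self-contained sketch of that initialization choice follows; the toy nn.Linear stands in for whatever get_model(prm) returns.

from copy import deepcopy
import torch.nn as nn

def make_model():
    return nn.Linear(4, 2)                   # stand-in for get_model(prm)

prior_model = make_model()
init_from_prior = True

if prior_model is not None and init_from_prior:
    post_model = deepcopy(prior_model)       # posterior starts as an exact copy of the prior
else:
    post_model = make_model()                # fresh random init; no complexity term in this case

print(post_model.weight.equal(prior_model.weight))   # True when initialized from the prior
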
Code example #5
def run_learning(task_data, prior_model, prm, init_from_prior=True, verbose=1):

    # -------------------------------------------------------------------------------------------
    #  Setting-up
    # -------------------------------------------------------------------------------------------
    # Unpack parameters:
    optim_func, optim_args, lr_schedule =\
        prm.optim_func, prm.optim_args, prm.lr_schedule

    # Loss criterion
    loss_criterion = get_loss_criterion(prm.loss_type)

    # Create posterior model for the new task:
    post_model = get_model(prm)

    if init_from_prior:
        post_model.load_state_dict(prior_model.state_dict())

        # prior_model_dict = prior_model.state_dict()
        # post_model_dict = post_model.state_dict()
        #
        # # filter out unnecessary keys:
        # prior_model_dict = {k: v for k, v in prior_model_dict.items() if '_log_var' in k or '_mu' in k}
        # # overwrite entries in the existing state dict:
        # post_model_dict.update(prior_model_dict)
        #
        # # #  load the new state dict
        # post_model.load_state_dict(post_model_dict)

        # add_noise_to_model(post_model, prm.kappa_factor)

    # The data-sets of the new task:
    train_loader = task_data['train']
    test_loader = task_data['test']
    n_train_samples = len(train_loader.dataset)
    n_batches = len(train_loader)

    #  Get optimizer:
    optimizer = optim_func(post_model.parameters(), **optim_args)

    # -------------------------------------------------------------------------------------------
    #  Training epoch  function
    # -------------------------------------------------------------------------------------------

    def run_train_epoch(i_epoch):
        log_interval = 500

        post_model.train()

        for batch_idx, batch_data in enumerate(train_loader):

            correct_count = 0
            sample_count = 0

            # Monte-Carlo iterations:
            n_MC = prm.n_MC
            task_empirical_loss = 0
            task_complexity = 0
            for i_MC in range(n_MC):
                # get batch:
                inputs, targets = data_gen.get_batch_vars(batch_data, prm)

                # Calculate empirical loss:
                outputs = post_model(inputs)
                curr_empirical_loss = loss_criterion(outputs, targets)

                curr_empirical_loss, curr_complexity = get_bayes_task_objective(
                    prm,
                    prior_model,
                    post_model,
                    n_train_samples,
                    curr_empirical_loss,
                    noised_prior=False)

                task_empirical_loss += (1 / n_MC) * curr_empirical_loss
                task_complexity += (1 / n_MC) * curr_complexity

                correct_count += count_correct(outputs, targets)
                sample_count += inputs.size(0)

            # Total objective:

            total_objective = task_empirical_loss + task_complexity

            # Take gradient step with the posterior:
            grad_step(total_objective, optimizer, lr_schedule, prm.lr, i_epoch)

            # Print status:
            if batch_idx % log_interval == 0:
                batch_acc = correct_count / sample_count
                print(
                    cmn.status_string(i_epoch, prm.n_meta_test_epochs,
                                      batch_idx, n_batches, batch_acc,
                                      total_objective.item()) +
                    ' Empiric Loss: {:.4}\t Intra-Comp. {:.4}'.format(
                        task_empirical_loss.item(), task_complexity.item()))

                data_objective.append(total_objective.item())
                data_accuracy.append(batch_acc)
                data_emp_loss.append(task_empirical_loss.item())
                data_task_comp.append(task_complexity.item())

        # Return the objective of the last batch as this epoch's bound estimate:
        return total_objective.item()

    # -----------------------------------------------------------------------------------------------------------#
    # Update Log file
    if verbose == 1:
        write_to_log(
            'Total number of steps: {}'.format(n_batches *
                                               prm.n_meta_test_epochs), prm)

    # -------------------------------------------------------------------------------------------
    #  Run epochs
    # -------------------------------------------------------------------------------------------
    start_time = timeit.default_timer()

    data_objective = []
    data_accuracy = []
    data_emp_loss = []
    data_task_comp = []
    # Training loop:
    for i_epoch in range(prm.n_meta_test_epochs):
        test_bound = run_train_epoch(i_epoch)

    with open(
            os.path.join(prm.result_dir, 'run_test_data_prior_bound_data.pkl'),
            'wb') as f:
        pickle.dump(
            {
                'data_objective': data_objective,
                "data_accuracy": data_accuracy,
                'data_emp_loss': data_emp_loss,
                'data_task_comp': data_task_comp
            }, f)

    # Test:
    test_acc, test_loss = run_test_Bayes(post_model, test_loader,
                                         loss_criterion, prm)

    stop_time = timeit.default_timer()
    cmn.write_final_result(test_acc,
                           stop_time - start_time,
                           prm,
                           result_name=prm.test_type,
                           verbose=verbose)

    test_err = 1 - test_acc
    return test_err, test_loss, test_bound, post_model
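
Example #5 also pickles the per-batch curves (objective, accuracy, empirical loss, task complexity) to run_test_data_prior_bound_data.pkl under prm.result_dir. A short sketch for loading them back for inspection; the result_dir value is an assumption.

import os
import pickle

result_dir = './results'                     # assumed value of prm.result_dir
with open(os.path.join(result_dir, 'run_test_data_prior_bound_data.pkl'), 'rb') as f:
    curves = pickle.load(f)

print(sorted(curves.keys()))                 # ['data_accuracy', 'data_emp_loss', 'data_objective', 'data_task_comp']
print(len(curves['data_objective']), 'logged batches')
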
Code example #6
File: meta_test_Bayes.py    Project: Jupiter04/MLLE
def run_learning(task_data, prior_model, prm, init_from_prior=True, verbose=1):

    # prm.optim_func, prm.optim_args = optim.EntropySGD, {'llr':0.01, 'lr':0.1, 'momentum':0.9, 'damp':0, 'weight_decay':1e-3, 'nesterov':True,
    #                  'L':20, 'eps':1e-3, 'g0':1e-4, 'g1':1e-3}

    # -------------------------------------------------------------------------------------------
    #  Setting-up
    # -------------------------------------------------------------------------------------------
    # Unpack parameters:
    # prm.optim_args['llr'] = 0.1
    # prm.optim_args['L'] = 20
    # # prm.optim_args['weight_decay'] = 1e-3
    # # prm.optim_args['g1'] = 0
    # prm.optim_args['g0'] = 1e-4
    optim_func, optim_args, lr_schedule =\
        prm.optim_func, prm.optim_args, prm.lr_schedule_test

    # prm.optim_func, prm.optim_args = optim.Adam, {'lr': prm.lr}  # 'weight_decay': 1e-4

    # lr_schedule = {'decay_factor': 0.1, 'decay_epochs': [15, 20]}

    # Loss criterion
    loss_criterion = get_loss_criterion(prm.loss_type)

    # Create posterior model for the new task:
    post_model = get_model(prm)

    if init_from_prior:
        post_model.load_state_dict(prior_model.state_dict())

        # prior_model_dict = prior_model.state_dict()
        # post_model_dict = post_model.state_dict()
        #
        # # filter out unnecessary keys:
        # prior_model_dict = {k: v for k, v in prior_model_dict.items() if '_log_var' in k or '_mu' in k}
        # # overwrite entries in the existing state dict:
        # post_model_dict.update(prior_model_dict)
        #
        # # #  load the new state dict
        # post_model.load_state_dict(post_model_dict)

        # add_noise_to_model(post_model, prm.kappa_factor)

    # The data-sets of the new task:
    train_loader = task_data
    test_loader = task_data['test']
    # n_train_samples = len(train_loader['train'].dataset)
    n_batches = len(train_loader['train'])

    #  Get optimizer:
    optimizer = optim_func(
        filter(lambda p: p.requires_grad, post_model.parameters()), optim_args)

    # optimizer = optim_func(filter(lambda p: p.requires_grad, post_model.parameters()), optim_args['lr'])

    # -------------------------------------------------------------------------------------------
    #  Training epoch  function
    # -------------------------------------------------------------------------------------------

    def run_train_epoch(i_epoch):
        # log_interval = 500

        post_model.train()

        train_iterators = iter(train_loader['train'])

        for batch_idx, batch_data in enumerate(train_loader['train']):

            task_loss, info = get_objective(prior_model, prm, [train_loader],
                                            object.feval, [train_iterators],
                                            [post_model], loss_criterion, 1,
                                            [0])

            grad_step(task_loss[0], post_model, loss_criterion, optimizer, prm,
                      train_iterators, train_loader['train'], lr_schedule,
                      prm.optim_args['lr'], i_epoch)

            # for log_var in post_model.parameters():
            #     if log_var.requires_grad is False:
            #         log_var.data = log_var.data - (i_epoch + 1) * math.log(1 + prm.gamma1)

            # Print status:
            log_interval = 10
            if (batch_idx) % log_interval == 0:
                batch_acc = info['correct_count'] / info['sample_count']
                print(
                    cmn.status_string(i_epoch, prm.n_meta_train_epochs,
                                      batch_idx, n_batches, batch_acc) +
                    ' Empiric-Loss: {:.4f}'.format(info['avg_empirical_loss']))

    # -----------------------------------------------------------------------------------------------------------#
    # Update Log file
    if verbose == 1:
        write_to_log(
            'Total number of steps: {}'.format(n_batches *
                                               prm.n_meta_test_epochs), prm)

    # -------------------------------------------------------------------------------------------
    #  Run epochs
    # -------------------------------------------------------------------------------------------
    start_time = timeit.default_timer()

    # Training loop:
    for i_epoch in range(prm.n_meta_test_epochs):
        run_train_epoch(i_epoch)

    # Test:
    test_acc, test_loss = run_test_Bayes(post_model, test_loader,
                                         loss_criterion, prm)

    stop_time = timeit.default_timer()
    cmn.write_final_result(test_acc,
                           stop_time - start_time,
                           prm,
                           result_name=prm.test_type,
                           verbose=verbose)

    test_err = 1 - test_acc
    return test_err, post_model
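
Example #6 builds its optimizer only over parameters with requires_grad=True (the filter call above), which matters when some posterior parameters are frozen. A self-contained sketch of that pattern with a toy model and plain SGD; the repo's optim_func and optim_args are not reproduced here.

import torch
import torch.nn as nn

model = nn.Sequential(nn.Linear(8, 8), nn.Linear(8, 2))
for p in model[0].parameters():
    p.requires_grad = False                  # freeze the first layer for illustration

trainable = filter(lambda p: p.requires_grad, model.parameters())
optimizer = torch.optim.SGD(trainable, lr=0.1)

n_trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(n_trainable)                           # 8*2 + 2 = 18 trainable parameters remain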