Example #1
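            # Select the mean-feature columns of the first (b + 1) * P classes, in training order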
            current_means = class_means[:, order[range(0, (b + 1) * P)]]
            ##############################################################
            # Evaluate the model on the cumulative validation set of all classes seen so far:
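            # Remap the original class labels to their position in the training class order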
            map_Y_valid_cumul = np.array(
                [order_list.index(i) for i in Y_valid_cumul])
            current_eval_set = merge_images_labels(X_valid_cumul,
                                                   map_Y_valid_cumul)
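            # Reuse the evalset object by overwriting its ImageFolder-style sample list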
            evalset.imgs = evalset.samples = current_eval_set
            evalloader = torch.utils.data.DataLoader(
                evalset,
                batch_size=eval_batch_size,
                shuffle=False,
                num_workers=num_workers,
                pin_memory=True)
            cumul_acc = compute_accuracy(tg_model, tg_feature_model,
                                         current_means, evalloader)

            ###############################
            print('Saving protoset...')
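            # Remap the stored exemplar (protoset) labels to the training class order before saving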
            map_Y_protoset_cumuls = np.array([
                order_list.index(i) for i in np.concatenate(Y_protoset_cumuls)
            ])
            current_eval_set = merge_images_labels(
                np.concatenate(X_protoset_cumuls), map_Y_protoset_cumuls)
            save_protosets(current_eval_set, ckp_prefix, b, output_dir)
            ##############################################################
            if b == first_batch_number and cb_finetune:  # for the convenience of evaluation
                torch.save(
                    tg_model,
                    ckp_name.replace("/checkpoint/", "/checkpoint/AFTER_CBF_"))
                torch.save(
Example #2
            ])
        else:
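            # Boolean mask over samples whose remapped labels fall in the current task's class range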
            indices = np.array([
                i in range(task_i * nb_cl, (task_i + 1) * nb_cl)
                for i in map_input_labels
            ])
        evalset.test_data = input_data[indices]
        evalset.test_labels = map_input_labels[indices]
        #print('Max and Min of valid labels: {}, {}'.format(min(evalset.test_labels), max(evalset.test_labels)))
        evalloader = torch.utils.data.DataLoader(evalset,
                                                 batch_size=128,
                                                 shuffle=False,
                                                 num_workers=2)
        acc = compute_accuracy(tg_model,
                               tg_feature_model,
                               current_means,
                               evalloader,
                               print_info=False)
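        # Store this task's accuracy at (current step, task offset from start_iter)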
        all_accu_matrix[step_i, task_i - start_iter] = torch.tensor(acc)

    # Cumulative accuracy over all classes seen so far
    indices = np.array(
        [i in range(0, (iteration + 1) * nb_cl) for i in map_input_labels])
    evalset.test_data = input_data[indices]
    evalset.test_labels = map_input_labels[indices]
    #print('Max and Min of valid labels: {}, {}'.format(min(evalset.test_labels), max(evalset.test_labels)))
    evalloader = torch.utils.data.DataLoader(evalset,
                                             batch_size=128,
                                             shuffle=False,
                                             num_workers=2)
    acc = compute_accuracy(tg_model,