Code Example #1
File: MethodTester.py Project: zhiyugege/ifsl
    def sib_initialize(self, params, provide_original_image):
        novel_file = '%s/features/%s/%s/novel.hdf5' % (
            configs.sib_dir, params.dataset, params.model)
        image_file_path = "img_paths_%s_%s_%s.npy" % (
            params.dataset, params.model, params.method)
        if provide_original_image:
            self.cl_data_file, self.path_data_file = feat_loader.init_loader(
                novel_file, provide_original_image, image_file_path)
        else:
            self.cl_data_file = feat_loader.init_loader(
                novel_file, provide_original_image, image_file_path)
        self.image_size = 80
Code Example #2
File: MethodTester.py Project: zhiyugege/ifsl
    def simpleshot_initialize(self, params, provide_original_image):
        novel_file = '%s/features/%s/%s/novel.hdf5' % (
            configs.simple_shot_dir, params.dataset, params.model)
        # novel_file = "/model/1154027137/ifsl_features/features/" + params.method + "_" + params.dataset + "_novel.hdf5"
        image_file_path = "img_paths_%s_%s_%s.npy" % (
            params.dataset, params.model, params.method)
        if provide_original_image:
            self.cl_data_file, self.path_data_file = feat_loader.init_loader(
                novel_file, provide_original_image, image_file_path)
        else:
            self.cl_data_file = feat_loader.init_loader(
                novel_file, provide_original_image, image_file_path)
        self.image_size = 84
Code Example #3
    def test(self, split='novel', epoch=0):
        self.outfile = self.outfile_template % split
        if split == 'novel':
            self.save_feature(self.novel_loader)
        else:
            self.save_feature(self.val_loader)
        cl_data_file = feat_loader.init_loader(self.outfile)

        acc_all = []
        for i in tqdm(range(self.params.test_epoch)):
            if self.params.fast_adapt:
                acc = self.model.fast_adapt(cl_data_file)
            else:
                acc = self.model.test_loop(cl_data_file)
            acc_all.append(acc)

        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        print('%d Test Acc = %4.2f%% +- %4.2f%%' %
              (self.params.test_epoch, acc_mean,
               1.96 * acc_std / np.sqrt(self.params.test_epoch)))
        if self.params.mode != 'test':
            self.writer.add_scalar('acc/%s_acc' % split, acc_mean, epoch)

        return acc_mean
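
The `1.96 * acc_std / np.sqrt(self.params.test_epoch)` term printed above is the half-width of a 95% confidence interval for the mean episode accuracy. A minimal standalone sketch of the same computation (the per-episode accuracies here are synthetic, purely for illustration):

import numpy as np

# synthetic per-episode accuracies (percent) over 600 test episodes
acc_all = np.random.normal(loc=78.0, scale=8.0, size=600)

acc_mean = np.mean(acc_all)
acc_std = np.std(acc_all)
# 1.96 is the two-sided 95% z-score; dividing by sqrt(n) converts the
# episode-level std into the standard error of the mean
ci95 = 1.96 * acc_std / np.sqrt(len(acc_all))
print('%d Test Acc = %4.2f%% +- %4.2f%%' % (len(acc_all), acc_mean, ci95))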
Code Example #4
File: MethodTester.py Project: zhiyugege/ifsl
    def baseline_s2m2_initialize(self, params, provide_original_image):
        if params.dataset in ['omniglot', 'cross_char']:
            assert params.model == 'Conv4' and not params.train_aug, 'omniglot only supports Conv4 without augmentation'
            params.model = 'Conv4S'

        dataset = params.dataset
        if params.dataset == "cross":
            dataset = "miniImagenet"
        checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (
            configs.save_dir, dataset, params.model, params.method)

        if params.train_aug:
            checkpoint_dir += '_aug'
        if params.method not in ['baseline', 'baseline++', 'S2M2_R']:
            checkpoint_dir += '_%dway_%dshot' % (params.train_n_way,
                                                 params.n_shot)

        self.checkpoint_dir = checkpoint_dir

        split = params.split
        if params.save_iter != -1:
            split_str = split + "_" + str(params.save_iter)
        else:
            split_str = split

        # default split = novel, but you can also test base or val classes
        novel_file = os.path.join(
            checkpoint_dir.replace("checkpoints", "features"),
            split_str + ".hdf5")

        if params.dataset == "cross":
            novel_file = novel_file.replace("miniImagenet", "cross")

        file_path = "img_paths_%s_%s_%s.npy" % (params.dataset, params.model,
                                                params.method)
        if provide_original_image:
            self.cl_data_file, self.path_data_file = feat_loader.init_loader(
                novel_file, provide_original_image, file_path)
        else:
            self.cl_data_file = feat_loader.init_loader(
                novel_file, provide_original_image, file_path)
        self.image_size = 224
        if params.method == "S2M2_R":
            self.image_size = 80
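
Several snippets here (e.g. Examples #4 and #5) locate the saved feature file by swapping "checkpoints" for "features" in the checkpoint directory path. A small illustration of that convention; the directory names below are hypothetical:

import os

checkpoint_dir = './record/checkpoints/miniImagenet/ResNet10_protonet_aug_5way_5shot'
novel_file = os.path.join(checkpoint_dir.replace("checkpoints", "features"), "novel.hdf5")
print(novel_file)
# -> ./record/features/miniImagenet/ResNet10_protonet_aug_5way_5shot/novel.hdf5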
Code Example #5
def single_test(params, results_logger):
    acc_all = []

    iter_num = 600

    few_shot_params = dict(n_way=params.test_n_way, n_support=params.n_shot)

    if params.dataset in ['omniglot', 'cross_char']:
        assert params.model == 'Conv4' and not params.train_aug, 'omniglot only supports Conv4 without augmentation'
        params.model = 'Conv4S'

    if params.method == 'baseline':
        model = BaselineFinetune(model_dict[params.model], **few_shot_params)
    elif params.method == 'baseline++':
        model = BaselineFinetune(model_dict[params.model],
                                 loss_type='dist',
                                 **few_shot_params)
    elif params.method == 'protonet':
        model = ProtoNet(model_dict[params.model], **few_shot_params)
    elif params.method == 'DKT':
        model = DKT(model_dict[params.model], **few_shot_params)
    elif params.method == 'matchingnet':
        model = MatchingNet(model_dict[params.model], **few_shot_params)
    elif params.method in ['relationnet', 'relationnet_softmax']:
        if params.model == 'Conv4':
            feature_model = backbone.Conv4NP
        elif params.model == 'Conv6':
            feature_model = backbone.Conv6NP
        elif params.model == 'Conv4S':
            feature_model = backbone.Conv4SNP
        else:
            feature_model = lambda: model_dict[params.model](flatten=False)
        loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
        model = RelationNet(feature_model,
                            loss_type=loss_type,
                            **few_shot_params)
    elif params.method in ['maml', 'maml_approx']:
        backbone.ConvBlock.maml = True
        backbone.SimpleBlock.maml = True
        backbone.BottleneckBlock.maml = True
        backbone.ResNet.maml = True
        model = MAML(model_dict[params.model],
                     approx=(params.method == 'maml_approx'),
                     **few_shot_params)
        if params.dataset in ['omniglot', 'cross_char']:
            # MAML uses different parameters on omniglot
            model.n_task = 32
            model.task_update_num = 1
            model.train_lr = 0.1
    else:
        raise ValueError('Unknown method')

    model = model.cuda()

    checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (
        configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        checkpoint_dir += '_aug'
    if params.method not in ['baseline', 'baseline++']:
        checkpoint_dir += '_%dway_%dshot' % (params.train_n_way, params.n_shot)

    # modelfile   = get_resume_file(checkpoint_dir)

    if params.method not in ['baseline', 'baseline++']:
        if params.save_iter != -1:
            modelfile = get_assigned_file(checkpoint_dir, params.save_iter)
        else:
            modelfile = get_best_file(checkpoint_dir)
        if modelfile is not None:
            tmp = torch.load(modelfile)
            model.load_state_dict(tmp['state'])
        else:
            print("[WARNING] Cannot find 'best_file.tar' in: " +
                  str(checkpoint_dir))

    split = params.split
    if params.save_iter != -1:
        split_str = split + "_" + str(params.save_iter)
    else:
        split_str = split
    if params.method in ['maml', 'maml_approx', 'DKT']:
        # MAML does not support testing with pre-extracted features
        if 'Conv' in params.model:
            if params.dataset in ['omniglot', 'cross_char']:
                image_size = 28
            else:
                image_size = 84
        else:
            image_size = 224

        datamgr = SetDataManager(image_size,
                                 n_eposide=iter_num,
                                 n_query=15,
                                 **few_shot_params)

        if params.dataset == 'cross':
            if split == 'base':
                loadfile = configs.data_dir['miniImagenet'] + 'all.json'
            else:
                loadfile = configs.data_dir['CUB'] + split + '.json'
        elif params.dataset == 'cross_char':
            if split == 'base':
                loadfile = configs.data_dir['omniglot'] + 'noLatin.json'
            else:
                loadfile = configs.data_dir['emnist'] + split + '.json'
        else:
            loadfile = configs.data_dir[params.dataset] + split + '.json'

        novel_loader = datamgr.get_data_loader(loadfile, aug=False)
        if params.adaptation:
            model.task_update_num = 100  # We perform adaptation on MAML simply by updating more times.
        model.eval()
        acc_mean, acc_std = model.test_loop(novel_loader, return_std=True)

    else:
        # default split = novel, but you can also test base or val classes
        novel_file = os.path.join(
            checkpoint_dir.replace("checkpoints", "features"),
            split_str + ".hdf5")
        cl_data_file = feat_loader.init_loader(novel_file)

        for i in range(iter_num):
            acc = feature_evaluation(cl_data_file,
                                     model,
                                     n_query=15,
                                     adaptation=params.adaptation,
                                     **few_shot_params)
            acc_all.append(acc)

        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        print('%d Test Acc = %4.2f%% +- %4.2f%%' %
              (iter_num, acc_mean, 1.96 * acc_std / np.sqrt(iter_num)))
    with open('record/results.txt', 'a') as f:
        timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime())
        aug_str = '-aug' if params.train_aug else ''
        aug_str += '-adapted' if params.adaptation else ''
        if params.method in ['baseline', 'baseline++']:
            exp_setting = '%s-%s-%s-%s%s %sshot %sway_test' % (
                params.dataset, split_str, params.model, params.method,
                aug_str, params.n_shot, params.test_n_way)
        else:
            exp_setting = '%s-%s-%s-%s%s %sshot %sway_train %sway_test' % (
                params.dataset, split_str, params.model, params.method,
                aug_str, params.n_shot, params.train_n_way, params.test_n_way)
        acc_str = '%d Test Acc = %4.2f%% +- %4.2f%%' % (
            iter_num, acc_mean, 1.96 * acc_std / np.sqrt(iter_num))
        f.write('Time: %s, Setting: %s, Acc: %s \n' %
                (timestamp, exp_setting, acc_str))
        results_logger.log("single_test_acc", acc_mean)
        results_logger.log("single_test_acc_std",
                           1.96 * acc_std / np.sqrt(iter_num))
        results_logger.log("time", timestamp)
        results_logger.log("exp_setting", exp_setting)
        results_logger.log("acc_str", acc_str)
    return acc_mean
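
`feature_evaluation` itself is not shown in this snippet, but Example #15 below inlines its core episode loop. For orientation, here is a hedged reconstruction consistent with that inlined logic; the `set_forward(..., is_feature=True)` call is taken from Example #15, and everything else should be treated as an assumption:

import random
import numpy as np
import torch

def feature_evaluation_sketch(cl_data_file, model, n_way=5, n_support=5, n_query=15):
    # sample one few-shot episode from the class-indexed feature dictionary
    select_class = random.sample(list(cl_data_file.keys()), n_way)
    z_all = []
    for cl in select_class:
        img_feat = cl_data_file[cl]
        perm_ids = np.random.permutation(len(img_feat)).tolist()
        z_all.append([np.squeeze(img_feat[perm_ids[i]])
                      for i in range(n_support + n_query)])
    z_all = torch.from_numpy(np.array(z_all))  # [n_way, n_support + n_query, dim]
    model.n_query = n_query
    scores = model.set_forward(z_all, is_feature=True)
    pred = scores.data.cpu().numpy().argmax(axis=1)
    y = np.repeat(range(n_way), n_query)
    return np.mean(pred == y) * 100  # episode accuracy in percent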
Code Example #6
                loadfile = configs.data_dir['emnist'] + split + '.json'
        else:
            loadfile = configs.data_dir[params.dataset] + split + '.json'

        novel_loader = datamgr.get_data_loader(loadfile, aug=False)
        if params.adaptation:
            model.task_update_num = 100  # We perform adaptation on MAML simply by updating more times.
        model.eval()
        acc_mean, acc_std = model.test_loop(novel_loader, return_std=True)

    else:
        # default split = novel, but you can also test base or val classes
        novel_file = os.path.join(
            checkpoint_dir.replace("checkpoints", "features"),
            split_str + ".hdf5")
        cl_data_file = feat_loader.init_loader(novel_file)

        for i in range(iter_num):
            acc = feature_evaluation(cl_data_file,
                                     model,
                                     n_query=15,
                                     adaptation=params.adaptation,
                                     **few_shot_params)
            acc_all.append(acc)

        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        print('%d Test Acc = %4.2f%% +- %4.2f%%' %
              (iter_num, acc_mean, 1.96 * acc_std / np.sqrt(iter_num)))
    with open('./record/results.txt', 'a') as f:
Code Example #7
        model.eval()
        acc_mean, acc_std = model.test_loop( novel_loader, return_std = True)

    else:
        rootdir = '/home/rajshekd/projects/FSG/PRODA/features'
        NovelF = ['omniglot/Conv4S_protonet_5way_5shot/vanila/noLatin.hdf5',
                  'omniglot/Conv4S_protonet_5way_5shot/vanila/novel.hdf5',
                  'cross_char/Conv4S_protonet_5way_5shot/vanila-Protonet/novel.hdf5']
        all_z = []
        for novel_file in NovelF:
            all_perm_ids_iter = []
            select_class_iter = []
            print(novel_file)
            # import ipdb
            # ipdb.set_trace()
            cl_data_file = feat_loader.init_loader(os.path.join(rootdir, novel_file))
            for i in range(iter_num):
                z_set, select_class, all_perm_ids = feature_evaluation(cl_data_file, model, n_query = 15)
                all_perm_ids_iter.append(all_perm_ids)
                select_class_iter.append(select_class)
                all_z.append(z_set)
        # final file
        cl_data_file = feat_loader.init_loader(
            os.path.join(
                rootdir, 'cross_char/Conv4S_protonet_5way_5shot/adversarial-concatZ_domainReg-0.1_lr-0.0001_endEpoch-4000_DiscM-2FC512/novel.hdf5'))
        for i in range(iter_num):
            z_set, select_class, all_perm_ids = feature_evaluation(
                cl_data_file, model, n_query=15,
                all_perm_ids=all_perm_ids_iter[i],
                select_class=select_class_iter[i])
            all_z.append(z_set)

        all_z = np.vstack(all_z)
Code Example #8
        split_str = split
    
#     model = get_model(params)
#     few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_shot)
    
#     if params.gpu_id:
#         set_gpu_id(params.gpu_id)
#         device = torch.device('cuda:'+str(params.gpu_id))
#         model = model.cuda()
#     else:
#         device = None
#         model = to_device(model)

    # draw whole data distribution or task sample distribution? 
    feat_file = os.path.join( checkpoint_dir.replace("checkpoints","features"), split_str +".hdf5")
    cl_data_file = feat_loader.init_loader(feat_file) # dictionary, keys: class_num, content: list of features
    ''' features
    for Conv4, dim = 1600 = 64*5*5
    '''
    feat_list = []
    label_list = []
    num_classes = len(cl_data_file.keys()) # unused below
    cls_transform = {} # map class_id to 0..draw_n_classes-1 for coloring
    draw_n_classes = params.d_classes
    draw_samples_per_class = params.d_samples
    draw_class_indices = np.random.permutation(list(cl_data_file.keys()))[:draw_n_classes]
    for i, cls_idx in enumerate(draw_class_indices):
        cls_transform[cls_idx] = i
        cls_feats = cl_data_file[cls_idx]
        cls_n_samples = len(cls_feats)
        
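Every snippet on this page relies on `feat_loader.init_loader` returning a dictionary keyed by class id, with each value a list of feature vectors (see the comment on the `cl_data_file` line above). A minimal re-implementation sketch, assuming the HDF5 file stores `all_feats` and `all_labels` datasets as written by a CloserLookFewShot-style `save_features`; treat the dataset names as assumptions:

import h5py
import numpy as np

def init_loader_sketch(filename):
    with h5py.File(filename, 'r') as f:
        feats = f['all_feats'][...]
        labels = f['all_labels'][...]
    # group features by class id: {class_id: [feat, feat, ...]}
    cl_data_file = {int(cl): [] for cl in np.unique(labels)}
    for feat, label in zip(feats, labels):
        cl_data_file[int(label)].append(feat)
    return cl_data_file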
Code Example #9
        try:
            model.load_state_dict(tmp['state'])
        except RuntimeError:
            print('warning! RuntimeError when load_state_dict()!')
            model.load_state_dict(tmp['state'], strict=False)
        except KeyError:
            for k in tmp['model_state']:  ##### revise later
                if 'running' in k:
                    tmp['model_state'][k] = tmp['model_state'][k].squeeze()
            model.load_state_dict(tmp['model_state'], strict=False)
        except:
            raise

    # load feature file
    print('  load saved feature file')
    cl_data_file = feat_loader.init_loader(featurefile)

    # start evaluate
    print('  evaluate')
    for i in range(iter_num):
        acc = feature_evaluation(cl_data_file,
                                 model,
                                 n_query=15,
                                 **few_shot_params)
        acc_all.append(acc)

    # statistics
    print('  get statistics')
    acc_all = np.asarray(acc_all)
    acc_mean = np.mean(acc_all)
    acc_std = np.std(acc_all)
Code Example #10
import torch
import numpy as np
import random
import matplotlib.pyplot as plt
import seaborn as sns
import data.feature_loader as feat_loader
from sklearn.manifold import TSNE

sns.set_context("notebook", font_scale=1.1)
sns.set_style("ticks")
len_test = 20
len_type = 5
novel_file = "./novel.hdf5"
novel_both_file = "./novel_both.hdf5"

features = feat_loader.init_loader(novel_file)
features_both = feat_loader.init_loader(novel_both_file)

# random.sample needs a sequence, so materialize the dict keys as a list
class_list = list(features.keys())
select_class = random.sample(class_list, len_type)

print(class_list)
#select_class = [84,85,86,87,88,89, 90, 91]
print(select_class)
z_all = []
z_both_all = []
for cl in select_class:
    img_feat = features[cl]
    img_both_feat = features_both[cl]

    print('len of cases: ', len(img_feat))
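
The snippet is truncated here by the aggregator. Given the `TSNE` and seaborn imports at the top, the collected features are presumably stacked and projected to 2-D for plotting; a hedged sketch of that follow-up, assuming each class holds at least `len_test` feature vectors:

# hypothetical continuation: embed the collected features and plot by class
z_stack = np.vstack([np.squeeze(np.asarray(features[cl])[:len_test])
                     for cl in select_class])
labels = np.repeat(np.arange(len_type), len_test)
emb = TSNE(n_components=2, init='pca', random_state=0).fit_transform(z_stack)
plt.figure()
sns.scatterplot(x=emb[:, 0], y=emb[:, 1], hue=labels, palette='deep')
plt.show()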
Code Example #11
File: re_test.py Project: lightningbaby/fwt
            model.load_state_dict(tmp['state'])

#    except RuntimeError:
#      print('warning! RuntimeError when load_state_dict()!')
#      model.load_state_dict(tmp['state'], strict=False)
#    except KeyError:
#      for k in tmp['model_state']:   ##### revise latter
#        if 'running' in k:
#          tmp['model_state'][k] = tmp['model_state'][k].squeeze()
#      model.load_state_dict(tmp['model_state'], strict=False)
        except:
            raise

    # load feature file
    print('  load saved feature file')
    cl_data_file = feat_loader.init_loader(f)

    # start evaluate
    print('  evaluate')
    for i in range(iter_num):
        acc = feature_evaluation(cl_data_file,
                                 model,
                                 n_query=5,
                                 **few_shot_params)
        acc_all.append(acc)

    # statistics
    print('  get statistics')
    acc_all = np.asarray(acc_all)
    acc_mean = np.mean(acc_all)
    acc_std = np.std(acc_all)
Code Example #12
            model.task_update_num = 100  # We perform adaptation on MAML simply by updating more times.
        model.eval()
        acc_mean, acc_std = model.test_loop( novel_loader, return_std = True)

    else:
        # classes = [1, 20, 40, 60, 80]  # 1
        # classes = [1, 10, 30, 50, 70]  # 2
        # classes = [1, 20, 50, 80, 97]  # 3
        classes = [1, 23, 53, 83, 93]  # 4
        data = json.load(open('/home/rajshekd/projects/FSG/PRODA/filelists/vgg_flower/all.json'))
        imlist = []


        # adv
        novel_file = "/home/rajshekd/projects/FSG/PRODA/features/CUB_flowers/ResNet10_protonet_aug_5way_5shot/adversarial-ConcatZ_domainReg-0.1_lr-0.0001_DiscM-4096_Base2Base/all.hdf5"
        cl_data_file1 = feat_loader.init_loader(novel_file)
        acc1_avg = []
        for _ in range(600):
            acc1, supp, query, classes = feature_evaluation(cl_data_file1, model, select_class=classes)
            acc1_avg.append(acc1)
        acc1_avg = np.mean(acc1_avg)
        print ("accuracy: ",acc1_avg)
        # vanila
        novel_file = "/home/rajshekd/projects/FSG/PRODA/features/CUB_flowers/ResNet10_protonet_aug_5way_5shot/vanila/all.hdf5"
        cl_data_file2 = feat_loader.init_loader(novel_file)
        # ipdb.set_trace()
        acc2_avg = []
        for _ in range(600):
            acc2, _, _, classes = feature_evaluation(cl_data_file2, model, select_class=classes)
            acc2_avg.append(acc2)
        acc2_avg = np.mean(acc2_avg)
Code Example #13
File: test.py Project: johnnyasd12/CloserLookFewShot
def exp_test(params, n_episodes, should_del_features=False):  # , show_data=False
    start_time = datetime.datetime.now()
    print('exp_test() started at', start_time)
    
    set_random_seed(0) # successfully reproduce "normal" testing. 
    
    if params.gpu_id:
        set_gpu_id(params.gpu_id)
    
#     acc_all = []

    model = get_model(params, 'test')
    
    ########## get settings ##########
    n_shot = params.test_n_shot if params.test_n_shot is not None else params.n_shot
    few_shot_params = dict(n_way = params.test_n_way , n_support = n_shot)
    if params.gpu_id:
        model = model.cuda()
    else:
        model = to_device(model)
    checkpoint_dir = get_checkpoint_dir(params)
    print('loading from:', checkpoint_dir)
    if params.save_iter != -1:
        modelfile   = get_assigned_file(checkpoint_dir, params.save_iter)
    else:
        modelfile   = get_best_file(checkpoint_dir)
    
    ########## load model ##########
    if modelfile is not None:
        if params.gpu_id is None:
            tmp = torch.load(modelfile)
        else: # TODO: figure out WTF is going on here
            print('params.gpu_id =', params.gpu_id)
            map_location = 'cuda:0'
#             gpu_str = 'cuda:' + '0'#str(params.gpu_id)
#             map_location = {'cuda:1':gpu_str, 'cuda:0':gpu_str} # see here: https://hackmd.io/koKAo6kURn2YBqjoXXDhaw#RuntimeError-CUDA-error-invalid-device-ordinal
            tmp = torch.load(modelfile, map_location=map_location)
#                 tmp = torch.load(modelfile)
        if params.method not in ['baseline', 'baseline++']:
            # baseline / baseline++ do NOT need model weights loaded for testing
            model.load_state_dict(tmp['state'])
            print('Model successfully loaded.')
        else:
            print('No need to load model for baseline/baseline++ when testing.')
        load_epoch = int(tmp['epoch'])
    
    ########## testing ##########
    if params.method in ['maml', 'maml_approx']: #maml do not support testing with feature
        image_size = get_img_size(params)
        load_file = get_loadfile_path(params, params.split)

        datamgr         = SetDataManager(image_size, n_episode = n_episodes, n_query = 15 , **few_shot_params)
        
        novel_loader = datamgr.get_data_loader(load_file, aug=False)  # fix: use load_file defined above
        if params.adaptation:
            model.task_update_num = 100 #We perform adaptation on MAML simply by updating more times.
        model.eval()
        acc_mean, acc_std = model.test_loop( novel_loader, return_std = True)
        
        ########## last record and post-process ##########
        torch.cuda.empty_cache()
        timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime())
        # TODO afterward: compute this
        acc_str = '%4.2f%% +- %4.2f%%' % (acc_mean, 1.96* acc_std/np.sqrt(n_episodes))
        # writing settings into csv
        acc_mean_str = '%4.2f' % (acc_mean)
        acc_std_str = '%4.2f' %(acc_std)
        # record beyond params
        extra_record = {'time':timestamp, 'acc_mean':acc_mean_str, 'acc_std':acc_std_str, 'epoch':load_epoch}
        if should_del_features:
            del_features(params)
        end_time = datetime.datetime.now()
        print('exp_test() start at', start_time, ', end at', end_time, '.\n')
        print('exp_test() totally took:', end_time-start_time)
        task_datas = None  # the MAML branch collects no per-episode task data
        return extra_record, task_datas

    else: # not MAML
        acc_all = []
#         # draw_task: initialize task acc(actually can replace acc_all), img_path, img_is_correct, etc.
#         task_datas = [None]*n_episodes # list of dict
        # directly use extracted features
        all_feature_files = get_all_feature_files(params)
        
        if params.n_test_candidates is None: # common setting (no candidate)
            # draw_task: initialize task acc(actually can replace acc_all), img_path, img_is_correct, etc.
            task_datas = [None]*n_episodes # list of dict
            
            feature_file = all_feature_files[0]
            cl_feature, cl_filepath = feat_loader.init_loader(feature_file, return_path=True)
            cl_feature_single = [cl_feature]
            
            for i in tqdm(range(n_episodes)):
                # TODO afterward: fix data list? can only fix class list?
                task_data = feature_evaluation(
                    cl_feature_single, model, params=params, n_query=15, **few_shot_params, 
                    cl_filepath=cl_filepath,
                )
                acc = task_data['acc']
                acc_all.append(acc)
                task_datas[i] = task_data
            
            acc_all  = np.asarray(acc_all)
            acc_mean = np.mean(acc_all)
            acc_std  = np.std(acc_all)
            print('loaded from %d epoch model.' %(load_epoch))
            print('%d episodes, Test Acc = %4.2f%% +- %4.2f%%' %(n_episodes, acc_mean, 1.96* acc_std/np.sqrt(n_episodes)))
            
            ########## last record and post-process ##########
            torch.cuda.empty_cache()
            timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime())
            # TODO afterward: compute this
            acc_str = '%4.2f%% +- %4.2f%%' % (acc_mean, 1.96* acc_std/np.sqrt(n_episodes))
            # writing settings into csv
            acc_mean_str = '%4.2f' % (acc_mean)
            acc_std_str = '%4.2f' %(acc_std)
            # record beyond params
            extra_record = {'time':timestamp, 'acc_mean':acc_mean_str, 'acc_std':acc_std_str, 'epoch':load_epoch}
            if should_del_features:
                del_features(params)
            end_time = datetime.datetime.now()
            print('exp_test() start at', start_time, ', end at', end_time, '.\n')
            print('exp_test() totally took:', end_time-start_time)
            return extra_record, task_datas
        else: # n_test_candidates settings
                
            candidate_cl_feature = [] # features of each class of each candidates
            print('Loading features of %s candidates into dictionaries...' %(params.n_test_candidates))
            for n in tqdm(range(params.n_test_candidates)):
                nth_feature_file = all_feature_files[n]
                cl_feature, cl_filepath = feat_loader.init_loader(nth_feature_file, return_path=True)
                candidate_cl_feature.append(cl_feature)

            print('Evaluating...')
            
            # TODO: frac_acc_all
            is_single_exp = not isinstance(params.frac_ensemble, list)
            if is_single_exp:
                # draw_task: initialize task acc(actually can replace acc_all), img_path, img_is_correct, etc.
                task_datas = [None]*n_episodes # list of dict
                ########## test and record acc ##########
                for i in tqdm(range(n_episodes)):
                    # TODO afterward: fix data list? can only fix class list?

                    task_data = feature_evaluation(
                        candidate_cl_feature, model, params=params, n_query=15, **few_shot_params, 
                        cl_filepath=cl_filepath,
                    )
                    acc = task_data['acc']
                    acc_all.append(acc)
                    task_datas[i] = task_data
                    
                    collected = gc.collect()
#                     print("Garbage collector: collected %d objects." % (collected))

                acc_all  = np.asarray(acc_all)
                acc_mean = np.mean(acc_all)
                acc_std  = np.std(acc_all)
                print('loaded from %d epoch model.' %(load_epoch))
                print('%d episodes, Test Acc = %4.2f%% +- %4.2f%%' %(n_episodes, acc_mean, 1.96* acc_std/np.sqrt(n_episodes)))
                collected = gc.collect()
                print("garbage collector: collected %d objects." % (collected))
                
                ########## last record and post-process ##########
                torch.cuda.empty_cache()
                timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime())
                # TODO afterward: compute this
                acc_str = '%4.2f%% +- %4.2f%%' % (acc_mean, 1.96* acc_std/np.sqrt(n_episodes))
                # writing settings into csv
                acc_mean_str = '%4.2f' % (acc_mean)
                acc_std_str = '%4.2f' %(acc_std)
                # record beyond params
                extra_record = {'time':timestamp, 'acc_mean':acc_mean_str, 'acc_std':acc_std_str, 'epoch':load_epoch}
                if should_del_features:
                    del_features(params)
                end_time = datetime.datetime.now()
                print('exp_test() start at', start_time, ', end at', end_time, '.\n')
                print('exp_test() totally took:', end_time-start_time)
                return extra_record, task_datas
            else: ########## multi-frac_ensemble exps ##########
                
                ########## (haven't modified) test and record acc ##########
                n_fracs = len(params.frac_ensemble)
                
                ##### initialize frac_data #####
                frac_acc_alls = [[0]*n_episodes for _ in range(n_fracs)]
                frac_acc_means = [None]*n_fracs
                frac_acc_stds = [None]*n_fracs
                # draw_task: initialize task acc(actually can replace acc_all), img_path, img_is_correct, etc.
                ep_task_data_each_frac = [[None]*n_episodes for _ in range(n_fracs)] # list of list of dict

                for ep_id in tqdm(range(n_episodes)):
                    # TODO afterward: fix data list? can only fix class list?
                    
                    # TODO my_utils.py: feature_eval return frac_task_data
                    frac_task_data = feature_evaluation(
                        candidate_cl_feature, model, params=params, n_query=15, **few_shot_params, 
                        cl_filepath=cl_filepath,
                    )
                    for frac_id in range(n_fracs):
                        task_data = frac_task_data[frac_id]
                        # TODO: i think here's the problem???
                        acc = task_data['acc']
                        frac_acc_alls[frac_id][ep_id] = acc
                        ep_task_data_each_frac[frac_id][ep_id] = task_data
                        
                        collected = gc.collect()
#                         print("Garbage collector: collected %d objects." % (collected))
                    collected = gc.collect()
#                     print("Garbage collector: collected %d objects." % (collected))
                ### debug
#                 print('frac_acc_alls:', frac_acc_alls)
#                 yee
                for frac_id in range(n_fracs):
                    frac_acc_alls[frac_id]  = np.asarray(frac_acc_alls[frac_id])
                    acc_all = frac_acc_alls[frac_id]
                    acc_mean = np.mean(acc_all)
                    acc_std = np.std(acc_all)
                    frac_acc_means[frac_id] = acc_mean
                    frac_acc_stds[frac_id]  = acc_std
                    print('loaded from %d epoch model, frac_ensemble:'%(load_epoch), params.frac_ensemble[frac_id])
                    print('%d episodes, Test Acc = %4.2f%% +- %4.2f%%' %(n_episodes, acc_mean, 1.96* acc_std/np.sqrt(n_episodes)))
                
                ########## (haven't modified) last record and post-process ##########
                torch.cuda.empty_cache()
                timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime())
                # TODO afterward: compute this
#                 acc_str = '%4.2f%% +- %4.2f%%' % (acc_mean, 1.96* acc_std/np.sqrt(n_episodes))
                frac_extra_records = []
                for frac_id in range(n_fracs):
                    # writing settings into csv
                    acc_mean = frac_acc_means[frac_id]
                    acc_std = frac_acc_stds[frac_id]
                    acc_mean_str = '%4.2f' % (acc_mean)
                    acc_std_str = '%4.2f' %(acc_std)
                    # record beyond params
                    extra_record = {'time':timestamp, 'acc_mean':acc_mean_str, 'acc_std':acc_std_str, 'epoch':load_epoch}
                    frac_extra_records.append(extra_record)
                
                if should_del_features:
                    del_features(params)
                end_time = datetime.datetime.now()
                print('exp_test() start at', start_time, ', end at', end_time, '.\n')
                print('exp_test() totally took:', end_time-start_time)
                
                return frac_extra_records, ep_task_data_each_frac
Code Example #14
    params = parse_args('test')
    few_shot_params = dict(n_way=params.test_n_way, n_support=params.n_shot)

    checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (
        configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        checkpoint_dir += '_aug'
    if params.method not in ['baseline', 'baseline++']:
        checkpoint_dir += '_%dway_%dshot' % (params.train_n_way, params.n_shot)

    split = params.split
    if params.save_iter != -1:
        split_str = split + "_" + str(params.save_iter)
    else:
        split_str = split

    novel_file = os.path.join(
        checkpoint_dir.replace("checkpoints", "features"), split_str + ".hdf5")
    outs = [
        './features/miniImagenet/ResNet34_baseline_aug/novel.hdf5',
        './features/miniImagenet/ResNet34_baseline_aug/base.hdf5',
        './features/miniImagenet/ResNet34_baseline_aug/val.hdf5'
    ]

    base_file = outs[1]
    val_file = outs[2]

    base_data_file = feat_loader.init_loader(base_file)
    val_data_file = feat_loader.init_loader(val_file)

    agent_train(base_data_file, val_data_file, n_query=15, **few_shot_params)
Code Example #15
def get_logits_targets(params):
    acc_all = []
    iter_num = 600
    few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_shot) 

    if params.dataset in ['omniglot', 'cross_char']:
        assert params.model == 'Conv4' and not params.train_aug, 'omniglot only supports Conv4 without augmentation'
        params.model = 'Conv4S'

    if params.method == 'baseline':
        model           = BaselineFinetune( model_dict[params.model], **few_shot_params )
    elif params.method == 'baseline++':
        model           = BaselineFinetune( model_dict[params.model], loss_type = 'dist', **few_shot_params )
    elif params.method == 'protonet':
        model           = ProtoNet( model_dict[params.model], **few_shot_params )
    elif params.method == 'DKT':
        model           = DKT(model_dict[params.model], **few_shot_params)
    elif params.method == 'matchingnet':
        model           = MatchingNet( model_dict[params.model], **few_shot_params )
    elif params.method in ['relationnet', 'relationnet_softmax']:
        if params.model == 'Conv4': 
            feature_model = backbone.Conv4NP
        elif params.model == 'Conv6': 
            feature_model = backbone.Conv6NP
        elif params.model == 'Conv4S': 
            feature_model = backbone.Conv4SNP
        else:
            feature_model = lambda: model_dict[params.model]( flatten = False )
        loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
        model           = RelationNet( feature_model, loss_type = loss_type , **few_shot_params )
    elif params.method in ['maml' , 'maml_approx']:
        backbone.ConvBlock.maml = True
        backbone.SimpleBlock.maml = True
        backbone.BottleneckBlock.maml = True
        backbone.ResNet.maml = True
        model = MAML(  model_dict[params.model], approx = (params.method == 'maml_approx') , **few_shot_params )
        if params.dataset in ['omniglot', 'cross_char']:  # MAML uses different parameters on omniglot
            model.n_task     = 32
            model.task_update_num = 1
            model.train_lr = 0.1
    else:
        raise ValueError('Unknown method')

    model = model.cuda()

    checkpoint_dir = '%s/checkpoints/%s/%s_%s' %(configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        checkpoint_dir += '_aug'
    if params.method not in ['baseline', 'baseline++']:
        checkpoint_dir += '_%dway_%dshot' %( params.train_n_way, params.n_shot)

    #modelfile   = get_resume_file(checkpoint_dir)

    if params.method not in ['baseline', 'baseline++']:
        if params.save_iter != -1:
            modelfile   = get_assigned_file(checkpoint_dir,params.save_iter)
        else:
            modelfile   = get_best_file(checkpoint_dir)
        if modelfile is not None:
            tmp = torch.load(modelfile)
            model.load_state_dict(tmp['state'])
        else:
            print("[WARNING] Cannot find 'best_file.tar' in: " + str(checkpoint_dir))

    split = params.split
    if params.save_iter != -1:
        split_str = split + "_" +str(params.save_iter)
    else:
        split_str = split
    if params.method in ['maml', 'maml_approx', 'DKT']:  # MAML does not support testing with pre-extracted features
        if 'Conv' in params.model:
            if params.dataset in ['omniglot', 'cross_char']:
                image_size = 28
            else:
                image_size = 84 
        else:
            image_size = 224

        datamgr         = SetDataManager(image_size, n_eposide = iter_num, n_query = 15 , **few_shot_params)
        
        if params.dataset == 'cross':
            if split == 'base':
                loadfile = configs.data_dir['miniImagenet'] + 'all.json' 
            else:
                loadfile   = configs.data_dir['CUB'] + split +'.json'
        elif params.dataset == 'cross_char':
            if split == 'base':
                loadfile = configs.data_dir['omniglot'] + 'noLatin.json' 
            else:
                loadfile  = configs.data_dir['emnist'] + split +'.json' 
        else: 
            loadfile    = configs.data_dir[params.dataset] + split + '.json'

        novel_loader     = datamgr.get_data_loader( loadfile, aug = False)
        if params.adaptation:
            model.task_update_num = 100 #We perform adaptation on MAML simply by updating more times.
        model.eval()

        logits_list = list()
        targets_list = list()    
        for i, (x,_) in enumerate(novel_loader):
            logits = model.get_logits(x).detach()
            targets = torch.tensor(np.repeat(range(params.test_n_way), model.n_query)).cuda()
            logits_list.append(logits) #.cpu().detach().numpy())
            targets_list.append(targets) #.cpu().detach().numpy())
    else:
        novel_file = os.path.join( checkpoint_dir.replace("checkpoints","features"), split_str +".hdf5")
        cl_data_file = feat_loader.init_loader(novel_file)
        logits_list = list()
        targets_list = list()
        n_query = 15
        n_way = few_shot_params['n_way']
        n_support = few_shot_params['n_support']
        class_list = list(cl_data_file.keys())  # random.sample needs a sequence, not a dict view
        for i in range(iter_num):
            #----------------------
            select_class = random.sample(class_list, n_way)
            z_all  = []
            for cl in select_class:
                img_feat = cl_data_file[cl]
                perm_ids = np.random.permutation(len(img_feat)).tolist()
                z_all.append( [ np.squeeze( img_feat[perm_ids[i]]) for i in range(n_support+n_query) ] )     # stack each batch
            z_all = torch.from_numpy(np.array(z_all))
            model.n_query = n_query
            logits  = model.set_forward(z_all, is_feature = True).detach()
            targets = torch.tensor(np.repeat(range(n_way), n_query)).cuda()
            logits_list.append(logits)
            targets_list.append(targets)
            #----------------------
    return torch.cat(logits_list, 0), torch.cat(targets_list, 0)
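
A hypothetical way to turn the concatenated logits and targets returned above into an aggregate accuracy:

logits, targets = get_logits_targets(params)  # params from parse_args('test')
preds = logits.argmax(dim=1)
acc = (preds == targets).float().mean().item() * 100
print('aggregate accuracy over all episodes: %.2f%%' % acc)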
Code Example #16
File: visualize.py Project: DingYuan0118/fsl_ssl
        # init
        sub_json_name, sub_json_path = produce_subjson_file(
            selected_classes_id, sub_meta, meta, params)
        # set the shuffle False to recognize the support sample
        sub_datamgr = SimpleDataManager(image_size,
                                        batch_size=params.test_bs,
                                        isAircraft=isAircraft,
                                        shuffle=False)
        sub_data_loader = sub_datamgr.get_data_loader(sub_json_path, aug=False)

        output_path = checkpoint_dir_test.replace("checkpoints", "features")
        output_name = sub_json_name.replace(".json", ".hdf5")
        output_file = Path(os.path.join(output_path, output_name)).as_posix()

        # skip feature extraction if the hdf5 file already exists
        if not os.path.exists(output_file):
            save_features(model, sub_data_loader, output_file)

        sub_novel_file = output_file
        # sub_novel_file = os.path.join( checkpoint_dir_test.replace("checkpoints","features"), split_str +"_shuffle_false.hdf5")
        print('load novel file from:', sub_novel_file)

        sub_cl_data_file = feat_loader.init_loader(sub_novel_file)
        acc, class_acc = feature_evaluation(sub_cl_data_file,
                                            model,
                                            n_query=65,
                                            adaptation=params.adaptation,
                                            **few_shot_params)
        print_class_acc(class_acc, class_names)
Code Example #17
File: agtest.py Project: banjuanshua/Meta-Agent
if __name__ == '__main__':
    params = parse_args('test')
    few_shot_params = dict(n_way=params.test_n_way, n_support=params.n_shot)

    checkpoint_dir = '%s/checkpoints/%s/%s_%s' % (
        configs.save_dir, params.dataset, params.model, params.method)
    if params.train_aug:
        checkpoint_dir += '_aug'
    if params.method not in ['baseline', 'baseline++']:
        checkpoint_dir += '_%dway_%dshot' % (params.train_n_way, params.n_shot)

    split = params.split
    if params.save_iter != -1:
        split_str = split + "_" + str(params.save_iter)
    else:
        split_str = split

    novel_file = os.path.join(
        checkpoint_dir.replace("checkpoints", "features"), split_str + ".hdf5")
    outs = [
        './features/miniImagenet/ResNet34_baseline_aug/novel.hdf5',
        './features/miniImagenet/ResNet34_baseline_aug/base.hdf5',
        './features/miniImagenet/ResNet34_baseline_aug/val.hdf5'
    ]

    test_file = outs[0]
    test_data_file = feat_loader.init_loader(test_file)

    agent_test(test_data_file, n_query=15, **few_shot_params)