# Example #1
 def get_loader(self, subset):
     """Build an episodic DataLoader for the given subset.

     Args:
         subset: split name; 'train' selects training behaviour (number of
             batches, ways, Omniglot background split), anything else is
             treated as evaluation ('valid' is expected by the ways lookup).

     Returns:
         A DataLoader driven by a CategoriesSamplerMult batch sampler.

     Raises:
         ValueError: if ``self.name`` is not a supported dataset.
     """
     if self.name == "miniimagenet":
         dataset = MiniImageNet(subset)
         labels = dataset.labels
     elif self.name == "omniglot":
         # Omniglot's "background" split serves as the training split.
         dataset = Omniglot(root="data/", download=True,
                            transform=transforms.ToTensor(),
                            background=subset == 'train')
         # NOTE(review): relies on the private torchvision attribute
         # _flat_character_images — may break across torchvision versions.
         labels = list(map(lambda x: x[1], dataset._flat_character_images))
     else:
         # FIX: previously a bare `raise ValueError` with no message.
         raise ValueError("unsupported dataset name: {!r}".format(self.name))
     sampler = CategoriesSamplerMult(
             labels,
             n_batches=self.n_batches if subset == 'train' else 400,
             ways=dict(train=self.train_ways, valid=self.valid_ways)[subset],
             n_images=self.shots + self.queries,
             n_combinations=2)
     return DataLoader(dataset=dataset, batch_sampler=sampler,
                       num_workers=8, pin_memory=True)
if __name__ == '__main__':
    # Evaluation entry point: loads a trained ProtoNet checkpoint and
    # evaluates it over episodic test batches.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--load', default='./save/proto-1/max-acc.pth')
    parser.add_argument('--batch', type=int, default=2000)
    parser.add_argument('--way', type=int, default=5)
    parser.add_argument('--shot', type=int, default=1)
    parser.add_argument('--query', type=int, default=30)
    parser.add_argument('--folds', type=int, default=2)
    args = parser.parse_args()
    pprint(vars(args))

    set_gpu(args.gpu)

    # Each test episode samples `way` classes with folds*shot support
    # images plus `query` query images per class.
    dataset = MiniImageNet('test')
    sampler = CategoriesSampler(dataset.label,
                                args.batch, args.way, args.folds * args.shot + args.query)
    loader = DataLoader(dataset, batch_sampler=sampler,
                        num_workers=8, pin_memory=True)

    model = Convnet().cuda()
    model.load_state_dict(torch.load(args.load))
    model.eval()

    ave_acc = Averager()
    # BUG FIX: this parser defines --way, not --train-way; referencing
    # args.train_way raised AttributeError at runtime.
    s_label = torch.arange(args.way).repeat(args.shot).view(args.shot * args.way)
    # One-hot support labels; 20 is a hard-coded upper bound on the number
    # of classes — TODO(review): confirm, and consider deriving from args.way.
    s_onehot = torch.zeros(s_label.size(0), 20)
    s_onehot = s_onehot.scatter_(1, s_label.unsqueeze(dim=1), 1).cuda()
    for i, batch in enumerate(loader, 1):
# Example #3
    # NOTE(review): fragment — `parser` is created above this excerpt.
    parser.add_argument('--save-epoch', type=int, default=20)  # checkpoint every N epochs
    parser.add_argument('--shot', type=int, default=1)
    parser.add_argument('--query', type=int, default=15)
    parser.add_argument('--train-way', type=int, default=5)
    parser.add_argument('--test-way', type=int, default=5)
    parser.add_argument('--save-path', default='./save/proto-1')
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--base_model', default='resnet18')
    parser.add_argument('--use_CTM', type=int, default=1)  # int flag: 1 enables CTM — presumably; verify
    args = parser.parse_args()
    pprint(vars(args))

    set_gpu(args.gpu)
    ensure_path(args.save_path)

    # Training episodes: 100 episodes, each with `train_way` classes and
    # shot+query images per class.
    trainset = MiniImageNet('train')
    train_sampler = CategoriesSampler(trainset.label, 100, args.train_way,
                                      args.shot + args.query)
    train_loader = DataLoader(dataset=trainset,
                              batch_sampler=train_sampler,
                              num_workers=8,
                              pin_memory=True)

    # Validation: 400 episodes at the test-way setting.
    valset = MiniImageNet('val')
    val_sampler = CategoriesSampler(valset.label, 400, args.test_way,
                                    args.shot + args.query)
    val_loader = DataLoader(dataset=valset,
                            batch_sampler=val_sampler,
                            num_workers=8,
                            pin_memory=True)
# Example #4
    # NOTE(review): fragment — `parser` is created above this excerpt.
    parser.add_argument('--query', type=int, default=5)
    parser.add_argument('--query_val', type=int, default=15)  # more queries per validation episode
    parser.add_argument('--n_base_class', type=int, default=80)
    parser.add_argument('--train-way', type=int, default=20)
    parser.add_argument('--test-way', type=int, default=5)
    parser.add_argument('--save-path', default='./save/proto-5shot')
    parser.add_argument('--gpu', default='1')

    args = parser.parse_args()
    logname = 'baseline'
    # NOTE(review): handle is never closed in this excerpt — prefer a
    # `with` block or an explicit close when the script finishes.
    logfile = open(osp.join(args.save_path, logname + '.txt'), 'w+')
    pprint(vars(args))

    set_gpu(args.gpu)

    # Training episodes drawn from the combined 'trainvaltest' split.
    trainset = MiniImageNet('trainvaltest')
    train_sampler = CategoriesSampler_train_100way(trainset.label, 100,
                                                   args.train_way, args.shot,
                                                   args.query,
                                                   args.n_base_class)
    train_loader = DataLoader(dataset=trainset,
                              batch_sampler=train_sampler,
                              num_workers=8,
                              pin_memory=True)

    #valset = MiniImageNet('test')
    # Validation also uses the combined split, 400 episodes, with the
    # larger query_val count.
    valset = MiniImageNet('trainvaltest')
    val_sampler = CategoriesSampler_val_100way(valset.label, 400,
                                               args.test_way, args.shot,
                                               args.query_val)
    val_loader = DataLoader(dataset=valset,
# Example #5
def get_data(dataset, dset_dir, image_size, model, phase, unsupervised, spc,
             batch_size, workers):
    """Build a DataLoader for the requested dataset and phase.

    Args:
        dataset: dataset name (case-insensitive): 'omniglot_original',
            'omniglot_aug', or 'miniimagenet'.
        dset_dir: base data directory; the dataset subfolder is appended.
        image_size, model: forwarded to get_transform.
        phase: 'train' builds a shuffled training loader; anything else
            builds an evaluation loader.
        unsupervised, spc: forwarded to the dataset in the training phase
            (spc = samples per class — presumably; verify).
        batch_size: loader batch size.
        workers: number of loader worker processes (coerced to int).

    Returns:
        A torch DataLoader over the selected dataset split.

    Raises:
        NotImplementedError: for an unrecognized dataset name.
    """
    dataset = dataset.lower()
    # The three datasets differed only in: subdirectory, dataset class,
    # phase labels, and eval samples-per-class — table them instead of
    # repeating the whole construction three times.
    # name -> (subdir, dataset class, train phase, eval phase, eval spc)
    configs = {
        'omniglot_original': ('omniglot', Omniglot, 'background', 'evaluation', 20),
        'omniglot_aug': ('omniglot', Omniglot, phase, phase, 20),
        'miniimagenet': ('miniImagenet', MiniImageNet, phase, phase, 600),
    }
    if dataset not in configs:
        raise NotImplementedError
    subdir, dataset_cls, train_phase, eval_phase, eval_spc = configs[dataset]

    dset_dir = os.path.join(dset_dir, subdir)
    transformations = get_transform(dataset, image_size, model)
    if phase == 'train':
        train_data = dataset_cls(
            root=dset_dir,
            phase=train_phase,
            unsupervised=unsupervised,
            spc=spc,
            pre_transform=transformations['pre_transform'],
            transform=transformations['transform'],
            post_transform=transformations['post_transform'])
        data_loader = DataLoader(train_data,
                                 batch_size=batch_size,
                                 shuffle=True,
                                 num_workers=int(workers))
    else:
        test_data = dataset_cls(
            root=dset_dir,
            phase=eval_phase,
            spc=eval_spc,
            pre_transform=transformations['test_transform'])
        data_loader = DataLoader(test_data,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=int(workers))

    return data_loader
    # NOTE(review): as concatenated here, this run of statements sits after
    # `return data_loader` at the same indentation and is unreachable — it
    # belongs to a separate example's __main__ section (`parser` is created
    # outside this excerpt).
    parser.add_argument('--shot', type=int, default=5)
    parser.add_argument('--query', type=int, default=15)
    parser.add_argument('--train-way', type=int, default=5)
    parser.add_argument('--test-way', type=int, default=5)
    parser.add_argument('--save-path', default='./save/psn-model')
    parser.add_argument(
        '--data-path',
        default='./datapath')  # need to change to your data path
    parser.add_argument('--gpu', default='0')
    args = parser.parse_args()
    pprint(vars(args))

    set_gpu(args.gpu)
    #ensure_path(args.save_path)

    # Training episodes: `train_way` classes, shot+query images each.
    trainset = MiniImageNet('train', args.data_path)
    train_sampler = CategoriesSampler(
        trainset.label,
        100,  #100 episodes
        args.train_way,
        args.shot + args.query)
    train_loader = DataLoader(dataset=trainset,
                              batch_sampler=train_sampler,
                              num_workers=8,
                              pin_memory=True)

    valset = MiniImageNet('val', args.data_path)
    val_sampler = CategoriesSampler(
        valset.label,
        400,  #400 episodes
        args.test_way,
# Example #7
    # Gaussian noise distribution used later for perturbation (scale 0.02).
    noise = torch.distributions.Normal(loc=0, scale=.02)

    # Self-supervised episodes: each draws `train_way` classes with
    # 2*shot support images plus `query` query images per class.
    ssdata = SSMiniImageNet()
    ss_sampler = CategoriesSampler(ssdata.slabel, 100, args.train_way,
                                   2 * args.shot + args.query)
    ss_loader = DataLoader(dataset=ssdata,
                           batch_sampler=ss_sampler,
                           num_workers=args.num_workers,
                           pin_memory=True)

    # Validation: 400 episodes at the test-way setting.
    valset = MiniImageNet('val')
    val_sampler = CategoriesSampler(valset.label, 400, args.test_way,
                                    args.shot + args.query)
    val_loader = DataLoader(dataset=valset,
                            batch_sampler=val_sampler,
                            num_workers=args.num_workers,
                            pin_memory=True)

    model = Convnet().cuda()
    # BUG FIX: `is not` compares object identity; against a string literal
    # it is unreliable (and a SyntaxWarning on modern Python). Use `!=`.
    if args.load != 'na':
        print('Loading Model')
        model.load_state_dict(torch.load(args.load))

    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                   step_size=20,
# Example #8
    # Train/val/test loaders for episodic training (fragment: enclosing
    # __main__ guard is above this excerpt).
    parser = argparse.ArgumentParser()
    parser.add_argument('--max-epoch', type=int, default=200)
    parser.add_argument('--save-epoch', type=int, default=20)
    parser.add_argument('--shot', type=int, default=1)
    parser.add_argument('--query', type=int, default=15)
    parser.add_argument('--train-way', type=int, default=30)
    parser.add_argument('--test-way', type=int, default=5)
    parser.add_argument('--save-path', default='./save/proto-1')
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--folds', type=int, default=2)
    # BUG FIX: args.batch, args.way and args.num_workers are used below but
    # were never declared, raising AttributeError at runtime. Defaults match
    # the sibling evaluation scripts in this file.
    parser.add_argument('--batch', type=int, default=2000)
    parser.add_argument('--way', type=int, default=5)
    parser.add_argument('--num_workers', type=int, default=8)
    args = parser.parse_args()
    pprint(vars(args))

    #set_gpu(args.gpu)
    ensure_path(args.save_path)
    testset = MiniImageNet('test')
    test_sampler = CategoriesSampler(testset.label,
                                args.batch, args.way, args.folds * args.shot + args.query)
    test_loader = DataLoader(testset, batch_sampler=test_sampler,
                        num_workers=args.num_workers, pin_memory=True)

    trainset = MiniImageNet('train')
    train_sampler = CategoriesSampler(trainset.label, 100,
                                      args.train_way, args.shot + args.query)
    train_loader = DataLoader(dataset=trainset, batch_sampler=train_sampler,
                              num_workers=args.num_workers, pin_memory=True)

    valset = MiniImageNet('val')
    val_sampler = CategoriesSampler(valset.label, 400,
                                    args.test_way, args.shot + args.query)
    val_loader = DataLoader(dataset=valset, batch_sampler=val_sampler,
# Example #9
    # NOTE(review): fragment — `parser` is created above this excerpt.
    parser.add_argument('--max-epoch', type=int, default=200)
    parser.add_argument('--save-epoch', type=int, default=20)
    parser.add_argument('--shot', type=int, default=1)
    parser.add_argument('--query', type=int, default=15)
    parser.add_argument('--train-way', type=int, default=30)
    parser.add_argument('--test-way', type=int, default=5)
    parser.add_argument('--save-path', default='./save/proto-1')
    parser.add_argument('--gpu', default='0')
    args = parser.parse_args()
    pprint(vars(args))

    #set_gpu(args.gpu)
    ensure_path(args.save_path)

    # TODO(review): hard-coded cluster path — make this configurable.
    trainset = MiniImageNet(
        '/mnt/lustre/dingmingyu/Research/da_zsl/dataset/mini-imagenet/',
        dataset='mini-imagenet',
        mode='train')
    train_sampler = CategoriesSampler(trainset.label, 100, args.train_way,
                                      args.shot + args.query)
    train_loader = DataLoader(dataset=trainset,
                              batch_sampler=train_sampler,
                              num_workers=8,
                              pin_memory=True)

    # Validation on the 'val_new_domain' split (new-domain validation).
    valset = MiniImageNet(
        '/mnt/lustre/dingmingyu/Research/da_zsl/dataset/mini-imagenet/',
        dataset='mini-imagenet',
        mode='val_new_domain')
    val_sampler = CategoriesSampler(valset.label, 400, args.test_way,
                                    args.shot + args.query)
    val_loader = DataLoader(dataset=valset,
# Example #10
    # Fragment: `parser` and the earlier options (presumably including
    # --train-way/--shot/--query used below) are declared above this excerpt.
    parser.add_argument('--save-path', default='./save/proto-1')
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--load', default='na')  # 'na' means train from scratch
    parser.add_argument('--num_workers', type=int, default=8)
    parser.add_argument('--start_epoch', type=int, default=1)
    # BUG FIX: the noise scale is fractional; type=int would reject or
    # truncate any value given on the command line (the default .02 is a
    # float). Declare it as float.
    parser.add_argument('--noise', type=float, default=.02)
    parser.add_argument('--meta-size', type=int, default=5)
    args = parser.parse_args()
    pprint(vars(args))

    set_gpu(args.gpu)
    ensure_path(args.save_path)
    writer = SummaryWriter()
    # Gaussian noise distribution used for perturbation (scale 0.02).
    noise = torch.distributions.Normal(loc=0, scale=.02)
    supset = MiniImageNet('sup')
    # Self-supervised episodes: 2*shot support (two views) + query images
    # per class, `train_way` classes per episode.
    ssdata = SSMiniImageNet()
    ss_sampler = CategoriesSampler(ssdata.slabel, 100, args.train_way,
                                   2 * args.shot + args.query)
    ss_loader = DataLoader(dataset=ssdata,
                           batch_sampler=ss_sampler,
                           num_workers=args.num_workers,
                           pin_memory=True)
# Example #11
if __name__ == '__main__':
    # Evaluation script: load a trained Convnet checkpoint and score it on
    # episodic batches from the 'test_new_domain_fsl' split.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--load', default='./save/proto-1/max-acc.pth')
    parser.add_argument('--batch', type=int, default=2000)
    parser.add_argument('--way', type=int, default=5)
    parser.add_argument('--shot', type=int, default=1)
    parser.add_argument('--query', type=int, default=30)
    args = parser.parse_args()
    pprint(vars(args))

    set_gpu(args.gpu)

    # TODO(review): hard-coded cluster path — make this configurable.
    dataset = MiniImageNet(
        '/mnt/lustre/dingmingyu/Research/da_zsl/dataset/mini-imagenet/',
        dataset='mini-imagenet',
        mode='test_new_domain_fsl')
    sampler = CategoriesSampler(dataset.label, args.batch, args.way,
                                args.shot + args.query)
    loader = DataLoader(dataset,
                        batch_sampler=sampler,
                        num_workers=8,
                        pin_memory=True)

    model = Convnet().cuda()
    model.load_state_dict(torch.load(args.load))
    model.eval()  # disable dropout/batch-norm updates for evaluation

    ave_acc = Averager()

    for i, batch in enumerate(loader, 1):
if __name__ == '__main__':
    # Evaluation script for the PSN model: load the best checkpoint and
    # evaluate over episodic test batches.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default='0')
    parser.add_argument('--load', default='./save/psn-model/max-acc.pth')
    parser.add_argument('--batch', type=int, default=600)
    parser.add_argument('--way', type=int, default=5)
    parser.add_argument('--shot', type=int, default=5)
    parser.add_argument('--query', type=int, default=15)
    parser.add_argument('--data-path', default='/home/csimon/research/data/miniimagenet/split/') # need to change to your data path
    args = parser.parse_args()
    pprint(vars(args))

    set_gpu(args.gpu)

    # Test episodes: `way` classes, shot+query images per class.
    dataset = MiniImageNet('test', args.data_path)
    sampler = CategoriesSampler(dataset.label,
                                args.batch, args.way, args.shot + args.query)
    loader = DataLoader(dataset, batch_sampler=sampler,
                        num_workers=8, pin_memory=True)

    model = ConvNet().cuda()
    model.load_state_dict(torch.load(args.load))
    model.eval()
    # NOTE(review): subspace dimension is hard-coded to 2 while args.shot
    # defaults to 5 — confirm this is intentional.
    projection_pro = Projection(shot=2) #subspace dim

    ave_acc = Averager()
    acc_all=[]

    for i, batch in enumerate(loader, 1):
        data, _ = [_.cuda() for _ in batch]