Example #1
# Imports shared by these snippets; the sampler path is assumed from the
# FEAT-style repo layout implied by the dataset imports below.
import torch
from torch.utils.data import DataLoader
from model.dataloader.samplers import CategoriesSampler


def get_dataloader(args):
    if args.dataset == 'MiniImageNet':
        # Handle MiniImageNet
        from model.dataloader.mini_imagenet import MiniImageNet as Dataset
        evalDataset = Dataset
    elif args.dataset == 'CUB':
        from model.dataloader.cub import CUB as Dataset
        evalDataset = Dataset
    elif args.dataset == 'TieredImageNet':
        from model.dataloader.tiered_imagenet import tieredImageNet as Dataset
        evalDataset = Dataset
    elif args.dataset == 'MiniImageNet2CUB':
        from model.dataloader.mini_imagenet import MiniImageNet as Dataset
        from model.dataloader.cub import CUB as evalDataset
    elif args.dataset == 'CUB2MiniImageNet':
        from model.dataloader.cub import CUB as Dataset
        from model.dataloader.mini_imagenet import MiniImageNet as evalDataset
    else:
        raise ValueError('Non-supported Dataset.')

    num_device = torch.cuda.device_count()
    num_episodes = args.episodes_per_epoch * num_device if args.multi_gpu else args.episodes_per_epoch
    num_workers = args.num_workers * num_device if args.multi_gpu else args.num_workers
    trainset = Dataset('train', args, augment=args.augment)
    args.num_class = trainset.num_class  # note: distinct from args.num_classes read below
    train_sampler = CategoriesSampler(trainset.label, num_episodes,
                                      max(args.way, args.num_classes),
                                      args.shot + args.query)

    train_loader = DataLoader(
        dataset=trainset,
        num_workers=num_workers if not args.debug_fast else 0,
        batch_sampler=train_sampler,
        pin_memory=True)

    # if args.multi_gpu and num_device > 1:
    #     train_loader = MultiGPUDataloader(train_loader, num_device)
    #     args.way = args.way * num_device

    valset = evalDataset('val', args)
    val_sampler = CategoriesSampler(valset.label, args.num_eval_episodes,
                                    args.eval_way,
                                    args.eval_shot + args.eval_query)
    val_loader = DataLoader(
        dataset=valset,
        batch_sampler=val_sampler,
        num_workers=args.num_workers if not args.debug_fast else 0,
        pin_memory=True)

    testset = evalDataset('test', args)
    test_sampler = CategoriesSampler(testset.label, args.num_test_episodes,
                                     args.eval_way,
                                     args.eval_shot + args.eval_query)
    test_loader = DataLoader(
        dataset=testset,
        batch_sampler=test_sampler,
        num_workers=args.num_workers if not args.debug_fast else 0,
        pin_memory=True)

    return train_loader, val_loader, test_loader
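
Every example on this page builds its episodes with CategoriesSampler. Below is a minimal sketch of what such a batch sampler does, assuming only that `label` is a per-sample list of integer class ids; the real class lives in model/dataloader/samplers.py and may differ in detail:

import torch

class SimpleCategoriesSampler:
    """Sketch of a CategoriesSampler-style batch sampler: each iteration
    yields the flat indices of one n_cls-way episode with n_per samples
    per class, class-interleaved so position k holds class k % n_cls."""

    def __init__(self, label, n_batch, n_cls, n_per):
        self.n_batch, self.n_cls, self.n_per = n_batch, n_cls, n_per
        label = torch.as_tensor(label)
        # Pre-compute which dataset indices belong to each class.
        self.class_indices = [torch.nonzero(label == c).flatten()
                              for c in torch.unique(label)]

    def __len__(self):
        return self.n_batch

    def __iter__(self):
        for _ in range(self.n_batch):
            classes = torch.randperm(len(self.class_indices))[:self.n_cls]
            picks = []
            for c in classes:
                # Assumes every class has at least n_per samples.
                idx = self.class_indices[c]
                picks.append(idx[torch.randperm(len(idx))[:self.n_per]])
            # (n_cls, n_per) -> transpose -> flatten, so consecutive
            # blocks of n_cls indices each span all sampled classes.
            yield torch.stack(picks).t().reshape(-1)

This class-interleaved order is what the label tensors in Examples #4, #7, and #10 rely on.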
Example #2
def get_dataloader(args):
    if args.dataset == 'MiniImageNet':
        # Handle MiniImageNet
        from model.dataloader.mini_imagenet import MiniImageNet as Dataset
        args.dropblock_size = 5
    elif args.dataset == 'TieredImageNet':
        from model.dataloader.tiered_imagenet import tieredImageNet as Dataset
        args.dropblock_size = 5
    else:
        raise ValueError('Non-supported Dataset.')

    num_workers = args.num_workers
    trainset = Dataset('train', args, augment=False)
    args.num_class = trainset.num_class
    train_gfsl_loader = None
    train_fsl_loader = None
    proto_sampler = ClassSampler(trainset.label, 100)
    proto_loader = DataLoader(dataset=trainset,
                              batch_sampler=proto_sampler,
                              num_workers=num_workers,
                              pin_memory=True)

    valset = Dataset('val', args)
    val_sampler = CategoriesSampler(valset.label, 500,
                                    min(args.eval_way, valset.num_class),
                                    args.eval_shot + args.eval_query)
    val_fsl_loader = DataLoader(dataset=valset,
                                batch_sampler=val_sampler,
                                num_workers=num_workers,
                                pin_memory=True)

    testset = Dataset('test', args)
    test_sampler = CategoriesSampler(testset.label, args.num_eval_episodes,
                                     min(args.eval_way, testset.num_class),
                                     args.eval_shot + args.eval_query)
    test_fsl_loader = DataLoader(dataset=testset,
                                 batch_sampler=test_sampler,
                                 num_workers=num_workers,
                                 pin_memory=True)

    # prepare data loaders for GFSL test
    trainvalset = Dataset('aux_val', args)
    val_many_shot_sampler = RandomSampler(
        trainvalset.label, 500,
        min(args.eval_way, valset.num_class) * args.eval_query)
    val_gfsl_loader = DataLoader(dataset=trainvalset,
                                 batch_sampler=val_many_shot_sampler,
                                 num_workers=num_workers,
                                 pin_memory=True)

    traintestset = Dataset('aux_test', args)
    test_many_shot_sampler = RandomSampler(
        traintestset.label, args.num_eval_episodes,
        min(args.eval_way, testset.num_class) * args.eval_query)
    test_gfsl_loader = DataLoader(dataset=traintestset,
                                  batch_sampler=test_many_shot_sampler,
                                  num_workers=num_workers,
                                  pin_memory=True)
    return trainset, valset, trainvalset, testset, traintestset, train_fsl_loader, train_gfsl_loader, proto_loader, val_fsl_loader, val_gfsl_loader, test_fsl_loader, test_gfsl_loader
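
Example #2 hands back twelve objects, which callers unpack positionally. A minimal sketch (the names mirror the return statement; in this variant train_fsl_loader and train_gfsl_loader come back as None, and the paired few-shot/many-shot loaders are presumably consumed in lockstep):

(trainset, valset, trainvalset, testset, traintestset,
 train_fsl_loader, train_gfsl_loader, proto_loader,
 val_fsl_loader, val_gfsl_loader,
 test_fsl_loader, test_gfsl_loader) = get_dataloader(args)

# GFSL evaluation pairs each few-shot episode with a many-shot batch
# drawn from the auxiliary 'aux_val' split:
for episode, many_shot_batch in zip(val_fsl_loader, val_gfsl_loader):
    pass  # combine support/query data with the auxiliary batch here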
Example #3
def get_loader(args):
    if args.dataset == 'MiniImageNet':
        # Handle MiniImageNet
        from model.dataloader.mini_imagenet import MiniImageNet as Dataset
    elif args.dataset == 'CUB':
        from model.dataloader.cub import CUB as Dataset
    elif args.dataset == 'TieredImagenet':
        from model.dataloader.tiered_imagenet import tieredImageNet as Dataset
    else:
        raise ValueError('Non-supported Dataset.')

    if args.finetune:
        split = 'train_%d' % args.samples_per_class
    else:
        split = 'train'

    trainset = Dataset(split, False, args, augment=args.augment)
    train_loader = DataLoader(dataset=trainset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=4,
                              pin_memory=True)
    args.num_class = trainset.num_class
    valset = Dataset('val', False, args)
    args.num_val_class = valset.num_class
    val_sampler = CategoriesSampler(valset.label, 200, valset.num_class,
                                    1 + args.query)  # 1-shot eval over all val classes (16-way on MiniImageNet)
    val_loader = DataLoader(dataset=valset,
                            batch_sampler=val_sampler,
                            num_workers=4,
                            pin_memory=True)
    args.way = valset.num_class
    args.shot = 1
    return train_loader, val_loader
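
Example #3 mutates args as a side effect (num_class, num_val_class, way, and shot are all written), so downstream code can rely on those fields after the call. A minimal invocation sketch with illustrative values; only the attributes the function reads are required:

from argparse import Namespace

args = Namespace(dataset='MiniImageNet',
                 finetune=False,
                 samples_per_class=1,   # only read when finetune=True
                 augment=True,
                 batch_size=128,
                 query=15)
train_loader, val_loader = get_loader(args)
print(args.way, args.shot)  # set by get_loader: (number of val classes, 1)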
Example #4
 def test_process(self, testset):
     args = self.args
     record = np.zeros((args.num_test_episodes, 2))  # loss and acc
     label = torch.arange(args.eval_way, dtype=torch.int16).repeat(
         # args.num_tasks *
         args.eval_query)
     label = label.type(torch.LongTensor)
     if torch.cuda.is_available():
         label = label.cuda()
     test_sampler = CategoriesSampler(
         testset.label,
         args.num_test_episodes,  # args.num_eval_episodes,
         args.eval_way,
         args.eval_shot + args.eval_query)
     test_loader = DataLoader(dataset=testset,
                              batch_sampler=test_sampler,
                              num_workers=args.num_workers,
                              pin_memory=True)
     for i, batch in tqdm(enumerate(test_loader, 1),
                          total=len(test_loader)):
         data = batch[0]
         data = data.to(self.args.device)
         logits = self.model(data)
         loss = F.cross_entropy(logits, label)
         acc = count_acc(logits, label)
         record[i - 1, 0] = loss.item()
         record[i - 1, 1] = acc
     assert i == record.shape[0]  # every episode slot in record was filled
     vl, _ = compute_confidence_interval(record[:, 0])
     va, vap = compute_confidence_interval(record[:, 1])
     print('{} way {} shot, Test acc={:.4f} + {:.4f}\n'.format(
         args.eval_way, args.eval_shot, va, vap))
     return vl, va, vap
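
The query labels in Example #4 depend on the class-interleaved episode layout sketched under Example #1: the model is assumed to consume the support block internally and emit logits ordered class 0 through eval_way - 1, repeated eval_query times. A worked check with illustrative values:

import torch

way, query = 3, 2
label = torch.arange(way, dtype=torch.int16).repeat(query)
print(label)  # tensor([0, 1, 2, 0, 1, 2], dtype=torch.int16)
label = label.type(torch.LongTensor)  # int64, as F.cross_entropy requires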
Example #5
def get_loader(args):
    # if args.dataset == 'MiniImageNet':
    #     # Handle MiniImageNet
    #     from model.dataloader.mini_imagenet import MiniImageNet as Dataset
    # elif args.dataset == 'CUB':
    #     from model.dataloader.cub import CUB as Dataset
    # elif args.dataset == 'TieredImagenet':
    #     from model.dataloader.tiered_imagenet import tieredImageNet as Dataset
    # else:
    #     raise ValueError('Non-supported Dataset.')

    # if args.finetune:
    #     split = 'train_%d' % args.samples_per_class
    # else:
    #     split = 'train'
    test_transform = get_transform(args)
    # The evaluation-time transform is reused for the training split (no augmentation).
    trainset = MiniImageNet('train', test_transform)
    train_loader = DataLoader(dataset=trainset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=8,
                              pin_memory=True)
    args.num_class = trainset.num_class
    valset = MiniImageNet('test', test_transform)  # validated directly on the test split
    args.way = 5
    val_sampler = CategoriesSampler(valset.labels, args.num_test_episodes,
                                    args.way, 1 + args.query)  # test on 5-way 1-shot
    val_loader = DataLoader(dataset=valset,
                            batch_sampler=val_sampler,
                            num_workers=8,
                            pin_memory=True)
    args.num_val_class = args.way
    args.shot = 1
    return train_loader, val_loader
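
Each batch that val_sampler yields in Example #5 holds args.way * (1 + args.query) images, support first. A quick arithmetic sketch with illustrative values:

way, shot, query = 5, 1, 15            # shot is fixed to 1 above
episode = way * (shot + query)         # 80 images per sampled batch
support = slice(0, way * shot)         # the first way*shot entries
queries = slice(way * shot, episode)   # the remaining way*query entries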
Example #6
 def eval_process(self, args, epoch):
     valset = self.valset
     if args.model_class in ['QsimProtoNet', 'QsimMatchNet']:
         val_sampler = NegativeSampler(args, valset.label,
                                       args.num_eval_episodes,
                                       args.eval_way, args.eval_shot + args.eval_query)
     else:
         val_sampler = CategoriesSampler(valset.label,
                                         args.num_eval_episodes,
                                         args.eval_way, args.eval_shot + args.eval_query)
     val_loader = DataLoader(dataset=valset,
                             batch_sampler=val_sampler,
                             num_workers=args.num_workers,
                             pin_memory=True)
     vl, va, vap = self.evaluate(val_loader)
     self.logger.add_scalar('%dw%ds_val_loss' % (args.eval_way, args.eval_shot), float(vl),
                            self.train_epoch)
     self.logger.add_scalar('%dw%ds_val_acc' % (args.eval_way, args.eval_shot), float(va),
                            self.train_epoch)
     print('epoch {},{} way {} shot, val, loss={:.4f} acc={:.4f}+{:.4f}'.format(epoch, args.eval_way,
                                                                                args.eval_shot, vl, va,
                                                                                vap))
     return vl, va, vap
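
A sketch of how eval_process might be driven from an outer training loop; everything outside the snippet above (the trainer object, its train-step method, args.max_epoch, the checkpoint path) is hypothetical:

import torch

best_acc = 0.0
for epoch in range(1, args.max_epoch + 1):
    trainer.train_one_epoch()                  # hypothetical train step
    vl, va, vap = trainer.eval_process(args, epoch)
    if va > best_acc:                          # keep the best checkpoint
        best_acc = va
        torch.save(trainer.model.state_dict(), 'max_acc.pth')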
Example #7
    # train_embs = train_embs.cuda()

    # # np.random.choice(train_embs.size(0),args.num_train)
    # # (num_train,num_train)
    # tt_sim = torch.einsum('ij,kj->ik', train_embs, test_embs)
    # # (num_train,num_test)
    # tn_sim = torch.einsum('ij,kj->ik', train_embs, test_embs)
    # tt_sim = tt_sim.unsqueeze(2)
    # ttn_sim = tn_sim.unsqueeze(1) + tn_sim.unsqueeze(0) / 2
    # ttn_sim = tt_sim / ttn_sim

    print('sampling task')
    num_negs = train_labels.size(0)
    label = torch.arange(args.way, dtype=torch.int16).repeat(
        args.query * num_negs).cuda().long()
    test_sampler = CategoriesSampler(testset.label, args.num_test_episodes,
                                     args.way, args.shot + args.query)
    indexed_test_set = IndexedDataset(testset)
    test_loader = DataLoader(dataset=indexed_test_set,
                             batch_sampler=test_sampler,
                             num_workers=0,
                             pin_memory=True)
    reduce_label = torch.arange(args.way, dtype=torch.int16).repeat(
        args.query).cuda().long().view(args.query, args.way)
    neg_indexes = torch.arange(num_negs).cuda()
    pairs = []
    losses = []
    accs = []
    mean_benefit = 0.0
    count = 0
    position_stats = torch.zeros(num_negs, dtype=torch.long).cuda()
    with torch.no_grad():
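
Example #7 breaks off inside the torch.no_grad() block. Its IndexedDataset wrapper exists so each batch also carries the original sample indices (used for position_stats). A minimal sketch of such a wrapper, assuming the wrapped dataset yields (image, label) pairs; the real class may order its outputs differently:

from torch.utils.data import Dataset

class IndexedDatasetSketch(Dataset):
    def __init__(self, dataset):
        self.dataset = dataset
        self.label = dataset.label   # keep labels visible for samplers
    def __len__(self):
        return len(self.dataset)
    def __getitem__(self, i):
        data, label = self.dataset[i]
        return data, label, i        # append the original index

Because the wrapper preserves ordering, the CategoriesSampler built from testset.label still addresses the right samples.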
Example #8
def get_dataloader(args):
    num_device = torch.cuda.device_count()
    num_episodes = args.episodes_per_epoch * num_device if args.multi_gpu else args.episodes_per_epoch
    num_workers = args.num_workers * num_device if args.multi_gpu else args.num_workers
    if args.additional == 'Mixed':
        from model.dataloader.mix_dataset import MixedDatasetWrapper
        trainset = get_dataset(args.dataset,
                               'train',
                               True,
                               args,
                               augment=args.augment)
        # args.num_class = unsupervised_trainset.num_class
        unsupervised_loader = DataLoader(dataset=trainset,
                                         batch_size=args.batch_size,
                                         shuffle=True,
                                         num_workers=num_workers,
                                         collate_fn=examplar_collate,
                                         pin_memory=True,
                                         drop_last=True)
        supervised_trainset = get_dataset(args.dataset,
                                          'train',
                                          False,
                                          args,
                                          augment=args.augment)
        args.num_classes = min(len(supervised_trainset.wnids),
                               args.num_classes)
        train_sampler = CategoriesSampler(supervised_trainset.label,
                                          num_episodes,
                                          max(args.way, args.num_classes),
                                          args.shot + args.query)

        supervised_loader = DataLoader(dataset=supervised_trainset,
                                       num_workers=num_workers,
                                       batch_sampler=train_sampler,
                                       pin_memory=True)
        dataset = MixedDatasetWrapper(supervised_loader, unsupervised_loader)
        train_loader = DataLoader(dataset=dataset,
                                  batch_size=1,
                                  shuffle=True,
                                  num_workers=num_workers,
                                  pin_memory=True)
    else:
        if args.finetune:
            split = 'train_%d_%d' % (args.finetune_ways,
                                     args.samples_per_class)
        else:
            split = 'train'
        trainset = get_dataset(args.dataset,
                               split,
                               args.unsupervised,
                               args,
                               augment=args.augment)
        args.num_classes = min(len(trainset.wnids), args.num_classes)
        if args.unsupervised:
            train_loader = DataLoader(dataset=trainset,
                                      batch_size=args.batch_size,
                                      shuffle=True,
                                      num_workers=num_workers,
                                      collate_fn=examplar_collate,
                                      pin_memory=True,
                                      drop_last=True)
        else:
            train_sampler = CategoriesSampler(trainset.label, num_episodes,
                                              max(args.way, args.num_classes),
                                              args.shot + args.query)

            train_loader = DataLoader(dataset=trainset,
                                      num_workers=num_workers,
                                      batch_sampler=train_sampler,
                                      pin_memory=True)
    if args.model_class == 'DummyProto':
        from model.dataloader.dummy_loader import DummyWrapper
        train_loader = DummyWrapper(args.dummy_samples, train_loader)
    # if args.multi_gpu and num_device > 1:
    #     train_loader = MultiGPUDataloader(train_loader, num_device)
    #     args.way = args.way * num_device

    valset = get_dataset(args.dataset, 'val', args.unsupervised, args)
    # val_sampler = CategoriesSampler(valset.label,
    #                                 args.num_eval_episodes,
    #                                 args.eval_way, args.eval_shot + args.eval_query)
    # val_loader = DataLoader(dataset=valset,
    #                         batch_sampler=val_sampler,
    #                         num_workers=args.num_workers,
    #                         pin_memory=True)
    #
    testsets = {n: get_dataset(n, 'test', args.unsupervised, args)
                for n in args.eval_dataset.split(',')}
    # testsets = TestDataset('test', args.unsupervised, args)
    # test_sampler = CategoriesSampler(testset.label,
    #                                  10000,  # args.num_eval_episodes,
    #                                  args.eval_way, args.eval_shot + args.eval_query)
    # test_loader = DataLoader(dataset=testset,
    #                          batch_sampler=test_sampler,
    #                          num_workers=args.num_workers,
    #                          pin_memory=True)
    args.image_shape = trainset.image_shape
    return train_loader, valset, testsets
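
The MixedDatasetWrapper in Example #8 lets one outer DataLoader (batch_size=1) step through paired supervised episodes and unsupervised batches. A hypothetical stand-in that eagerly zips the two inner loaders (the real wrapper in model/dataloader/mix_dataset.py is presumably lazier):

from torch.utils.data import Dataset

class MixedDatasetSketch(Dataset):
    """Pairs the i-th supervised episode with the i-th unsupervised
    batch. Materializing all pairs up front is simple but memory
    hungry; this is a sketch, not the real implementation."""
    def __init__(self, supervised_loader, unsupervised_loader):
        self.pairs = list(zip(supervised_loader, unsupervised_loader))
    def __len__(self):
        return len(self.pairs)
    def __getitem__(self, i):
        return self.pairs[i]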
Example #9
    if args.dataset == 'MiniImageNet':
        # The snippet begins mid-chain; this first branch is reconstructed
        # from the pattern of Examples #1 and #3.
        from model.dataloader.mini_imagenet import MiniImageNet as Dataset
    elif args.dataset == 'CUB':
        from model.dataloader.cub import CUB as Dataset
    elif args.dataset == 'TieredImagenet':
        from model.dataloader.tiered_imagenet import tieredImageNet as Dataset
    else:
        raise ValueError('Non-supported Dataset.')

    trainset = Dataset('train', args, augment=True)
    train_loader = DataLoader(dataset=trainset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=8,
                              pin_memory=True)
    args.num_class = trainset.num_class
    valset = Dataset('val', args)
    val_sampler = CategoriesSampler(valset.label, 200, valset.num_class,
                                    1 + args.query)  # 1-shot eval over all val classes (16-way on MiniImageNet)
    val_loader = DataLoader(dataset=valset,
                            batch_sampler=val_sampler,
                            num_workers=8,
                            pin_memory=True)
    args.way = valset.num_class
    args.shot = 1

    # construct model
    model = Classifier(args)
    if 'Conv' in args.backbone_class:
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=args.lr,
                                     weight_decay=0.0005)
    elif 'Res' in args.backbone_class:
        # The snippet is truncated mid-call; the momentum and weight-decay
        # values below are assumptions mirroring the Adam branch above.
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=0.9,
                                    weight_decay=0.0005)
    model.eval()

    trainset = get_dataset('MiniImageNet', 'train', False, args)

    class_mean = get_class_mean('MiniImageNet', args.backbone_class, trainset)
    # testsets = dict(((n, get_dataset(n, 'test', args.unsupervised, args)) for n in args.eval_dataset.split(',')))
    ensemble_result = []
    for n in args.datasets:
        print('----------- test on {} --------------'.format(n))
        valset = get_dataset(n, 'val', args.unsupervised, args)
        # eval_setting is assumed to enumerate (way, shot) pairs; the tuple
        # unpack writes straight into the args namespace per configuration.
        for i, (args.way, args.shot) in enumerate(valset.eval_setting):
            # train best gamma
            # valset = Dataset('val', args.unsupervised, args)
            valset = get_dataset(n, 'val', args.unsupervised, args)
            val_sampler = CategoriesSampler(valset.label, 500,
                                            min(args.way, valset.num_class),
                                            args.shot + args.query)
            val_loader = DataLoader(dataset=valset,
                                    batch_sampler=val_sampler,
                                    num_workers=8,
                                    pin_memory=True)
            # test_set = Dataset('test', args.unsupervised, args)
            test_set = get_dataset(n, 'test', args.unsupervised, args)
            sampler = CategoriesSampler(test_set.label, args.num_test_episodes,
                                        args.way, args.shot + args.query)
            loader = DataLoader(dataset=test_set,
                                batch_sampler=sampler,
                                num_workers=8,
                                pin_memory=True)
            shot_label = torch.arange(min(args.way, valset.num_class)).repeat(
                args.shot).numpy()
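
shot_label mirrors the query-label construction from Example #4 but repeats over shots instead of queries, again matching the class-interleaved support block. A worked check with illustrative values:

import torch

way, shot = 3, 2
shot_label = torch.arange(way).repeat(shot).numpy()
print(shot_label)  # [0 1 2 0 1 2]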