Example #1
0
def get_loader(dataset):
    """Build a 5-way / 1-shot episode loader over *dataset*.

    Each of the 2000 batches per epoch samples 5 novel categories with one
    exemplar each and 30 query images in total; base categories are disabled.
    """
    from dataloader import FewShotDataloader

    return FewShotDataloader(
        dataset=dataset,
        nKnovel=5,        # novel categories per episode
        nKbase=0,         # base categories disabled
        nExemplars=1,     # training examples per novel category
        nTestNovel=30,    # test examples over all novel categories
        nTestBase=0,      # test examples over all base categories
        batch_size=8,
        num_workers=7,
        epoch_size=2000,  # batches per epoch
    )
Example #2
0
# Set the evaluation split and the corresponding data loader.  The held-out
# test split uses the conventional 600 evaluation episodes; validation keeps
# the larger 2000-episode budget.
test_split, epoch_size = ('test', 600) if args_opt.testset else ('val', 2000)

nExemplars = config['data_test_opt']['nExemplars']
dloader_test = FewShotDataloader(
    dataset=MiniImageNet(phase=test_split),
    nKnovel=5,              # number of novel categories per episode
    nKbase=64,              # number of base categories
    nExemplars=nExemplars,  # training examples per novel category
    nTestNovel=15 * 5,      # test examples over all the novel categories
    nTestBase=15 * 5,       # test examples over all the base categories
    batch_size=1,
    num_workers=0,
    epoch_size=epoch_size,  # number of batches per epoch
)

algorithm = alg.FewShot(config)
if args_opt.cuda:
    # Move model parameters/buffers onto the GPU.
    algorithm.load_to_gpu()

# In evaluation mode we load the checkpoint with the highest novel-category
# recognition accuracy on the validation set of MiniImagenet.
algorithm.load_checkpoint(epoch='*', train=False, suffix='.best')

# Run evaluation.
Example #3
0
    #    assert k in data_test_opt
    data_test_opt.update(args_opt.config_overwrite_data_test_opt)

# Dataset splits come from the command line (historically 'train'/'val').
train_split = args_opt.train_split
test_split = args_opt.test_split
dataset_train = dataset_cls(
    phase=train_split, get_pickle_paths=get_pickle_paths)
dataset_test = dataset_cls(
    phase=test_split, get_pickle_paths=get_pickle_paths)

# Episode loader for the training split, configured from data_train_opt.
dloader_train = FewShotDataloader(
    dataset=dataset_train,
    nKnovel=data_train_opt['nKnovel'],          # novel categories per episode
    nKbase=data_train_opt['nKbase'],            # base categories per episode
    nExemplars=data_train_opt['nExemplars'],    # training examples per novel category
    nTestNovel=data_train_opt['nTestNovel'],    # test examples over all novel categories
    nTestBase=data_train_opt['nTestBase'],      # test examples over all base categories
    batch_size=data_train_opt['batch_size'],
    num_workers=args_opt.num_workers,
    epoch_size=data_train_opt['epoch_size'],    # batches per epoch
)

dloader_test = FewShotDataloader(
    dataset=dataset_test,
    nKnovel=data_test_opt['nKnovel'],
    nKbase=data_test_opt['nKbase'],
    nExemplars=data_test_opt[
        'nExemplars'],  # num training examples per novel category
    nTestNovel=data_test_opt[
        'nTestNovel'],  # num test examples for all the novel categories
Example #4
0
def test_sample_episode(dataset):
    """Sanity-check ``FewShotDataloader.sample_episode`` on *dataset*.

    Samples 100 episodes and verifies that (a) exemplar and test images of
    the novel categories do not overlap, (b) outside the 'train' phase the
    novel-category images never come from base categories, (c) per-category
    exemplar/test counts match the loader configuration, and (d) the base
    and novel category ids are disjoint.

    Fixes for Python 3: the original used tuple-parameter lambdas
    (``lambda (_, label): ...``, removed by PEP 3113) and a bare ``reduce``
    (moved to ``functools`` in Python 3); replaced with comprehensions and
    the builtin ``sum``.
    """
    dloader = FewShotDataloader(dataset=dataset,
                                nKnovel=5,
                                nKbase=-1,  # -1: use all base categories
                                nExemplars=1,
                                nTestNovel=15 * 2,
                                nTestBase=15 * 2,
                                batch_size=1,
                                num_workers=1,
                                epoch_size=2)
    nKnovel = dloader.nKnovel
    nKbase = dloader.nKbase
    nTestBase = dloader.nTestBase
    nTestNovel = dloader.nTestNovel
    nExemplars = dloader.nExemplars

    label2ind = dloader.dataset.label2ind
    labelIds_base = dloader.dataset.labelIds_base
    all_img_ids_of_base_categories = []
    for label_id in labelIds_base:
        all_img_ids_of_base_categories += label2ind[label_id]

    # Novel-category image ids exist only outside the 'train' phase.
    if dloader.phase != 'train':
        labelIds_novel = dloader.dataset.labelIds_novel
        all_img_ids_of_novel_categories = []
        for label_id in labelIds_novel:
            all_img_ids_of_novel_categories += label2ind[label_id]

    for _ in range(100):
        Exemplars, Test, Kall, nKbase = dloader.sample_episode()

        # Verify that the image ids of the training exemplars for the novel
        # categories do not overlap with the test examples for the novel
        # categories.  Episode-local labels >= nKbase denote novel classes.
        test_novel = [img_id for (img_id, label) in Test if label >= nKbase]
        train_novel = [img_id for (img_id, _) in Exemplars]
        assert (len(set.intersection(set(test_novel), set(train_novel))) == 0)

        if dloader.phase != 'train':
            # Verify that the image ids coming from the novel categories
            # (both the training and the test examples) do not belong to the
            # base categories (i.e., the categories used for training the
            # model during the training procedure).
            novel_img_ids = test_novel + train_novel

            assert (len(
                set.intersection(set(all_img_ids_of_base_categories),
                                 set(novel_img_ids))) == 0)
            assert (len(
                set.intersection(set(all_img_ids_of_novel_categories),
                                 set(novel_img_ids))) == len(novel_img_ids))

        # Verify Exemplars list.
        histE = [0] * (nKnovel + nKbase)
        for (_, label) in Exemplars:
            histE[label] += 1
        # The label ids in the exemplars list must not belong to the base
        # categories...
        assert (all(val == 0 for val in histE[:nKbase]))
        # ...and each novel category must contribute exactly nExemplars
        # sampled examples.
        assert (all(val == nExemplars for val in histE[nKbase:]))

        # Verify Test list.
        histT = [0] * (nKnovel + nKbase)
        for (_, label) in Test:
            histT[label] += 1
        # The number of test examples coming from the base categories must
        # equal nTestBase.
        if nKbase != 0:
            assert (sum(histT[:nKbase]) == nTestBase)
        # The number of test examples coming from the novel categories must
        # equal nTestNovel.
        if nKnovel != 0:
            assert (sum(histT[nKbase:]) == nTestNovel)

        # Verify that the Kbase and Knovel category ids do not intersect.
        assert (len(set.intersection(set(Kall[:nKbase]),
                                     set(Kall[nKbase:]))) == 0)
Example #5
0
else:
    test_split = 'val'
    epoch_size = 2000

# Let the command line override the per-split default number of episodes.
if args_opt.num_epochs is not None:
    epoch_size = args_opt.num_epochs

data_test_opt = config['data_test_opt']
dloader_test = FewShotDataloader(
    dataset=dataset_cls(phase=test_split, get_pickle_paths=get_pickle_paths),
    nKnovel=data_test_opt['nKnovel'],        # novel categories per episode
    nKbase=data_test_opt['nKbase'],          # base categories
    nExemplars=data_test_opt['nExemplars'],  # training examples per novel category
    nTestNovel=data_test_opt['nTestNovel'],  # test examples over all novel categories
    nTestBase=data_test_opt['nTestBase'],    # test examples over all base categories
    batch_size=1,
    num_workers=0,
    epoch_size=epoch_size,                   # batches per epoch
)

algorithm = alg.FewShot(config)
if args_opt.cuda:
    # Move model parameters/buffers onto the GPU.
    algorithm.load_to_gpu()

# In evaluation mode we load the checkpoint with the highest novel category
# recognition accuracy on the validation set of MiniImagenet.
algorithm.load_checkpoint(epoch='*', train=False, suffix='.best')