Example #1
def get_data_fs(env_config, load_train=False):
    """Gets the few-shot datasets for the train/val/test splits."""

    def _load(split):
        if split is None:
            return None
        return get_dataset(env_config.dataset, env_config.data_folder, split)

    train_split = env_config.train_fs_split
    # The few-shot train split is skipped when it duplicates the regular
    # train split, unless the caller explicitly asks for it.
    if train_split == env_config.train_split and not load_train:
        train_split = None
    return {
        'train_fs': _load(train_split),
        'val_fs': _load(env_config.val_fs_split),
        'test_fs': _load(env_config.test_fs_split),
        'metadata': env_config
    }
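A minimal usage sketch for get_data_fs; the env_config fields mirror the attributes the function reads, but SimpleNamespace and every value below are illustrative assumptions, not the project's real config object.

from types import SimpleNamespace

# Hypothetical config; only the attributes read by get_data_fs are set.
env_config = SimpleNamespace(
    dataset='mini-imagenet',
    data_folder='data/mini-imagenet',
    train_split='train',
    train_fs_split='train_fs',
    val_fs_split='val_fs',
    test_fs_split='test_fs')

splits = get_data_fs(env_config, load_train=True)
print(sorted(splits.keys()))  # ['metadata', 'test_fs', 'train_fs', 'val_fs']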
Example #2
def get_datasets(dataset, metadata, nshot, num_test, batch_size, num_gpu,
                 nclasses_a, nclasses_train, nclasses_val, nclasses_test,
                 old_and_new, seed, is_eval):
    """Builds datasets"""
    # ------------------------------------------------------------------------
    # Datasets
    train_dataset_a = get_dataset(
        dataset,
        metadata['trainsplit_a_train'],
        nclasses_train,
        nshot,
        label_ratio=metadata['label_ratio'],
        num_test=num_test // num_gpu,
        aug_90=False,
        num_unlabel=0,
        shuffle_episode=False,
        seed=seed,
        image_split_file=metadata['image_split_file_a_train'],
        nclasses=nclasses_a)
    if metadata['trainsplit_b'] == metadata['trainsplit_a_train']:
        train_dataset_b = train_dataset_a
    else:
        train_dataset_b = get_dataset(
            dataset,
            metadata['trainsplit_b'],
            nclasses_train,
            nshot,
            label_ratio=1.0,
            num_test=num_test // num_gpu,
            aug_90=False,
            num_unlabel=0,
            shuffle_episode=False,
            seed=seed,
            image_split_file=metadata['image_split_file_b'])
    trainval_dataset_a = get_dataset(
        dataset,
        metadata['trainsplit_a_val'],
        nclasses_train,
        nshot,
        label_ratio=metadata['label_ratio'],
        num_test=num_test // num_gpu,
        aug_90=False,
        num_unlabel=0,
        shuffle_episode=False,
        seed=seed,
        image_split_file=metadata['image_split_file_a_val'],
        nclasses=nclasses_a)
    traintest_dataset_a = get_dataset(
        dataset,
        metadata['trainsplit_a_test'],
        nclasses_train,
        nshot,
        label_ratio=metadata['label_ratio'],
        num_test=num_test // num_gpu,
        aug_90=False,
        num_unlabel=0,
        shuffle_episode=False,
        seed=seed,
        image_split_file=metadata['image_split_file_a_test'],
        nclasses=nclasses_a)
    val_dataset = get_dataset(dataset,
                              'val',
                              nclasses_val,
                              nshot,
                              label_ratio=1.0,
                              num_test=num_test // num_gpu,
                              aug_90=False,
                              num_unlabel=0,
                              shuffle_episode=False,
                              seed=seed)
    test_dataset = get_dataset(dataset,
                               'test',
                               nclasses_test,
                               nshot,
                               label_ratio=1.0,
                               num_test=num_test // num_gpu,
                               aug_90=False,
                               num_unlabel=0,
                               shuffle_episode=False,
                               seed=seed)

    # ------------------------------------------------------------------------
    # Task A iterators
    task_a_iter = get_iter(train_dataset_a.get_size(),
                           train_dataset_a.get_batch_idx,
                           batch_size // num_gpu,
                           cycle=True,
                           shuffle=True,
                           max_queue_size=10,
                           num_threads=2,
                           seed=seed)
    task_a_val_iter = get_iter(trainval_dataset_a.get_size(),
                               trainval_dataset_a.get_batch_idx,
                               batch_size // num_gpu,
                               cycle=True,
                               shuffle=True,
                               max_queue_size=10,
                               num_threads=2,
                               seed=seed)
    task_a_test_iter = get_iter(traintest_dataset_a.get_size(),
                                traintest_dataset_a.get_batch_idx,
                                batch_size // num_gpu,
                                cycle=True,
                                shuffle=True,
                                max_queue_size=10,
                                num_threads=2,
                                seed=seed)

    # ------------------------------------------------------------------------
    # Task B iterators
    task_b_iter = get_concurrent_iterator(train_dataset_b,
                                          max_queue_size=2,
                                          num_threads=1)
    task_b_val_iter = get_concurrent_iterator(val_dataset,
                                              max_queue_size=2,
                                              num_threads=1)
    task_b_test_iter = get_concurrent_iterator(test_dataset,
                                               max_queue_size=2,
                                               num_threads=1)

    # ------------------------------------------------------------------------
    # Wrap Task B iterators with old-task (Task A) episodes when requested.
    if old_and_new:
        task_a_iter_old = get_iter(train_dataset_a.get_size(),
                                   train_dataset_a.get_batch_idx,
                                   num_test * nclasses_train // num_gpu,
                                   cycle=True,
                                   shuffle=True,
                                   max_queue_size=10,
                                   num_threads=1,
                                   seed=seed + 1)
        task_a_val_iter_old = get_iter(trainval_dataset_a.get_size(),
                                       trainval_dataset_a.get_batch_idx,
                                       num_test * nclasses_val // num_gpu,
                                       cycle=True,
                                       shuffle=True,
                                       max_queue_size=10,
                                       num_threads=1,
                                       seed=seed + 1)
        task_a_test_iter_old = get_iter(traintest_dataset_a.get_size(),
                                        traintest_dataset_a.get_batch_idx,
                                        num_test * nclasses_test // num_gpu,
                                        cycle=True,
                                        shuffle=True,
                                        max_queue_size=10,
                                        num_threads=1,
                                        seed=seed + 1)

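        # nclasses_a == -1 means "use all Task A classes"; resolve the real
        # count from metadata before combining the old and new iterators.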
        if nclasses_a == -1:
            num_classes_a = metadata['num_classes_a']
        else:
            num_classes_a = nclasses_a
        task_b_iter = preprocess_old_and_new(num_classes_a, task_a_iter_old,
                                             task_b_iter)
        task_b_val_iter = preprocess_old_and_new(num_classes_a,
                                                 task_a_val_iter_old,
                                                 task_b_val_iter)
        task_b_test_iter = preprocess_old_and_new(num_classes_a,
                                                  task_a_test_iter_old,
                                                  task_b_test_iter)

    return {
        'a_train': task_a_iter,
        'a_val': task_a_val_iter,
        'a_test': task_a_test_iter,
        'b_train': task_b_iter,
        'b_val': task_b_val_iter,
        'b_test': task_b_test_iter,
    }
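For reference, these are the keys that get_datasets looks up in the metadata dict; the values below are placeholder assumptions, not the project's actual settings.

metadata = {
    'trainsplit_a_train': 'train_a',          # Task A training split.
    'trainsplit_a_val': 'train_a_val',        # Task A validation split.
    'trainsplit_a_test': 'train_a_test',      # Task A test split.
    'trainsplit_b': 'train_b',                # Task B split (may equal A's).
    'label_ratio': 0.1,                       # Labeled fraction for Task A.
    'image_split_file_a_train': 'splits/a_train.csv',
    'image_split_file_a_val': 'splits/a_val.csv',
    'image_split_file_a_test': 'splits/a_test.csv',
    'image_split_file_b': 'splits/b.csv',
    'num_classes_a': 64,                      # Used when nclasses_a == -1.
}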
Example #3
def main():
    if FLAGS.num_test == -1 and FLAGS.dataset in ('tiered-imagenet',
                                                  'mini-imagenet'):
        num_test = 5
    else:
        num_test = FLAGS.num_test
    config = get_config(FLAGS.dataset, FLAGS.model)
    nclasses_train = FLAGS.nclasses_train
    nclasses_eval = FLAGS.nclasses_eval

    # Which training and evaluation splits to use.
    train_split_name = 'train'
    if FLAGS.use_test:
        log.info('Using the test set')
        test_split_name = 'test'
    else:
        log.info('Not using the test set, using val')
        test_split_name = 'val'

    log.info('Use split `{}` for training'.format(train_split_name))

    # Whether doing 90 degree augmentation.
    if 'mini-imagenet' in FLAGS.dataset or 'tiered-imagenet' in FLAGS.dataset:
        _aug_90 = False
    else:
        _aug_90 = True

    nshot = FLAGS.nshot
    meta_train_dataset = get_dataset(FLAGS.dataset,
                                     train_split_name,
                                     nclasses_train,
                                     nshot,
                                     num_test=num_test,
                                     aug_90=_aug_90,
                                     num_unlabel=FLAGS.num_unlabel,
                                     shuffle_episode=False,
                                     seed=FLAGS.seed)
    meta_train_dataset = get_concurrent_iterator(meta_train_dataset,
                                                 max_queue_size=100,
                                                 num_threads=5)
    meta_test_dataset = get_dataset(FLAGS.dataset,
                                    test_split_name,
                                    nclasses_eval,
                                    nshot,
                                    num_test=num_test,
                                    aug_90=_aug_90,
                                    num_unlabel=FLAGS.num_unlabel,
                                    shuffle_episode=False,
                                    label_ratio=1,
                                    seed=FLAGS.seed)
    meta_test_dataset = get_concurrent_iterator(meta_test_dataset,
                                                max_queue_size=100,
                                                num_threads=5)
    m, mvalid = _get_model(config, nclasses_train, nclasses_eval)

    sconfig = tf.ConfigProto()
    sconfig.gpu_options.allow_growth = True
    with tf.Session(config=sconfig) as sess:
        if FLAGS.pretrain is not None:
            ckpt = tf.train.latest_checkpoint(
                os.path.join(FLAGS.results, FLAGS.pretrain))
            saver = tf.train.Saver()
            saver.restore(sess, ckpt)
        else:
            sess.run(tf.global_variables_initializer())
            train(sess, config, m, meta_train_dataset, mvalid,
                  meta_test_dataset)

        results_train = evaluate(sess, mvalid, meta_train_dataset)
        results_test = evaluate(sess, mvalid, meta_test_dataset)

        log.info("Final train acc {:.3f}% ({:.3f}%)".format(
            results_train['acc'] * 100.0, results_train['acc_ci'] * 100.0))
        log.info("Final test acc {:.3f}% ({:.3f}%)".format(
            results_test['acc'] * 100.0, results_test['acc_ci'] * 100.0))
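This main() reads the following FLAGS; a hedged sketch of the corresponding definitions using absl.flags (defaults and help strings are guesses, not the project's actual flag file):

from absl import flags

FLAGS = flags.FLAGS
flags.DEFINE_string('dataset', 'mini-imagenet', 'Dataset name.')
flags.DEFINE_string('model', 'basic', 'Model config name.')
flags.DEFINE_integer('num_test', -1, 'Query examples per class; -1 = default.')
flags.DEFINE_integer('nclasses_train', 5, 'Ways during meta-training.')
flags.DEFINE_integer('nclasses_eval', 5, 'Ways during evaluation.')
flags.DEFINE_integer('nshot', 1, 'Support examples per class.')
flags.DEFINE_integer('num_unlabel', 0, 'Unlabeled examples per class.')
flags.DEFINE_integer('seed', 0, 'Random seed.')
flags.DEFINE_bool('use_test', False, 'Evaluate on test instead of val.')
flags.DEFINE_string('pretrain', None, 'Checkpoint folder to restore from.')
flags.DEFINE_string('results', 'results', 'Root folder for results.')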
Example #4
def main():
    # ------------------------------------------------------------------------
    # Flags.
    if FLAGS.num_test == -1 and FLAGS.dataset in ('tiered-imagenet',
                                                  'mini-imagenet'):
        num_test = 5
    else:
        num_test = FLAGS.num_test
    nclasses_train = FLAGS.nclasses_train
    nclasses_eval = FLAGS.nclasses_eval

    # Whether doing 90 degree augmentation.
    if 'mini-imagenet' in FLAGS.dataset or 'tiered-imagenet' in FLAGS.dataset:
        _aug_90 = False
        input_shape = [84, 84, 3]
        feature_shape = [1600]
    else:
        _aug_90 = True
        input_shape = [28, 28, 1]
        feature_shape = [64]

    nshot = FLAGS.nshot
    dataset = FLAGS.dataset

    meta_train_dataset = get_dataset(FLAGS.dataset,
                                     'train',
                                     nclasses_train,
                                     nshot,
                                     num_test=num_test,
                                     aug_90=_aug_90,
                                     num_unlabel=FLAGS.num_unlabel,
                                     shuffle_episode=FLAGS.shuffle_episode,
                                     seed=FLAGS.seed)
    meta_val_dataset = get_dataset(FLAGS.dataset,
                                   'val',
                                   nclasses_eval,
                                   nshot,
                                   num_test=num_test,
                                   aug_90=_aug_90,
                                   num_unlabel=FLAGS.num_unlabel,
                                   shuffle_episode=FLAGS.shuffle_episode,
                                   seed=FLAGS.seed)
    meta_test_dataset = get_dataset(FLAGS.dataset,
                                    "test",
                                    nclasses_eval,
                                    nshot,
                                    num_test=num_test,
                                    aug_90=_aug_90,
                                    num_unlabel=FLAGS.num_unlabel,
                                    shuffle_episode=FLAGS.shuffle_episode,
                                    seed=FLAGS.seed)

    # ------------------------------------------------------------------------
    # Get embedding model.
    def get_emb_model(config, dataset, is_training=True):
        log.info('Building embedding model')
        with log.verbose_level(2):
            x = tf.placeholder(
                tf.float32,
                [None, config.height, config.width, config.num_channel],
                name='x')
            y = tf.placeholder(tf.int64, [None], name='y')
            with tf.variable_scope('EmbeddingModel'):
                emb_model = SupervisedModel(config,
                                            x,
                                            y,
                                            dataset.num_classes,
                                            is_training=is_training)
            log.info('Training embedding model in fully supervised mode')
        return emb_model

    # Get supervised training logging function.
    def get_logging_fn(sess, log_folder):
        exp_logger = get_exp_logger(sess, log_folder)

        def _logging_fn(niter, data):
            for key in data:
                exp_logger.log(key, niter, data[key])
            exp_logger.flush()

        return _logging_fn

    # ------------------------------------------------------------------------
    # Pretrain an embedding model with train dataset (for new version of the paper).
    ckpt_train = os.path.join('results', dataset, 'supv_emb_model_train',
                              'model.ckpt')
    log_folder_train = os.path.join('results', dataset, 'supv_emb_model_train')
    ckpt_dir = os.path.dirname(ckpt_train)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)
    if not os.path.exists(ckpt_train + '.meta'):
        with tf.Graph().as_default(), tf.Session() as sess:
            config = get_config(dataset, 'basic-pretrain')
            emb_model_train = get_emb_model(config, meta_train_dataset)
            logging_fn = get_logging_fn(sess, log_folder_train)
            supervised_pretrain(sess,
                                emb_model_train,
                                meta_train_dataset,
                                num_steps=config.max_train_steps,
                                logging_fn=logging_fn)

            # Save model to a checkpoint.
            saver = tf.train.Saver()
            saver.save(sess, ckpt_train)
    else:
        log.info('Checkpoint found. Skip pretraining.')

    # ------------------------------------------------------------------------
    # Run nearest neighbor in the pixel space.
    with tf.Graph().as_default(), tf.Session() as sess:
        log.info('Nearest neighbor baseline in the pixel space')
        run_nn(sess, meta_test_dataset, num_episodes=FLAGS.num_eval_episode)

    # ------------------------------------------------------------------------
    # Run logistic regression in the pixel space.
    with tf.Graph().as_default(), tf.Session() as sess:
        log.info('Logistic regression in the pixel space')
        run_lr(sess,
               meta_test_dataset,
               input_shape,
               feature_shape,
               num_episodes=FLAGS.num_eval_episode)

    # ------------------------------------------------------------------------
    # Run nearest neighbor in the embedding space, using train model.
    with tf.Graph().as_default(), tf.Session() as sess:
        log.info(
            'Nearest neighbor baseline in feature space, pretrained features, train'
        )
        config = get_config(dataset, 'basic-pretrain')
        emb_model_train = get_emb_model(config,
                                        meta_train_dataset,
                                        is_training=False)
        saver = tf.train.Saver()
        saver.restore(sess, ckpt_train)
        run_nn(sess,
               meta_test_dataset,
               emb_model=emb_model_train,
               num_episodes=FLAGS.num_eval_episode)

    # ------------------------------------------------------------------------
    # Run nearest neighbor in the embedding space, using train model, with random features.
    with tf.Graph().as_default(), tf.Session() as sess:
        log.info('Nearest neighbor baseline in feature space, random features')
        config = get_config(dataset, 'basic-pretrain')
        emb_model_train = get_emb_model(config,
                                        meta_train_dataset,
                                        is_training=False)
        sess.run(tf.global_variables_initializer())
        run_nn(sess,
               meta_test_dataset,
               emb_model=emb_model_train,
               num_episodes=FLAGS.num_eval_episode)

    # ------------------------------------------------------------------------
    # Run logistic regression in the embedding space, using train model.
    with tf.Graph().as_default(), tf.Session() as sess:
        log.info(
            'Logistic regression in the feature space, pretrained features, train'
        )
        config = get_config(dataset, 'basic-pretrain')
        emb_model_train = get_emb_model(config,
                                        meta_train_dataset,
                                        is_training=False)
        saver = tf.train.Saver()
        saver.restore(sess, ckpt_train)
        run_lr(sess,
               meta_test_dataset,
               input_shape,
               feature_shape,
               num_episodes=FLAGS.num_eval_episode,
               emb_model=emb_model_train)

    # ------------------------------------------------------------------------
    # Run logistic regression in the embedding space, using train model, with random features.
    with tf.Graph().as_default(), tf.Session() as sess:
        log.info('Logistic regression in the feature space, random features')
        config = get_config(dataset, 'basic-pretrain')
        emb_model_train = get_emb_model(config,
                                        meta_train_dataset,
                                        is_training=False)
        sess.run(tf.global_variables_initializer())
        run_lr(sess,
               meta_test_dataset,
               input_shape,
               feature_shape,
               num_episodes=FLAGS.num_eval_episode,
               emb_model=emb_model_train)
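run_nn is not shown in this listing; as a rough sketch of the pixel-space 1-nearest-neighbour baseline it evaluates, a single-episode scorer could look like the following (the function name and array arguments are assumptions, not the repo's actual implementation):

import numpy as np

def nn_episode_acc(x_support, y_support, x_query, y_query):
    """1-NN accuracy for one few-shot episode, in flattened pixel space."""
    a = x_support.reshape(len(x_support), -1)   # [N_support, D]
    b = x_query.reshape(len(x_query), -1)       # [N_query, D]
    # Pairwise squared Euclidean distances between query and support points.
    d2 = (b ** 2).sum(1)[:, None] - 2.0 * b @ a.T + (a ** 2).sum(1)[None, :]
    pred = y_support[d2.argmin(axis=1)]
    return float((pred == y_query).mean())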
Example #5
File: run_eval.py Project: k-r-allen/imp
def main(args):
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    random.seed(args.seed)
    if args.num_test == -1 and args.dataset in ('tiered-imagenet',
                                                'mini-imagenet'):
        num_test = 5  # Avoid excessive computation.
    else:
        num_test = args.num_test

    config = get_config(args.dataset, args.model)

    # Which evaluation split to use.
    train_split_name = 'train'
    if args.use_test:
        test_split_name = 'test'
    else:
        test_split_name = 'val'

    # Whether doing 90 degree augmentation.
    if 'omniglot' not in args.dataset:
        _aug_90 = False
    else:
        _aug_90 = True

    nshot = args.nshot
    meta_train_dataset = get_dataset(args,
                                     args.dataset,
                                     'train',
                                     args.nclasses_train,
                                     nshot,
                                     num_test=num_test,
                                     label_ratio=args.label_ratio,
                                     aug_90=_aug_90,
                                     num_unlabel=args.num_unlabel,
                                     seed=args.seed,
                                     mode_ratio=args.mode_ratio,
                                     cat_way=args.nsuperclassestrain)

    meta_test_dataset = get_dataset(args,
                                    args.dataset,
                                    test_split_name,
                                    args.nclasses_eval,
                                    nshot,
                                    num_test=num_test,
                                    aug_90=_aug_90,
                                    num_unlabel=args.num_unlabel_test,
                                    label_ratio=1,
                                    seed=args.seed,
                                    cat_way=args.nsuperclasseseval)

    m = _get_model(config)

    if args.eval:
        m = torch.load(os.path.join(args.results, args.pretrain))
    else:
        optimizer = optim.RMSprop(m.parameters(),
                                  lr=config.learn_rate,
                                  eps=1e-10,
                                  alpha=0.9,
                                  momentum=0.0)
        train(config,
              m,
              optimizer,
              meta_train_dataset,
              meta_val_dataset=meta_test_dataset)

    output = evaluate(m, meta_test_dataset, num_episodes=args.num_eval_episode)
    print(np.mean(output['acc']), output['acc_ci'])
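main() expects an args namespace carrying the attributes accessed above; a plausible argparse setup (all defaults are guesses) would be:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset', default='mini-imagenet')
    parser.add_argument('--model', default='imp')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--num_test', type=int, default=-1)
    parser.add_argument('--nshot', type=int, default=1)
    parser.add_argument('--nclasses_train', type=int, default=5)
    parser.add_argument('--nclasses_eval', type=int, default=5)
    parser.add_argument('--num_unlabel', type=int, default=0)
    parser.add_argument('--num_unlabel_test', type=int, default=0)
    parser.add_argument('--label_ratio', type=float, default=1.0)
    parser.add_argument('--mode_ratio', type=float, default=1.0)
    parser.add_argument('--nsuperclassestrain', type=int, default=-1)
    parser.add_argument('--nsuperclasseseval', type=int, default=-1)
    parser.add_argument('--num_eval_episode', type=int, default=600)
    parser.add_argument('--use_test', action='store_true')
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--pretrain', default=None)
    parser.add_argument('--results', default='results')
    main(parser.parse_args())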