def test_output_shapes_generate_train(self):
        """Meta-train data: one (x, t) tuple per task, rows aligned, 2-d inputs."""
        dataset = MNISTRegressionDataset(
            random_state=np.random.RandomState(123))

        for num_tasks in (24, 2):
            for num_samples in (1, 85):
                tasks = dataset.generate_meta_train_data(
                    n_tasks=num_tasks, n_samples=num_samples)

                # exactly one (x_train, t_train) tuple per requested task
                assert len(tasks) == num_tasks

                for x_train, t_train in tasks:
                    # inputs and targets must be aligned row-wise
                    assert x_train.shape[0] == t_train.shape[0]
                    # inputs have 2 feature columns
                    assert x_train.shape[1] == 2
    def test_seed_reproducability(self):
        """Identical seeds must yield bitwise-identical meta train/test data."""

        def _draw(seed):
            # fresh dataset from the seed; sample test split first, then train,
            # matching the order whose reproducibility is under test
            ds = MNISTRegressionDataset(
                random_state=np.random.RandomState(seed))
            test = ds.generate_meta_test_data(
                n_tasks=2, n_samples_context=5, n_samples_test=10)
            train = ds.generate_meta_train_data(n_tasks=5, n_samples=20)
            return test, train

        data_test_1, data_train_1 = _draw(55)
        data_test_2, data_train_2 = _draw(55)

        # every array of every task tuple must match across the two runs
        for tuple_1, tuple_2 in zip(data_test_1, data_test_2):
            for arr_1, arr_2 in zip(tuple_1, tuple_2):
                assert np.array_equal(arr_1, arr_2)

        for tuple_1, tuple_2 in zip(data_train_1, data_train_2):
            for arr_1, arr_2 in zip(tuple_1, tuple_2):
                assert np.array_equal(arr_1, arr_2)
def main(argv):
    """Run a meta-learned GP regression experiment configured via FLAGS."""
    # create the experiment's logger and output directory
    logger, exp_dir = setup_exp_doc(FLAGS.exp_name)

    if FLAGS.dataset == 'swissfel':
        # swissfel ships pre-generated splits; the middle split is unused here
        data_train, _, data_test = provide_data(dataset='swissfel')
    else:
        # map flag value -> dataset class; every dataset is seeded identically
        dataset_classes = {
            'sin-nonstat': SinusoidNonstationaryDataset,
            'sin': SinusoidDataset,
            'cauchy': CauchyDataset,
            'mnist': MNISTRegressionDataset,
            'physionet': PhysionetDataset,
            'gp-funcs': GPFunctionsDataset,
        }
        if FLAGS.dataset not in dataset_classes:
            raise NotImplementedError('Does not recognize dataset flag')
        dataset = dataset_classes[FLAGS.dataset](
            random_state=np.random.RandomState(FLAGS.seed + 1))

        data_train = dataset.generate_meta_train_data(
            n_tasks=FLAGS.n_train_tasks, n_samples=FLAGS.n_train_samples)
        data_test = dataset.generate_meta_test_data(
            n_tasks=FLAGS.n_test_tasks,
            n_samples_context=FLAGS.n_context_samples,
            n_samples_test=FLAGS.n_test_samples)

    # identical hidden-layer widths for the kernel and mean networks
    hidden_layers = (FLAGS.layer_size,) * FLAGS.num_layers

    torch.set_num_threads(FLAGS.n_threads)

    gp_meta = GPRegressionMetaLearned(data_train,
                                      learning_mode=FLAGS.learning_mode,
                                      num_iter_fit=FLAGS.n_iter_fit,
                                      covar_module=FLAGS.covar_module,
                                      mean_module=FLAGS.mean_module,
                                      kernel_nn_layers=hidden_layers,
                                      mean_nn_layers=hidden_layers,
                                      weight_decay=FLAGS.weight_decay,
                                      lr_params=FLAGS.lr,
                                      lr_decay=FLAGS.lr_decay,
                                      random_seed=FLAGS.seed,
                                      task_batch_size=FLAGS.batch_size,
                                      optimizer=FLAGS.optimizer,
                                      normalize_data=FLAGS.normalize_data)

    # validate on at most 100 held-out test tasks while fitting
    gp_meta.meta_fit(valid_tuples=data_test[:100], log_period=2000)

    test_ll, rmse, calib_err = gp_meta.eval_datasets(data_test)

    # persist the evaluation metrics
    results_dict = {
        'test_ll': test_ll,
        'test_rmse': rmse,
        'calib_err': calib_err,
    }
    print(results_dict)
    save_results(results_dict, exp_dir, log=True)
    def test_output_shapes_generate_test(self):
        """Meta-test data: per-task (x_context, t_context, x_test, t_test)
        tuples with row-aligned arrays and 2-d inputs.

        For ``n_samples_test == -1`` the context and test points must jointly
        cover all 28**2 pixels (grounded by the coverage assertion below).
        """
        rds = np.random.RandomState(123)
        dataset = MNISTRegressionDataset(random_state=rds)

        for n_tasks in [1, 5]:
            for n_samples_context in [1, 85]:
                for n_samples_test in [-1, 23]:

                    data_test = dataset.generate_meta_test_data(
                        n_tasks=n_tasks,
                        n_samples_context=n_samples_context,
                        n_samples_test=n_samples_test)

                    # one tuple per requested task
                    assert len(data_test) == n_tasks

                    for (x_context, t_context, x_test, t_test) in data_test:
                        # context inputs and targets are row-aligned
                        assert x_context.shape[0] == t_context.shape[0]
                        # context and test inputs both have 2 columns
                        assert x_context.shape[1] == x_test.shape[1] == 2

                        # BUG FIX: this check previously sat outside the
                        # per-task loop, so it only inspected the loop-leaked
                        # arrays of the LAST task; now every task's pixel
                        # coverage is verified.
                        if n_samples_test == -1:
                            assert (x_context.shape[0] + x_test.shape[0]
                                    == 28 ** 2)
def main(argv):
    """Meta-train a GP using only the context portions of generated meta-test
    tasks, then evaluate on both the meta-train and meta-test splits.

    Configuration comes from the module-level FLAGS object; results are saved
    to the experiment directory created by setup_exp_doc.
    """
    # setup logging

    logger, exp_dir = setup_exp_doc(FLAGS.exp_name)

    if FLAGS.dataset == 'swissfel':
        # this script variant does not support the pre-generated swissfel data
        raise NotImplementedError
    else:
        # dispatch on the dataset flag; each dataset gets its own RNG seeded
        # with FLAGS.seed + 1 so data generation is reproducible per seed
        if FLAGS.dataset == 'sin-nonstat':
            dataset = SinusoidNonstationaryDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'sin':
            dataset = SinusoidDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'cauchy':
            dataset = CauchyDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'mnist':
            dataset = MNISTRegressionDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'physionet':
            dataset = PhysionetDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'gp-funcs':
            dataset = GPFunctionsDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        else:
            raise NotImplementedError('Does not recognize dataset flag')

        # NOTE: meta-train data is drawn in meta-TEST format (context + test
        # split per task) with a fixed pool of 1024 tasks; it is truncated to
        # FLAGS.n_train_tasks below. Keep this draw before the meta-test draw:
        # both consume the same dataset RNG, so the order fixes the data.
        meta_train_data = dataset.generate_meta_test_data(
            n_tasks=1024,
            n_samples_context=FLAGS.n_context_samples,
            n_samples_test=FLAGS.n_test_samples)
        meta_test_data = dataset.generate_meta_test_data(
            n_tasks=FLAGS.n_test_tasks,
            n_samples_context=FLAGS.n_context_samples,
            n_samples_test=FLAGS.n_test_samples)

    nn_layers = tuple([FLAGS.layer_size for _ in range(FLAGS.num_layers)])
    torch.set_num_threads(FLAGS.n_threads)

    # only take meta-train context for training
    meta_train_data = meta_train_data[:FLAGS.n_train_tasks]

    # keep only (context_x, context_y) for fitting; the per-task test halves
    # are reused later when evaluating on the meta-train split
    data_train = [(context_x, context_y)
                  for context_x, context_y, _, _ in meta_train_data]
    assert len(data_train) == FLAGS.n_train_tasks

    gp_meta = GPRegressionMetaLearned(data_train,
                                      learning_mode=FLAGS.learning_mode,
                                      num_iter_fit=FLAGS.n_iter_fit,
                                      covar_module=FLAGS.covar_module,
                                      mean_module=FLAGS.mean_module,
                                      kernel_nn_layers=nn_layers,
                                      mean_nn_layers=nn_layers,
                                      weight_decay=FLAGS.weight_decay,
                                      lr_params=FLAGS.lr,
                                      lr_decay=FLAGS.lr_decay,
                                      random_seed=FLAGS.seed,
                                      task_batch_size=FLAGS.batch_size,
                                      optimizer=FLAGS.optimizer,
                                      normalize_data=FLAGS.normalize_data)

    # no validation tuples here (unlike the sibling scripts); only logging
    gp_meta.meta_fit(log_period=1000)

    # evaluate on the tasks seen during training (their held-out test halves)
    # and on the unseen meta-test tasks
    test_ll_meta_train, test_rmse_meta_train, calib_err_meta_train = gp_meta.eval_datasets(
        meta_train_data)
    test_ll_meta_test, test_rmse_meta_test, calib_err_test = gp_meta.eval_datasets(
        meta_test_data)

    # save results
    results_dict = {
        'test_ll_meta_train': test_ll_meta_train,
        'test_ll_meta_test': test_ll_meta_test,
        'test_rmse_meta_train': test_rmse_meta_train,
        'test_rmse_meta_test': test_rmse_meta_test,
        'calib_err_meta_train': calib_err_meta_train,
        'calib_err_test': calib_err_test
    }

    pprint(results_dict)

    save_results(results_dict, exp_dir, log=True)
# Esempio n. 6 ("Example no. 6" — scraper artifact separator, kept as a
# comment so the file remains valid Python)
# 0
def main(argv):
    """Run a VI meta-learned GP regression experiment configured via FLAGS."""
    # experiment logger + directory for persisted results
    logger, exp_dir = setup_exp_doc(FLAGS.exp_name)

    # resolve the dataset class from the flag; unknown flags are rejected
    dataset_cls = {
        'sin-nonstat': SinusoidNonstationaryDataset,
        'sin': SinusoidDataset,
        'cauchy': CauchyDataset,
        'mnist': MNISTRegressionDataset,
        'physionet': PhysionetDataset,
        'gp-funcs': GPFunctionsDataset,
    }.get(FLAGS.dataset)
    if dataset_cls is None:
        raise NotImplementedError('Does not recognize dataset flag')
    dataset = dataset_cls(random_state=np.random.RandomState(FLAGS.seed + 1))

    data_train = dataset.generate_meta_train_data(
        n_tasks=FLAGS.n_train_tasks, n_samples=FLAGS.n_train_samples)
    data_test = dataset.generate_meta_test_data(
        n_tasks=FLAGS.n_test_tasks,
        n_samples_context=FLAGS.n_context_samples,
        n_samples_test=FLAGS.n_test_samples)

    # same hidden-layer widths for both the mean and kernel networks
    layer_sizes = (FLAGS.layer_size,) * FLAGS.num_layers

    torch.set_num_threads(FLAGS.n_threads)

    gp_meta = GPRegressionMetaLearnedVI(
        data_train,
        weight_prior_std=FLAGS.weight_prior_std,
        prior_factor=FLAGS.prior_factor,
        covar_module=FLAGS.covar_module,
        mean_module=FLAGS.mean_module,
        kernel_nn_layers=layer_sizes,
        mean_nn_layers=layer_sizes,
        random_seed=FLAGS.seed,
        optimizer=FLAGS.optimizer,
        lr=FLAGS.lr,
        lr_decay=FLAGS.lr_decay,
        num_iter_fit=FLAGS.n_iter_fit,
        svi_batch_size=FLAGS.svi_batch_size,
        normalize_data=FLAGS.normalize_data,
        cov_type=FLAGS.cov_type,
        task_batch_size=FLAGS.task_batch_size)

    # validate on at most 100 held-out test tasks while fitting
    gp_meta.meta_fit(valid_tuples=data_test[:100], log_period=1000)

    # evaluate both the Bayesian predictive and the MAP approximation
    test_ll_bayes, rmse_bayes, calib_err_bayes = gp_meta.eval_datasets(
        data_test, mode='Bayes')
    test_ll_map, rmse_map, calib_err_map = gp_meta.eval_datasets(
        data_test, mode='MAP')

    # persist both sets of metrics
    results_dict = {
        'test_ll_bayes': test_ll_bayes,
        'test_rmse_bayes': rmse_bayes,
        'calib_err_bayes': calib_err_bayes,
        'test_ll_map': test_ll_map,
        'rmse_map': rmse_map,
        'calib_err_map': calib_err_map,
    }
    print(results_dict)
    save_results(results_dict, exp_dir, log=True)