    def test_no_noise(self):
        dataset = SinusoidDataset(
            amp_low=1.0,
            amp_high=1.0,
            period_low=1.0,
            period_high=1.0,
            x_shift_mean=0.0,
            x_shift_std=0.0,
            y_shift_mean=0.0,
            y_shift_std=0.0,
            slope_mean=1.0,
            slope_std=0.00,
            noise_std=0.00,
            x_low=-5,
            x_high=5,
        )

        data_tuples = dataset.generate_meta_train_data(n_tasks=2,
                                                       n_samples=500)

        # with unit amplitude, period, and slope, and all shifts and noise zero,
        # every sampled task reduces to f(x) = x + sin(x)
        true_fn = lambda x: x + np.sin(x)

        for data_tuple in data_tuples:
            x_train, y_train = data_tuple
            y_true = true_fn(x_train)
            abs_diff = np.mean(np.abs(y_true - y_train))
            self.assertAlmostEqual(abs_diff, 0.0)

    def test_context_test_consistency(self):
        # degenerate x-range: with x_low == x_high, context and test inputs
        # coincide, so the noise-free targets must match exactly
        dataset = SinusoidDataset(noise_std=0.00, x_low=1, x_high=1)

        data_tuples = dataset.generate_meta_test_data(n_tasks=10,
                                                      n_samples_context=1,
                                                      n_samples_test=1)

        for data_tuple in data_tuples:
            x_context, y_context, x_test, y_test = data_tuple
            assert np.array_equal(y_context, y_test)

    def test_seed_reproducibility(self):
        rds = np.random.RandomState(55)
        dataset = SinusoidDataset(random_state=rds)
        data_test_1 = dataset.generate_meta_test_data(n_tasks=2,
                                                      n_samples_context=5,
                                                      n_samples_test=10)
        data_train_1 = dataset.generate_meta_train_data(n_tasks=5,
                                                        n_samples=20)

        rds = np.random.RandomState(55)
        dataset = SinusoidDataset(random_state=rds)
        data_test_2 = dataset.generate_meta_test_data(n_tasks=2,
                                                      n_samples_context=5,
                                                      n_samples_test=10)
        data_train_2 = dataset.generate_meta_train_data(n_tasks=5,
                                                        n_samples=20)

        for test_tuple_1, test_tuple_2 in zip(data_test_1, data_test_2):
            for data_array_1, data_array_2 in zip(test_tuple_1, test_tuple_2):
                assert np.array_equal(data_array_1, data_array_2)

        for train_tuple_1, train_tuple_2 in zip(data_train_1, data_train_2):
            for data_array_1, data_array_2 in zip(train_tuple_1,
                                                  train_tuple_2):
                assert np.array_equal(data_array_1, data_array_2)
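
These test methods rely on self.assertAlmostEqual, so they presumably live in a
unittest.TestCase subclass. A minimal harness sketch for running them (the class
name is hypothetical; the import path is the one used in the examples below):

import unittest
import numpy as np
from experiments.data_sim import SinusoidDataset

class TestSinusoidDataset(unittest.TestCase):  # hypothetical class name
    ...  # the three test methods above go here

if __name__ == '__main__':
    unittest.main()
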
def main(argv):
    # setup logging

    logger, exp_dir = setup_exp_doc(FLAGS.exp_name)

    if FLAGS.dataset == 'swissfel':
        data_train, _, data_test = provide_data(dataset='swissfel')
    else:
        if FLAGS.dataset == 'sin-nonstat':
            dataset = SinusoidNonstationaryDataset(random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'sin':
            dataset = SinusoidDataset(random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'cauchy':
            dataset = CauchyDataset(random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'mnist':
            dataset = MNISTRegressionDataset(random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'physionet':
            dataset = PhysionetDataset(random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'gp-funcs':
            dataset = GPFunctionsDataset(random_state=np.random.RandomState(FLAGS.seed + 1))
        else:
            raise NotImplementedError('Unrecognized dataset flag: %s' % FLAGS.dataset)

        data_train = dataset.generate_meta_train_data(n_tasks=FLAGS.n_train_tasks, n_samples=FLAGS.n_train_samples)
        data_test = dataset.generate_meta_test_data(n_tasks=FLAGS.n_test_tasks, n_samples_context=FLAGS.n_context_samples,
                                                    n_samples_test=FLAGS.n_test_samples)

    nn_layers = tuple([FLAGS.layer_size for _ in range(FLAGS.num_layers)])

    torch.set_num_threads(FLAGS.n_threads)

    gp_meta = GPRegressionMetaLearned(data_train,
                                      learning_mode=FLAGS.learning_mode,
                                      num_iter_fit=FLAGS.n_iter_fit,
                                      covar_module=FLAGS.covar_module,
                                      mean_module=FLAGS.mean_module,
                                      kernel_nn_layers=nn_layers,
                                      mean_nn_layers=nn_layers,
                                      weight_decay=FLAGS.weight_decay,
                                      lr_params=FLAGS.lr,
                                      lr_decay=FLAGS.lr_decay,
                                      random_seed=FLAGS.seed,
                                      task_batch_size=FLAGS.batch_size,
                                      optimizer=FLAGS.optimizer,
                                      normalize_data=FLAGS.normalize_data
                                      )

    gp_meta.meta_fit(valid_tuples=data_test[:100], log_period=2000)

    test_ll, rmse, calib_err = gp_meta.eval_datasets(data_test)

    # save results
    results_dict = {
        'test_ll': test_ll,
        'test_rmse': rmse,
        'calib_err': calib_err
    }
    print(results_dict)
    save_results(results_dict, exp_dir, log=True)
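
The script above references FLAGS and a main(argv) entry point but never shows
the flag definitions; this pattern matches abseil-py. A minimal sketch of the
wiring, assuming absl (the flag names mirror the FLAGS attributes used above;
the default values are placeholders, not the authors' settings):

from absl import app, flags

FLAGS = flags.FLAGS
flags.DEFINE_string('exp_name', 'meta_gp', 'name of the experiment, used for logging')
flags.DEFINE_string('dataset', 'sin', "dataset flag: 'swissfel', 'sin', 'sin-nonstat', "
                                      "'cauchy', 'mnist', 'physionet' or 'gp-funcs'")
flags.DEFINE_integer('seed', 28, 'random seed')
flags.DEFINE_integer('n_train_tasks', 20, 'number of meta-training tasks')
flags.DEFINE_integer('n_train_samples', 10, 'number of samples per meta-training task')
# ... further DEFINE_* calls for the remaining FLAGS attributes ...

if __name__ == '__main__':
    app.run(main)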
Example #5
import os
import sys
import numpy as np

# add BASE_DIR to path
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)


""" Generate meta-training and meta-testing data """

from experiments.data_sim import SinusoidDataset

random_state = np.random.RandomState(26)
task_environment = SinusoidDataset(random_state=random_state)

meta_train_data = task_environment.generate_meta_train_data(n_tasks=20, n_samples=5)
meta_test_data = task_environment.generate_meta_test_data(n_tasks=20, n_samples_context=5, n_samples_test=50)


""" Meta-Training w/ PACOH-MAP """

from meta_learn import GPRegressionMetaLearned

random_gp = GPRegressionMetaLearned(meta_train_data, weight_decay=0.2, num_iter_fit=12000, random_seed=30)
random_gp.meta_fit(meta_test_data, log_period=1000)


""" Meta-Testing w/ PACOH-MAP"""

print('\n')
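
The meta-testing code is truncated at this point. A minimal sketch of the step,
assuming only the eval_datasets API that the scripts in the other examples use
(it returns test log-likelihood, RMSE, and calibration error):

test_ll, test_rmse, calib_err = random_gp.eval_datasets(meta_test_data)
print('test log-likelihood: %.3f, test RMSE: %.3f, calibration error: %.3f'
      % (test_ll, test_rmse, calib_err))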
Example #6

        if lr_decay < 1.0:
            self.lr_scheduler = torch.optim.lr_scheduler.StepLR(self.optimizer,
                                                                1000,
                                                                gamma=lr_decay)
        else:
            self.lr_scheduler = DummyLRScheduler()

    def _vectorize_pred_dist(self, pred_dist):
        return torch.distributions.Normal(pred_dist.mean, pred_dist.stddev)


if __name__ == "__main__":
    from experiments.data_sim import GPFunctionsDataset, SinusoidDataset

    data_sim = SinusoidDataset(random_state=np.random.RandomState(29))
    meta_train_data = data_sim.generate_meta_train_data(n_tasks=20,
                                                        n_samples=10)
    meta_test_data = data_sim.generate_meta_test_data(n_tasks=50,
                                                      n_samples_context=10,
                                                      n_samples_test=160)

    NN_LAYERS = (32, 32, 32, 32)

    plot = False
    if plot:
        from matplotlib import pyplot as plt

        # scatter-plot the sampled meta-training tasks
        for x_train, y_train in meta_train_data:
            plt.scatter(x_train, y_train)
        plt.title('samples from the sinusoid task environment')
        plt.show()
Example #7
            ]
        return temp_params


if __name__ == "__main__":

    from experiments.data_sim import SinusoidDataset
    import torch
    import numpy as np

    torch.set_num_threads(2)

    dataset = SinusoidDataset()
    meta_train_data = dataset.generate_meta_train_data(n_tasks=5000,
                                                       n_samples=10)
    meta_test_data = dataset.generate_meta_test_data(n_tasks=1000,
                                                     n_samples_context=5,
                                                     n_samples_test=100)

    meta_learner = MAMLRegression(meta_train_data,
                                  task_batch_size=10,
                                  num_iter_fit=10000)
    meta_learner.meta_fit(meta_test_data[:200], log_period=1000)

    for i in range(4):
        x_context, y_context, x_test, y_test = meta_test_data[i]
        # sort the test points by x so predictions can be plotted as curves
        idx = np.argsort(x_test, axis=0).flatten()
        x_test, y_test = x_test[idx], y_test[idx]
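
The loop is cut off before the plotting it sets up; a hedged sketch of a possible
continuation, assuming MAMLRegression exposes a predict(x_context, y_context, x_test)
method (the predict signature is an assumption, not confirmed by this snippet):

        from matplotlib import pyplot as plt
        # hypothetical: query the meta-learned model on the sorted test inputs
        y_pred = meta_learner.predict(x_context, y_context, x_test)  # assumed API
        plt.scatter(x_context, y_context, label='context points')
        plt.plot(x_test, y_pred, label='MAML prediction')
        plt.plot(x_test, y_test, label='true function')
        plt.legend()
        plt.show()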
Example #8

def main(argv):
    # setup logging

    logger, exp_dir = setup_exp_doc(FLAGS.exp_name)

    if FLAGS.dataset == 'swissfel':
        raise NotImplementedError
    else:
        if FLAGS.dataset == 'sin-nonstat':
            dataset = SinusoidNonstationaryDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'sin':
            dataset = SinusoidDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'cauchy':
            dataset = CauchyDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'mnist':
            dataset = MNISTRegressionDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'physionet':
            dataset = PhysionetDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        elif FLAGS.dataset == 'gp-funcs':
            dataset = GPFunctionsDataset(
                random_state=np.random.RandomState(FLAGS.seed + 1))
        else:
            raise NotImplementedError('Unrecognized dataset flag: %s' % FLAGS.dataset)

        meta_train_data = dataset.generate_meta_test_data(
            n_tasks=1024,
            n_samples_context=FLAGS.n_context_samples,
            n_samples_test=FLAGS.n_test_samples)
        meta_test_data = dataset.generate_meta_test_data(
            n_tasks=FLAGS.n_test_tasks,
            n_samples_context=FLAGS.n_context_samples,
            n_samples_test=FLAGS.n_test_samples)

    nn_layers = tuple([FLAGS.layer_size for _ in range(FLAGS.num_layers)])
    torch.set_num_threads(FLAGS.n_threads)

    # keep only the first n_train_tasks tasks and use just their context sets for training
    meta_train_data = meta_train_data[:FLAGS.n_train_tasks]

    data_train = [(context_x, context_y)
                  for context_x, context_y, _, _ in meta_train_data]
    assert len(data_train) == FLAGS.n_train_tasks

    gp_meta = GPRegressionMetaLearned(data_train,
                                      learning_mode=FLAGS.learning_mode,
                                      num_iter_fit=FLAGS.n_iter_fit,
                                      covar_module=FLAGS.covar_module,
                                      mean_module=FLAGS.mean_module,
                                      kernel_nn_layers=nn_layers,
                                      mean_nn_layers=nn_layers,
                                      weight_decay=FLAGS.weight_decay,
                                      lr_params=FLAGS.lr,
                                      lr_decay=FLAGS.lr_decay,
                                      random_seed=FLAGS.seed,
                                      task_batch_size=FLAGS.batch_size,
                                      optimizer=FLAGS.optimizer,
                                      normalize_data=FLAGS.normalize_data)

    gp_meta.meta_fit(log_period=1000)

    test_ll_meta_train, test_rmse_meta_train, calib_err_meta_train = gp_meta.eval_datasets(
        meta_train_data)
    test_ll_meta_test, test_rmse_meta_test, calib_err_meta_test = gp_meta.eval_datasets(
        meta_test_data)

    # save results
    results_dict = {
        'test_ll_meta_train': test_ll_meta_train,
        'test_ll_meta_test': test_ll_meta_test,
        'test_rmse_meta_train': test_rmse_meta_train,
        'test_rmse_meta_test': test_rmse_meta_test,
        'calib_err_meta_train': calib_err_meta_train,
        'calib_err_meta_test': calib_err_meta_test
    }

    pprint(results_dict)

    save_results(results_dict, exp_dir, log=True)
Example #9
def main(argv):
    # setup logging

    logger, exp_dir = setup_exp_doc(FLAGS.exp_name)

    if FLAGS.dataset == 'sin-nonstat':
        dataset = SinusoidNonstationaryDataset(
            random_state=np.random.RandomState(FLAGS.seed + 1))
    elif FLAGS.dataset == 'sin':
        dataset = SinusoidDataset(
            random_state=np.random.RandomState(FLAGS.seed + 1))
    elif FLAGS.dataset == 'cauchy':
        dataset = CauchyDataset(
            random_state=np.random.RandomState(FLAGS.seed + 1))
    elif FLAGS.dataset == 'mnist':
        dataset = MNISTRegressionDataset(
            random_state=np.random.RandomState(FLAGS.seed + 1))
    elif FLAGS.dataset == 'physionet':
        dataset = PhysionetDataset(
            random_state=np.random.RandomState(FLAGS.seed + 1))
    elif FLAGS.dataset == 'gp-funcs':
        dataset = GPFunctionsDataset(
            random_state=np.random.RandomState(FLAGS.seed + 1))
    else:
        raise NotImplementedError('Unrecognized dataset flag: %s' % FLAGS.dataset)

    data_train = dataset.generate_meta_train_data(
        n_tasks=FLAGS.n_train_tasks, n_samples=FLAGS.n_train_samples)
    data_test = dataset.generate_meta_test_data(
        n_tasks=FLAGS.n_test_tasks,
        n_samples_context=FLAGS.n_context_samples,
        n_samples_test=FLAGS.n_test_samples)

    nn_layers = tuple([FLAGS.layer_size for _ in range(FLAGS.num_layers)])

    torch.set_num_threads(FLAGS.n_threads)

    gp_meta = GPRegressionMetaLearnedVI(
        data_train,
        weight_prior_std=FLAGS.weight_prior_std,
        prior_factor=FLAGS.prior_factor,
        covar_module=FLAGS.covar_module,
        mean_module=FLAGS.mean_module,
        kernel_nn_layers=nn_layers,
        mean_nn_layers=nn_layers,
        random_seed=FLAGS.seed,
        optimizer=FLAGS.optimizer,
        lr=FLAGS.lr,
        lr_decay=FLAGS.lr_decay,
        num_iter_fit=FLAGS.n_iter_fit,
        svi_batch_size=FLAGS.svi_batch_size,
        normalize_data=FLAGS.normalize_data,
        cov_type=FLAGS.cov_type,
        task_batch_size=FLAGS.task_batch_size)

    gp_meta.meta_fit(valid_tuples=data_test[:100], log_period=1000)

    test_ll_bayes, rmse_bayes, calib_err_bayes = gp_meta.eval_datasets(
        data_test, mode='Bayes')
    test_ll_map, rmse_map, calib_err_map = gp_meta.eval_datasets(data_test,
                                                                 mode='MAP')

    # save results
    results_dict = {
        'test_ll_bayes': test_ll_bayes,
        'test_rmse_bayes': rmse_bayes,
        'calib_err_bayes': calib_err_bayes,
        'test_ll_map': test_ll_map,
        'test_rmse_map': rmse_map,
        'calib_err_map': calib_err_map
    }
    print(results_dict)
    save_results(results_dict, exp_dir, log=True)