Example #1
def test_dgp_sample(two_layer_model: Callable[[TensorType], DeepGP]) -> None:
    """Samples drawn via the wrapper should agree statistically (mean and
    variance) with samples drawn directly from an identically-built model."""
    query = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    model = DeepGaussianProcess(
        two_layer_model(query),
        optimizer=tf.optimizers.Adam(),
    )
    n_samples = 50
    test_x = tf.constant([[2.5]], dtype=gpflow.default_float())

    samples = model.sample(test_x, n_samples)
    assert samples.shape == [n_samples, 1, 1]

    mean = tf.reduce_mean(samples, axis=0)
    variance = tf.reduce_mean((samples - mean)**2)

    reference_model = two_layer_model(query)

    # Draw the same number of samples directly from a fresh reference model.
    ref_samples = tf.stack(
        [sample_dgp(reference_model)(test_x) for _ in range(n_samples)])

    ref_mean = tf.reduce_mean(ref_samples, axis=0)
    ref_variance = tf.reduce_mean((ref_samples - ref_mean)**2)

    # Monte-Carlo error scales roughly as 1/sqrt(num_samples).
    error = 1 / tf.sqrt(tf.cast(n_samples, tf.float32))
    npt.assert_allclose(mean, ref_mean, atol=2 * error)
    npt.assert_allclose(mean, 0, atol=error)
    npt.assert_allclose(variance, ref_variance, atol=4 * error)
Example #2
def test_dgp_update_raises_for_invalid_shapes(two_layer_model: Callable[
    [TensorType], DeepGP], new_data: Dataset) -> None:
    """Updating the model with shape-incompatible data must raise ValueError."""
    model = DeepGaussianProcess(two_layer_model(tf.zeros([1, 4])))

    with pytest.raises(ValueError):
        model.update(new_data)
Example #3
def test_dgp_predict() -> None:
    """Wrapper predictions must match the underlying GPflux model's predict_f."""
    query = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())

    reference = single_layer_dgp_model(query)
    wrapper = DeepGaussianProcess(single_layer_dgp_model(query))

    test_x = tf.constant([[2.5]], dtype=gpflow.default_float())

    expected_mean, expected_var = reference.predict_f(test_x)
    actual_mean, actual_var = wrapper.predict(test_x)

    npt.assert_allclose(actual_mean, expected_mean)
    npt.assert_allclose(actual_var, expected_var)
Example #4
def test_dgp_optimize_with_defaults(two_layer_model: Callable[[TensorType],
                                                              DeepGP],
                                    keras_float: None) -> None:
    """Optimizing with default fit arguments should improve the ELBO."""
    x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
    y_observed = fnc_2sin_x_over_3(x_observed)
    data = x_observed, y_observed
    model = DeepGaussianProcess(two_layer_model(x_observed),
                                optimizer=tf.optimizers.Adam())
    elbo_before = model.model_gpflux.elbo(data)
    model.optimize(Dataset(*data))
    assert model.model_gpflux.elbo(data) > elbo_before
Example #5
def test_dgp_update(two_layer_model: Callable[[TensorType], DeepGP]) -> None:
    """update() must propagate the new dataset size to the model and all layers."""
    model = DeepGaussianProcess(two_layer_model(tf.zeros([1, 4])))

    assert model.model_gpflux.num_data == 1
    assert all(layer.num_data == 1 for layer in model.model_gpflux.f_layers)

    model.update(Dataset(tf.zeros([5, 4]), tf.zeros([5, 1])))

    assert model.model_gpflux.num_data == 5
    assert all(layer.num_data == 5 for layer in model.model_gpflux.f_layers)
Example #6
def test_dgp_model_attribute(
        two_layer_model: Callable[[TensorType], DeepGP]) -> None:
    """model_gpflux must expose the exact DeepGP instance passed to the wrapper."""
    query = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    wrapped = two_layer_model(query)

    assert DeepGaussianProcess(wrapped).model_gpflux is wrapped
Example #7
def test_dgp_raises_for_keras_layer() -> None:
    """Plain Keras layers are not valid GP layers, so construction must raise."""
    dense_1 = tf.keras.layers.Dense(50, activation="relu")
    dense_2 = tf.keras.layers.Dense(2, activation="relu")

    num_inducing = 5
    inducing_variable = gpflow.inducing_variables.InducingPoints(
        np.concatenate(
            [
                np.random.randn(num_inducing, 2),
            ],
            axis=1,
        ))
    gp_layer = gpflux.layers.GPLayer(
        gpflow.kernels.SquaredExponential(),
        inducing_variable,
        num_data=5,
        num_latent_gps=1,
        mean_function=gpflow.mean_functions.Zero(),
    )

    likelihood_layer = gpflux.layers.LikelihoodLayer(
        gpflow.likelihoods.Gaussian(0.01))

    dgp = DeepGP([dense_1, dense_2, gp_layer], likelihood_layer)

    with pytest.raises(ValueError):
        DeepGaussianProcess(dgp)
Example #8
def test_dgp_optimize(two_layer_model: Callable[[TensorType], DeepGP],
                      batch_size: int, keras_float: None) -> None:
    """Optimizing with explicit fit arguments should improve the ELBO."""
    x_observed = np.linspace(0, 100, 100).reshape((-1, 1))
    y_observed = fnc_2sin_x_over_3(x_observed)
    data = x_observed, y_observed

    model = DeepGaussianProcess(
        two_layer_model(x_observed),
        tf.optimizers.Adam(),
        {"batch_size": batch_size, "epochs": 10, "verbose": 0},
    )

    elbo_before = model.model_gpflux.elbo(data)
    model.optimize(Dataset(*data))
    assert model.model_gpflux.elbo(data) > elbo_before
Example #9
def test_dgp_raises_for_non_tf_optimizer(
        two_layer_model: Callable[[TensorType], DeepGP]) -> None:
    """Non-TensorFlow optimizers (e.g. gpflow Scipy) must be rejected."""
    query = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())

    with pytest.raises(ValueError):
        DeepGaussianProcess(two_layer_model(query),
                            optimizer=gpflow.optimizers.Scipy())
Example #10
def test_dgp_loss(two_layer_model: Callable[[TensorType], DeepGP]) -> None:
    """The wrapped model's ELBO must match an identically-built reference model."""
    query = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    targets = fnc_3x_plus_10(query)

    reference_model = two_layer_model(query)
    internal_model = DeepGaussianProcess(two_layer_model(query)).model_gpflux

    npt.assert_allclose(internal_model.elbo((query, targets)),
                        reference_model.elbo((query, targets)),
                        rtol=1e-6)
Example #11
def test_dgp_resets_lr_with_lr_schedule(two_layer_model: Callable[[TensorType],
                                                                  DeepGP],
                                        keras_float: None) -> None:
    """After optimizing with a LearningRateScheduler that decays the learning
    rate partway through training, the optimizer's learning rate must be reset
    to its initial value, so repeated optimize() calls start from the same LR.
    """
    x = tf.constant(np.arange(5).reshape(-1, 1), dtype=gpflow.default_float())
    y = fnc_3x_plus_10(x)

    epochs = 10
    init_lr = 0.01

    def scheduler(epoch: int, lr: float) -> float:
        # Drop the learning rate by 10x at the halfway point of training.
        # Bug fix: this previously compared `epoch == epoch // 2`, which only
        # holds at epoch 0 — the decay fired immediately instead of mid-run.
        if epoch == epochs // 2:
            return lr * 0.1
        else:
            return lr

    fit_args = {
        "epochs": epochs,
        "batch_size": 100,
        "verbose": 0,
        "callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler),
    }

    optimizer = tf.optimizers.Adam(init_lr)

    model = DeepGaussianProcess(two_layer_model(x),
                                optimizer=optimizer,
                                fit_args=fit_args)

    # Sanity check: the optimizer starts at the configured learning rate.
    npt.assert_allclose(model.model_keras.optimizer.lr.numpy(),
                        init_lr,
                        rtol=1e-6)

    dataset = Dataset(x, y)

    model.optimize(dataset)

    # The schedule lowered the LR during fitting; it must be restored after.
    npt.assert_allclose(model.model_keras.optimizer.lr.numpy(),
                        init_lr,
                        rtol=1e-6)
Example #12
    def build_model(data: Dataset) -> DeepGaussianProcess:
        """Build a two-layer DGP with a step learning-rate schedule."""
        epochs = int(2e3)

        dgp = two_layer_dgp_model(data.query_points)

        def scheduler(epoch: int, lr: float) -> float:
            # Decay the learning rate by 10x at the halfway point.
            return lr * 0.1 if epoch == epochs // 2 else lr

        fit_args = {
            "batch_size": 100,
            "epochs": epochs,
            "verbose": 0,
            "callbacks": tf.keras.callbacks.LearningRateScheduler(scheduler),
        }

        return DeepGaussianProcess(model=dgp,
                                   optimizer=tf.optimizers.Adam(0.01),
                                   fit_args=fit_args)
Example #13
def build_dgp_model(data):
    """Build a two-layer vanilla DGP wrapped as a DeepGaussianProcess.

    The output-layer kernel variance is initialized to the empirical variance
    of the observations, and the likelihood noise is fixed at a small value.
    """
    empirical_variance = tf.math.reduce_variance(data.observations)

    dgp = build_vanilla_deep_gp(data.query_points, num_layers=2, num_inducing=100)
    final_layer = dgp.f_layers[-1]
    final_layer.kernel.kernel.variance.assign(empirical_variance)
    final_layer.mean_function = gpflow.mean_functions.Constant()
    dgp.likelihood_layer.likelihood.variance.assign(1e-5)
    set_trainable(dgp.likelihood_layer.likelihood.variance, False)

    # These are just arguments for the Keras `fit` method.
    fit_args = {
        "batch_size": 100,
        "epochs": 200,
        "verbose": 0,
    }

    return DeepGaussianProcess(model=dgp,
                               optimizer=tf.optimizers.Adam(0.01),
                               fit_args=fit_args)