Example #1
def build_gpflux_deep_gp(layer_sizes, num_data):
    gp_layers = build_gp_layers(layer_sizes, num_data)
    likelihood = Gaussian()

    likelihood_layer = LikelihoodLayer(likelihood)
    model = DeepGP(gp_layers, likelihood_layer).as_training_model()
    return model, None
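The build_gp_layers helper used here and in the examples below is not shown on this page. A minimal sketch consistent with its call sites, building one GPLayer per consecutive pair of layer_sizes (the inducing-point initialisation and the num_inducing default are assumptions, not the project's actual code):

import numpy as np
from gpflow.kernels import SquaredExponential, SharedIndependent
from gpflow.inducing_variables import InducingPoints, SharedIndependentInducingVariables
from gpflux.layers import GPLayer

def build_gp_layers(layer_sizes, num_data, num_inducing=16):
    gp_layers = []
    for input_dim, output_dim in zip(layer_sizes[:-1], layer_sizes[1:]):
        kernel = SharedIndependent(SquaredExponential(), output_dim=output_dim)
        inducing_variable = SharedIndependentInducingVariables(
            InducingPoints(np.random.randn(num_inducing, input_dim)))
        gp_layers.append(GPLayer(kernel, inducing_variable, num_data,
                                 num_latent_gps=output_dim))
    return gp_layers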
Example #2
def test_cde_direct_parametrization(test_data, w_dim, use_keras_compile):
    """Test a directly parameterized CDE via the functional API, in both eager
    and compiled modes. Check that the losses decrease."""

    tf.random.set_seed(0)
    np.random.seed(0)

    # 1. Set up data
    x_data, y_data = test_data
    num_data, x_dim = x_data.shape

    # 2. Set up layers
    prior_means = np.zeros(w_dim)
    prior_std = np.ones(w_dim)
    encoder = DirectlyParameterizedNormalDiag(num_data, w_dim)
    prior = tfp.distributions.MultivariateNormalDiag(prior_means, prior_std)

    lv = LatentVariableLayer(prior, encoder)
    [gp] = build_gp_layers([x_dim + w_dim, 1], num_data)
    likelihood_layer = LikelihoodLayer(Gaussian())

    # 3. Build the model
    dgp = DeepGP([lv, gp], likelihood_layer)
    model = dgp.as_training_model()

    # 4. Train the model and check that each loss in the first half of training
    #    exceeds the corresponding loss in the second half (elementwise
    #    comparison, which assumes an even number of epochs)
    loss_history = train_model(x_data, y_data, model, use_keras_compile)
    epochs = len(loss_history)
    assert np.all(loss_history[: epochs // 2] > loss_history[epochs // 2 :])
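The train_model helper is likewise not shown. A sketch of one plausible implementation, which trains through Keras and toggles eager execution via compile(run_eagerly=...) rather than a hand-written loop (the epoch count and learning rate are assumptions):

import numpy as np
import tensorflow as tf

def train_model(x_data, y_data, model, use_keras_compile, epochs=40):
    model.compile(optimizer=tf.optimizers.Adam(learning_rate=0.01),
                  run_eagerly=not use_keras_compile)
    # the training model built by as_training_model() takes named inputs
    history = model.fit(x={"inputs": x_data, "targets": y_data},
                        y=None, epochs=epochs, verbose=0)
    return np.array(history.history["loss"])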
Example #3
def test_call_shapes(GPflowLikelihood):
    gp_layer, (X, Y) = setup_gp_layer_and_data(num_inducing=5)
    likelihood_layer = LikelihoodLayer(GPflowLikelihood())

    # Run tests with gp layer outputting f_mean, f_var
    f_distribution = gp_layer(X)
    y_dist_params = likelihood_layer(f_distribution)

    assert y_dist_params.y_mean.shape == f_distribution.shape
    assert y_dist_params.y_var.shape == f_distribution.scale.diag.shape
    # The mean might not change but the covariance should
    assert f_distribution.variance().shape == y_dist_params.y_var.shape
    assert np.all(y_dist_params.y_var != f_distribution.variance())
    np.testing.assert_array_equal(y_dist_params.f_var,
                                  f_distribution.variance())
    np.testing.assert_array_equal(y_dist_params.f_mean, f_distribution.mean())
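The setup_gp_layer_and_data fixture is not reproduced on this page; a minimal sketch consistent with how it is called (all sizes and the random data are assumptions):

import numpy as np
from gpflow.kernels import SquaredExponential, SharedIndependent
from gpflow.inducing_variables import InducingPoints, SharedIndependentInducingVariables
from gpflux.layers import GPLayer

def setup_gp_layer_and_data(num_inducing, num_data=32, input_dim=2, output_dim=1):
    X = np.random.randn(num_data, input_dim)
    Y = np.random.randn(num_data, output_dim)
    kernel = SharedIndependent(SquaredExponential(), output_dim=output_dim)
    inducing_variable = SharedIndependentInducingVariables(
        InducingPoints(np.random.randn(num_inducing, input_dim)))
    gp_layer = GPLayer(kernel, inducing_variable, num_data, num_latent_gps=output_dim)
    return gp_layer, (X, Y)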
Example #4
def test_likelihood_layer_and_likelihood_loss_give_equal_results():
    np.random.seed(123)
    f_mean = np.random.randn(7, 1)
    f_scale = np.random.randn(7, 1)**2
    targets = np.random.randn(7, 1)

    f_dist = tfp.distributions.MultivariateNormalDiag(loc=f_mean,
                                                      scale_diag=f_scale)
    likelihood = gpflow.likelihoods.Gaussian(0.123)

    # evaluate layer object
    likelihood_layer = LikelihoodLayer(likelihood)
    _ = likelihood_layer(f_dist, targets=targets, training=True)
    [layer_loss] = likelihood_layer.losses

    # evaluate loss object
    likelihood_loss = LikelihoodLoss(likelihood)
    loss_loss = likelihood_loss(targets, f_dist)

    np.testing.assert_allclose(layer_loss, loss_loss)
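Both objects evaluate the same variational expectation. LikelihoodLoss exists so that the quantity can instead be routed through Keras' compile(loss=...) when a model outputs the f-distribution directly; a sketch, with f_model, x_data, and the optimizer settings assumed:

import tensorflow as tf
from gpflux.losses import LikelihoodLoss

f_model.compile(
    optimizer=tf.optimizers.Adam(),
    loss=LikelihoodLoss(likelihood),  # same value the LikelihoodLayer adds as a loss
)
f_model.fit(x=x_data, y=targets)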
Example #5
def test_likelihood_layer_losses(GPflowLikelihood):
    gp_layer, (X, Y) = setup_gp_layer_and_data(num_inducing=5)
    likelihood = GPflowLikelihood()
    likelihood_layer = LikelihoodLayer(likelihood)

    # Run tests with gp layer output as distribution
    f_distribution = gp_layer(X)

    _ = likelihood_layer(f_distribution)
    [keras_loss] = likelihood_layer.losses

    assert keras_loss == 0.0

    _ = likelihood_layer(f_distribution, targets=Y, training=True)
    [keras_loss] = likelihood_layer.losses

    f_mean = f_distribution.loc
    f_var = f_distribution.scale.diag**2
    expected_loss = np.mean(
        -likelihood.variational_expectations(f_mean, f_var, Y))

    np.testing.assert_almost_equal(keras_loss, expected_loss, decimal=5)
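For the Gaussian likelihood used elsewhere on this page, variational_expectations has a closed form, so the expected loss can also be written out directly; the helper below is ours, not part of GPflow:

import numpy as np

def gaussian_variational_expectation(f_mean, f_var, y, noise_variance):
    # E_{N(f | f_mean, f_var)}[log N(y | f, noise_variance)], elementwise
    return -0.5 * (np.log(2.0 * np.pi) + np.log(noise_variance)
                   + ((y - f_mean) ** 2 + f_var) / noise_variance)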
Example #6
File: deep_gp.py | Project: henrymoss/GPflux
 def __init__(
     self,
     f_layers: List[tf.keras.layers.Layer],
     likelihood: Union[
         gpflux.layers.LikelihoodLayer,
         gpflow.likelihoods.Likelihood],  # fully-qualified for autoapi
     *,
     input_dim: Optional[int] = None,
     target_dim: Optional[int] = None,
     default_model_class: Type[tf.keras.Model] = tf.keras.Model,
     num_data: Optional[int] = None,
 ):
     """
     :param f_layers: The layers ``[f₁, f₂, …, fₙ]`` describing the latent
         function ``f(x) = fₙ(⋯ (f₂(f₁(x))))``.
     :param likelihood: The layer for the likelihood ``p(y|f)``. If this is a
         GPflow likelihood, it will be wrapped in a :class:`~gpflux.layers.LikelihoodLayer`.
         Alternatively, you can provide a :class:`~gpflux.layers.LikelihoodLayer` explicitly.
     :param input_dim: The input dimensionality.
     :param target_dim: The target dimensionality.
     :param default_model_class: The default for the *model_class* argument of
         :meth:`as_training_model` and :meth:`as_prediction_model`;
         see the :attr:`default_model_class` attribute.
     :param num_data: The number of points in the training dataset; see the
         :attr:`num_data` attribute.
         If you do not specify a value for this parameter explicitly, it is automatically
         detected from the :attr:`~gpflux.layers.GPLayer.num_data` attribute in the GP layers.
     """
     self.inputs = tf.keras.Input((input_dim, ), name="inputs")
     self.targets = tf.keras.Input((target_dim, ), name="targets")
     self.f_layers = f_layers
     if isinstance(likelihood, gpflow.likelihoods.Likelihood):
         self.likelihood_layer = LikelihoodLayer(likelihood)
     else:
         self.likelihood_layer = likelihood
     self.default_model_class = default_model_class
     self.num_data = self._validate_num_data(f_layers, num_data)
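A minimal usage sketch for this constructor (x_dim, num_data, and the build_gp_layers helper are assumed from the other examples on this page); note that a plain GPflow likelihood is wrapped in a LikelihoodLayer automatically:

import gpflow
import tensorflow as tf
from gpflux.models import DeepGP

gp_layers = build_gp_layers([x_dim, 1], num_data)
dgp = DeepGP(gp_layers, gpflow.likelihoods.Gaussian())  # likelihood gets wrapped
training_model = dgp.as_training_model()
training_model.compile(tf.optimizers.Adam(0.01))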
Example #7
# Z_aug = np.random.rand(3 * 5,).astype(default_float())
# num_inducing_points = Z.shape[0]
# axs[0].plot(Z, q, 'o', color='black')

# Shallow sparse GP
Z1 = np.linspace(min(X), max(X),
                 num=num_inducing_points)[..., None].astype(default_float())
# Z1 = Z[..., None]
feat1 = SharedIndependentInducingVariables(InducingPoints(Z1))
kern1 = SharedIndependent(Kernel(lengthscales=lengthscale,
                                 variance=outer_variance),
                          output_dim=1)
layer1 = GPLayer(kern1, feat1, X.shape[0], mean_function=Zero(), white=False)
# layer1.q_mu.assign(value=q[..., None])

lik_layer = LikelihoodLayer(Gaussian(variance=likelihood_variance))

model = DeepGP([layer1], lik_layer, input_dim=1, target_dim=1)
# DeepGP is not itself a Keras model; wrap it for training (cf. Examples #1 and #6)
training_model = model.as_training_model()
training_model.compile(tf.optimizers.Adam(learning_rate=learning_rate))
callbacks = [
    tf.keras.callbacks.ReduceLROnPlateau(
        monitor="loss",
        patience=patience,
        factor=factor,
        verbose=verbose,
        min_lr=min_learning_rate,
    )
]
_ = training_model.fit(x=(X_train, y_train),
                       y=None,
                       batch_size=X.shape[0],
                       # the source snippet is truncated here; num_epochs and the
                       # closing arguments are an assumed completion
                       epochs=num_epochs,
                       callbacks=callbacks)
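After fitting, predictions would go through the prediction model; a brief sketch, with X_test assumed (the output fields are those of the LikelihoodLayer outputs seen in Example #3):

prediction_model = model.as_prediction_model()
out = prediction_model(X_test)
f_mean, f_var = out.f_mean, out.f_var  # latent-function moments
y_mean, y_var = out.y_mean, out.y_var  # predictive moments, including observation noise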
Example #8
def build_LVGPGP_model(x_dim, w_dim, y_dim, num_data):
    lv_layer = build_latent_layer(w_dim, x_dim, y_dim)
    layer_dims = [x_dim + w_dim, x_dim + w_dim, y_dim]
    gp_layers = build_gp_layers(layer_dims, num_data)
    likelihood_layer = LikelihoodLayer(Gaussian(0.1))
    return DeepGP([lv_layer] + gp_layers, likelihood_layer, num_data=num_data)
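build_latent_layer is also not shown. Its x_dim and y_dim arguments suggest an encoder conditioned on (x, y); the placeholder sketch below instead reuses the directly parameterized encoder from Example #2 and ignores those arguments, so treat every detail as an assumption:

import numpy as np
import tensorflow_probability as tfp
from gpflux.encoders import DirectlyParameterizedNormalDiag
from gpflux.layers import LatentVariableLayer

def build_latent_layer(w_dim, x_dim, y_dim, num_data=100):
    # num_data must match the training set size for a per-datapoint q(w)
    prior = tfp.distributions.MultivariateNormalDiag(np.zeros(w_dim), np.ones(w_dim))
    encoder = DirectlyParameterizedNormalDiag(num_data, w_dim)
    return LatentVariableLayer(prior, encoder)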