Example #1
def test_bayesian_gplvm_2d():
    Q = 2  # latent dimensions
    x_data_mean = pca_reduce(Data.Y, Q)
    kernel = gpflow.kernels.SquaredExponential()

    m = gpflow.models.BayesianGPLVM(Data.Y,
                                    x_data_mean,
                                    np.ones((Data.N, Q)),
                                    kernel,
                                    num_inducing_variables=Data.M)

    log_likelihood_initial = m.log_likelihood()
    opt = gpflow.optimizers.Scipy()

    @tf.function(autograph=False)
    def objective_closure():
        return -m.log_marginal_likelihood()

    opt.minimize(objective_closure,
                 m.trainable_variables,
                 options=dict(maxiter=2))
    assert m.log_likelihood() > log_likelihood_initial

    # test prediction
    Xtest = Data.rng.randn(10, Q)
    mu_f, var_f = m.predict_f(Xtest)
    mu_fFull, var_fFull = m.predict_f(Xtest, full_cov=True)
    np.testing.assert_allclose(mu_fFull, mu_f)

    for i in range(Data.D):
        np.testing.assert_allclose(var_f[:, i], np.diag(var_fFull[:, :, i]))
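The `Data` fixture referenced above is not part of the snippet; a minimal sketch of a container exposing the attributes the test uses (`rng`, `N`, `D`, `M`, `Y`) might look like the following, where the concrete sizes are assumptions:

import numpy as np

class Data:
    # Hypothetical test fixture: only the attribute names are taken from the
    # test above; the sizes N, D, M are assumed values.
    rng = np.random.RandomState(0)
    N = 20   # number of data points
    D = 5    # observed dimensions
    M = 4    # number of inducing points
    Y = rng.randn(N, D)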
Example #2
def test_bayesian_gplvm_2d():
    Q = 2  # latent dimensions
    X_data_mean = pca_reduce(Data.Y, Q)
    kernel = gpflow.kernels.SquaredExponential()

    m = gpflow.models.BayesianGPLVM(Data.Y,
                                    X_data_mean,
                                    np.ones((Data.N, Q)),
                                    kernel,
                                    num_inducing_variables=Data.M)

    elbo_initial = m.elbo()
    opt = gpflow.optimizers.Scipy()
    opt.minimize(m.training_loss,
                 m.trainable_variables,
                 options=dict(maxiter=2))
    assert m.elbo() > elbo_initial

    # test prediction
    Xtest = Data.rng.randn(10, Q)
    mu_f, var_f = m.predict_f(Xtest)
    mu_fFull, var_fFull = m.predict_f(Xtest, full_cov=True)
    np.testing.assert_allclose(mu_fFull, mu_f)

    for i in range(Data.D):
        np.testing.assert_allclose(var_f[:, i], np.diag(var_fFull[:, :, i]))
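In GPflow 2, `m.training_loss` for a model that holds its own data returns the negative maximum-likelihood objective (here the ELBO) plus any log-prior terms, so with no priors set on the parameters the optimisation step above could equivalently use an explicit closure; a sketch:

import tensorflow as tf

@tf.function
def objective_closure():
    # negative ELBO; matches m.training_loss when no parameter priors are set
    return -m.elbo()

opt.minimize(objective_closure, m.trainable_variables, options=dict(maxiter=2))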
Example #3
    num_training_points=NUM_TRAINING_POINTS, observation_noise_variance=0.01)

# plt.scatter(Y_bg[:, 0], Y_bg[:, 1])
# plt.scatter(Y_fg[:, 0], Y_fg[:, 1])
# plt.show()

Y = tf.convert_to_tensor(Y_fg, dtype=default_float())

print("Number of points: {} and Number of dimensions: {}".format(
    Y.shape[0], Y.shape[1]))

latent_dim = 2  # number of latent dimensions
num_inducing = 20  # number of inducing pts
num_data = Y.shape[0]  # number of data points

X_mean_init = ops.pca_reduce(Y, latent_dim)
# X_mean_init = tf.ones((num_data, latent_dim), dtype=default_float())
X_var_init = tf.ones((num_data, latent_dim), dtype=default_float())

np.random.seed(1)  # for reproducibility
inducing_variable = tf.convert_to_tensor(np.random.permutation(
    X_mean_init.numpy())[:num_inducing],
                                         dtype=default_float())

# lengthscales = tf.convert_to_tensor([1.0] * latent_dim, dtype=default_float())
# kernel = gpflow.kernels.RBF(lengthscales=lengthscales)
kernel = gpflow.kernels.Cosine()

gplvm = gpflow.models.BayesianGPLVM(
    Y,
    X_data_mean=X_mean_init,
Example #4
# %% [markdown]
# ## Model construction
#
# We start by initializing the required variables:

# %%
latent_dim = 2  # number of latent dimensions
num_inducing = 20  # number of inducing pts
num_data = Y.shape[0]  # number of data points

# %% [markdown]
# Initialize via PCA:

# %%
x_mean_init = tf.convert_to_tensor(ops.pca_reduce(Y, latent_dim),
                                   dtype=default_float())
x_var_init = tf.convert_to_tensor(np.ones((num_data, latent_dim)),
                                  dtype=default_float())

# %% [markdown]
# Pick inducing inputs randomly from dataset initialization:

# %%
inducing_variable = tf.convert_to_tensor(np.random.permutation(
    x_mean_init.numpy())[:num_inducing],
                                         dtype=default_float())

# %% [markdown]
# We construct a Squared Exponential (SE) kernel operating on the two-dimensional latent space.
# The `ARD` parameter stands for Automatic Relevance Determination, which in practice means that
# we learn a different lengthscale for each of the input dimensions.
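The code cell that this sentence introduces is cut off in the snippet; under that description, a kernel with one lengthscale per latent dimension would look roughly like the following sketch:

# %%
lengthscales = tf.convert_to_tensor([1.0] * latent_dim, dtype=default_float())
kernel = gpflow.kernels.SquaredExponential(lengthscales=lengthscales)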
Example #5
    Y = np.array(mocap[1])
    print("shape Y", shape(Y))
    # Y = np.reshape(Y, (np.shape(Y)[0], -1))
    Y = Y[:, 0, :]
    Y = tf.convert_to_tensor(Y, dtype=default_float())

print("shape Y", np.shape(Y))
print('Number of points: {} and Number of dimensions: {}'.format(
    Y.shape[0], Y.shape[1]))

latent0_dim = 2  # number of latent dimensions
latent1_dim = 8  # number of latent dimensions from hidden layer
# num_inducing = 20  # number of inducing pts
num_data = Y.shape[0]  # number of data points

x_mean_latent = tf.convert_to_tensor(ops.pca_reduce(Y, latent1_dim),
                                     dtype=default_float())
x_mean_init = tf.convert_to_tensor(ops.pca_reduce(x_mean_latent, latent0_dim),
                                   dtype=default_float())

lengthscale0 = tf.convert_to_tensor([1.0] * latent0_dim, dtype=default_float())
lengthscale1 = tf.convert_to_tensor([1.0] * latent1_dim, dtype=default_float())
kernel0 = gpflow.kernels.RBF(lengthscales=lengthscale0)
kernel1 = gpflow.kernels.RBF(lengthscales=lengthscale1)

print("shape x_mean_latent", np.shape(x_mean_latent))
print("shape x_mean_init", np.shape(x_mean_init))
model = myGPLVM(Y,
                latent_data=x_mean_latent,
                x_data_mean=x_mean_init,
                kernel=[kernel0, kernel1])
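The snippet does not show how `myGPLVM` is optimised; assuming it follows the same GPflow model interface as the earlier examples, i.e. exposes `training_loss` and `trainable_variables` (an assumption, since `myGPLVM` is a custom class), a training step could be sketched as:

opt = gpflow.optimizers.Scipy()
opt.minimize(
    model.training_loss,              # assumed to exist on myGPLVM
    variables=model.trainable_variables,
    options=dict(maxiter=1000),
)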
Example #6
    def __init__(
        self,
        data: OutputData,
        kernel: Optional[Kernel] = None,
        latent_dimensions: Optional[int] = 2,
        num_inducing_variables: Optional[int] = None,
        inducing_variable=None,
        *,
        mean_function=None,
        q_diag: bool = False,
        q_mu=None,
        q_sqrt=None,
        whiten: bool = False,
    ):
        """
        - kernel, likelihood, inducing_variables, mean_function are appropriate
          GPflow objects
        - num_latent_gps is the number of latent processes to use, defaults to 2, as
          the dimensionality reduction is at dimensions 2
        - q_diag is a boolean. If True, the covariance is approximated by a
          diagonal matrix.
        - whiten is a boolean. If True, we use the whitened representation of
          the inducing points.
        - num_data is the total number of observations, defaults to X.shape[0]
          (relevant when feeding in external minibatches)
        """

        self.latent_dimensions = latent_dimensions

        # grab data
        self.data = data_input_to_tensor(data)

        # define latent-space initialization
        X_data_mean = pca_reduce(data, self.latent_dimensions)

        num_data, num_latent_gps = data.shape

        self.num_data = num_data

        X_data_var = tf.ones((self.num_data, self.latent_dimensions),
                             dtype=default_float())

        assert X_data_var.ndim == 2

        # default kernel if none is given
        if kernel is None:
            kernel = gpflow.kernels.SquaredExponential()

        # initialize latent-space Parameters
        self.X_data_mean = Parameter(X_data_mean)
        self.X_data_var = Parameter(X_data_var, transform=positive())

        # initialize inducing-point parameters
        if (inducing_variable is None) == (num_inducing_variables is None):
            raise ValueError(
                "BayesianGPLVM needs exactly one of `inducing_variable` and `num_inducing_variables`"
            )

        if inducing_variable is None:
            # By default we initialize with a random subset of the initial latent points.
            # Note that tf.random.shuffle returns a copy, it does not shuffle in-place.
            # k-means clustering could be used instead.
            Z = tf.random.shuffle(X_data_mean)[:num_inducing_variables]
            inducing_variable = InducingPoints(Z)

        self.inducing_variable = inducingpoint_wrapper(inducing_variable)

        # loss placeholders for analysis purposes
        self.loss_placeholder = defaultdict(
            list, {k: []
                   for k in ("KL_x", "ELBO", "KL_u")})

        # parameters for the prior mean and variance of X
        X_prior_mean = tf.zeros((self.num_data, self.latent_dimensions),
                                dtype=default_float())
        X_prior_var = tf.ones((self.num_data, self.latent_dimensions),
                              dtype=default_float())

        self.X_prior_mean = tf.convert_to_tensor(np.atleast_1d(X_prior_mean),
                                                 dtype=default_float())
        self.X_prior_var = tf.convert_to_tensor(np.atleast_1d(X_prior_var),
                                                dtype=default_float())

        # sanity checks

        assert np.all(X_data_mean.shape == X_data_var.shape)
        assert X_data_mean.shape[0] == self.data.shape[
            0], "X mean and Y must be same size."
        assert X_data_var.shape[0] == self.data.shape[
            0], "X var and Y must be same size."
        assert X_data_mean.shape[1] == self.latent_dimensions
        assert self.X_prior_mean.shape[0] == self.num_data
        assert self.X_prior_mean.shape[1] == self.latent_dimensions
        assert self.X_prior_var.shape[0] == self.num_data
        assert self.X_prior_var.shape[1] == self.latent_dimensions

        # initialize the superclass with the remaining arguments
        super().__init__(kernel, likelihoods.Gaussian(variance=0.1),
                         mean_function, num_latent_gps)
        self.q_diag = q_diag
        self.whiten = whiten
        #self.inducing_variable = inducingpoint_wrapper(inducing_variable)

        # init variational parameters
        num_inducing = self.inducing_variable.num_inducing
        self._init_variational_parameters(num_inducing, q_mu, q_sqrt, q_diag)
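The name of the class this `__init__` belongs to is not visible in the snippet; assuming it is exported as, say, `GPLVMSVGP` (a hypothetical name), construction only needs the data matrix, because the latent initialization is computed internally via PCA:

import numpy as np

Y = np.random.randn(100, 12)      # N x D data matrix

# 'GPLVMSVGP' is a hypothetical name for the class defined above
model = GPLVMSVGP(
    data=Y,
    latent_dimensions=2,
    num_inducing_variables=20,    # exactly one of this or `inducing_variable`
)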
Example #7
    def __init__(
        self,
        data: OutputData,
        X_data_mean: Optional[tf.Tensor] = None,
        X_data_var: Optional[tf.Tensor] = None,
        kernel: Optional[Kernel] = None,
        num_inducing_variables: Optional[int] = None,
        inducing_variable=None,
        X_prior_mean=None,
        X_prior_var=None,
    ):
        """
        Initialise Bayesian GPLVM object. This method only works with a Gaussian likelihood.

        :param data: data matrix, size N (number of points) x D (dimensions)
        :param X_data_mean: initial latent positions, size N (number of points) x Q (latent dimensions).
        :param X_data_var: variance of latent positions ([N, Q]), for the initialisation of the latent space.
        :param kernel: kernel specification, by default Squared Exponential
        :param num_inducing_variables: number of inducing points, M
        :param inducing_variable: matrix of inducing points, size M (inducing points) x Q (latent dimensions). By default
            random permutation of X_data_mean.
        :param X_prior_mean: prior mean used in KL term of bound. By default 0. Same size as X_data_mean.
        :param X_prior_var: prior variance used in KL term of bound. By default 1.
        """

        self.latent_dimensions = 2
        # grab data
        self.data = data_input_to_tensor(data)

        # define latent-space initialization
        if X_data_mean is None:
            X_data_mean = pca_reduce(data, self.latent_dimensions)

        num_data, num_latent_gps = X_data_mean.shape

        self.num_data = num_data

        if X_data_var is None:
            X_data_var = tf.ones((self.num_data, self.latent_dimensions),
                                 dtype=default_float())

        assert X_data_var.ndim == 2

        self.output_dim = self.data.shape[-1]  # output dimensionality D

        # default kernel if none is given
        if kernel is None:
            kernel = gpflow.kernels.SquaredExponential()

        # initialize the GPModel superclass
        super().__init__(kernel,
                         likelihoods.Gaussian(variance=0.1),
                         num_latent_gps=num_latent_gps)

        # initialize latent-space Parameters
        self.X_data_mean = Parameter(X_data_mean)
        self.X_data_var = Parameter(X_data_var, transform=positive())

        # initialize inducing-point parameters
        if (inducing_variable is None) == (num_inducing_variables is None):
            raise ValueError(
                "BayesianGPLVM needs exactly one of `inducing_variable` and `num_inducing_variables`"
            )

        if inducing_variable is None:
            # By default we initialize with a random subset of the initial latent points.
            # Note that tf.random.shuffle returns a copy, it does not shuffle in-place.
            # k-means clustering could be used instead.
            Z = tf.random.shuffle(X_data_mean)[:num_inducing_variables]
            inducing_variable = InducingPoints(Z)

        self.inducing_variable = inducingpoint_wrapper(inducing_variable)

        # loss placeholders for analysis purposes
        self.loss_placeholder = defaultdict(list,
                                            {k: []
                                             for k in ("KL_x", "ELBO")})

        # parameters for the prior mean and variance of X
        if X_prior_mean is None:
            X_prior_mean = tf.zeros((self.num_data, self.latent_dimensions),
                                    dtype=default_float())
        if X_prior_var is None:
            X_prior_var = tf.ones((self.num_data, self.latent_dimensions),
                                  dtype=default_float())

        self.X_prior_mean = tf.convert_to_tensor(np.atleast_1d(X_prior_mean),
                                                 dtype=default_float())
        self.X_prior_var = tf.convert_to_tensor(np.atleast_1d(X_prior_var),
                                                dtype=default_float())

        # sanity checks

        assert np.all(X_data_mean.shape == X_data_var.shape)
        assert X_data_mean.shape[0] == self.data.shape[
            0], "X mean and Y must be same size."
        assert X_data_var.shape[0] == self.data.shape[
            0], "X var and Y must be same size."
        assert X_data_mean.shape[1] == self.latent_dimensions
        assert self.X_prior_mean.shape[0] == self.num_data
        assert self.X_prior_mean.shape[1] == self.latent_dimensions
        assert self.X_prior_var.shape[0] == self.num_data
        assert self.X_prior_var.shape[1] == self.latent_dimensions
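The class name is likewise not shown here; assuming the constructor above belongs to a BayesianGPLVM-style class (hypothetically named `BayesianGPLVM` below), every argument other than the data has a default, and the two inducing-point arguments are mutually exclusive, as the ValueError check above enforces:

import numpy as np

Y = np.random.randn(200, 10)                        # N x D observations

# latent means/variances default to PCA init / ones internally
m1 = BayesianGPLVM(Y, num_inducing_variables=20)    # Z = random subset of the PCA init

# or pass explicit inducing inputs of shape M x Q instead
Z = np.random.randn(20, 2)
m2 = BayesianGPLVM(Y, inducing_variable=Z)

# supplying both (or neither) of num_inducing_variables / inducing_variable raises ValueError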