Example #1
    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)

            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(3, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov = t.forward(unconstrained)

            variance = 0.3 + self.rng.rand()

            k1 = ekernels.RBF(1, variance, active_dims=[0])
            k2 = ekernels.RBF(1, variance, active_dims=[1])
            klin = ekernels.Linear(1, variance, active_dims=[1])
            self.ekernels = [k1, k2, klin]

            k1 = ekernels.RBF(2, variance)
            k2 = ekernels.RBF(2, variance)
            klin = ekernels.Linear(2, variance)
            self.pekernels = [k1, k2, klin]

            k1 = kernels.RBF(1, variance, active_dims=[0])
            klin = kernels.Linear(1, variance, active_dims=[1])
            self.kernels = [k1, klin]

            k1 = kernels.RBF(2, variance)
            klin = kernels.Linear(2, variance)
            self.pkernels = [k1, klin]
Example #2
    def setUp(self):
        self.test_graph = tf.Graph()
        with self.test_context():
            self.N = 4
            self.D = 2
            self.rng = np.random.RandomState(0)
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(3, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov = t.forward(unconstrained)

            variance = 0.3 + self.rng.rand()

            k1 = ekernels.RBF(1, variance, active_dims=[0])
            k2 = ekernels.RBF(1, variance, active_dims=[1])
            klin = ekernels.Linear(1, variance, active_dims=[1])
            self.ekernels = [k1, k2, klin]  # Kernels doing the expectation in closed form, doing the slicing

            k1 = ekernels.RBF(1, variance)
            k2 = ekernels.RBF(1, variance)
            klin = ekernels.Linear(1, variance)
            self.pekernels = [k1, k2, klin]  # kernels doing the expectations in closed form, without slicing

            k1 = kernels.RBF(1, variance, active_dims=[0])
            klin = kernels.Linear(1, variance, active_dims=[1])
            self.kernels = [k1, klin]

            k1 = kernels.RBF(1, variance)
            klin = kernels.Linear(1, variance)
            self.pkernels = [k1, klin]
Example #3
    def setUp(self):
        self.test_graph = tf.Graph()
        with self.test_context():
            self.rng = np.random.RandomState(0)

            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(3, self.D)
            unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
            t = TriDiagonalBlockRep()
            self.Xcov_pairwise = t.forward(unconstrained)
            self.Xcov = self.Xcov_pairwise[0]  # no cross-covariances

            variance = 0.3 + self.rng.rand()

            k1 = ekernels.RBF(1, variance, active_dims=[0])
            k2 = ekernels.RBF(1, variance, active_dims=[1])
            klin = ekernels.Linear(1, variance, active_dims=[1])
            self.ekernels = [k1, k2, klin]

            k1 = ekernels.RBF(2, variance)
            k2 = ekernels.RBF(2, variance)
            klin = ekernels.Linear(2, variance)
            self.pekernels = [k1, k2, klin]

            k1 = kernels.RBF(1, variance, active_dims=[0])
            klin = kernels.Linear(1, variance, active_dims=[1])
            self.kernels = [k1, klin]

            k1 = kernels.RBF(2, variance)
            klin = kernels.Linear(2, variance)
            self.pkernels = [k1, klin]
Example #4
    def myKL2(self):
        X = np.array([[1., 2., 3.], [1., 2.1, 3.], [1.1, 2., 3.],
                      [1., 2., 3.1]])
        Y = np.array([[1.], [2.], [.2], [3.]])
        Z = np.array([[1., 2., 3.], [1.3, 2.2, 3.1]])
        A = np.tril(np.random.rand(6, 6))  # "Cholesky" factor of S_M
        B = np.random.rand(6, 1)  # mu_M
        all_kernels = [
            kernels.RBF(3),
            kernels.RBF(2, lengthscales=3., variance=2.)
        ]
        all_Zs, all_mfs = init_linear(X, Z, all_kernels)
        mylayers = Fully_Coupled_Layers(X,
                                        Y,
                                        Z,
                                        all_kernels,
                                        all_mfs,
                                        all_Zs,
                                        mu_M=B,
                                        S_M=A)
        kl = mylayers.KL()
        session = get_default_session()
        kl = session.run(kl)

        Kmm1 = all_kernels[0].compute_K_symm(
            all_Zs[0]) + np.eye(Z.shape[0]) * settings.jitter
        Kmm2 = all_kernels[1].compute_K_symm(
            all_Zs[1]) + np.eye(all_Zs[1].shape[0]) * settings.jitter
        K_big = scipy.linalg.block_diag(Kmm1, Kmm1, Kmm2)
        tfKL = gauss_kl(tf.constant(B),
                        tf.constant(A[np.newaxis]),
                        K=tf.constant(K_big))

        sess = tf.Session()
        return kl, sess.run(tfKL)
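
A minimal sketch of how the two KL values returned above might be compared (assuming myKL2 is a method of a unittest.TestCase and that a default GPflow session is available; the test name below is hypothetical):

    def test_kl_matches_gauss_kl(self):
        # myKL2 returns the layer's own KL and the reference value from gauss_kl
        kl_layers, kl_reference = self.myKL2()
        np.testing.assert_allclose(kl_layers, kl_reference, rtol=1e-5, atol=1e-8)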
Example #5
    def setUp(self):
        self.test_graph = tf.Graph()
        self.rng = np.random.RandomState(1)  # this seed works with 60 GH points
        self.N = 4
        self.D = 2
        self.Xmu = self.rng.rand(self.N, self.D)
        self.Z = self.rng.rand(2, self.D)

        unconstrained = self.rng.randn(self.N, 2 * self.D, self.D)
        t = TriDiagonalBlockRep()
        self.Xcov = t.forward(unconstrained)

        # Set up "normal" kernels
        ekernel_classes = [ekernels.RBF, ekernels.Linear]
        kernel_classes = [kernels.RBF, kernels.Linear]
        params = [(self.D, 0.3 + self.rng.rand(),
                   self.rng.rand(2) + [0.5, 1.5], None, True),
                  (self.D, 0.3 + self.rng.rand(), None)]
        self.ekernels = [c(*p) for c, p in zip(ekernel_classes, params)]
        self.kernels = [c(*p) for c, p in zip(kernel_classes, params)]

        # Test summed kernels, non-overlapping
        rbfvariance = 0.3 + self.rng.rand()
        rbfard = [self.rng.rand() + 0.5]
        linvariance = 0.3 + self.rng.rand()
        self.kernels.append(
            kernels.Add([
                kernels.RBF(1, rbfvariance, rbfard, [1], False),
                kernels.Linear(1, linvariance, [0])
            ]))
        self.kernels[-1].input_size = self.kernels[-1].input_dim
        for k in self.kernels[-1].kern_list:
            k.input_size = self.kernels[-1].input_size
        self.ekernels.append(
            ekernels.Add([
                ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                ekernels.Linear(1, linvariance, [0])
            ]))
        self.ekernels[-1].input_size = self.ekernels[-1].input_dim
        for k in self.ekernels[-1].kern_list:
            k.input_size = self.ekernels[-1].input_size

        # Test summed kernels, overlapping
        rbfvariance = 0.3 + self.rng.rand()
        rbfard = [self.rng.rand() + 0.5]
        linvariance = 0.3 + self.rng.rand()
        self.kernels.append(
            kernels.Add([
                kernels.RBF(self.D, rbfvariance, rbfard, active_dims=[0, 1]),
                kernels.Linear(self.D, linvariance, active_dims=[0, 1])
            ]))
        self.ekernels.append(
            ekernels.Add([
                ekernels.RBF(self.D, rbfvariance, rbfard, active_dims=[0, 1]),
                ekernels.Linear(self.D, linvariance, active_dims=[0, 1])
            ]))

        self.assertTrue(self.ekernels[-2].on_separate_dimensions)
        self.assertTrue(not self.ekernels[-1].on_separate_dimensions)
Example #6
def rbf_lin_sum_kern2():
    return kernels.Sum([
        kernels.Linear(Data.D_in, variance=rng.rand()),
        kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.),
        kernels.Linear(Data.D_in, variance=rng.rand()),
        kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.),
    ])
Example #7
    def test_2d(self):
        with self.test_context():
            # test default Z on 2D example
            Q = 2  # latent dimensions
            X_mean = gpflow.models.PCA_reduce(self.Y, Q)
            k = kernels.RBF(Q, ARD=False)
            m = gpflow.models.BayesianGPLVM(X_mean=X_mean,
                                            X_var=np.ones((self.N, Q)),
                                            Y=self.Y,
                                            kern=k,
                                            M=self.M)
            linit = m.compute_log_likelihood()
            opt = gpflow.train.ScipyOptimizer()
            opt.minimize(m, maxiter=2)
            self.assertTrue(m.compute_log_likelihood() > linit)

            # test prediction
            Xtest = self.rng.randn(10, Q)
            mu_f, var_f = m.predict_f(Xtest)
            mu_fFull, var_fFull = m.predict_f_full_cov(Xtest)
            self.assertTrue(np.allclose(mu_fFull, mu_f))
            # check full covariance diagonal
            for i in range(self.D):
                self.assertTrue(
                    np.allclose(var_f[:, i], np.diag(var_fFull[:, :, i])))
Example #8
 def __init__(self,
              Y,
              latent_dim,
              X_mean=None,
              kern=None,
              mean_function=Zero()):
     """
     Initialise GPLVM object. This method only works with a Gaussian likelihood.
     :param Y: data matrix, size N (number of points) x D (dimensions)
     :param latent_dim: number of latent dimensions, Q
     :param X_mean: latent positions (N x Q), used to initialise the latent space.
     :param kern: kernel specification, by default RBF
     :param mean_function: mean function, by default Zero.
     """
     if kern is None:
         kern = kernels.RBF(latent_dim, ARD=True)
     if X_mean is None:
         X_mean = PCA_initialization(Y, latent_dim)
     assert X_mean.shape[1] == latent_dim, \
         'Passed in number of latent ' + str(latent_dim) + ' does not match initial X ' + str(X_mean.shape[1])
     self.num_latent = X_mean.shape[1]
     assert Y.shape[1] >= self.num_latent, 'More latent dimensions than observed.'
     GPR.__init__(self, X_mean, Y, kern, mean_function=mean_function)
     del self.X  # in GPLVM this is a Param
     self.X = Param(X_mean)
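
A minimal usage sketch for the constructor above (assuming the GPLVM class shown here and its helpers such as PCA_initialization are importable; the data is synthetic and only illustrative):

import numpy as np

Y = np.random.randn(30, 5)     # 30 points, 5 observed dimensions
m = GPLVM(Y, latent_dim=2)     # RBF kernel and PCA initialisation by default
# m.X holds the trainable latent positions, initialised from X_mean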
Example #9
    def setUp(self):
        with self.test_session():
            self._threshold = 0.5
            self.rng = np.random.RandomState(0)
            self.N = 4
            self.D = 2

            # Test summed kernels, non-overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()

            self.kernel = kernels.Prod([
                kernels.RBF(1, rbfvariance, rbfard, [1], False),
                kernels.Linear(1, linvariance, [0])
            ])

            self.ekernel = ekernels.Prod([
                ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                ekernels.Linear(1, linvariance, [0])
            ])

            self.Xmu = self.rng.rand(self.N, self.D)
            self.Xcov = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(2, self.D)
Example #10
    def train(self, Y):
        self.models = []
        N = Y.shape[0]

        for i in range(self.n_layers):
            M = self.inducing_pts[i]
            kernel = kernels.RBF(self.kernel_dims[i],
                                 ARD=True,
                                 active_dims=slice(0, self.kernel_dims[i]))
            Q = self.latent_dims[i]
            X_mean = gpflow.models.PCA_reduce(Y, Q)
            Z = np.random.permutation(X_mean.copy())[:M]

            m1 = gpflow.models.BayesianGPLVM(X_mean=X_mean,
                                             X_var=0.1 * np.ones((N, Q)),
                                             Y=Y,
                                             kern=kernel,
                                             M=M,
                                             Z=Z)
            m1.likelihood.variance = 0.01
            m1.compile()
            opt = gpflow.train.ScipyOptimizer()
            opt.minimize(m1,
                         maxiter=gpflow.test_util.notebook_niter(
                             self.max_iters))
            self.models.append(m1)
            Y = m1.X_mean.read_value()

            self.means = Y.copy()
Example #11
def rbf_lin_prod_kern():
    return kernels.Product([
        kernels.RBF(1,
                    variance=rng.rand(),
                    lengthscales=rng.rand() + 1.,
                    active_dims=[0]),
        kernels.Linear(1, variance=rng.rand(), active_dims=[1])
    ])
Example #12
def create_meta_learning_model(**kwargs):
    Z = np.random.randn(kwargs["n_inducing_points"],
                        kwargs["dim_in"] + kwargs["dim_h"])
    mean_func = None
    kernel = kernels.RBF(kwargs["dim_in"] + kwargs["dim_h"], ARD=True)
    likelihood = likelihoods.MultiGaussian(dim=kwargs["dim_out"])
    latent_to_conf_space_kernel = kernels.RBF(kwargs["dim_h"], ARD=True)
    latent_to_conf_space_likelihood = likelihoods.Gaussian()

    model = MLSVGP(
        dim_in=kwargs["dim_in"],
        dim_out=kwargs["dim_out"],
        dim_h=kwargs["dim_h"],
        num_h=kwargs["n_envs"],
        kern=kernel,
        likelihood=likelihood,
        mean_function=mean_func,
        Z=Z,
        observed_config_space_dim=kwargs["observed_config_space_dim"],
        latent_to_conf_space_kernel=latent_to_conf_space_kernel,
        latent_to_conf_space_likelihood=latent_to_conf_space_likelihood)

    return model
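
A sketch of how the factory above might be called; the keyword values are placeholders chosen only for illustration:

model = create_meta_learning_model(dim_in=4,
                                   dim_out=2,
                                   dim_h=3,
                                   n_envs=5,
                                   n_inducing_points=50,
                                   observed_config_space_dim=0)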
Example #13
def create_model(**kwargs):

    if "BASESVGP" in kwargs["model_name"]:

        Z = np.random.randn(kwargs["n_inducing"], kwargs["dim_in"])
        mean_func = None
        kernel = kernels.RBF(kwargs["dim_in"], ARD=True)
        likelihood = likelihoods.MultiGaussian(dim=kwargs["dim_out"])

        model = BASESVGP(dim_in=kwargs["dim_in"],
                         dim_out=kwargs["dim_out"],
                         kern=kernel,
                         likelihood=likelihood,
                         mean_function=mean_func,
                         Z=Z,
                         name=kwargs["model_name"])

    elif kwargs["model_name"] == "MLSVGP":

        Z = np.random.randn(kwargs["n_inducing"],
                            kwargs["dim_in"] + kwargs["dim_h"])
        mean_func = None
        kernel = kernels.RBF(kwargs["dim_in"] + kwargs["dim_h"], ARD=True)
        likelihood = likelihoods.MultiGaussian(dim=kwargs["dim_out"])

        model = MLSVGP(dim_in=kwargs["dim_in"],
                       dim_out=kwargs["dim_out"],
                       dim_h=kwargs["dim_h"],
                       num_h=kwargs["n_envs"],
                       kern=kernel,
                       likelihood=likelihood,
                       mean_function=mean_func,
                       Z=Z,
                       name=kwargs["model_name"])

    return model
Example #14
 def test_1d(self):
     with self.test_context():
         Q = 1  # latent dimensions
         k = kernels.RBF(Q)
         Z = np.linspace(0, 1, self.M)
         Z = np.expand_dims(Z, Q)  # inducing points
         m = gpflow.models.BayesianGPLVM(X_mean=np.zeros((self.N, Q)),
                                         X_var=np.ones((self.N, Q)),
                                         Y=self.Y,
                                         kern=k,
                                         M=self.M,
                                         Z=Z)
         linit = m.compute_log_likelihood()
         opt = gpflow.train.ScipyOptimizer()
         opt.minimize(m, maxiter=2)
         self.assertTrue(m.compute_log_likelihood() > linit)
Example #15
    def _conv_layer(self,
                    NHWC_X,
                    M,
                    feature_map,
                    filter_size,
                    stride,
                    layer_params=None):
        if layer_params is None:
            layer_params = {}
        NHWC = NHWC_X.shape
        view = FullView(input_size=NHWC[1:3],
                        filter_size=filter_size,
                        feature_maps=NHWC[3],
                        stride=stride)

        if self.flags.identity_mean:
            conv_mean = Conv2dMean(filter_size,
                                   NHWC[3],
                                   feature_map,
                                   stride=stride)
        else:
            conv_mean = gpflow.mean_functions.Zero()
        conv_mean.set_trainable(False)

        output_shape = image_HW(view.patch_count) + [feature_map]

        H_X = identity_conv(NHWC_X, filter_size, NHWC[3], feature_map, stride)
        if len(layer_params) == 0:
            conv_features = PatchInducingFeatures.from_images(
                NHWC_X, M, filter_size)
        else:
            conv_features = PatchInducingFeatures(layer_params.get('Z'))

        patch_length = filter_size**2 * NHWC[3]
        if self.flags.base_kernel == 'rbf':
            lengthscales = layer_params.get('base_kernel/lengthscales', 5.0)
            variance = layer_params.get('base_kernel/variance', 5.0)
            base_kernel = kernels.RBF(patch_length,
                                      variance=variance,
                                      lengthscales=lengthscales)
        elif self.flags.base_kernel == 'acos':
            base_kernel = kernels.ArcCosine(patch_length, order=0)
        else:
            raise ValueError("Not a valid base-kernel value")

        q_mu = layer_params.get('q_mu')
        q_sqrt = layer_params.get('q_sqrt')

        conv_layer = ConvLayer(base_kernel=base_kernel,
                               mean_function=conv_mean,
                               feature=conv_features,
                               view=view,
                               white=self.flags.white,
                               gp_count=feature_map,
                               q_mu=q_mu,
                               q_sqrt=q_sqrt)

        if q_sqrt is None:
            # Start with low variance.
            conv_layer.q_sqrt = conv_layer.q_sqrt.value * 1e-5

        return conv_layer, H_X
Example #16
def rbf_prod_seperate_dims():
    return kernels.Product([
        kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand(), active_dims=[0]),
        kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand(), active_dims=[1])
    ])
Example #17
    def setUp(self):
        with self.test_session():
            self.rng = np.random.RandomState(0)
            self.N = 4
            self.D = 2
            self.Xmu = self.rng.rand(self.N, self.D)
            self.Z = self.rng.rand(2, self.D)

            self.Xcov_diag = 0.05 + self.rng.rand(self.N, self.D)
            self.Xcov = np.zeros(
                (self.Xcov_diag.shape[0], self.Xcov_diag.shape[1],
                 self.Xcov_diag.shape[1]))
            self.Xcov[
                (np.s_[:], ) +
                np.diag_indices(self.Xcov_diag.shape[1])] = self.Xcov_diag

            # Set up "normal" kernels
            ekernel_classes = [ekernels.RBF, ekernels.Linear]
            kernel_classes = [kernels.RBF, kernels.Linear]
            params = [(self.D, 0.3 + self.rng.rand(),
                       self.rng.rand(2) + [0.5, 1.5], None, True),
                      (self.D, 0.3 + self.rng.rand(), None)]
            self.ekernels = [c(*p) for c, p in zip(ekernel_classes, params)]
            self.kernels = [c(*p) for c, p in zip(kernel_classes, params)]

            # Test summed kernels, non-overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()
            self.kernels.append(
                kernels.Add([
                    kernels.RBF(1, rbfvariance, rbfard, [1], False),
                    kernels.Linear(1, linvariance, [0])
                ]))
            self.kernels[-1].input_size = self.kernels[-1].input_dim
            for k in self.kernels[-1].kern_list:
                k.input_size = self.kernels[-1].input_size
            self.ekernels.append(
                ekernels.Add([
                    ekernels.RBF(1, rbfvariance, rbfard, [1], False),
                    ekernels.Linear(1, linvariance, [0])
                ]))
            self.ekernels[-1].input_size = self.ekernels[-1].input_dim
            for k in self.ekernels[-1].kern_list:
                k.input_size = self.ekernels[-1].input_size

            # Test summed kernels, overlapping
            rbfvariance = 0.3 + self.rng.rand()
            rbfard = [self.rng.rand() + 0.5]
            linvariance = 0.3 + self.rng.rand()
            self.kernels.append(
                kernels.Add([
                    kernels.RBF(self.D, rbfvariance, rbfard),
                    kernels.Linear(self.D, linvariance)
                ]))
            self.ekernels.append(
                ekernels.Add([
                    ekernels.RBF(self.D, rbfvariance, rbfard),
                    ekernels.Linear(self.D, linvariance)
                ]))

            self.assertTrue(self.ekernels[-2].on_separate_dimensions)
            self.assertTrue(not self.ekernels[-1].on_separate_dimensions)
Example #18
data = np.load('data/three_phase_oil_flow.npz')
Y = data['Y']
labels = data['labels']

print('Number of points x Number of dimensions', Y.shape)

Q = 5
M = 20  # number of inducing pts
N = Y.shape[0]
X_mean = gpflow.models.PCA_reduce(Y, Q)  # Initialise via PCA
Z = np.random.permutation(X_mean.copy())[:M]

fHmmm = False
if fHmmm:
    k = (kernels.RBF(3, ARD=True, active_dims=slice(0, 3)) +
         kernels.Linear(2, ARD=False, active_dims=slice(3, 5)))
else:
    k = (kernels.RBF(3, ARD=True, active_dims=[0, 1, 2]) +
         kernels.Linear(2, ARD=False, active_dims=[3, 4]))

m = gpflow.models.BayesianGPLVM(X_mean=X_mean,
                                X_var=0.1 * np.ones((N, Q)),
                                Y=Y,
                                kern=k,
                                M=M,
                                Z=Z)
m.likelihood.variance = 0.01

opt = gpflow.train.ScipyOptimizer()
m.compile()
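
The script above stops after m.compile(); a minimal continuation might look as follows (a sketch assuming the GPflow 1.x ScipyOptimizer API; the iteration count is arbitrary):

opt.minimize(m, maxiter=100)        # optimise kernel, likelihood and variational parameters
X_learned = m.X_mean.read_value()   # learned latent means, shape N x Q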
Example #19
import numpy as np
import gpflow
import gpflow.kernels as kernels

from gpflow.models.vgpds import VGPDS
from gpflow.training.tensorflow_optimizer import _TensorFlowOptimizer
from gpflow.training.scipy_optimizer import ScipyOptimizer

import matplotlib.pyplot as plt

T = np.linspace(0, 4 * np.pi, 40)

kern = kernels.RBF(2, ARD=True)

# generate some data
Y1 = np.sin(T)
Y2 = np.cos(Y1)
Y3 = Y1 - Y2
Y4 = Y1 + Y2
Y5 = np.multiply(Y1, Y2)

Y = np.stack((Y1, Y2, Y3, Y4, Y5), axis=1)

m = VGPDS(Y, T, kern, num_latent=2)

m.likelihood.variance = 0.01

print('initial parameters')
print(m.as_pandas_table())

print('optimizing parameters')
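
A sketch of how the optimisation announced above might proceed (assuming the ScipyOptimizer imported at the top of this script; the iteration count is arbitrary):

opt = ScipyOptimizer()
opt.minimize(m, maxiter=200)

print('optimized parameters')
print(m.as_pandas_table())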
Example #20
    def __init__(self,
                 Y,
                 T,
                 kern,
                 M=10,
                 num_latent=3,
                 dynamic_kern=None,
                 Z=None,
                 KL_weight=None):
        """
        Initialise VGPDS. This only works with Gaussian Likelihood for now

        :param Y: data matrix, size T (number of time points) x D (dimensions)
        :param T: time vector, positive real value, size 1 x T
        :param kern: Mapping kernel X -> Y specification, by default RBF
        :param M: Number of inducing points
        :param num_latent: Number of latent dimension. This is automatically found unless user force latent dimension
        :param force_latent_dim: Specify whether strict latent dimension is enforced
        :param dynamic_kern: temporal dynamics kernel specification, by default RBF
        :param Z: matrix of inducing points
        :param KL_weight: Weight of KL . weight of bound = 1 - w(KL)
        """
        X_mean = large_PCA(Y, num_latent)

        GPModel.__init__(self,
                         X_mean,
                         Y,
                         kern,
                         likelihood=likelihoods.Gaussian(),
                         mean_function=Zero())
        del self.X  # This is a Param

        self.T = np.transpose(T[np.newaxis])
        self.num_latent = num_latent
        if KL_weight is None:
            self.KL_weight = 0.5
        else:
            assert KL_weight <= 1
            assert KL_weight >= 0
            self.KL_weight = KL_weight

        #This is only one way to initialize mu_bar_q
        mu_bar_q = X_mean
        lambda_q = np.ones((self.T.shape[0], self.num_latent))

        if dynamic_kern is None:
            self.dynamic_kern = kernels.RBF(1) + kernels.Bias(
                1) + kernels.White(1)
        else:
            self.dynamic_kern = dynamic_kern

        self.mu_bar_q = Parameter(mu_bar_q)
        self.lambda_q = Parameter(lambda_q)

        self.num_time, self.num_latent = X_mean.shape
        self.output_dim = Y.shape[1]

        # inducing points
        if Z is None:
            # By default we initialize by subset of initial latent points
            Z = np.random.permutation(X_mean.copy())[:M]

        self.feature = features.InducingPoints(Z)

        assert len(self.feature) == M
Example #21
    def test_kernelsActiveDims(self):
        ''' Test sum and product compositional kernels '''
        with self.test_context():
            Q = 2  # latent dimensions
            X_mean = gpflow.models.PCA_reduce(self.Y, Q)
            kernsQuadratu = [
                kernels.RBF(1, active_dims=[0]) + kernels.Linear(1, active_dims=[1]),
                kernels.RBF(1, active_dims=[0]) + kernels.Periodic(1, active_dims=[1]),
                kernels.RBF(1, active_dims=[0]) * kernels.Linear(1, active_dims=[1]),
                kernels.RBF(Q)+kernels.Linear(Q)]  # non-overlapping
            kernsAnalytic = [
                ekernels.Sum([
                    ekernels.RBF(1, active_dims=[0]),
                    ekernels.Linear(1, active_dims=[1])]),
                ekernels.Sum([
                    ekernels.RBF(1, active_dims=[0]),
                    kernels.Periodic(1, active_dims=[1])]),
                ekernels.Product([
                    ekernels.RBF(1, active_dims=[0]),
                    ekernels.Linear(1, active_dims=[1])]),
                ekernels.Sum([
                    ekernels.RBF(Q),
                    ekernels.Linear(Q)])
            ]
            fOnSeparateDims = [True, True, True, False]
            Z = np.random.permutation(X_mean.copy())[:self.M]
            # Also test default N(0,1) is used
            X_prior_mean = np.zeros((self.N, Q))
            X_prior_var = np.ones((self.N, Q))
            Xtest = self.rng.randn(10, Q)

        for kq, ka, sepDims in zip(kernsQuadratu, kernsAnalytic, fOnSeparateDims):
            with self.test_context():
                kq.num_gauss_hermite_points = 20  # speed up quadrature for tests
                # RBF should throw error if quadrature is used
                ka.kern_list[0].num_gauss_hermite_points = 0
                if sepDims:
                    self.assertTrue(
                        ka.on_separate_dimensions,
                        'analytic kernel must not use quadrature')
                mq = gpflow.models.BayesianGPLVM(
                    X_mean=X_mean,
                    X_var=np.ones((self.N, Q)),
                    Y=self.Y,
                    kern=kq,
                    M=self.M,
                    Z=Z,
                    X_prior_mean=X_prior_mean,
                    X_prior_var=X_prior_var)
                ma = gpflow.models.BayesianGPLVM(
                    X_mean=X_mean,
                    X_var=np.ones((self.N, Q)),
                    Y=self.Y,
                    kern=ka,
                    M=self.M,
                    Z=Z)
                ql = mq.compute_log_likelihood()
                al = ma.compute_log_likelihood()
                self.assertTrue(np.allclose(ql, al, atol=1e-2),
                                'Likelihood not equal %f<>%f' % (ql, al))
                mu_f_a, var_f_a = ma.predict_f(Xtest)
                mu_f_q, var_f_q = mq.predict_f(Xtest)
                self.assertTrue(np.allclose(mu_f_a, mu_f_q, atol=1e-4),
                                ('Posterior means different', mu_f_a-mu_f_q))
                self.assertTrue(np.allclose(var_f_a, var_f_q, atol=1e-4),
                                ('Posterior vars different', var_f_a-var_f_q))
Example #22
dataset = data.MultiTaskData(
    X, Y, D, ARGS.n_train_data, ARGS.n_tasks, ARGS.n_train_tasks,
    ARGS.dim_in, ARGS.dim_out, normalize=True)

""" Train MLGP """

X_train = dataset.data["training"]["X"].reshape(-1, ARGS.dim_in)
Z = X_train.copy()
np.random.shuffle(Z)
Z = Z[:ARGS.n_inducing]
Z = np.hstack([Z, np.zeros_like(Z)])

# mean_func = None
mean_func = mean_functions.Linear(A=np.ones((ARGS.dim_in + ARGS.dim_h, ARGS.dim_out)),
                                  b=None,
                                  name=ARGS.model_name + "_linear_mean")
kernel = kernels.RBF(ARGS.dim_in + ARGS.dim_h, ARD=True, name=ARGS.model_name + "_kernel")
likelihood = likelihoods.MultiGaussian(dim=ARGS.dim_out, name=ARGS.model_name + "_likelihood")

# Create graph
model = MLSVGP(
    dim_in=ARGS.dim_in, dim_out=ARGS.dim_out, dim_h=ARGS.dim_h, num_h=ARGS.n_tasks,
    kern=kernel, likelihood=likelihood, mean_function=mean_func, Z=Z,
    max_lik_h=ARGS.max_lik_h, name=ARGS.model_name)

likelihood = model._build_likelihood()
objective = -likelihood

if ARGS.train:
    session, saver = training.train(model, objective, dataset, ARGS)
else:
    session = tf.Session()
Example #23
def bayesian_gplvm_example():
    np.random.seed(42)

    pods.datasets.overide_manual_authorize = True  # Don't ask to authorize.
    gpflow.settings.numerics.quadrature = 'error'  # Throw error if quadrature is used for kernel expectations.

    # Data.
    data = pods.datasets.oil_100()
    Y = data['X']
    print('Number of points X Number of dimensions', Y.shape)
    data['citation']

    # Model construction.
    Q = 5
    M = 20  # Number of inducing pts.
    N = Y.shape[0]
    X_mean = gpflow.models.PCA_reduce(Y, Q)  # Initialise via PCA.
    Z = np.random.permutation(X_mean.copy())[:M]

    fHmmm = False
    if fHmmm:
        k = (kernels.RBF(3, ARD=True, active_dims=slice(0, 3)) +
             kernels.Linear(2, ARD=False, active_dims=slice(3, 5)))
    else:
        k = (kernels.RBF(3, ARD=True, active_dims=[0, 1, 2]) +
             kernels.Linear(2, ARD=False, active_dims=[3, 4]))

    m = gpflow.models.BayesianGPLVM(X_mean=X_mean,
                                    X_var=0.1 * np.ones((N, Q)),
                                    Y=Y,
                                    kern=k,
                                    M=M,
                                    Z=Z)
    m.likelihood.variance = 0.01

    opt = gpflow.train.ScipyOptimizer()
    m.compile()
    opt.minimize(m)  #, options=dict(disp=True, maxiter=100))

    # Compute and plot sensitivity to input.
    #	Sensitivity is a measure of the importance of each latent dimension.
    kern = m.kern.kernels[0]
    sens = np.sqrt(kern.variance.read_value()) / kern.lengthscales.read_value()
    print(m.kern)
    print(sens)
    fig, ax = plt.subplots()
    ax.bar(np.arange(len(kern.lengthscales.read_value())),
           sens,
           0.1,
           color='y')
    ax.set_title('Sensitivity to latent inputs')

    # Plot vs PCA.
    XPCAplot = gpflow.models.PCA_reduce(data['X'], 2)
    f, ax = plt.subplots(1, 2, figsize=(10, 6))
    labels = data['Y'].argmax(axis=1)
    colors = cm.rainbow(np.linspace(0, 1, len(np.unique(labels))))

    for i, c in zip(np.unique(labels), colors):
        ax[0].scatter(XPCAplot[labels == i, 0],
                      XPCAplot[labels == i, 1],
                      color=c,
                      label=i)
        ax[0].set_title('PCA')
        ax[1].scatter(m.X_mean.read_value()[labels == i, 1],
                      m.X_mean.read_value()[labels == i, 2],
                      color=c,
                      label=i)
        ax[1].set_title('Bayesian GPLVM')
Example #24
def rbf_kern():
    return kernels.RBF(Data.D_in, variance=rng.rand(), lengthscales=rng.rand() + 1.)
Example #25
def rbf_kern_2():
    # Additional cached rbf kernel for rbf cross covariance tests
    return kernels.RBF(Data.D_in,
                       variance=rng.rand(),
                       lengthscales=rng.rand() + 1.)
Example #26
def rbf_kern_act_dim_1():
    return kernels.RBF(1, variance=rng.rand(), lengthscales=rng.rand() + 1., active_dims=[1])
Example #27
class Data:
    rng = np.random.RandomState(1)
    num_data = 5
    num_ind = 4
    D_in = 2
    D_out = 2

    Xmu = rng.randn(num_data, D_in)
    L = gen_L(rng, num_data, D_in, D_in)
    Xvar = np.array([l @ l.T for l in L])
    Z = rng.randn(num_ind, D_in)

    # distributions don't need to be compiled (No Parameter objects)
    # but the members should be Tensors created in the same graph
    graph = tf.Graph()
    with test_util.session_context(graph) as sess:
        gauss = Gaussian(tf.constant(Xmu), tf.constant(Xvar))
        dirac = Gaussian(tf.constant(Xmu),
                         tf.constant(np.zeros((num_data, D_in, D_in))))
        gauss_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(rng.rand(num_data, D_in)))
        dirac_diag = DiagonalGaussian(tf.constant(Xmu),
                                      tf.constant(np.zeros((num_data, D_in))))
        dirac_markov_gauss = MarkovGaussian(
            tf.constant(Xmu), tf.constant(np.zeros((2, num_data, D_in, D_in))))

        # create the covariance for the pairwise markov-gaussian
        dummy_gen = lambda rng, n, *shape: np.array(
            [rng.randn(*shape) for _ in range(n)])
        L_mg = dummy_gen(rng, num_data, D_in, 2 * D_in)  # N+1 x D x 2D
        LL = np.concatenate((L_mg[:-1], L_mg[1:]), 1)  # N x 2D x 2D
        Xcov = LL @ np.transpose(LL, (0, 2, 1))
        Xc = np.concatenate((Xcov[:, :D_in, :D_in], Xcov[-1:, D_in:, D_in:]),
                            0)  # N+1 x D x D
        Xcross = np.concatenate(
            (Xcov[:, :D_in, D_in:], np.zeros(
                (1, D_in, D_in))), 0)  # N+1 x D x D
        Xcc = np.stack([Xc, Xcross])  # 2 x N+1 x D x D

        markov_gauss = MarkovGaussian(Xmu, Xcc)

    with gpflow.decors.defer_build():
        # features
        ip = features.InducingPoints(Z)
        # kernels
        rbf_prod_seperate_dims = kernels.Product([
            kernels.RBF(1,
                        variance=rng.rand(),
                        lengthscales=rng.rand(),
                        active_dims=[0]),
            kernels.RBF(1,
                        variance=rng.rand(),
                        lengthscales=rng.rand(),
                        active_dims=[1])
        ])

        rbf_lin_sum = kernels.Sum([
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand()),
            kernels.Linear(D_in, variance=rng.rand())
        ])

        rbf = kernels.RBF(D_in, variance=rng.rand(), lengthscales=rng.rand())

        lin_kern = kernels.Linear(D_in, variance=rng.rand())

        # mean functions
        lin = mean_functions.Linear(rng.rand(D_in, D_out), rng.rand(D_out))
        iden = mean_functions.Identity(D_in)  # Note: Identity can only be used if Din == Dout
        zero = mean_functions.Zero(output_dim=D_out)
        const = mean_functions.Constant(rng.rand(D_out))