Example 1
    def __init__(self,
                 input_dim,
                 numc,
                 lengthscales=None,
                 variances=None,
                 frequencies=None):
        gpflow.kernels.Kern.__init__(self, input_dim, active_dims=None)
        self.ARD = False
        self.numc = numc

        if lengthscales is None:
            lengthscales = 1.
            variances = 0.125 * np.ones((numc, 1))
            frequencies = 1. * np.arange(1, numc + 1)

        self.lengthscales = Param(lengthscales, transforms.Logistic(0., 10.))
        # Generate a Param object for each variance and frequency;
        # variances and frequencies must be (numc,) arrays.
        for i in range(self.numc):
            setattr(self, 'variance_' + str(i + 1),
                    Param(variances[i], transforms.Logistic(0., 0.25)))
            setattr(self, 'frequency_' + str(i + 1),
                    Param(frequencies[i], transforms.positive))

        # Fix each per-partial variance Param (no string-based exec needed).
        for i in range(self.numc):
            getattr(self, 'variance_' + str(i + 1)).fixed = True
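
The per-partial parameters above are stored under dynamically built attribute names; a dependency-free sketch of that setattr/getattr pattern:

# Runnable sketch of the dynamic-attribute pattern used above,
# independent of gpflow.
class Holder(object):
    pass

h, numc = Holder(), 3
for i in range(numc):
    setattr(h, 'variance_' + str(i + 1), 0.125)
print([getattr(h, 'variance_' + str(i + 1)) for i in range(numc)])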
Example 2
    def __init__(self, X, Y, kern, likelihood, mu_old, Su_old, Kaa_old, Z_old, Z, mean_function=Zero(),
                 num_latent=None, q_diag=False, whiten=True, minibatch_size=None):

        # sort out the X, Y into MiniBatch objects.
        if minibatch_size is None:
            minibatch_size = X.shape[0]
        self.num_data = X.shape[0]
        X = MinibatchData(X, minibatch_size, np.random.RandomState(0))
        Y = MinibatchData(Y, minibatch_size, np.random.RandomState(0))

        # init the super class, accept args
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
        self.q_diag, self.whiten = q_diag, whiten
        self.Z = Param(Z)
        self.num_latent = num_latent or Y.shape[1]
        self.num_inducing = Z.shape[0]

        # init variational parameters
        self.q_mu = Param(np.zeros((self.num_inducing, self.num_latent)))
        if self.q_diag:
            self.q_sqrt = Param(np.ones((self.num_inducing, self.num_latent)),
                                transforms.positive)
        else:
            q_sqrt = np.array([np.eye(self.num_inducing)
                               for _ in range(self.num_latent)]).swapaxes(0, 2)
            # , transforms.LowerTriangular(q_sqrt.shape[2]))  # Temp remove transform
            self.q_sqrt = Param(q_sqrt)

        self.mu_old = DataHolder(mu_old, on_shape_change='pass')
        self.M_old = Z_old.shape[0]
        self.Su_old = DataHolder(Su_old, on_shape_change='pass')
        self.Kaa_old = DataHolder(Kaa_old, on_shape_change='pass')
        self.Z_old = DataHolder(Z_old, on_shape_change='pass')
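
One detail worth noting above: X and Y are wrapped in separate MinibatchData objects that share the seed np.random.RandomState(0). A plain-NumPy sketch of why that matters (this mirrors the idea rather than MinibatchData's actual internals):

import numpy as np

# Identical seeds draw identical row indices, so every minibatch keeps
# inputs and targets paired.
X = np.arange(10).reshape(-1, 1)
Y = X * 10
rng_x, rng_y = np.random.RandomState(0), np.random.RandomState(0)
idx_x = rng_x.permutation(len(X))[:4]
idx_y = rng_y.permutation(len(Y))[:4]
assert np.all(idx_x == idx_y)  # the same rows are selected for X and Y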
Example 3
    def __init__(self, X, Y, X_prime):
        """
        X is a data vector, size N x 1
        X_prime is a vector, size M x 1
        Y is a data vector, size N x 1

        This is a vanilla implementation of a GP with unimodality constraints and HMC sampling.
        Refer:
        https://bayesopt.github.io/papers/2017/9.pdf
        """
        X_concat = np.vstack([X, X_prime])
        UnimodalGP.__init__(self)
        self.X_concat = DataHolder(X_concat)
        self.Y = DataHolder(Y)
        self.X = DataHolder(X)
        self.X_prime = DataHolder(X_prime)
        self.num_data = X_concat.shape[0]
        self.num_x_points = X.shape[0]
        self.num_der_points = X_prime.shape[0]
        self.num_latent = Y.shape[1]

        self.Vf = Param(np.zeros((self.num_data, self.num_latent)))
        self.Vf.prior = Gaussian(0., 1.)

        self.Vg = Param(np.zeros((2 * self.num_der_points, self.num_latent)))
        self.Vg.prior = Gaussian(0., 1.)
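
A runnable shape sketch of the inputs described in the docstring: N observed inputs and M derivative ("virtual") inputs are stacked into a single column vector, and Vg presumably carries 2 * M rows because both g and its derivative are represented at X_prime:

import numpy as np

N, M = 20, 5  # hypothetical sizes
X = np.linspace(0., 1., N).reshape(-1, 1)        # observed inputs, N x 1
X_prime = np.linspace(0., 1., M).reshape(-1, 1)  # derivative inputs, M x 1
X_concat = np.vstack([X, X_prime])
assert X_concat.shape == (N + M, 1)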
Example 4
    def __init__(self,
                 input_dim,
                 num_partials,
                 lengthscales=None,
                 variances=None,
                 frequencies=None):
        gpflow.kernels.Kern.__init__(self, input_dim, active_dims=None)
        len_l = []
        var_l = []
        freq_l = []
        self.ARD = False
        self.num_partials = num_partials

        if lengthscales is None:
            lengthscales = 1. * np.ones((num_partials, 1))
            variances = 0.125 * np.ones((num_partials, 1))
            frequencies = 1. * (1. + np.arange(num_partials))

        for i in range(self.num_partials):
            len_l.append(Param(lengthscales[i], transforms.Logistic(0., 2.)))
            var_l.append(Param(variances[i], transforms.Logistic(0., 1.)))
            freq_l.append(Param(frequencies[i], transforms.positive))

        self.lengthscales = ParamList(len_l)
        self.variance = ParamList(var_l)
        self.frequency = ParamList(freq_l)
Example 5
    def __init__(self, input_dim, a=1.0, b=1.0, active_dims=None):
        """
        """
        gpflow.kernels.Kern.__init__(self, input_dim, active_dims)

        self.a = Param(a)
        self.b = Param(b)
Example 6
    def __init__(self,
                 input_dim,
                 energy=np.asarray([1.]),
                 frequency=np.asarray([2 * np.pi]),
                 variance=1.0,
                 features_as_params=False):
        """
        - input_dim is the dimension of the input to the kernel
        - variance is the (initial) value for the variance parameter(s)
          if ARD=True, there is one variance per input
        - active_dims is a list of length input_dim which controls
          which columns of X are used.
        """
        gpflow.kernels.Kern.__init__(self, input_dim, active_dims=None)
        self.num_features = len(frequency)
        self.variance = Param(variance, transforms.Logistic(0., 0.25))

        if features_as_params:
            energy_list = []
            frequency_list = []
            for i in range(energy.size):
                energy_list.append(Param(energy[i], transforms.positive))
                frequency_list.append(Param(frequency[i], transforms.positive))

            self.energy = ParamList(energy_list)
            self.frequency = ParamList(frequency_list)
        else:
            self.energy = energy
            self.frequency = frequency
Example 7
    def __init__(self,
                 input_dim,
                 variance=1.,
                 lengthscales=None,
                 energy=None,
                 frequencies=None,
                 len_fixed=True):
        gpflow.kernels.Kern.__init__(self, input_dim, active_dims=None)
        energy_l = []
        freq_l = []
        self.ARD = False
        self.num_partials = len(energy)

        for i in range(self.num_partials):
            energy_l.append(Param(energy[i], transforms.positive))
            freq_l.append(Param(frequencies[i], transforms.positive))

        self.energy = ParamList(energy_l)
        self.frequency = ParamList(freq_l)
        self.variance = Param(variance, transforms.positive)
        self.lengthscales = Param(lengthscales, transforms.positive)

        self.vars_n_freqs_fixed(fix_energy=True, fix_freq=True)
        if len_fixed:
            self.lengthscales.fixed = True
Example 8
    def __init__(self, input_dim, energy=np.asarray([1.]), frequency=np.asarray([2*np.pi]), variance=1.,
                 lengthscales=1., len_fixed=False):
        gpflow.kernels.Stationary.__init__(self, input_dim, variance=variance, lengthscales=lengthscales,
                                           active_dims=None, ARD=False)
        # self.variance = Param(variance, transforms.positive())
        # self.lengthscale = Param(lengthscale, transforms.positive())

        self.num_partials = len(frequency)

        energy_list = []
        frequency_list = []

        for i in range(self.num_partials):
            energy_list.append(Param(energy[i], transforms.positive))
            frequency_list.append(Param(frequency[i], transforms.positive))

        self.energy = ParamList(energy_list)
        self.frequency = ParamList(frequency_list)

        # self.energy.fixed = True
        # self.frequency.fixed = True

        # self.energy = energy
        # self.frequency = frequency

        if len_fixed:
            self.lengthscales.fixed = True
Example 9
 def navigate_from(self, Pnew, n, r=0.1):
     self.r2 = r * r
     self.P_path = np.array([Pnew]).reshape([-1, 2])
     Pnew = Pnew.reshape([1, 2])
     self.Pnew = Param(np.tile(Pnew, (n, 1)))
     self.Pinit = Param(np.array(Pnew))
     self.Pinit.fixed = True
     self.n = n
Example 10
 def __init__(self, mu=None, lengthscale=None, signal_variance=None):
     mu = np.zeros(1) if mu is None else mu
     lengthscale = 0.2 * np.ones(1) if lengthscale is None else lengthscale
     signal_variance = 3 * np.ones(1) if signal_variance is None else signal_variance
     MeanFunction.__init__(self)
     self.signal_variance = Param(signal_variance, transforms.positive)
     self.lengthscale = Param(lengthscale, transforms.positive)
     self.mu = Param(mu)
Example 11
 def __init__(self,
              Y,
              latent_dim,
              X_mean=None,
              kern=None,
              mean_function=Zero()):
     """
     Initialise GPLVM object. This method only works with a Gaussian likelihood.
     :param Y: data matrix, size N (number of points) x D (dimensions)
      :param latent_dim: number of latent dimensions (Q)
      :param X_mean: latent positions (N x Q), for the initialisation of the latent space.
      :param kern: kernel specification, by default RBF
      :param mean_function: mean function, by default Zero.
     """
     if kern is None:
         kern = kernels.RBF(latent_dim, ARD=True)
     if X_mean is None:
         X_mean = PCA_initialization(Y, latent_dim)
      assert X_mean.shape[1] == latent_dim, \
          'Passed-in latent dimension ' + str(latent_dim) + ' does not match initial X ' + str(X_mean.shape[1])
      self.num_latent = X_mean.shape[1]
      assert Y.shape[1] >= self.num_latent, 'More latent dimensions than observed.'
     GPR.__init__(self, X_mean, Y, kern, mean_function=mean_function)
     del self.X  # in GPLVM this is a Param
     self.X = Param(X_mean)
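
PCA_initialization is referenced above but not shown; a common implementation, given here only as an assumption about what it might do, projects the centred data onto its top principal directions via an SVD:

import numpy as np

def pca_initialization(Y, latent_dim):
    # Hypothetical stand-in for PCA_initialization: project the centred
    # data onto its top `latent_dim` principal axes.
    Yc = Y - Y.mean(axis=0)
    _, _, Vt = np.linalg.svd(Yc, full_matrices=False)
    return Yc.dot(Vt[:latent_dim].T)  # N x latent_dim scores

Y = np.random.randn(50, 6)
X_mean = pca_initialization(Y, latent_dim=2)
assert X_mean.shape == (50, 2)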
Example 12
    def __init__(self,
                 X,
                 Y,
                 kern,
                 mu_old,
                 Su_old,
                 Kaa_old,
                 Z_old,
                 Z,
                 mean_function=Zero()):
        """
        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        Z is a matrix of pseudo inputs, size M x D
        kern, mean_function are appropriate gpflow objects
        mu_old, Su_old are mean and covariance of old q(u)
        Z_old is the old inducing inputs
        This method only works with a Gaussian likelihood.
        """
        X = DataHolder(X, on_shape_change='pass')
        Y = DataHolder(Y, on_shape_change='pass')
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
        self.Z = Param(Z)
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]

        self.mu_old = DataHolder(mu_old, on_shape_change='pass')
        self.M_old = Z_old.shape[0]
        self.Su_old = DataHolder(Su_old, on_shape_change='pass')
        self.Kaa_old = DataHolder(Kaa_old, on_shape_change='pass')
        self.Z_old = DataHolder(Z_old, on_shape_change='pass')
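
The constructor above carries the previous posterior over to a new batch of data; a hand-off sketch in plain NumPy. OnlineSGPR is a hypothetical stand-in name for the class, and in practice mu_old, Su_old, and Kaa_old would be read off the previously trained model rather than initialised like this:

import numpy as np

M_old, D, R = 10, 1, 1
Z_old = np.random.randn(M_old, D)   # previous inducing inputs
mu_old = np.zeros((M_old, R))       # mean of the old q(u)
Su_old = np.eye(M_old)              # covariance of the old q(u)
Kaa_old = np.eye(M_old)             # old prior covariance at Z_old
X_new = np.random.randn(30, D)      # freshly arrived batch
Y_new = np.random.randn(30, R)
Z_new = np.random.randn(15, D)      # new inducing inputs
# model = OnlineSGPR(X_new, Y_new, kern, mu_old, Su_old,
#                    Kaa_old, Z_old, Z_new)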
Example 13
    def navigate(self, delta=0.1):
        def _find_neighbors(P, n=8):
            embeds = self.X_mean.value
            dist = np.sum(np.square(P - embeds), axis=-1)
            dist = np.sqrt(dist)
            idx = np.argsort(dist)
            nei = embeds[idx[:n + 1], :]
            return nei, dist[idx[:n + 1]]

        n = 8
        Pnew = self.Pnew.value
        X, dist = _find_neighbors(Pnew, n)
        print('\t dist', dist)
        # Inverse-power force law: strongly repulsive at short range,
        # weakly attractive at long range, capped at 0.1.
        A = 0.04
        B = 2
        dist = dist.reshape([-1, 1])
        F = A / np.power(dist, 1.2) - B / np.power(dist, 0.2)
        F = np.minimum(F, 0.1)
        pdir = Pnew - X
        pdir = pdir / dist
        t = delta
        xx = 0.5 * t * t * F
        xx = np.sum(pdir * xx, axis=0)

        p, W = pca(X, n=1)
        W = W.reshape([2])
        W = W / np.sqrt(np.sum(W * W))
        if len(self.P_path) > 1 and W.dot(self.P_path[-1, :] -
                                          self.P_path[-2, :]) < 0:
            W = -1 * W
        Pnew = self.Pnew.value + W * delta + xx
        self.Pnew = Param(Pnew)
        self.P_path = np.append(self.P_path, Pnew, axis=0)

        return Pnew, W * 0.1, xx
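
The constants A and B above set an equilibrium distance for the interaction: F(d) = A/d^1.2 - B/d^0.2 vanishes exactly at d = A/B, since F(d) = 0 implies A/B = d^(1.2 - 0.2) = d. A runnable check:

A, B = 0.04, 2.0

def F(d):
    return A / d ** 1.2 - B / d ** 0.2

d_eq = A / B  # 0.02: repulsive below this distance, attractive above
print(F(0.5 * d_eq) > 0, abs(F(d_eq)) < 1e-9, F(2.0 * d_eq) < 0)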
Example 14
 def __init__(self):
     """
     Likelihood for Gaussian Process with unimodality constraints
     """
     Likelihood.__init__(self)
     self.nuf = 1. / 1e-6
     self.nug = 1. / 1e-6
     self.noise_variance = Param(1.0, transforms.positive)
Example 15
 def __init__(self, X_variational_mean, X_variational_var, t):
     """
     
     :param X_variational_mean: initial latent variational distribution mean, size N (number of points) x Q (latent dimensions)
     :param X_variational_var: initial latent variational distribution std (N x Q)
     :param t: time stamps for the variational prior kernel, need to ba an np.narray. 
     """
     super(GPTimeSeries, self).__init__(name='GPTimeSeries')
     self.X_variational_mean = Param(X_variational_mean)
     self.X_variational_var = Param(X_variational_var, transforms.positive)
     assert X_variational_var.ndim == 2, "the dimensionality of variational prior covariance needs to be 2."
     assert np.all(X_variational_mean.shape == X_variational_var.shape), "the shape of variational prior mean and variational prior covariance needs to be equal."
     self.num_latent = X_variational_mean.shape[1]
     self.num_data = X_variational_mean.shape[0]
     assert (isinstance(t, np.ndarray)), "time stamps need to be a numpy array."
     t = DataHolder(t)
     self.t = t
Example 16
    def __init__(self, input_dim, frequency=None, energy=None, variance=1.0):
        gpflow.kernels.Kern.__init__(self, input_dim, active_dims=None)

        self.ARD = False
        # NOTE: frequency and energy must be supplied as arrays; the None
        # defaults would raise a TypeError at len(frequency).
        self.num_partials = len(frequency)

        self.energy = energy
        self.variance = Param(variance, transforms.positive)
        self.frequency = frequency
Example 17
    def locate_skeletons(self, new_ske):
        embeds = self.X_mean.value
        skeletons = self.skeletons
        new_ske = new_ske.reshape([-1, self.nkpts, 3])

        def cal_distSkeletons(shapesA, shapesB):
            dist = np.sqrt(((shapesB - shapesA)**2).sum(axis=-1))
            dist = dist.sum(axis=-1)
            return dist

        dist = cal_distSkeletons(new_ske, skeletons)
        dist = dist.reshape([-1])
        idx = np.argsort(dist)
        pStart = embeds[idx[0], :]
        pStart = pStart.reshape([1, 2])

        self.Xnew = Param(pStart)
        self.new_ske = Param(new_ske)
        self.new_ske.fixed = True
Example 18
    def __init__(self, actions_list, kerns_list, X_grid):
        """
        Collaboartive preference learning through occupants' actions/behavior
        Uses information : feature value before action and after action
        Inputs:
        actions_list : all actions data list (list of actions taken by each occupant) N_i X (2M) where,
        N_i : total number of actions taken by occupant i
        first half of 2M columns : feature values before action
        second half of 2M columns : feature values after action
        L : length of the list = total number of occupants
        kerns_list : kernels associated with all of the latent GPs 
        """

        # feature grid points
        self.X_grid = X_grid
        self.num_x_grid = self.X_grid.shape[0]

        # num of latent GPs
        self.num_latent_gps = len(kerns_list)

        # num of occupants
        self.num_occupants = len(actions_list)

        # Generate grid indices for the actions.
        # U is evaluated at all X_grid points, so for each action taken by an
        # occupant we look up the grid index of its before/after feature values.
        prev_ind_list, cur_ind_list = all_occ_prev_cur_act_ind(
            actions_list, X_grid)

        GPCollabPrefLearn.__init__(self, prev_ind_list, cur_ind_list, X_grid,
                                   kerns_list)

        # Prior for H (latent GP matrix) setup

        # HMC sampling setup (standard normal distribution : whitening variables)
        self.V_h = Param(np.zeros((self.num_latent_gps, self.num_x_grid)))
        self.V_h.prior = Gaussian(0., 1.)

        self.W = Param(np.random.randn(self.num_occupants,
                                       self.num_latent_gps))
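
A shape-only sketch of actions_list (hypothetical sizes; drawing the before/after feature values from rows of X_grid keeps the index lookup in all_occ_prev_cur_act_ind meaningful):

import numpy as np

M_feat, G = 2, 50
X_grid = np.random.randn(G, M_feat)  # feature grid, G x M

def fake_actions(n):
    # Each row: [features before action | features after action].
    rows = np.random.randint(0, G, size=(n, 2))
    return np.hstack([X_grid[rows[:, 0]], X_grid[rows[:, 1]]])  # n x 2M

actions_list = [fake_actions(12), fake_actions(7)]  # L = 2 occupants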
Example 19
    def __init__(self, input_dim, xkern, fkern, variance=1.0, plen=1.0, pvar=1.0):

        gpflow.kernels.Stationary.__init__(self, input_dim=input_dim, active_dims=None, ARD=False)
        eyem = tf.eye(xkern.size, dtype=float_type)
        self.variance = Param(variance, transforms.positive)
        self.plen = plen
        self.pvar = pvar
        self.fkern = fkern
        self.xkern = xkern
        self.kern = gpflow.kernels.RBF(input_dim=input_dim, variance=self.pvar, lengthscales=self.plen)
        self.cov = tf.matmul(eyem, self.kern.compute_K_symm(xkern))
        self.icov = tf.matrix_inverse(self.cov + jitter*eyem)
Example 20
    def compile(self, session=None, graph=None, optimizer=None):
        """
        Before calling the standard compile function, check to see if the size
        of the data has changed and add parameters appropriately.

        This is necessary because the shape of the parameters depends on the
        shape of the data.
        """
        if self.num_data != self.X_concat.shape[0]:
            self.num_data = self.X_concat.shape[0]
            self.num_x_points = self.X.shape[0]
            self.num_der_points = self.X_prime.shape[0]
            self.Vf = Param(np.zeros((self.num_data, self.num_latent)))
            self.Vf.prior = Gaussian(0., 1.)
            self.Vg = Param(
                np.zeros((2 * self.num_der_points, self.num_latent)))
            self.Vg.prior = Gaussian(0., 1.)

        return super(UnimodalGPMC, self).compile(session=session,
                                                 graph=graph,
                                                 optimizer=optimizer)
Example 21
    def __init__(self, embeds, m, name='Model'):
        Model.__init__(self, name)

        self.nkpts = len(m.Y.value[0])
        self.npts = len(embeds)
        embeds = np.array(m.X_mean.value)
        self.X_mean = Param(embeds)
        self.Z = Param(np.array(m.Z.value))
        self.kern = deepcopy(m.kern)
        self.X_var = Param(np.array(m.X_var.value))
        self.Y = m.Y
        self.likelihood = likelihoods.Gaussian()
        self.mean_function = Zero()

        self.likelihood._check_targets(self.Y.value)
        self._session = None

        self.X_mean.fixed = True
        self.Z.fixed = True
        self.kern.fixed = True
        self.X_var.fixed = True
        self.likelihood.fixed = True
Example 22
    def local_pca(self, delta=0.1):
        def _find_neighbors(P, n=8):
            embeds = self.X_mean.value
            dist = np.sum(np.square(P - embeds), axis=-1)
            idx = np.argsort(dist)  # nearest neighbours first (ascending distance)
            nei = embeds[idx[:n + 1], :]
            return nei

        X = _find_neighbors(self.Pnew.value, 8)
        p, W = pca(X)
        W = W / np.sqrt(np.sum(W * W))
        Pnew = self.Pnew.value + W * delta
        self.Pnew = Param(Pnew)
        return Pnew
Example 23
    def __init__(self, embeds, skeletons, dist, m, name='Model'):
        Model.__init__(self, name)
        self.skeletons = skeletons
        self.dist_embeds = dist
        self.nkpts = len(skeletons[0, :])
        self.npts = len(embeds)
        self.dist_skeletons = np.ones([self.npts, self.npts]) * -1
        embeds = np.array(m.X_mean.value)
        self.X_mean = Param(embeds)
        self.Z = Param(np.array(m.Z.value))
        self.kern = deepcopy(m.kern)
        self.X_var = Param(np.array(m.X_var.value))
        self.Y = m.Y
        self.likelihood = likelihoods.Gaussian()
        self.mean_function = Zero()

        self.likelihood._check_targets(self.Y.value)
        self._session = None

        self.X_mean.fixed = True
        self.Z.fixed = True
        self.kern.fixed = True
        self.X_var.fixed = True
        self.likelihood.fixed = True
Example 24
    def __init__(self, X, Y, kern, Z, alpha, mean_function=Zero()):
        """
        X is a data matrix, size N x D
        Y is a data matrix, size N x R
        Z is a matrix of pseudo inputs, size M x D
        kern, mean_function are appropriate gpflow objects

        This method only works with a Gaussian likelihood.
        """
        X = DataHolder(X, on_shape_change='pass')
        Y = DataHolder(Y, on_shape_change='pass')
        likelihood = likelihoods.Gaussian()
        GPModel.__init__(self, X, Y, kern, likelihood, mean_function)
        self.Z = Param(Z)
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]
        self.alpha = alpha
Example 25
    def __init__(self, X_variational_mean, X_variational_var, Y, kern, t, kern_t, M, Z=None):
        """
        Initialization of Bayesian Gaussian Process Dynamics Model. This method only works with Gaussian likelihood.
        :param X_variational_mean: initial latent positions, size N (number of points) x Q (latent dimensions).
        :param X_variational_var: variance of latent positions (N x Q), for the initialisation of the latent space.
        :param Y: data matrix, size N (number of points) x D (dimensions).
        :param kern: kernel specification, by default RBF.
        :param t: time stamps.
        :param kern_t: dynamics kernel specification, by default RBF.
        :param M: number of inducing points.
        :param Z: matrix of inducing points, size M (inducing points) x Q (latent dimensions), By default
                  random permutation of X_mean.
        """
        super(BayesianDGPLVM, self).__init__(name='BayesianDGPLVM')
        self.kern = kern
        assert len(X_variational_mean) == len(X_variational_var), 'must be same amount of time series'
        self.likelihood = likelihoods.Gaussian()

        # multiple sequences
        series = []
        for i in range(len(X_variational_mean)):
            series.append(GPTimeSeries(X_variational_mean[i], X_variational_var[i], t[i]))
        self.series = ParamList(series)

        # inducing points
        if Z is None:
            # By default we initialize by permutation of initial
            Z = np.random.permutation(np.concatenate(X_variational_mean, axis=0).copy())[:M]
        else:
            assert Z.shape[0] == M
        self.Z = Param(Z)

        self.kern_t = kern_t
        self.Y = DataHolder(Y)
        self.M = M
        self.n_s = 0
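
A shape sketch of the multi-sequence arguments (hypothetical sizes; stacking all sequences' outputs into one Y matrix is an assumption based on the single DataHolder above):

import numpy as np

Q, D = 2, 5                                    # latent and observed dims
X_means = [np.random.randn(30, Q), np.random.randn(45, Q)]
X_vars = [0.1 * np.ones((30, Q)), 0.1 * np.ones((45, Q))]
t = [np.linspace(0., 1., 30).reshape(-1, 1),   # one time-stamp array
     np.linspace(0., 1.5, 45).reshape(-1, 1)]  # per sequence
Y = np.random.randn(30 + 45, D)                # all sequences' outputs
# model = BayesianDGPLVM(X_means, X_vars, Y, kern, t, kern_t, M=12)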
Example 26
 def __init__(self, input_dim, variance=1., frequency=1.):
     gpflow.kernels.Kern.__init__(self, input_dim, active_dims=None)
     self.variance = Param(variance, transforms.positive)
     self.frequency = Param(frequency, transforms.positive)
Example 27
    def __init__(self,
                 X,
                 Y,
                 kernf,
                 kerng,
                 likelihood,
                 Zf,
                 Zg,
                 mean_function=None,
                 minibatch_size=None,
                 name='model'):
        Model.__init__(self, name)
        self.mean_function = mean_function or Zero()
        self.kernf = kernf
        self.kerng = kerng
        self.likelihood = likelihood
        self.whiten = False
        self.q_diag = True

        # save initial attributes for future plotting purpose
        Xtrain = DataHolder(X)
        Ytrain = DataHolder(Y)
        self.Xtrain, self.Ytrain = Xtrain, Ytrain

        # sort out the X, Y into MiniBatch objects.
        if minibatch_size is None:
            minibatch_size = X.shape[0]
        self.num_data = X.shape[0]
        self.num_latent = Y.shape[1]  # num_latent will be 1
        self.X = MinibatchData(X, minibatch_size, np.random.RandomState(0))
        self.Y = MinibatchData(Y, minibatch_size, np.random.RandomState(0))

        # Add inducing inputs
        self.Zf = Param(Zf)
        self.Zg = Param(Zg)
        self.num_inducing_f = Zf.shape[0]
        self.num_inducing_g = Zg.shape[0]

        # init variational parameters
        self.u_fm = Param(
            np.random.randn(self.num_inducing_f, self.num_latent) * 0.01)
        self.u_gm = Param(
            np.random.randn(self.num_inducing_g, self.num_latent) * 0.01)

        if self.q_diag:
            self.u_fs_sqrt = Param(
                np.ones((self.num_inducing_f, self.num_latent)),
                transforms.positive)
            self.u_gs_sqrt = Param(
                np.ones((self.num_inducing_g, self.num_latent)),
                transforms.positive)
        else:
            u_fs_sqrt = np.array([
                np.eye(self.num_inducing_f) for _ in range(self.num_latent)
            ]).swapaxes(0, 2)
            self.u_fs_sqrt = Param(
                u_fs_sqrt, transforms.LowerTriangular(u_fs_sqrt.shape[2]))

            u_gs_sqrt = np.array([
                np.eye(self.num_inducing_g) for _ in range(self.num_latent)
            ]).swapaxes(0, 2)
            self.u_gs_sqrt = Param(
                u_gs_sqrt, transforms.LowerTriangular(u_gs_sqrt.shape[2]))
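
A runnable shape sketch of the full-covariance initialisation in the else branch above: a stack of L identity Cholesky factors, with axes swapped so the latent-function index comes last:

import numpy as np

M, L = 4, 3  # inducing points, latent functions (hypothetical sizes)
u_fs_sqrt = np.array([np.eye(M) for _ in range(L)]).swapaxes(0, 2)
assert u_fs_sqrt.shape == (M, M, L)  # (L, M, M) before the swap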
Example 28
    def __init__(self,
                 X_variational_mean,
                 X_variational_var,
                 Y,
                 Kern,
                 M,
                 Z=None,
                 X_prior_mean=None,
                 X_prior_var=None):
        """
        Initialise Bayesian GPLVM object. This method only works with a Gaussian likelihood.
        :param X_variational_mean: initial latent variational distribution mean, size N (number of points) x Q (latent dimensions)
        :param X_variational_var: initial latent variational distribution std (N x Q)
        :param Y: data matrix, size N (number of points) x D (dimensions)
        :param Kern: kernel specification, by default RBF-ARD
        :param M: number of inducing points
        :param Z: matrix of inducing points, size M (inducing points) x Q (latent dimensions). By default
                  random permutation of X_mean.
        :param X_prior_mean: prior mean used in KL term of bound. By default 0. Same size as X_mean.
        :param X_prior_var: prior variance used in KL term of bound. By default 1.
        """
        GPModel.__init__(self,
                         X_variational_mean,
                         Y,
                         Kern,
                         likelihood=likelihoods.Gaussian(),
                         mean_function=Zero())
        del self.X  # in GPLVM this is a Param
        self.X_variational_mean = Param(X_variational_mean)

        assert X_variational_var.ndim == 2, 'Incorrect number of dimensions for X_std.'
        self.X_variational_var = Param(X_variational_var, transforms.positive)
        self.num_data = X_variational_mean.shape[0]
        self.output_dim = Y.shape[1]

        assert np.all((X_variational_mean.shape == X_variational_var.shape))
        assert X_variational_mean.shape[0] == Y.shape[0], 'X variational mean and Y must be the same size.'
        assert X_variational_var.shape[0] == Y.shape[0], 'X variational std and Y must be the same size.'

        # inducing points
        if Z is None:
            # By default it's initialized by random permutation of the latent inputs.
            Z = np.random.permutation(X_variational_mean.copy())[:M]
        else:
            assert Z.shape[0] == M, \
                'Only M inducing points are allowed, however {} are provided.'.format(Z.shape[0])
        self.Z = Param(Z)
        self.num_latent = Z.shape[1]
        assert X_variational_mean.shape[1] == self.num_latent

        # Prior mean and variance for X TODO: the dynamic case is different
        if X_prior_mean is None:
            X_prior_mean = np.zeros((self.num_data, self.num_latent))
        self.X_prior_mean = X_prior_mean
        if X_prior_var is None:
            X_prior_var = np.ones((self.num_data, self.num_latent))
        self.X_prior_var = X_prior_var

        assert X_prior_var.shape[0] == self.num_data
        assert X_prior_var.shape[1] == self.num_latent
        assert X_prior_mean.shape[0] == self.num_data
        assert X_prior_mean.shape[1] == self.num_latent
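
The default inducing-point initialisation above is a row permutation of the latent means; a runnable sketch:

import numpy as np

N, Q, M = 100, 3, 12  # hypothetical sizes
X_variational_mean = np.random.randn(N, Q)
# np.random.permutation on a 2-D array shuffles rows, so Z is M latent
# means drawn without replacement.
Z = np.random.permutation(X_variational_mean.copy())[:M]
assert Z.shape == (M, Q)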
Example 29
 def __init__(self, input_dim, variance=1., lengthscales=1., gamma=1.):
     gpflow.kernels.Stationary.__init__(self,
                                        input_dim=input_dim,
                                        variance=variance,
                                        lengthscales=lengthscales)
     self.gamma = Param(gamma, transforms.Logistic(0.00001, 2.))
Example 30
 def __init__(self, input_dim, gpm, variance=1.0):
     gpflow.kernels.Kern.__init__(self, input_dim, active_dims=None)
     self.variance = Param(variance, transforms.positive)
     self.m = gpm
     self.m.fixed = True