Example #1
def test_gaussian_whiten(Xdata, Xnew, kernel, mu, sqrt):
    """
    Make sure that predicting using the whitened representation is the
    same as the non-whitened one.
    """
    F_sqrt = tf.convert_to_tensor(rng.rand(Nn, Ln))

    K = kernel(Xdata)
    L = tf.linalg.cholesky(K)
    V = tf.linalg.triangular_solve(L, mu, lower=True)
    V_prime = tf.linalg.diag(tf.transpose(F_sqrt))
    common_shape = tf.broadcast_static_shape(V_prime.shape, L.shape)
    L = tf.broadcast_to(L, common_shape)
    V_sqrt = tf.linalg.triangular_solve(L, V_prime, lower=True)

    Fstar_mean, Fstar_var = conditional(Xnew, Xdata, kernel, mu, q_sqrt=F_sqrt)
    Fstar_w_mean, Fstar_w_var = conditional(Xnew,
                                            Xdata,
                                            kernel,
                                            V,
                                            q_sqrt=V_sqrt,
                                            white=True)

    mean_diff = Fstar_w_mean - Fstar_mean
    var_diff = Fstar_w_var - Fstar_var

    assert_allclose(mean_diff, 0, atol=4)
    assert_allclose(var_diff, 0, atol=4)
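A note on what the test relies on: with u = L v (L the Cholesky factor of K), the whitened mean is V = L^{-1} mu, so L V recovers mu. A minimal NumPy sketch of that identity, with illustrative names and sizes that are not part of the test:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((5, 5))
K = A @ A.T + 1e-6 * np.eye(5)        # PSD stand-in for kernel(Xdata)
L = np.linalg.cholesky(K)

mu = rng.standard_normal((5, 1))      # non-whitened mean of q(u)
V = np.linalg.solve(L, mu)            # whitened mean, as in the test

np.testing.assert_allclose(L @ V, mu)   # u = L v recovers the original mean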
Example #2
def test_whiten(Xdata, Xnew, kernel, mu, sqrt):
    """
    Make sure that predicting using the whitened representation is the
    same as the non-whitened one.
    """

    K = kernel(Xdata) + tf.eye(Nn, dtype=default_float()) * 1e-6
    L = tf.linalg.cholesky(K)
    V = tf.linalg.triangular_solve(L, mu, lower=True)
    mean1, var1 = conditional(Xnew, Xdata, kernel, mu)
    mean2, var2 = conditional(Xnew, Xdata, kernel, V, white=True)

    assert_allclose(mean1, mean2)
    assert_allclose(var1, var2)
Example #3
        def __call__(self, X_new: TensorType) -> tf.Tensor:
            N_old = tf.shape(self.f)[0]
            N_new = tf.shape(X_new)[0]

            if self.X is None:
                self.X = X_new
            else:
                self.X = tf.concat([self.X, X_new], axis=0)

            mean, cov = conditional(
                self.X,
                inducing_variable,
                kernel,
                q_mu,
                q_sqrt=q_sqrt,
                white=whiten,
                full_cov=True,
            )  # mean: [N_old+N_new, P], cov: [P, N_old+N_new, N_old+N_new]
            mean = tf.linalg.matrix_transpose(mean)  # [P, N_old+N_new]
            f_old = tf.linalg.matrix_transpose(self.f)  # [P, N_old]
            f_new = draw_conditional_sample(mean, cov, f_old)  # [P, N_new]
            f_new = tf.linalg.matrix_transpose(f_new)  # [N_new, P]
            self.f = tf.concat([self.f, f_new], axis=0)  # [N_old + N_new, P]

            tf.debugging.assert_equal(tf.shape(self.f),
                                      [N_old + N_new, self.P])
            tf.debugging.assert_equal(tf.shape(f_new), [N_new, self.P])

            return f_new
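The closure above conditions each new batch of function values on the draws already made at earlier inputs. A hypothetical standalone analogue of that conditioning step (not the library's draw_conditional_sample) as a NumPy sketch:

import numpy as np

def condition_on_old(mean, cov, f_old):
    """Moments of new | old for a joint N(mean, cov) over [old, new] points."""
    n_old = f_old.shape[0]
    m_o, m_n = mean[:n_old], mean[n_old:]
    K_oo = cov[:n_old, :n_old]
    K_on = cov[:n_old, n_old:]
    K_nn = cov[n_old:, n_old:]
    W = np.linalg.solve(K_oo, K_on)          # K_oo^{-1} K_on
    cond_mean = m_n + W.T @ (f_old - m_o)    # shift by the observed residual
    cond_cov = K_nn - K_on.T @ W             # shrink the prior covariance
    return cond_mean, cond_cov

A = np.random.randn(5, 5)
cov = A @ A.T + 1e-6 * np.eye(5)             # joint covariance over 3 old + 2 new points
m, C = condition_on_old(np.zeros(5), cov, np.random.randn(3))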
Example #4
 def conditional_fn(X):
     return conditional(X,
                        inducing_variable,
                        kernel,
                        DataQuad.q_mu,
                        q_sqrt=DataQuad.q_sqrt,
                        white=white)
Example #5
        def f_conditional(Xnew, full_cov=False):
            mean, var = conditional(Xnew, self.feature, self.kernel, self.q_mu,
                                    q_sqrt=self.q_sqrt,
                                    full_cov=full_cov,
                                    white=True)

            return mean + self.mean_function(Xnew), var
Example #6
    def _build_predict(self, Xnew, full_cov=False, full_output_cov=False):
        """
        Compute the mean and variance of :math:`p(f_* \\mid y)`.

        Parameters
        ----------
        Xnew : np.array, shape=(N, K)
        full_cov : bool
        full_output_cov : bool

        Returns
        -------
        mus, vars :
            Mean and covariances of the variational approximation to the GP applied to the K input dimensions of Xnew.
            Dimensions: mus = N x K and vars = N x K (x K)

        """
        # Reshape to obtain correct covariance
        num_data_new = tf.shape(Xnew)[0]
        Xnew = tf.reshape(Xnew, [-1, 1])

        # Compute conditional
        mu_tmp, var_tmp = conditional(Xnew, self.feature, self.kern, self.q_mu, q_sqrt=self.q_sqrt,
                                      full_cov=full_cov,
                                      white=self.whiten, full_output_cov=full_output_cov)

        # Reshape to N x K
        mu = tf.reshape(mu_tmp + self.mean_function(Xnew), [num_data_new, self.num_classes])
        var = tf.reshape(var_tmp, [num_data_new, self.num_classes])

        return mu, var
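A small NumPy sketch of the reshape round-trip used above, assuming N data points and K classes share one one-dimensional GP; the tanh is only a stand-in for where conditional() would act:

import numpy as np

N, K = 3, 4
Xnew = np.random.randn(N, K)
X_flat = Xnew.reshape(-1, 1)    # [N*K, 1]: every entry becomes a GP input
mu_tmp = np.tanh(X_flat)        # stand-in for the conditional mean
mu = mu_tmp.reshape(N, K)       # back to [N, K], one column per class
assert mu.shape == (N, K)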
Example #7
    def test_f_moments(self):
        with self.test_context() as sess:
            m = self.prepare()
            X_samples, F_samples, f_mus, f_vars = sess.run(
                m._build_linear_time_q_sample(return_f_moments=True,
                                              sample_f=True,
                                              sample_u=False))
            f_mus_batch, f_vars_batch = conditional(
                tf.reshape(X_samples[:-1], [-1, self.E]),
                m.Z,
                m.kern,
                m.Umu.constrained_tensor,
                white=self.white,
                q_sqrt=m.Ucov_chol.constrained_tensor)
            f_mus_batch += m.mean_fn(tf.reshape(X_samples[:-1], [-1, self.E]))

            f_mus_batch = sess.run(f_mus_batch).reshape(
                self.T - 1, self.n_samples, self.E)
            f_vars_batch = sess.run(f_vars_batch).reshape(
                self.T - 1, self.n_samples, self.E)

            assert_allclose(f_mus, f_mus_batch)
            assert_allclose(f_vars, f_vars_batch)

            X_samples_2, f_mus_2, f_vars_2 = sess.run(
                m._build_linear_time_q_sample(return_f_moments=True,
                                              sample_f=False,
                                              sample_u=False))
            f_mus_batch_2, f_vars_batch_2 = conditional(
                tf.reshape(X_samples_2[:-1], [-1, self.E]),
                m.Z,
                m.kern,
                m.Umu.constrained_tensor,
                white=self.white,
                q_sqrt=m.Ucov_chol.constrained_tensor)
            f_mus_batch_2 += m.mean_fn(
                tf.reshape(X_samples_2[:-1], [-1, self.E]))

            f_mus_batch_2 = sess.run(f_mus_batch_2).reshape(
                self.T - 1, self.n_samples, self.E)
            f_vars_batch_2 = sess.run(f_vars_batch_2).reshape(
                self.T - 1, self.n_samples, self.E)

            assert_allclose(f_mus_2, f_mus_batch_2)
            assert_allclose(f_vars_2, f_vars_batch_2)
Example #8
 def conditional_ND(self, Xnew, full_cov=False):
     mu, var = conditional(Xnew,
                           self.X,
                           self.kern,
                           self.q_mu,
                           full_cov=full_cov,
                           q_sqrt=None,
                           white=True)
     return mu + self.mean_function(Xnew), var
Example #9
File: svgp.py, Project: Mr-G1998/mogpe
 def single_sample_conditional(q_mu):
     return conditional(Xnew,
                        self.inducing_variable,
                        self.kernel,
                        q_mu,
                        q_sqrt=q_sqrt,
                        full_cov=full_cov,
                        white=self.whiten,
                        full_output_cov=full_output_cov)
Example #10
 def conditional(self, X, full_cov=False):
     mean, var = conditional(X,
                             self.Z,
                             self.kern,
                             self.q_mu,
                             q_sqrt=self.q_sqrt,
                             full_cov=full_cov,
                             whiten=True)
     return mean + self.mean_function(X), var
Example #11
        def single_sample_conditional(X, select, full_cov=False):

            if test == 0:

                if select is not None:
                    Z = tf.gather(self.feature.Z, select, axis=1)
                else:
                    Z = self.feature.Z
                # temp_kern = RBF(self.temp_kern_shape, lengthscales=self.kern.lengthscales.value, variance=self.kern.variance.value)
                # temp_kern.lengthscales.set_trainable(False)
                # temp_kern.variance.set_trainable(False)
                padd = tf.zeros(
                    [tf.shape(Z)[0], self.kern.input_dim - tf.shape(Z)[1]],
                    dtype=tf.float64)
                Z = tf.concat([Z, padd], 1)
                padd = tf.zeros(
                    [tf.shape(X)[0], self.kern.input_dim - tf.shape(X)[1]],
                    dtype=tf.float64)
                X = tf.concat([X, padd], 1)
                select = tf.random_shuffle(tf.range(tf.shape(
                    self.q_mu)[1]))[:tf.cast(
                        (1.0 - self.dropout) *
                        tf.cast(tf.shape(self.q_mu)[1], tf.float64), tf.int32)]
                select = tf.contrib.framework.sort(select)
                q_mu_temp = tf.gather(self.q_mu, select, axis=1)
                q_sqrt_temp = tf.gather(self.q_sqrt, select, axis=0)
                '''
                Z =  np.take((self.feature.Z).eval(),select,axis=1)
                temp_kern = gpflow.kernels.RBF(select.shape[0], lengthscales=self.kern.lengthscales, variance=self.kern.variance)
                select  = np.random.choice((tf.convert_to_tensor(self.q_mu.shape[1])).eval()-1, size=int((1-self.dropout)*float(((tf.convert_to_tensor(self.q_mu.shape[1])).eval()-1))), replace = False)
                select = np.sort(select)
                q_mu_temp = np.take((self.q_mu).eval(),select,axis=1)
                q_sqrt_temp = np.take((self.q_sqrt).eval(),select,axis=0)
                transform = transforms.LowerTriangular(Z.shape[0], num_matrices=q_mu_temp.shape[1])
                q_sqrt_temp = Parameter(q_sqrt_temp, transform=transform)
                Z = tf.constant(Z)
                q_mu_temp = tf.constant(q_mu_temp)
                q_sqrt_temp = tf.constant(q_sqrt_temp)
                '''

            else:
                Z = self.feature.Z
                # temp_kern = self.kern
                q_mu_temp = self.q_mu
                q_sqrt_temp = self.q_sqrt

            self.q_mu_temp = q_mu_temp
            self.q_sqrt_temp = q_sqrt_temp

            mean, var = conditional(X,
                                    Z,
                                    self.kern,
                                    q_mu_temp,
                                    q_sqrt=q_sqrt_temp,
                                    full_cov=full_cov,
                                    white=True)
            return mean + self.mean_function(X), var, select
Example #12
 def single_sample_conditional(X, full_cov=False):
     mean, var = conditional(X,
                             self.feature.Z,
                             self.kern,
                             self.q_mu,
                             q_sqrt=self.q_sqrt,
                             full_cov=full_cov,
                             white=True)
     return mean + self.mean_function(X), var
Example #13
def test_diag(Xdata, Xnew, kernel, mu, sqrt, chol, white):
    Fstar_mean_1, Fstar_var_1 = conditional(Xnew,
                                            Xdata,
                                            kernel,
                                            mu,
                                            q_sqrt=sqrt,
                                            white=white)
    Fstar_mean_2, Fstar_var_2 = conditional(Xnew,
                                            Xdata,
                                            kernel,
                                            mu,
                                            q_sqrt=chol,
                                            white=white)

    mean_diff = Fstar_mean_1 - Fstar_mean_2
    var_diff = Fstar_var_1 - Fstar_var_2

    assert_allclose(mean_diff, 0)
    assert_allclose(var_diff, 0)
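This test has no docstring, but presumably sqrt holds per-point standard deviations of shape [M, L] while chol holds the matching batch of diagonal Cholesky factors of shape [L, M, M]; both parameterise the same covariance. A minimal NumPy sketch of that equivalence, with illustrative sizes:

import numpy as np

M, L = 4, 2
sqrt = np.random.rand(M, L)                                # diagonal form
chol = np.stack([np.diag(sqrt[:, l]) for l in range(L)])   # [L, M, M]

for l in range(L):
    np.testing.assert_allclose(np.diag(sqrt[:, l] ** 2),   # S from the diagonal
                               chol[l] @ chol[l].T)        # S from the Cholesky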
Example #14
 def _build_predict(self, Xnew, full_cov=False, full_output_cov=False):
     mu, var = conditional(Xnew,
                           self.feature,
                           self.kern,
                           self.q_mu,
                           q_sqrt=self.q_sqrt,
                           full_cov=full_cov,
                           white=self.whiten,
                           full_output_cov=full_output_cov)
     return mu + self.mean_function(Xnew), var
Example #15
 def _build_predict_f(self, X):
     f_mu, f_var = conditional(X,
                               self.Z,
                               self.kern,
                               self.Umu,
                               q_sqrt=self.Ucov_chol,
                               white=True)
     n_mean_inputs = self.mean_fn.input_dim if hasattr(
         self.mean_fn, "input_dim") else self.latent_dim
     f_mu += self.mean_fn(X[:, :n_mean_inputs])
     return f_mu, f_var
Example #16
 def conditional_closure(Xnew, *, full_cov, full_output_cov):
     return conditional(
         Xnew,
         inducing_variable,
         kernel,
         q_mu,
         q_sqrt=q_sqrt,
         white=whiten,
         full_cov=full_cov,
         full_output_cov=full_output_cov,
     )
Example #17
 def single_t_moments(X, U_samples):
     f_mu, f_var = conditional(X,
                               m.Z,
                               m.kern,
                               tf.constant(
                                   U_samples,
                                   dtype=gp.settings.float_type),
                               q_sqrt=None,
                               white=self.white)
     f_mu += m.mean_fn(X)
     return f_mu, f_var
Example #18
    def build_predict(self, Xnew):
        '''
        This method builds the latent variables f, g, and \Phi(g) from the inducing distributions.
        '''
        # Get conditionals
        # returns mean, variance for marginal distributions q(f) and q(g)
        # q(f) = \int q(f|u_f) q(u_f) du_f
        # q(f) = N(f|A*u_fm,Kfnn + A(u_fs - Kfmm)t(A))  A = Kfnm*inv(Kfmm)
        fmean, fvar = conditionals.conditional(Xnew,
                                               self.Zf,
                                               self.kernf,
                                               self.u_fm,
                                               full_cov=False,
                                               q_sqrt=self.u_fs_sqrt,
                                               whiten=self.whiten)
        fmean = fmean + self.mean_function(Xnew)

        gmean, gvar = conditionals.conditional(Xnew,
                                               self.Zg,
                                               self.kerng,
                                               self.u_gm,
                                               full_cov=False,
                                               q_sqrt=self.u_gs_sqrt,
                                               whiten=self.whiten)

        # probit-transformed expectations for gamma
        ephi_g, ephi2_g, evar_phi_g = self.ProbitExpectations(gmean, gvar)

        # compute augmented f
        # from above computations we have
        # p(f)   = N(f| A*u_fm, Kfnn + A(u_fs - Kfmm)t(A))  A = Kfnm*inv(Kfmm)
        # p(f|g) = N(f| diag(ephi_g)*A*u_fm, diag(evar_phi_g)*(Kfnn + A(u_fs - Kfmm)t(A)))
        gfmean = tf.multiply(ephi_g, fmean)
        gfvar = tf.multiply(ephi2_g, fvar)
        gfmeanu = tf.multiply(evar_phi_g, tf.square(fmean))

        # return mean and variance vectors of the following, in order:
        # augmented f, f, g, \Phi(g)
        return gfmean, gfvar, gfmeanu, fmean, fvar, gmean, gvar, ephi_g, evar_phi_g
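For reference, a NumPy sketch of the sparse-GP conditional quoted in the comments above, q(f) = N(A u_m, Knn + A (S - Kmm) A^T) with A = Knm Kmm^{-1}; all names and the toy RBF kernel below are illustrative:

import numpy as np

rng = np.random.default_rng(1)
M, N = 3, 4
Z = rng.standard_normal((M, 1))                  # inducing inputs
X = rng.standard_normal((N, 1))                  # test inputs
k = lambda a, b: np.exp(-0.5 * (a - b.T) ** 2)   # unit-scale RBF on 1-D inputs

Kmm = k(Z, Z) + 1e-8 * np.eye(M)
Kmn = k(Z, X)
Knn = k(X, X)

u_m = rng.standard_normal((M, 1))                # mean of q(u)
S = 0.1 * np.eye(M)                              # covariance of q(u)

A = np.linalg.solve(Kmm, Kmn).T                  # A = Knm Kmm^{-1}
f_mean = A @ u_m                                 # [N, 1]
f_cov = Knn + A @ (S - Kmm) @ A.T                # [N, N]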
Example #19
 def predict_f(self, Xnew, full_cov=False, *, Kuu=None):  # approximation of q(f)
     """
     VBPP-specific conditional on the approximate posterior q(u), including a
     constant mean function.
     """
     mean, var = conditional(Xnew,
                             self.inducing_variable,
                             self.kernel,
                             self.q_mu[:, None],
                             full_cov=full_cov,
                             q_sqrt=self.q_sqrt[None, :, :])
     # TODO make conditional() use Kuu if available
     return mean + self.beta0, var
Example #20
    def _build_predict(self, Xnew, full_cov=False, full_output_cov=False):
        # register Kernel implementations for SpectralSVGP
        from gpflow import name_scope
        from gpflow.dispatch import dispatch

        @conditional.register(object, type(self.feature), type(self), object)
        @name_scope("conditional")
        def _conditional(Xnew,
                         feat,
                         kern,
                         f,
                         *,
                         full_cov=False,
                         full_output_cov=False,
                         q_sqrt=None,
                         white=False):
            # find correct function signature from the dispatcher
            cond = conditional.dispatch(object, type(self.feature),
                                        gpflow.kernels.Kernel, object)
            return cond(Xnew,
                        feat,
                        kern,
                        f,
                        full_cov=full_cov,
                        full_output_cov=full_output_cov,
                        q_sqrt=q_sqrt,
                        white=white)

        @dispatch(type(self.feature), type(self))
        def Kuu(feat, kern, *, jitter=0.0):
            with gpflow.decors.params_as_tensors_for(feat):
                Kzz = kern.K(feat.Z)
                Kzz += jitter * tf.eye(len(feat),
                                       dtype=settings.dtypes.float_type)
            return Kzz

        @dispatch(type(self.feature), type(self), object)
        def Kuf(feat, kern, Xnew):
            with gpflow.decors.params_as_tensors_for(feat):
                Kzx = kern.K(feat.Z, Xnew)
            return Kzx

        mu, var = conditional(Xnew,
                              self.feature,
                              self,
                              self.q_mu,
                              q_sqrt=self.q_sqrt,
                              full_cov=full_cov,
                              white=self.whiten,
                              full_output_cov=full_output_cov)
        return mu + self.mean_function(Xnew), var
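A hedged sketch of the dispatch pattern this example registers: functools.singledispatch below stands in for gpflow's multiple dispatcher (which keys on several argument types at once); MyFeature and RBFStub are hypothetical types, not part of the example:

from functools import singledispatch
import numpy as np

class MyFeature:                                  # hypothetical inducing-feature type
    def __init__(self, Z):
        self.Z = Z
    def __len__(self):
        return len(self.Z)

class RBFStub:                                    # hypothetical kernel with a K() method
    def K(self, Z):
        return np.exp(-0.5 * (Z - Z.T) ** 2)

@singledispatch
def Kuu(feat, kern, *, jitter=0.0):
    raise NotImplementedError(type(feat))

@Kuu.register(MyFeature)
def _(feat, kern, *, jitter=0.0):                 # picked when feat is a MyFeature
    return kern.K(feat.Z) + jitter * np.eye(len(feat))

Kzz = Kuu(MyFeature(np.random.randn(3, 1)), RBFStub(), jitter=1e-6)
assert Kzz.shape == (3, 3)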
Example #21
 def predict_f(self, Xnew: InputData, full_cov=False, full_output_cov=False) -> MeanAndVariance:
     q_mu = self.q_mu
     q_sqrt = self.q_sqrt
     mu, var = conditional(
         Xnew,
         self.inducing_variable,
         self.kernel,
         q_mu,
         q_sqrt=q_sqrt,
         full_cov=full_cov,
         white=self.whiten,
         full_output_cov=full_output_cov,
     )
     # tf.debugging.assert_positive(var)  # We really should make the tests pass with this here
     return mu + self.mean_function(Xnew), var
Example #22
    def predict_f(self, X_onedim, full_cov=False, full_output_cov=False):
        """
        Predict the one-dimensional latent function

        Parameters
        ----------
        X_onedim

        Returns
        -------

        """
        # Compute conditional
        mu, var = conditional(X_onedim, self.feature, self.kern, self.q_mu, q_sqrt=self.q_sqrt,
                              full_cov=full_cov,
                              white=self.whiten, full_output_cov=full_output_cov)

        return mu + self.mean_function(X_onedim), var
Example #23
    def predict(
        self,
        inputs: TensorType,
        *,
        full_cov: bool = False,
        full_output_cov: bool = False,
    ) -> Tuple[tf.Tensor, tf.Tensor]:
        """
        Make a prediction at N test inputs for the Q outputs of this layer,
        including the mean function contribution.

        The covariance and its shape is determined by *full_cov* and *full_output_cov* as follows:

        +--------------------+---------------------------+--------------------------+
        | (co)variance shape | ``full_output_cov=False`` | ``full_output_cov=True`` |
        +--------------------+---------------------------+--------------------------+
        | ``full_cov=False`` | [N, Q]                    | [N, Q, Q]                |
        +--------------------+---------------------------+--------------------------+
        | ``full_cov=True``  | [Q, N, N]                 | [N, Q, N, Q]             |
        +--------------------+---------------------------+--------------------------+

        :param inputs: The inputs to predict at, with a shape of [N, D], where D is
            the input dimensionality of this layer.
        :param full_cov: Whether to return full covariance (if `True`) or
            marginal variance (if `False`, the default) w.r.t. inputs.
        :param full_output_cov: Whether to return full covariance (if `True`)
            or marginal variance (if `False`, the default) w.r.t. outputs.

        :returns: posterior mean (shape [N, Q]) and (co)variance (shape as above) at test points
        """
        mean_function = self.mean_function(inputs)
        mean_cond, cov = conditional(
            inputs,
            self.inducing_variable,
            self.kernel,
            self.q_mu,
            q_sqrt=self.q_sqrt,
            full_cov=full_cov,
            full_output_cov=full_output_cov,
            white=self.whiten,
        )

        return mean_cond + mean_function, cov
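A minimal NumPy sketch relating the four covariance shapes in the table above, starting from a full joint covariance of shape [N, Q, N, Q]; the reduced forms are just its diagonals (illustrative sizes):

import numpy as np

N, Q = 5, 3
A = np.random.randn(N * Q, N * Q)
full = (A @ A.T).reshape(N, Q, N, Q)      # full_cov=True,  full_output_cov=True

var_nq  = np.einsum('nqnq->nq', full)     # full_cov=False, full_output_cov=False
cov_nqq = np.einsum('nqnp->nqp', full)    # full_cov=False, full_output_cov=True
cov_qnn = np.einsum('nqmq->qnm', full)    # full_cov=True,  full_output_cov=False

assert var_nq.shape == (N, Q)
assert cov_nqq.shape == (N, Q, Q)
assert cov_qnn.shape == (Q, N, N)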
Example #24
    def conditional(self, X, inputs=None, add_noise=True, Lm=None):
        N = tf.shape(X)[0]
        if X.shape.ndims == 3:
            X_in = X if inputs is None else tf.concat([X, tf.tile(inputs[None, :, :], [N, 1, 1])], -1)
            X_in = tf.reshape(X_in, [-1, self.dim + self.input_dim])
        else:
            X_in = X if inputs is None else tf.concat([X, tf.tile(inputs[None, :], [N, 1])], -1)
        mu, var = conditional(X_in, self.Z, self.kern, self.Umu, q_sqrt=self.Ucov_chol, white=True, Lm=Lm)
        n_mean_inputs = self.mean_fn.input_dim if hasattr(self.mean_fn, "input_dim") else self.dim
        mu += self.mean_fn(X_in[:, :n_mean_inputs])

        if X.shape.ndims == 3:
            T = tf.shape(X)[1]
            mu = tf.reshape(mu, [N, T, self.dim])
            var = tf.reshape(var, [N, T, self.dim])

        if add_noise:
            var += self.Q_sqrt ** 2.
        return mu, var
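A small NumPy sketch of the 3-D input handling above: tile the shared inputs across the leading sample axis, flatten to 2-D for the conditional call, then restore [N, T, dim] (sizes illustrative; the zero mean is only a stand-in):

import numpy as np

N, T, dim, input_dim = 2, 3, 1, 2
X = np.random.randn(N, T, dim)
inputs = np.random.randn(T, input_dim)

X_in = np.concatenate([X, np.tile(inputs[None, :, :], [N, 1, 1])], -1)
X_in = X_in.reshape(-1, dim + input_dim)      # [(N*T), dim + input_dim]
mu_flat = np.zeros((N * T, dim))              # stand-in for the conditional mean
mu = mu_flat.reshape(N, T, dim)               # back to [N, T, dim]
assert mu.shape == (N, T, dim)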
Example #25
    def build_predict_h(self, Xnew, full_cov=False):
        """
        Predict latent gps H values at new points ``Xnew''
        Xnew is a data matrix, point at which we want to predict
        This method computes p(H*|L = L_h_i*V_h_i)
        """
        mu_list = []
        var_list = []
        V_h_splits = tf.split(self.V_h, num_or_size_splits=self.num_latent_gps)
        for i in xrange(self.num_latent_gps):
            mu_i, var_i = conditional(Xnew,
                                      self.X_grid,
                                      self.kerns_list[i],
                                      tf.transpose(V_h_splits[i]),
                                      full_cov=full_cov,
                                      q_sqrt=None,
                                      whiten=True)
            mu_list.append(mu_i)
            var_list.append(var_i)

        return mu_list, var_list
Example #26
    def test_transition_KLs_MC(self):
        with self.test_context() as sess:
            shape = [self.T - 1, self.n_samples, self.E]
            X_samples = tf.placeholder(gp.settings.float_type, shape=shape)
            feed_dict = {X_samples: np.random.randn(*shape)}

            m = self.prepare()
            f_mus, f_vars = conditional(tf.reshape(X_samples, [-1, self.E]),
                                        m.Z,
                                        m.kern,
                                        m.Umu.constrained_tensor,
                                        white=self.white,
                                        q_sqrt=m.Ucov_chol.constrained_tensor)
            f_mus += m.mean_fn(tf.reshape(X_samples, [-1, self.E]))

            gpssm_KLs = m._build_transition_KLs(
                tf.reshape(f_mus, [m.T - 1, m.n_samples, m.latent_dim]),
                tf.reshape(f_vars, [m.T - 1, m.n_samples, m.latent_dim]))

            f_samples = f_mus + tf.sqrt(f_vars) * tf.random_normal(
                [(self.T - 1) * self.n_samples, self.E],
                dtype=gp.settings.float_type,
                seed=self.seed)

            q_mus = m.As.constrained_tensor[:, None, :] * tf.reshape(f_samples, shape) \
                    + m.bs.constrained_tensor[:, None, :]
            q_mus = tf.reshape(q_mus, [-1, self.E])
            q_covs = tf.reshape(
                tf.tile(m.S_chols.constrained_tensor[:, None, :],
                        [1, self.n_samples, 1]), [-1, self.E])
            mc_KLs = KL_samples(q_mus - f_samples,
                                Q_chol=q_covs,
                                P_chol=m.Q_sqrt.constrained_tensor)
            mc_KLs = tf.reduce_mean(tf.reshape(mc_KLs, shape[:-1]), -1)

            assert_allclose(*sess.run([gpssm_KLs, mc_KLs],
                                      feed_dict=feed_dict),
                            rtol=0.5 * 1e-2)
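The test above validates analytic KLs against Monte-Carlo estimates. A self-contained NumPy sketch of that idea for two univariate Gaussians, comparing the sample estimate E_q[log q - log p] with the closed form (tolerance and sizes are illustrative):

import numpy as np

rng = np.random.default_rng(0)
mu_q, s_q, mu_p, s_p = 0.3, 0.8, 0.0, 1.0
x = rng.normal(mu_q, s_q, size=200_000)

logpdf = lambda x, m, s: -0.5 * np.log(2 * np.pi * s ** 2) - (x - m) ** 2 / (2 * s ** 2)
kl_mc = np.mean(logpdf(x, mu_q, s_q) - logpdf(x, mu_p, s_p))
kl_exact = np.log(s_p / s_q) + (s_q ** 2 + (mu_q - mu_p) ** 2) / (2 * s_p ** 2) - 0.5

np.testing.assert_allclose(kl_mc, kl_exact, rtol=1e-1)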
Example #27
def test_q_sqrt_constraints(Xdata, Xnew, kernel, mu, white):
    """ Test that sending in an unconstrained q_sqrt returns the same conditional
    evaluation and gradients. This is important to match the behaviour of the KL, which
    enforces that q_sqrt is triangular.
    """

    tril = np.tril(rng.randn(Ln, Nn, Nn))

    q_sqrt_constrained = Parameter(tril, transform=triangular())
    q_sqrt_unconstrained = Parameter(tril)

    diff_before_gradient_step = (q_sqrt_constrained -
                                 q_sqrt_unconstrained).numpy()
    assert_allclose(diff_before_gradient_step, 0)

    Fstars = []
    for q_sqrt in [q_sqrt_constrained, q_sqrt_unconstrained]:

        with tf.GradientTape() as tape:
            _, Fstar_var = conditional(Xnew,
                                       Xdata,
                                       kernel,
                                       mu,
                                       q_sqrt=q_sqrt,
                                       white=white)

        grad = tape.gradient(Fstar_var, q_sqrt.unconstrained_variable)
        q_sqrt.unconstrained_variable.assign_sub(grad)
        Fstars.append(Fstar_var)

    diff_Fstar_before_gradient_step = Fstars[0] - Fstars[1]
    assert_allclose(diff_Fstar_before_gradient_step, 0)

    diff_after_gradient_step = (q_sqrt_constrained -
                                q_sqrt_unconstrained).numpy()
    assert_allclose(diff_after_gradient_step, 0)
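A small TensorFlow sketch of the property behind this test: downstream computations only consume the lower triangle of q_sqrt, so gradients with respect to the strictly-upper entries vanish and both parameterisations take identical gradient steps (illustrative, not the test's code):

import tensorflow as tf

Q = tf.Variable(tf.random.normal([4, 4], dtype=tf.float64))
with tf.GradientTape() as tape:
    L = tf.linalg.band_part(Q, -1, 0)          # keep only the lower triangle
    loss = tf.reduce_sum(L @ tf.transpose(L))  # scalar function of the covariance
g = tape.gradient(loss, Q)
strictly_upper = g - tf.linalg.band_part(g, -1, 0)
tf.debugging.assert_near(strictly_upper, tf.zeros_like(strictly_upper))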
Example #28
        def f_conditional(Xnew, full_cov=False):
            mean = []
            var = []
            if self._shared_Z:
                feats = [self.feature for _ in range(self.num_kernels)]
            else:
                feats = [feat for feat in self.feature]
            for i, (k, feat) in enumerate(zip(self.kernel, feats)):
                m, v = conditional(Xnew, feat, k, self.q_mu[:,(i*self.offset):((i+1)*self.offset)],
                                   q_sqrt=self.q_sqrt[(i*self.offset):((i+1)*self.offset),:,:,],
                                   full_cov=full_cov,
                                   white=True)
                mean.append(m)

                #temporary fix
                if full_cov:
                    var.append(tf.transpose(v))
                else:
                    var.append(v)

            mean = tf.concat(mean, axis=-1) #NxK
            var = tf.concat(var, axis=-1) #NxK or NxNxK

            return mean + self.mean_function(Xnew), var
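A small NumPy sketch of the per-kernel block slicing above: columns of q_mu and leading slices of q_sqrt are grouped in blocks of size offset, one block per kernel (sizes illustrative):

import numpy as np

M, num_kernels, offset = 5, 3, 2
q_mu = np.random.randn(M, num_kernels * offset)
q_sqrt = np.random.randn(num_kernels * offset, M, M)

for i in range(num_kernels):
    q_mu_i = q_mu[:, i * offset:(i + 1) * offset]      # [M, offset]
    q_sqrt_i = q_sqrt[i * offset:(i + 1) * offset]     # [offset, M, M]
    assert q_mu_i.shape == (M, offset)
    assert q_sqrt_i.shape == (offset, M, M)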
Example #29
 def mean_sq_fn(X):
     mean, _ = conditional(X, feat, kern, q_mu, q_sqrt=q_sqrt, white=white)
     return (mean + effective_mean(X)) ** 2
 def var_fn(X):
     _, var = conditional(X, feat, kern, q_mu, q_sqrt=q_sqrt, white=white)
     return var