Example #1
    def _build_predict(self, Xnew, full_cov=False):
        """
        Construct a TensorFlow function to compute predictions.

        :param Xnew: Test data matrix of size NxD
        :param full_cov: If True, return the full predictive covariance instead of its diagonal
        :return: TF tensor of size NxR
        """
        transfer_param = 2 * (1 / (1 + self.mu) ** self.b) - 1  # in (-1, 1] for mu >= 0, b > 0

        y_target = self.Y - self.mean_function(self.X)
        y_source = self.Y_source - self.mean_function(self.X_source)
        y = tf.concat([y_target, y_source], axis=0)
        K_new_target = self.kern.K(self.X, Xnew)
        K_new_source = transfer_param * self.kern.K(self.X_source, Xnew)

        K_new = tf.concat([K_new_target, K_new_source], axis=0)

        K_cross = transfer_param * self.kern.K(self.X, self.X_source)
        K_source = self.kern.K(self.X_source) \
                   + tf.eye(tf.shape(self.X_source)[0], dtype=settings.float_type)\
                   * self.source_likelihood.variance
        K_target = self.kern.K(self.X) \
                   + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type)\
                   * self.likelihood.variance

        # Joint training covariance over [target; source] points:
        # C = [[K_target, K_cross], [K_cross^T, K_source]]
        C = tf.concat([
            tf.concat([K_target, tf.transpose(K_cross)], axis=0),
            tf.concat([K_cross, K_source], axis=0)
        ], axis=1)

        Knn = self.kern.K(Xnew) if full_cov else self.kern.Kdiag(Xnew)
        f_mean, f_var = base_conditional(K_new, C, Knn, y, full_cov=full_cov, white=False)
        return f_mean + self.mean_function(Xnew), f_var
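This and the following examples all delegate the heavy lifting to GPflow's base_conditional(Kmn, Kmm, Knn, f, ...). For reference, a minimal NumPy sketch of the math it implements in the plain case used here (full_cov=True, white=False, no q_sqrt); the function name is ours, not GPflow's:

import numpy as np

def base_conditional_dense(Kmn, Kmm, Knn, f):
    # GP posterior at N test points given M noisy training targets f:
    #   mean = Kmn^T Kmm^{-1} f,  cov = Knn - Kmn^T Kmm^{-1} Kmn,
    # computed through the Cholesky factor of Kmm for numerical stability.
    L = np.linalg.cholesky(Kmm)         # M x M, Kmm = L L^T
    A = np.linalg.solve(L, Kmn)         # M x N, A = L^{-1} Kmn
    mean = A.T @ np.linalg.solve(L, f)  # N x R
    cov = Knn - A.T @ A                 # N x N full predictive covariance
    return mean, cov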
Example #2
    def _build_likelihood(self):
        """
        Construct a TensorFlow function to compute the marginal likelihood.

        :returns: TF tensor
        """

        transfer_param = 2 * (1 / (1 + self.mu) ** self.b) - 1
        K_cross = transfer_param * self.kern.K(self.X_source, self.X)
        K_source = self.kern.K(self.X_source) \
                   + tf.eye(tf.shape(self.X_source)[0], dtype=settings.float_type)\
                   * self.source_likelihood.variance
        K_target = self.kern.K(self.X) \
                   + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type)\
                   * self.likelihood.variance

        m, C = base_conditional(K_cross, K_source, K_target, self.Y_source, full_cov=True)
        # Equivalent manual computation of m and C:
        # L_source = tf.cholesky(K_source)
        # A = tf.matrix_triangular_solve(L_source, K_cross, lower=True)
        # A = tf.matrix_triangular_solve(tf.transpose(L_source), A, lower=False)
        # m = tf.matmul(A, self.Y_source, transpose_a=True)
        # C = K_target - tf.matmul(A, K_cross, transpose_a=True)
        m = self.mean_function(self.X) + m
        L = tf.cholesky(C)[0]  # C is [R, N, N] under full_cov=True; take the single output's factor

        logpdf = multivariate_normal(self.Y, m, L)

        return tf.reduce_sum(logpdf)
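Both transfer snippets reuse the same similarity coefficient transfer_param = 2 / (1 + mu)**b - 1. A quick numeric check (b fixed to 1 purely for illustration) shows how it maps mu >= 0 into (-1, 1]: mu = 0 trusts the source task fully, while large mu discounts it towards anti-correlation:

# Evaluate the transfer coefficient for a few values of mu at b = 1.
for mu in [0.0, 0.5, 2.0, 10.0]:
    print(mu, 2 * (1 / (1 + mu) ** 1.0) - 1)  # -> 1.0, 0.333, -0.333, -0.818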
Example #3
    def predict_f(self,
                  Xnew: InputData,
                  full_cov: bool = False,
                  full_output_cov: bool = False) -> MeanAndVariance:
        r"""
        This method computes predictions at input points X \in \mathbb{R}^{N \times D}

        .. math::
            p(F* | Y)

        where F* are points on the GP at new data points, Y are noisy observations at training data points.
        Note that full_cov => full_output_cov (regardless of the value passed for full_output_cov), to avoid ambiguity.
        """
        full_output_cov = full_output_cov or full_cov
        Xnew = tf.reshape(data_input_to_tensor(Xnew), (-1, self._M))
        n = Xnew.shape[0]
        f_mean, f_var = base_conditional(Kmn=self.kernel(self._X, Xnew),
                                         Kmm=self.likelihood.add_to(self.KXX),
                                         Knn=self.kernel(Xnew, Xnew),
                                         f=self._Y - self._mean,
                                         full_cov=True,
                                         white=False)
        f_mean += tf.reshape(self.mean_function(Xnew), f_mean.shape)
        f_mean_shape = (self._L, n)
        f_mean = tf.reshape(f_mean, f_mean_shape)
        f_var = tf.reshape(f_var, f_mean_shape * 2)
        if full_output_cov:
            einsum = 'LNln -> LlNn'
        else:
            einsum = 'LNLn -> LNn'
        f_var = tf.einsum(einsum, f_var)
        if not full_cov:
            f_var = tf.einsum('...NN->...N', f_var)
        perm = tuple(reversed(range(tf.rank(f_var))))
        return tf.transpose(f_mean), tf.transpose(f_var, perm)
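The einsum reshuffles at the end of Example #3 are easiest to verify on a dummy tensor. A sanity check with illustrative sizes (it relies on tf.einsum's repeated-index support to take the diagonal over the two output axes):

import tensorflow as tf

L, n = 3, 5                                  # L latent outputs, n test points
f_var = tf.random.normal((L, n, L, n))       # reshaped (L, N, L, N) joint covariance
print(tf.einsum('LNln->LlNn', f_var).shape)  # (3, 3, 5, 5): full output and full cov
print(tf.einsum('LNLn->LNn', f_var).shape)   # (3, 5, 5): diagonal over outputs
full = tf.einsum('LNln->LlNn', f_var)
print(tf.einsum('...NN->...N', full).shape)  # (3, 3, 5): marginal variances only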
Example #4
    def _build_predict(self, X_new, full_cov=False, full_output_cov=False, return_Kzz=False):
        num_samples = tf.shape(X_new)[0]
        Kzz, Kzx, Kxx = Kuu_Kuf_Kff(self.feature, self.kern, X_new,
                                    jitter=settings.jitter, full_f_cov=full_cov)
        f_mean, f_var = base_conditional(Kzx, Kzz, Kxx, self.q_mu, full_cov=full_cov,
                                         q_sqrt=tf.matrix_band_part(self.q_sqrt, -1, 0),
                                         white=self.whiten)
        f_mean += self.mean_function(X_new)
        f_var = _expand_independent_outputs(f_var, full_cov, full_output_cov)

        if return_Kzz:
            return f_mean, f_var, Kzz
        else:
            return f_mean, f_var
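Example #4 is the sparse variational case: instead of exact observations, base_conditional receives a variational distribution q(u) = N(q_mu, q_sqrt q_sqrt^T) over the inducing outputs (whitened when self.whiten is True). A minimal NumPy sketch of the white=True, full-covariance, single-output math (the function name is ours):

import numpy as np

def whitened_conditional(Kmn, Kmm, Knn, q_mu, q_sqrt):
    # With u = L v, Kmm = L L^T and q(v) = N(q_mu, S) where S = q_sqrt q_sqrt^T:
    #   mean = Kmn^T L^{-T} q_mu
    #   cov  = Knn - Kmn^T Kmm^{-1} Kmn + (q_sqrt^T A)^T (q_sqrt^T A)
    L = np.linalg.cholesky(Kmm)
    A = np.linalg.solve(L, Kmn)  # M x N, A = L^{-1} Kmn
    mean = A.T @ q_mu            # N x R
    LTA = q_sqrt.T @ A           # covariance contribution from q(v)
    cov = Knn - A.T @ A + LTA.T @ LTA
    return mean, cov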
Example #5
    def build_predict_psi(self, Hnew, full_cov=False):
        H_sample = tf.gather(self.H[:, :self.dim_h], self.H_unique_ph)
        y = self.psi_ph - self.mean_psi(H_sample)
        Kmn = self.configuration_kernel.K(H_sample, Hnew)
        Kmm_sigma = self.configuration_kernel.K(H_sample) + tf.eye(
            tf.shape(H_sample)[0],
            dtype=settings.float_type) * self.configuration_likelihood.variance
        Knn = self.configuration_kernel.K(
            Hnew) if full_cov else self.configuration_kernel.Kdiag(Hnew)
        f_mean, f_var = base_conditional(
            Kmn, Kmm_sigma, Knn, y, full_cov=full_cov,
            white=False)  # N x P, N x P or P x N x N
        return f_mean + self.mean_psi(Hnew), f_var
Example #6
    def _build_predict(self, Xnew, full_cov=False):
        y = self.Y - self.mean_function(self.X)
        Kmn = self.kern.K(self.X, Xnew)
        Kmm_sigma = self.kern.K(self.X) + tf.eye(tf.shape(self.X)[0], dtype=settings.float_type) * self.likelihood.variance
        Knn = self.kern.K(Xnew) if full_cov else self.kern.Kdiag(Xnew)
        f_mean, f_var = base_conditional(Kmn, Kmm_sigma, Knn, y, full_cov=full_cov, white=False)  # N x P, N x P or P x N x N
        return f_mean + self.mean_function(Xnew), f_var
def my_conditional(Xnew,
                   X,
                   kern,
                   f,
                   *,
                   full_cov=False,
                   q_sqrt=None,
                   white=False):
    num_data = tf.shape(X)[0]  # M conditioning points
    # Prior covariance at the conditioning points, with jitter for numerical stability.
    Kmm = kern.Kzz(X) + tf.eye(
        num_data, dtype=settings.float_type) * settings.numerics.jitter_level
    Kmn = kern.Kzx(X, Xnew)  # cross-covariance between conditioning and test points
    if full_cov:
        Knn = kern.K(Xnew)
    else:
        Knn = kern.Kdiag(Xnew)
    return base_conditional(Kmn,
                            Kmm,
                            Knn,
                            f,
                            full_cov=full_cov,
                            q_sqrt=q_sqrt,
                            white=white)
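my_conditional assumes a kernel object exposing inter-domain covariances Kzz/Kzx alongside the usual K/Kdiag. A hedged smoke-test sketch for a GPflow 1.x environment; the adapter class and every name below are hypothetical stand-ins, not from the source:

import numpy as np
import gpflow

class InterDomainAdapter:
    # Hypothetical shim: present a standard GPflow kernel through the
    # Kzz/Kzx interface that my_conditional expects.
    def __init__(self, base):
        self.base = base
    def Kzz(self, Z):
        return self.base.K(Z)
    def Kzx(self, Z, X):
        return self.base.K(Z, X)
    def K(self, X):
        return self.base.K(X)
    def Kdiag(self, X):
        return self.base.Kdiag(X)

kern = InterDomainAdapter(gpflow.kernels.RBF(1))  # GPflow 1.x RBF(input_dim)
Z = np.random.randn(5, 1)                         # 5 conditioning points in 1D
Xnew = np.random.randn(3, 1)                      # 3 test points
f = np.zeros((5, 1))                              # function values at Z
f_mean, f_var = my_conditional(Xnew, Z, kern, f, full_cov=False, white=True)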