Esempio n. 1
0
    def _partition_and_stitch(self, args, func_name):
        """
        Dispatch each data row to the likelihood selected by the last column
        of Y, then reassemble the per-likelihood results in original row order.

        ``args`` is a list of tensors to be passed to
        ``self.likelihoods.<func_name>``; ``args[-1]`` is the 'Y' argument,
        whose final column holds the integer index of the likelihood to use
        for that row.
        """
        # Strip the likelihood index off the last column of Y.
        data_y = args[-1]
        lik_idx = tf.cast(data_y[:, -1], tf.int32)
        args[-1] = data_y[:, :-1]

        # Partition every argument tensor by likelihood index, then regroup
        # so each likelihood receives its own chunk of every argument.
        chunks = zip(*[tf.dynamic_partition(a, lik_idx, self.num_likelihoods) for a in args])

        # Look up the requested function on each likelihood and apply it to
        # the corresponding chunk of data.
        with params_as_tensors_for(self, convert=False):
            funcs = [getattr(lik, func_name) for lik in self.likelihood_list]
        pieces = [f(*chunk) for f, chunk in zip(funcs, chunks)]

        # Invert the partition to restore the original row ordering.
        order = tf.dynamic_partition(tf.range(0, tf.size(lik_idx)), lik_idx, self.num_likelihoods)
        return tf.dynamic_stitch(order, pieces)
Esempio n. 2
0
 def _predict_non_logged_density(self, Fmu, Fvar, Y):
     """
     Predictive density for the robust-max style multiclass likelihood,
     computed with Gauss-Hermite quadrature on the probability that the
     labelled latent function is the largest.
     """
     with params_as_tensors_for(self.invlink):
         nodes, weights = hermgauss(self.num_gauss_hermite_points)
         p_max = self.invlink.prob_is_largest(Y, Fmu, Fvar, nodes, weights)
         # Mixture of the "correct class" probability and the epsilon floor.
         density = p_max * (1. - self.invlink.epsilon) + (1. - p_max) * (self.invlink._eps_K1)
     return density
Esempio n. 3
0
def Kuf(feat, kern, X_new):
    """
    Cross-covariance between the inducing tensors ``feat.Z`` and the new
    sequences ``X_new``.

    When ``feat.learn_weights`` is set, the per-level covariances are combined
    with the learned weight matrix ``feat.W``; otherwise the kernel returns
    the already-summed covariance.
    """
    with params_as_tensors_for(feat):
        if not feat.learn_weights:
            return kern.K_tens_vs_seq(feat.Z, X_new, increments=feat.increments)
        levels = kern.K_tens_vs_seq(feat.Z, X_new, return_levels=True, increments=feat.increments)
        # Level 0 plus the weighted sum of the higher levels.
        return levels[0] + tf.reduce_sum(tf.matmul(feat.W, levels[1:]), axis=0)
Esempio n. 4
0
def Kuf(feat, kern, X_new):
    """
    Cross-covariance between the inducing inputs ``feat.Z`` and ``X_new``.

    With ``feat.learn_weights`` enabled, the kernel's per-level covariances
    are mixed with the learned weights ``feat.W``; otherwise the plain kernel
    evaluation is returned.
    """
    with params_as_tensors_for(feat):
        if not feat.learn_weights:
            return kern.K(feat.Z, X_new, presliced_X=True)
        levels = kern.K(feat.Z, X_new, presliced_X=True, return_levels=True)
        # Level 0 plus the weighted sum of the higher levels.
        return levels[0] + tf.reduce_sum(tf.matmul(feat.W, levels[1:]), axis=0)
Esempio n. 5
0
 def variational_expectations(self, Fmu, Fvar, Y):
     """
     Variational expectation of the log-likelihood under a Gaussian on the
     latent functions, via Gauss-Hermite quadrature on the probability that
     the labelled latent function is the largest.
     """
     with params_as_tensors_for(self.invlink):
         nodes, weights = hermgauss(self.num_gauss_hermite_points)
         # Widen the latent variance by the extra noise term self.a.
         widened_var = Fvar + self.a
         p_max = self.invlink.prob_is_largest(Y, Fmu, widened_var, nodes, weights)
         expectation = p_max * tf.log(1. - self.invlink.epsilon) + (1. - p_max) * tf.log(
             self.invlink._eps_K1)
     return expectation
Esempio n. 6
0
 def _predict_non_logged_density(self, Fmu, Fvar, Y):
     """
     Predictive density for the RobustMax inverse link, using Gauss-Hermite
     quadrature. Raises NotImplementedError for any other inverse link.
     """
     if not isinstance(self.invlink, RobustMax):
         raise NotImplementedError
     with params_as_tensors_for(self.invlink):
         nodes, weights = hermgauss(self.num_gauss_hermite_points)
         p_max = self.invlink.prob_is_largest(Y, Fmu, Fvar, nodes, weights)
         # Mixture of the "correct class" probability and the epsilon floor.
         density = p_max * (1. - self.invlink.epsilon) + (1. - p_max) * (self.invlink._eps_K1)
     return density
Esempio n. 7
0
 def variational_expectations(self, Fmu, Fvar, Y):
     """
     Variational expectation of the log-likelihood for the RobustMax inverse
     link, via Gauss-Hermite quadrature. Raises NotImplementedError for any
     other inverse link.
     """
     if not isinstance(self.invlink, RobustMax):
         raise NotImplementedError
     with params_as_tensors_for(self.invlink):
         nodes, weights = hermgauss(self.num_gauss_hermite_points)
         p_max = self.invlink.prob_is_largest(Y, Fmu, Fvar, nodes, weights)
         expectation = (p_max * tf.log(1. - self.invlink.epsilon)
                        + (1. - p_max) * tf.log(self.invlink._eps_K1))
     return expectation
Esempio n. 8
0
def Kuu(feat, kern, *, jitter=0.0, full_f_cov=False):
    """
    Covariance of the inducing tensors ``feat.Z``, with ``jitter`` added on
    the diagonal for numerical stability.

    NOTE(review): ``full_f_cov`` is accepted but never used here — presumably
    kept for signature compatibility with related dispatchers; confirm.
    """
    with params_as_tensors_for(feat):
        if feat.learn_weights:
            levels = kern.K_tens(feat.Z, return_levels=True, increments=feat.increments)
            # Level 0 plus W * K_level * W^T summed over the higher levels.
            weighted = tf.matmul(tf.matmul(feat.W, levels[1:]), feat.W, transpose_b=True)
            Kzz = levels[0] + tf.reduce_sum(weighted, axis=0)
        else:
            Kzz = kern.K_tens(feat.Z, increments=feat.increments)
        Kzz += jitter * tf.eye(len(feat), dtype=settings.dtypes.float_type)
    return Kzz
Esempio n. 9
0
def Kuu(feat, kern, *, jitter=0.0):
    """
    Covariance of the inducing inputs ``feat.Z``, with ``jitter`` added on
    the diagonal for numerical stability.
    """
    with params_as_tensors_for(feat):
        if feat.learn_weights:
            levels = kern.K(feat.Z, return_levels=True, presliced=True)
            # Level 0 plus W * K_level * W^T summed over the higher levels.
            weighted = tf.matmul(tf.matmul(feat.W, levels[1:]), feat.W, transpose_b=True)
            Kzz = levels[0] + tf.reduce_sum(weighted, axis=0)
        else:
            Kzz = kern.K(feat.Z, presliced=True)
        Kzz += jitter * tf.eye(len(feat), dtype=settings.dtypes.float_type)
    return Kzz
Esempio n. 10
0
 def logp(self, F, Y):
     """
     Log-density of the labels Y under the RobustMax link: log(1 - epsilon)
     where argmax(F) matches Y, log(_eps_K1) elsewhere. Raises
     NotImplementedError for any other inverse link.
     """
     if not isinstance(self.invlink, RobustMax):
         raise NotImplementedError
     with params_as_tensors_for(self.invlink):
         predicted = tf.expand_dims(tf.argmax(F, 1), 1)
         hits = tf.equal(predicted, tf.cast(Y, tf.int64))
         p_hit = tf.ones(tf.shape(Y), dtype=settings.float_type) - self.invlink.epsilon
         p_miss = tf.zeros(tf.shape(Y), dtype=settings.float_type) + self.invlink._eps_K1
         p = tf.where(hits, p_hit, p_miss)
     return tf.log(p)
Esempio n. 11
0
def Kuu_Kuf_Kff(feat, kern, X_new, *, jitter=0.0, full_f_cov=False):
    """
    Jointly compute the inducing covariance Kuu, the cross-covariance Kuf
    and the data (co)variance Kff for the signature-kernel feature.

    :param feat: inducing feature holding Z, W, learn_weights, increments.
    :param kern: kernel exposing K_tens_n_seq_covs.
    :param X_new: new input sequences.
    :param jitter: value added to the diagonal of Kzz (and Kxx when
        full_f_cov) for numerical stability.
    :param full_f_cov: if True, Kxx is the full covariance matrix of X_new;
        otherwise only its (per-point) variances.
    :return: tuple (Kzz, Kzx, Kxx).
    """
    with params_as_tensors_for(feat):
        if feat.learn_weights:
            Kzz, Kzx, Kxx = kern.K_tens_n_seq_covs(feat.Z, X_new, full_X_cov=full_f_cov, return_levels=True, increments=feat.increments)
            # Combine the per-level covariances with the learned weights W.
            Kzz = Kzz[0] + tf.reduce_sum(tf.matmul(tf.matmul(feat.W, Kzz[1:]), feat.W, transpose_b=True), axis=0)
            Kzx = Kzx[0] + tf.reduce_sum(tf.matmul(feat.W, Kzx[1:]), axis=0)
            Kxx = tf.reduce_sum(Kxx, axis=0)
        else:
            Kzz, Kzx, Kxx = kern.K_tens_n_seq_covs(feat.Z, X_new, full_X_cov=full_f_cov, increments=feat.increments)
        Kzz += jitter * tf.eye(len(feat), dtype=settings.dtypes.float_type)
        if full_f_cov:
            # BUGFIX: was tf.shape(X) — X is undefined here; the data argument
            # is X_new, so the jitter matrix must be sized from X_new.
            Kxx += jitter * tf.eye(tf.shape(X_new)[0], dtype=settings.dtypes.float_type)
        else:
            Kxx += jitter
    return Kzz, Kzx, Kxx
Esempio n. 12
0
    def predict_mean_and_var(self, Fmu, Fvar, Xnew):
        """
        Per-row predictive mean and variance, dispatching each row to the
        likelihood indexed by the last column of Xnew and stitching the
        results back into the original row order.
        """
        lik_idx = tf.cast(Xnew[:, -1], tf.int32)

        # Partition Fmu and Fvar by likelihood index and regroup so each
        # likelihood receives its own (Fmu, Fvar) chunk.
        chunks = zip(*[
            tf.dynamic_partition(t, lik_idx, self.num_likelihoods)
            for t in [Fmu, Fvar]
        ])

        with params_as_tensors_for(self, convert=False):
            funcs = [
                getattr(lik, 'predict_mean_and_var')
                for lik in self.likelihood_list
            ]
        results = [f(*chunk) for f, chunk in zip(funcs, chunks)]

        # Invert the partition to restore the original row ordering.
        order = tf.dynamic_partition(
            tf.range(0, tf.size(lik_idx)), lik_idx, self.num_likelihoods)
        mu = tf.dynamic_stitch(order, [m for m, _ in results])
        var = tf.dynamic_stitch(order, [v for _, v in results])

        return mu, var
Esempio n. 13
0
def Kuu(feat, kern, jitter=0.0):
    """
    Covariance of the inducing points: the kernel evaluated on ``feat.Z``
    reshaped to 2-D (one row per inducing point).

    NOTE(review): unlike sibling ``Kuu`` dispatchers, ``jitter`` is accepted
    but never added to the diagonal here — confirm this is intentional.
    """
    with params_as_tensors_for(feat, kern):
        flat_Z = tf.reshape(feat.Z, (len(feat), -1))
        return kern.K(flat_Z)
Esempio n. 14
0
def Kuf(feat, kern, Xnew):
    """
    Cross-covariance between the inducing points (``feat.Z`` reshaped to
    2-D, one row per inducing point) and the new inputs ``Xnew``.
    """
    with params_as_tensors_for(feat, kern):
        flat_Z = tf.reshape(feat.Z, (len(feat), -1))
        cross = kern.K(flat_Z, Xnew)
    return cross