def _predict_density(self, Xnew, Ynew, num_samples):
    """Monte-Carlo estimate of the predictive log-density of Ynew at Xnew.

    Draws `num_samples` samples of the latent function via build_predict,
    evaluates the likelihood density for each sample, and combines them
    with a log-mean-exp over the sample axis.

    Returns a tensor of shape [N] of per-point log densities.
    """
    mean, var = self.build_predict(Xnew, full_cov=False, S=num_samples)
    S, N, D = shape_as_list(mean)
    tiled_Y = tile_over_samples(Ynew, num_samples)
    # Collapse the sample and data axes so the likelihood sees 2-D inputs.
    mean_flat, var_flat, Y_flat = [
        tf.reshape(t, [S * N, -1]) for t in (mean, var, tiled_Y)
    ]
    log_p_flat = self.likelihood.predict_density(mean_flat, var_flat, Y_flat)
    log_p = tf.reshape(log_p_flat, [S, N])
    # log (1/S) * sum_s exp(log_p_s)  ==  logsumexp_s(log_p_s - log S)
    log_S = tf.log(tf.cast(num_samples, float_type))
    return tf.reduce_logsumexp(log_p - log_S, axis=0)
    def build_likelihood(self):
        """Build the (minibatch-scaled) evidence lower bound (ELBO).

        ELBO = (N_data / batch_size) * sum_n E_q[log p(y_n | f_n)] - KL,
        where the expectation is estimated by averaging over S Monte-Carlo
        samples of the latent function and KL accumulates each layer's
        KL divergence.
        """
        mean, var = self.build_predict(self.X, full_cov=False, S=self.num_samples)
        S, N, D = shape_as_list(mean)
        tiled_Y = tile_over_samples(self.Y, self.num_samples)

        # Per-sample variational expectations, mapped over the sample axis.
        var_exp = tf.map_fn(
            lambda args: self.likelihood.variational_expectations(args[0], args[1], args[2]),
            (mean, var, tiled_Y),
            dtype=float_type)
        var_exp = tf.stack(var_exp)  # S, N

        # Average over samples (S, N -> N), then sum over the minibatch.
        likelihood_term = tf.reduce_sum(tf.reduce_mean(var_exp, 0))

        # Total KL contribution from all layers (0. keeps the float start value).
        kl_term = sum((layer.KL() for layer in self.layers), 0.)

        # Rescale the data term from minibatch size to the full dataset.
        minibatch_size = tf.cast(tf.shape(self.X)[0], float_type)
        scale = tf.cast(self.num_data, float_type) / minibatch_size
        return likelihood_term * scale - kl_term
 def predict_y(self, Xnew, num_samples):
     """Predict the observation mean and variance at Xnew.

     Samples the latent function `num_samples` times, pushes each sample
     through the likelihood's moment computation, and returns
     [Y_mean, Y_var], each of shape [S, N, D_Y].
     """
     mean, var = self.build_predict(Xnew, full_cov=False, S=num_samples)
     S, N, D = shape_as_list(mean)
     # Flatten sample/data axes so the likelihood receives 2-D tensors.
     flat = [tf.reshape(t, [S * N, -1]) for t in (mean, var)]
     y_mean, y_var = self.likelihood.predict_mean_and_var(*flat)
     # Restore the sample axis on both outputs.
     return [tf.reshape(t, [S, N, self.D_Y]) for t in (y_mean, y_var)]