Exemplo n.º 1
0
    def lowerBound(self, x, y, z, z_m, z_lv):
        """Compute the Monte Carlo estimate of the variational lower bound.

        All inputs are rank-3 tensors shaped (mc_samples, n_obs, n_dim)
        (per the original comment — TODO confirm against callers).

        Args:
            x: observations.
            y: labels (scored under a uniform multinoulli prior).
            z: latent samples drawn from q(z|.).
            z_m, z_lv: mean and log-variance of the variational posterior q(z).

        Returns:
            Per-observation lower bound, averaged over the MC-sample axis.
        """
        # NOTE(review): original body was tab-indented under a space-indented
        # `def`, which raises TabError in Python 3 — normalized to spaces.
        l_px = self.compute_logpx(x, y, z)
        l_py = dgm.multinoulliUniformLogDensity(y)
        l_pz = dgm.standardNormalLogDensity(z)
        l_qz = dgm.gaussianLogDensity(z, z_m, z_lv)
        # ELBO terms: log p(x|y,z) + log p(y) + log p(z) - log q(z); the
        # reduce_mean over axis 0 averages the mc_samples dimension.
        return tf.reduce_mean(l_px + l_py + l_pz - l_qz, axis=0)
Exemplo n.º 2
0
 def qy_loss(self, x, y=None, a=None, expand_y=True):
     """Log-density loss for the classifier network q(y | a, x).

     Args:
         x: input batch.
         y: optional labels; when None, a uniform multinoulli density of
            the predictions is returned instead.
         a: optional auxiliary samples; when None they are drawn via
            self.sample_a(x) and x is tiled across the MC-sample axis.
         expand_y: when True, tile y across the mc_samples axis to match
            the prediction tensor (assumes y is rank 2 — TODO confirm).

     Returns:
         Label log-density under the classifier, or the uniform
         multinoulli log-density of the predictions when y is None.
     """
     if a is None:
         # Draw a ~ q(a|x); x must then be replicated mc_samples times so
         # the concat ranks line up before flattening for the dense pass.
         _, _, a = self.sample_a(x)
         qy_in = tf.reshape(
             tf.concat([
                 tf.tile(tf.expand_dims(x, 0), [self.mc_samples, 1, 1]), a
             ],
                       axis=-1), [-1, self.n_x + self.n_a])
     else:
         qy_in = tf.reshape(tf.concat([x, a], axis=-1),
                            [-1, self.n_x + self.n_a])
     y_ = tf.reshape(self.q_y_ax_model(qy_in),
                     [self.mc_samples, -1, self.n_y])
     # PEP 8: test the flag directly rather than comparing `== True`.
     if y is not None and expand_y:
         y = tf.tile(tf.expand_dims(y, 0), [self.mc_samples, 1, 1])
     if y is None:
         return dgm.multinoulliUniformLogDensity(y_)
     else:
         return dgm.multinoulliLogDensity(y, y_)
Exemplo n.º 3
0
 def lowerBound(self, x, y, z, z_m, z_lv, a, qa_m, qa_lv):
     """ Helper function for loss computations. Assumes each input is a rank(3) tensor """
     # Conditional prior p(a | y, z): flatten to 2-D for the dense pass.
     prior_in = tf.reshape(tf.concat([y, z], axis=-1),
                           [-1, self.n_y + self.n_z])
     pa_m, pa_lv = dgm.forwardPassGauss(prior_in,
                                        self.pa_yz,
                                        self.n_hid,
                                        self.nonlinearity,
                                        self.bn,
                                        scope='pa_yz')
     # Restore the (mc_samples, batch, n_a) layout for both moments.
     moment_shape = [self.mc_samples, -1, self.n_a]
     pa_m = tf.reshape(pa_m, moment_shape)
     pa_lv = tf.reshape(pa_lv, moment_shape)
     # Bound = E[ log p(x|y,z,a) + log p(y) + log p(z) + log p(a|y,z)
     #            - log q(z) - log q(a) ], averaged over the sample axis.
     bound_terms = (self.compute_logpx(x, y, z, a)
                    + dgm.multinoulliUniformLogDensity(y)
                    + dgm.standardNormalLogDensity(z)
                    + dgm.gaussianLogDensity(a, pa_m, pa_lv)
                    - dgm.gaussianLogDensity(z, z_m, z_lv)
                    - dgm.gaussianLogDensity(a, qa_m, qa_lv))
     return tf.reduce_mean(bound_terms, axis=0)
Exemplo n.º 4
0
 def qy_loss(self, x, y=None):
     """Log-density of the classifier q(y|x); when no labels are given,
     returns the uniform multinoulli log-density of the predictions."""
     y_hat = self.q_y_x_model(x)
     if y is not None:
         return dgm.multinoulliLogDensity(y, y_hat)
     return dgm.multinoulliUniformLogDensity(y_hat)