def predict(self, x, a=None, training=True, n_samps=5):
    """Predict y for given x with p(y|x,a), averaging over n_samps BNN weight draws.

    Fixes: the old docstring said q(y|x,a) although the pass uses the
    generative classifier network (self.py_xa, scope 'py_xa'); the
    duplicated sample-then-loop code is folded into a single loop.

    Args:
        x: input batch of shape (batch, n_x).
        a: optional auxiliary samples; drawn via sample_pa(x) when None.
        training: unused here; kept for interface compatibility with the
            sibling predict methods.
        n_samps: number of Bayesian weight samples to average over.

    Returns:
        Class probabilities of shape (batch, n_y), averaged over both the
        MC samples of a and the n_samps weight draws.
    """
    if a is None:
        _, _, a = self.sample_pa(x)
    # Pair each MC sample of a with a copy of x, then flatten to 2-D.
    x_tiled = tf.tile(tf.expand_dims(x, 0), [self.mc_samples, 1, 1])
    py_in = tf.reshape(tf.concat([x_tiled, a], axis=-1),
                       [-1, self.n_x + self.n_a])
    samples = []
    for _ in range(n_samps):
        # Fresh network weights for every prediction sample.
        self.W = bnn.sampleCatBNN(self.py_xa, self.n_hid)
        preds = dgm.forwardPassCat(py_in, self.W, self.n_hid,
                                   self.nonlinearity, self.bn, scope='py_xa')
        samples.append(tf.reduce_mean(
            tf.reshape(preds, [self.mc_samples, -1, self.n_y]), axis=0))
    return tf.reduce_mean(tf.stack(samples, axis=0), axis=0)
def predict(self, x, n_samps=5, training=True):
    """Predict y for given x with p(y|x).

    Draws n_samps sets of Bayesian network weights and averages the
    resulting class probabilities.
    """
    weight_draws = []
    for _ in range(n_samps):
        self.W = bnn.sampleCatBNN(self.py_x, self.n_hid)
        probs = dgm.forwardPassCat(x, self.W, self.n_hid, self.nonlinearity,
                                   self.bn, training=training, scope='py_x')
        weight_draws.append(probs)
    # Average over the weight samples (stack is equivalent to the
    # expand_dims/concat accumulation pattern used elsewhere in the file).
    return tf.reduce_mean(tf.stack(weight_draws, axis=0), axis=0)
def predict(self, x, n_iters=150):
    """Predict y for given x with p(y|x,z), marginalizing z with MC samples.

    Fixes: removed the unused local `n_u` and the redundant
    tf.expand_dims that was immediately flattened by the reshape.

    Args:
        x: input batch of shape (batch, n_x).
        n_iters: unused; kept for interface compatibility with the sibling
            predict methods.

    Returns:
        Class probabilities of shape (batch, n_y), averaged over
        self.mc_samples samples of z.
    """
    _, _, z = self.sample_z(x)
    x_ = tf.tile(tf.expand_dims(x, 0), [self.mc_samples, 1, 1])
    # Flatten (mc_samples, batch, n_x + n_z) to 2-D for the network pass.
    py_in = tf.reshape(tf.concat([x_, z], axis=-1),
                       [-1, self.n_x + self.n_z])
    y_ = dgm.forwardPassCat(py_in, self.py_xz, self.n_hid, self.nonlinearity,
                            self.bn, scope='py_xz')
    # Average the per-sample predictions over the MC dimension.
    return tf.reduce_mean(
        tf.reshape(y_, [self.mc_samples, -1, self.n_y]), axis=0)
def predict(self, x, training=True):
    """Predict y for given x with q(y|x)."""
    class_probs = dgm.forwardPassCat(x, self.qy_x, self.n_hid,
                                     self.nonlinearity, self.bn,
                                     training=training, scope='qy_x')
    return class_probs
def unlabeled_loss(self, x):
    """Unlabeled lower bound: E_q(y|x)[L(x, y)] + H(q(y|x))."""
    batch_size = tf.shape(x)[0]
    q_y = dgm.forwardPassCat(x, self.qy_x, self.n_hid, self.nonlinearity,
                             self.bn, scope='qy_x')
    # Replicate the batch once per class and enumerate every one-hot label.
    x_rep = tf.tile(x, [self.n_y, 1])
    y_rep = tf.reshape(tf.tile(tf.eye(self.n_y), [1, batch_size]),
                       [-1, self.n_y])
    z_mean, z_log_var, z = self.sample_z(x_rep, y_rep)
    x_mc = tf.tile(tf.expand_dims(x_rep, 0), [self.mc_samples, 1, 1])
    y_mc = tf.tile(tf.expand_dims(y_rep, 0), [self.mc_samples, 1, 1])
    bound = self.lowerBound(x_mc, y_mc, z, z_mean, z_log_var)
    # Reshape (n_y * batch,) -> (batch, n_y): one bound per label hypothesis.
    bound = tf.transpose(tf.reshape(bound, [self.n_y, batch_size]))
    expected_bound = tf.reduce_sum(q_y * bound, axis=-1)
    entropy = -tf.reduce_sum(q_y * tf.log(q_y + 1e-10), axis=-1)
    return expected_bound + entropy
def encode(self, x, y=None, n_iters=100):
    """Encode a new example into z-space (labeled or unlabeled).

    When no label is supplied, the hard argmax of q(y|x) is used.
    """
    if y is None:
        logits = dgm.forwardPassCat(x, self.qy_x, self.n_hid,
                                    self.nonlinearity, self.bn, scope='qy_x')
        y = tf.one_hot(tf.argmax(logits, axis=1), self.n_y)
    _, _, z = self.sample_z(x, y)
    return z
def predictq(self, x, a=None, training=True):
    """Predict y for given x with q(y|x,a), averaging over MC samples of a."""
    if a is None:
        _, _, a = self.sample_qa(x)
    # Pair each MC sample of a with a copy of x, flattened for the network.
    x_tiled = tf.tile(tf.expand_dims(x, 0), [self.mc_samples, 1, 1])
    qy_in = tf.reshape(tf.concat([x_tiled, a], axis=-1),
                       [-1, self.n_x + self.n_a])
    probs = dgm.forwardPassCat(qy_in, self.qy_xa, self.n_hid,
                               self.nonlinearity, self.bn, training, 'qy_xa')
    probs = tf.reshape(probs, [self.mc_samples, -1, self.n_y])
    return tf.reduce_mean(probs, axis=0)
def unlabeled_loss(self, x):
    """Compute the unlabeled loss E_q(y|x,z)[L] + H(q(y|x,z)).

    Bug fix: the one-hot label block was tiled with tf.shape(self.x_u)[0]
    instead of the batch size of the argument x, which silently broke the
    loss for any input other than self.x_u; it now uses tf.shape(x)[0],
    consistent with the sibling unlabeled_loss implementations.
    """
    z_m, z_lv, z = self.sample_z(x)
    n_u = tf.shape(x)[0]
    # Replicate inputs and z statistics once per class hypothesis.
    x_r = tf.tile(tf.tile(tf.expand_dims(x, 0), [self.mc_samples, 1, 1]),
                  [1, self.n_y, 1])
    z_r = tf.tile(z, [1, self.n_y, 1])
    z_mr = tf.tile(tf.expand_dims(z_m, 0), [1, self.n_y, 1])
    z_lvr = tf.tile(tf.expand_dims(z_lv, 0), [1, self.n_y, 1])
    # Every one-hot label repeated for each batch element, then tiled per MC sample.
    y_u = tf.reshape(tf.tile(tf.eye(self.n_y), [1, n_u]), [-1, self.n_y])
    y_u = tf.tile(tf.expand_dims(y_u, 0), [self.mc_samples, 1, 1])
    # (n_y * n_u,) -> (n_u, n_y): one lower bound per label hypothesis.
    lb_u = tf.transpose(tf.reshape(
        self.lowerBound(x_r, y_u, z_r, z_mr, z_lvr), [self.n_y, n_u]))
    # Classifier weights q(y|x,z), averaged over the MC samples of z.
    qy_in = tf.reshape(
        tf.concat([tf.tile(tf.expand_dims(x, 0), [self.mc_samples, 1, 1]), z],
                  axis=-1), [-1, self.n_x + self.n_z])
    qy_l = dgm.forwardPassCat(qy_in, self.qy_xz, self.n_hid,
                              self.nonlinearity, self.bn, scope='qy_xz')
    qy_l = tf.reduce_mean(
        tf.reshape(qy_l, [self.mc_samples, -1, self.n_y]), axis=0)
    lb_u = tf.reduce_sum(qy_l * lb_u, axis=-1)
    qy_entropy = -tf.reduce_sum(qy_l * tf.log(qy_l + 1e-10), axis=-1)
    return lb_u + qy_entropy
def build_model(self):
    """ Define model components and variables """
    self.create_placeholders()
    self.initialize_networks()
    ## model variables and relations ##
    # inference #
    # classifier logits q(y|x)
    self.y_ = dgm.forwardPassCatLogits(self.x, self.qy_x, self.n_hid,
                                       self.nonlinearity, self.bn,
                                       scope='qy_x', reuse=False)
    # encoder q(z|x,y): conditioned on input and label
    self.qz_in = tf.concat([self.x, self.y], axis=-1)
    self.qz_mean, self.qz_lv, self.z_ = dgm.samplePassGauss(
        self.qz_in, self.qz_xy, self.n_hid, self.nonlinearity, self.bn,
        scope='qz_xy', reuse=False)
    # generative #
    # NOTE(review): the prior sample count is hard-coded to 100 here, while
    # the sibling build_model derives it from the batch size — confirm intended.
    self.z_prior = tf.random_normal([100, self.n_z])
    # decoder p(x|z), Gaussian or Bernoulli likelihood
    if self.x_dist == 'Gaussian':
        self.px_mean, self.px_lv, self.x_ = dgm.samplePassGauss(
            self.z_prior, self.px_z, self.n_hid, self.nonlinearity, self.bn,
            scope='px_z', reuse=False)
    elif self.x_dist == 'Bernoulli':
        self.x_ = dgm.forwardPassBernoulli(self.z_prior, self.px_z,
                                           self.n_hid, self.nonlinearity,
                                           self.bn, scope='px_z', reuse=False)
    # label model p(y|x,z) over the generated x and prior z
    self.py_in = tf.concat([self.x_, self.z_prior], axis=-1)
    self.py_ = dgm.forwardPassCat(self.py_in, self.py_xz, self.n_hid,
                                  self.nonlinearity, self.bn,
                                  scope='py_xz', reuse=False)
    self.predictions = self.predict(self.x)
def predict(self, x, n_iters=150):
    """Predict y for given x with p(y|x,z) via alternating conditional sampling.

    Starts from the q(y|x) argmax label, then repeatedly samples z given the
    current label and re-predicts y from p(y|x,z); the final prediction is
    the mean over all per-iteration predictions (including the initial one).

    Fix: removed the unused local `yq`.

    Args:
        x: input batch of shape (batch, n_x).
        n_iters: number of resampling iterations.

    Returns:
        Class probabilities of shape (batch, n_y).
    """
    y_ = dgm.forwardPassCat(x, self.qy_x, self.n_hid, self.nonlinearity,
                            self.bn, scope='qy_x')
    y_ = tf.one_hot(tf.argmax(y_, axis=1), self.n_y)
    y_samps = tf.expand_dims(y_, axis=2)
    for i in range(n_iters):
        _, _, z = self.sample_z(x, y_)
        z = tf.reshape(z, [-1, self.n_z])
        py_in = tf.concat([x, z], axis=-1)
        y_ = dgm.forwardPassCat(py_in, self.py_xz, self.n_hid,
                                self.nonlinearity, self.bn, scope='py_xz')
        y_samps = tf.concat([y_samps, tf.expand_dims(y_, axis=2)], axis=2)
        # Harden to a one-hot label before conditioning the next z sample.
        y_ = tf.one_hot(tf.argmax(y_, axis=1), self.n_y)
    return tf.reduce_mean(y_samps, axis=2)
def encode(self, x, y=None, n_iters=100):
    """Encode a new example into z-space (labeled or unlabeled).

    Bug fix: the classifier pass fed the q(y|x,a) parameters (self.qy_xa)
    but reused the a-encoder's variable scope 'qa_x' — a copy-paste error.
    It now uses scope 'qy_xa', matching how this classifier is invoked in
    predictq.

    Args:
        x: input batch of shape (batch, n_x).
        y: optional one-hot labels; inferred from q(y|x,a) when None.
        n_iters: unused; kept for interface compatibility.

    Returns:
        A sample of z from q(z|x,y,a).
    """
    _, _, a = dgm.samplePassGauss(x, self.qa_x, self.n_hid,
                                  self.nonlinearity, self.bn, True, 'qa_x')
    if y is None:
        h = tf.concat([x, a], axis=1)
        logits = dgm.forwardPassCat(h, self.qy_xa, self.n_hid,
                                    self.nonlinearity, self.bn, True, 'qy_xa')
        y = tf.one_hot(tf.argmax(logits, axis=1), self.n_y)
    h = tf.concat([x, y, a], axis=1)
    _, _, z = dgm.samplePassGauss(h, self.qz_xya, self.n_hid,
                                  self.nonlinearity, self.bn, True, 'qz_xya')
    return z
def unlabeled_loss(self, x):
    """Compute the unlabeled loss E_q(y|x)[L(x, y)] + H(q(y|x)).

    Bug fix: the one-hot label block was tiled with tf.shape(self.x_u)[0]
    even though the function already computes n_u = tf.shape(x)[0]; it now
    uses n_u, so the loss is correct for any input batch and consistent
    with the sibling unlabeled_loss implementation.
    """
    qy_l = dgm.forwardPassCat(x, self.qy_x, self.n_hid, self.nonlinearity,
                              self.bn, scope='qy_x')
    n_u = tf.shape(x)[0]
    # Replicate the batch once per class and enumerate every one-hot label.
    x_r = tf.tile(x, [self.n_y, 1])
    y_u = tf.reshape(tf.tile(tf.eye(self.n_y), [1, n_u]), [-1, self.n_y])
    # (n_y * n_u,) -> (n_u, n_y): one labeled-loss term per label hypothesis.
    lb_u = tf.transpose(
        tf.reshape(self.labeled_loss(x_r, y_u), [self.n_y, n_u]))
    lb_u = tf.reduce_sum(qy_l * lb_u, axis=-1)
    qy_entropy = -tf.reduce_sum(qy_l * tf.log(qy_l + 1e-10), axis=-1)
    return lb_u + qy_entropy
def build_model(self):
    """ Define model components and variables """
    self.create_placeholders()
    self.initialize_networks()
    ## model variables and relations ##
    # inference #
    # classifier logits q(y|x)
    self.y_ = dgm.forwardPassCatLogits(self.x, self.qy_x, self.n_hid,
                                       self.nonlinearity, self.bn,
                                       scope='qy_x', reuse=False)
    # encoder q(z|x,y): conditioned on input and label
    self.qz_in = tf.concat([self.x, self.y], axis=-1)
    self.qz_mean, self.qz_lv, self.z_ = dgm.samplePassGauss(
        self.qz_in, self.qz_xy, self.n_hid, self.nonlinearity, self.bn,
        scope='qz_xy', reuse=False)
    # generative #
    # one prior z sample per label in the current batch
    self.z_prior = tf.random_normal([tf.shape(self.y)[0], self.n_z])
    # decoder p(x|y,z), Gaussian or Bernoulli likelihood
    self.px_in = tf.concat([self.y, self.z_prior], axis=-1)
    if self.x_dist == 'Gaussian':
        self.px_mean, self.px_lv, self.x_ = dgm.samplePassGauss(
            self.px_in, self.px_yz, self.n_hid, self.nonlinearity, self.bn,
            scope='px_yz', reuse=False)
        self.x_ = tf.reshape(self.x_, [-1, self.n_x])
    elif self.x_dist == 'Bernoulli':
        self.x_ = dgm.forwardPassBernoulli(self.px_in, self.px_yz,
                                           self.n_hid, self.nonlinearity,
                                           self.bn, scope='px_yz',
                                           reuse=False)
    # Bayesian label model p(y|x) over the generated x, with sampled weights
    self.W = bnn.sampleCatBNN(self.py_x, self.n_hid)
    self.py = dgm.forwardPassCat(self.x_, self.W, self.n_hid,
                                 self.nonlinearity, self.bn,
                                 scope='py_x', reuse=False)
    self.predictions = self.predict(self.x, n_samps=50, training=False)
def predictq(self, x):
    """Predict y for given x with q(y|x)."""
    q_probs = dgm.forwardPassCat(x, self.qy_x, self.n_hid,
                                 self.nonlinearity, self.bn, scope='qy_x')
    return q_probs