def _get_hard_v(self, num_samples):
    """Draw hard visible samples by ancestral sampling from a uniform h prior."""
    # Bernoulli(0.5) prior over the hidden layer feeding h2v.
    prior_probs = tf.constant(
        .5, shape=(num_samples, self.dim_h2v[0]), dtype=tf.float32)
    hidden = sample_from_bernoulli(prior_probs)
    visible = sample_from_bernoulli(self.h2v(hidden))
    return visible.numpy()
def get_v(self, num_samples):
    """Ancestral sample of visibles: z ~ Bern(0.5), h ~ Bern(z2h(z)), v ~ Bern(h2v(h))."""
    uniform = tf.constant(
        .5, shape=(num_samples, self.dim_z2h[0]), dtype=tf.float32)
    top = sample_from_bernoulli(uniform)
    middle = sample_from_bernoulli(self.z2h(top))
    return sample_from_bernoulli(self.h2v(middle)).numpy()
def get_independent_means(self, num_samples, burn_in_steps=100000, random=True):
    """Burn in independent Gibbs chains and return visible means.

    Each chain starts from zeros, or from Bernoulli(0.2) draws (roughly the
    data average) when `random` is set. After burn-in, one more hidden
    sample is drawn and the corresponding visible activation is returned.
    """
    start = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)
    chain = sample_from_bernoulli(start + 0.2) if random else start  # data average
    for _ in xrange(burn_in_steps):
        _, chain = self.gibbs_vhv(chain)
    final_hidden = sample_from_bernoulli(self.vis2hid(chain))
    return self.hid2vis(final_hidden).numpy()
def sample(self, num_samples):
    """Draw h2 via Gumbel-sigmoid given z sampled from a Bernoulli(0.5) prior."""
    uniform_prior = tf.constant(
        .5, shape=(num_samples, self.dims[0]), dtype=tf.float32)
    latent = sample_from_bernoulli(uniform_prior)
    return gumbel_sigmoid_sample(self.inference(latent), self.temp, self.hard)
def get_independent_means(self, num_samples, burn_in_steps=100000):
    """Burn in independent Gibbs chains from zeros; return visible means."""
    chain = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)
    for _ in xrange(burn_in_steps):
        _, chain = self.gibbs_vhv(chain)
    final_hidden = sample_from_bernoulli(self.vis2hid(chain))
    # hid2vis returns a tuple here; element 0 is used as the visible means
    # (presumably the mean component — confirm against hid2vis).
    return self.hid2vis(final_hidden)[0].numpy()
def get_h(self, num_samples, burn_in_steps=1000, random=True):
    """Sample hidden states after a Gibbs burn-in.

    Args:
      num_samples: number of independent chains.
      burn_in_steps: number of v->h->v sweeps; must be >= 1 because the
        returned h is only bound inside the loop.
      random: if True, start chains from Bernoulli(0.5) draws instead of zeros.

    Returns:
      numpy array of hidden samples from the final sweep.

    Raises:
      ValueError: if burn_in_steps < 1 (previously this path died with an
        obscure NameError on `h`).
    """
    if burn_in_steps < 1:
        raise ValueError('burn_in_steps must be >= 1, got %d' % burn_in_steps)
    v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)
    if random:
        v = sample_from_bernoulli(v + 0.5)  # data average
    for _ in xrange(burn_in_steps):
        h, v = self.gibbs_vhv(v)
    return h.numpy()
def reconstruction_error(self, v_0):
    """Mean per-example sigmoid cross-entropy of a one-step reconstruction of v_0."""
    hid_sample = sample_from_bernoulli(self.vis2hid(v_0))
    # Reconstruction logits use the transposed weight matrix plus visible bias.
    recon_logits = tf.matmul(hid_sample, tf.transpose(self.w)) + self.vis_b
    per_unit = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=v_0, logits=recon_logits)
    per_example = tf.reduce_sum(per_unit, axis=1)
    return tf.reduce_mean(per_example, axis=0)
def get_h_and_v(self, num_samples):
    """Ancestral Gumbel-sigmoid sample of (h, v) given z ~ Bernoulli(0.5)."""
    prior = tf.constant(
        .5, shape=(num_samples, self.dim_z2h[0]), dtype=tf.float32)
    z_sample = sample_from_bernoulli(prior)
    h_sample = gumbel_sigmoid_sample(self.z2h(z_sample), self.temp, self.hard)
    v_sample = gumbel_sigmoid_sample(self.h2v(h_sample), self.temp, self.hard)
    return h_sample, v_sample
def logprobs_all(self, num_samples):
    """Sample (z, h) and return log p(z) + log p(h|z) together with the samples."""
    half = tf.constant(.5, shape=(num_samples, self.dims[0]), dtype=tf.float32)
    z = sample_from_bernoulli(half)
    mu = self.inference(z)
    h = gumbel_sigmoid_sample(mu, self.temp, self.hard)
    # Prior over z is uniform Bernoulli(0.5).
    prior_probs = tf.constant(.5, shape=z.shape, dtype=tf.float32)
    joint_logp = (bernoulli_log_likelihood(z, prior_probs) +
                  bernoulli_log_likelihood(h, mu))
    return joint_logp, z, h
def cd_step(self, v, train_mc_steps):
    """Contrastive-divergence chain: alternate h->v->h for train_mc_steps steps.

    Returns the final visible sample with gradients blocked, so CD training
    does not backpropagate through the Gibbs chain.
    """
    h_chain = [sample_from_bernoulli(self.vis2hid(v))]
    v_chain = []
    for _ in xrange(train_mc_steps):
        step_v, step_h = self.gibbs_hvh(h_chain[-1])
        v_chain.append(step_v)
        h_chain.append(step_h)
    return tf.stop_gradient(v_chain[-1])
def get_independent_means(self, num_samples, burn_in_steps=100000, random=True):
    """Gaussian-visible variant: burn in independent Gibbs chains, return visible means."""
    chain = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)
    if random:
        # Random init: Gaussian noise with log-variance 2*log(sigma),
        # shifted by the visible bias.
        chain = sample_from_gaussian(chain, 2. * tf.log(self.sigma)) + self.vis_b
    for _ in xrange(burn_in_steps):
        _, chain = self.gibbs_vhv(chain)
    final_hidden = sample_from_bernoulli(self.vis2hid(chain))
    return self.hid2vis(final_hidden).numpy()
def loop_body(i, tmp_v):
    # Sequential-Gibbs update for visible unit i in a model with lateral
    # connections self.l between visible units.
    # NOTE(review): `uncorr_term` is a free variable captured from the
    # enclosing scope (presumably the connection-independent logits) —
    # confirm at the definition site.
    # Full quadratic interaction term v^T L v for the current state.
    corr_term = tf.reduce_sum(tf.matmul(tmp_v, self.l) * tmp_v, axis=1)
    # Remove unit i's column and row contributions.
    # NOTE(review): each subtraction reduces to tmp_v[:, i] * sum(self.l[:, i])
    # (resp. the row sum) — there is no tmp_v[:, j] factor inside the sum.
    # Verify this is intended and not a missing elementwise product with tmp_v.
    corr_term -= tf.reduce_sum(
        tf.expand_dims(tmp_v[:, i], axis=1) *
        tf.expand_dims(self.l[:, i], axis=0), axis=1)
    corr_term -= tf.reduce_sum(
        tf.expand_dims(tmp_v[:, i], axis=1) *
        tf.expand_dims(self.l[i, :], axis=0), axis=1)
    # Diagonal entry compensation: the l[i, i] term appears in both the row
    # and column subtractions above; this adds one copy back.
    corr_term += tmp_v[:, i] * tmp_v[:, i] * self.l[i, i]
    mu_i = tf.sigmoid(uncorr_term[:, i] + corr_term)
    sample_i = sample_from_bernoulli(mu_i)
    return tf.expand_dims(mu_i, axis=1), tf.expand_dims(sample_i, axis=1)
def get_independent_samples(self, num_samples, burn_in_steps=100000, random=True, initial_v=None):
    # Return visible samples from independent Gibbs chains after burn-in.
    # Chains start from `initial_v` when provided, otherwise from zeros or —
    # when `random` is set — from Bernoulli(0.2) draws (data average).
    # NOTE(review): original indentation was lost; the `if random:` branch is
    # placed inside the else-arm so a caller-supplied `initial_v` is never
    # perturbed. If `random` was meant to re-noise `initial_v` too, move the
    # `if random:` block to function level — confirm against callers.
    if initial_v is not None:
        v = initial_v
    else:
        v = tf.zeros([num_samples, self.vis_dim], dtype=tf.float32)
        if random:
            v = sample_from_bernoulli(v + 0.2)  # data average
    for i in xrange(burn_in_steps):
        _, v = self.gibbs_vhv(v)
    return v.numpy()
def _test_gumbel(self, num_samples=1):
    """Debug helper: print hard vs. soft Gumbel-sigmoid samples, then exit.

    Draws z ~ Bernoulli(0.5), pushes it through z2h/h2v, and prints the
    means next to hard and soft Gumbel-sigmoid samples at the current
    temperature. Ends with exit() — intended for one-off manual inspection
    only; never call from production code paths.
    """
    z = sample_from_bernoulli(
        tf.constant(.5, shape=(num_samples, self.dim_z2h[0]), dtype=tf.float32))
    h_mu = self.z2h(z)
    h_hard = gumbel_sigmoid_sample(h_mu, self.temp, True)
    h_soft = gumbel_sigmoid_sample(h_mu, self.temp, False)
    print 'h_mu\n', h_mu, 'h_hard\n', h_hard, 'h_soft\n', h_soft
    # Downstream v statistics when conditioning on the *hard* h sample.
    v_mu = self.h2v(h_hard)
    v_hard = gumbel_sigmoid_sample(v_mu, self.temp, True)
    v_soft = gumbel_sigmoid_sample(v_mu, self.temp, False)
    print 'v_mu\n', v_mu, 'v_hard\n', v_hard, 'v_soft\n', v_soft
    # Same, conditioning on the *soft* h sample.
    v_mu = self.h2v(h_soft)
    v_hard = gumbel_sigmoid_sample(v_mu, self.temp, True)
    v_soft = gumbel_sigmoid_sample(v_mu, self.temp, False)
    print 'v_mu\n', v_mu, 'v_hard\n', v_hard, 'v_soft\n', v_soft
    exit()
def get_samples_single_chain(self, num_samples, adjacent_samples=10,
                             steps_between_samples=1000, burn_in_steps=100000,
                             random=True):
    """Collect `num_samples` visible samples from a single Gibbs chain.

    After burn-in, samples are taken in groups of `adjacent_samples`
    consecutive sweeps, with `steps_between_samples` extra sweeps between
    groups to decorrelate them.

    Args:
      num_samples: total samples to return; must be a multiple of
        `adjacent_samples`.
      adjacent_samples: consecutive sweeps recorded per group.
      steps_between_samples: decorrelation sweeps between groups.
      burn_in_steps: initial sweeps discarded before sampling.
      random: if True, start from Bernoulli(0.2) draws (data average)
        instead of zeros.

    Returns:
      numpy array of shape [num_samples, vis_dim].
    """
    assert num_samples % adjacent_samples == 0
    v = tf.zeros([1, self.vis_dim], dtype=tf.float32)
    if random:
        v = sample_from_bernoulli(v + 0.2)  # data average
    for _ in xrange(burn_in_steps):
        _, v = self.gibbs_vhv(v)
    sample_list = []
    # `//` keeps this an int under `from __future__ import division` or
    # Python 3; plain `/` would hand xrange a float and raise TypeError.
    for _ in xrange(num_samples // adjacent_samples):
        for _ in xrange(adjacent_samples):
            _, v = self.gibbs_vhv(v)
            sample_list.append(v.numpy())
        for _ in xrange(steps_between_samples):
            _, v = self.gibbs_vhv(v)
    return np.vstack(sample_list)
def approximate_gibbs_hvh(self, h1, enc):
    """One approximate h1 -> v -> h1 sweep, using encoder `enc` for the upward pass."""
    visible = sample_from_bernoulli(self.h1_to_v(h1))
    next_h1 = enc.get_hard_h1(visible)
    return visible, next_h1
def gibbs_hvh(self, h_0):
    """Gaussian-visible Gibbs sweep h -> v -> h, blocking gradients at v."""
    # Visible conditional is Gaussian with log-variance 2*log(sigma).
    vis = sample_from_gaussian(self.hid2vis(h_0), 2 * tf.log(self.sigma))
    vis = tf.stop_gradient(vis)
    hid = sample_from_bernoulli(self.vis2hid(vis))
    return vis, hid
def get_v(self, num_samples):
    """Sample visibles through h2v from the learned prior over h."""
    hidden = self.prior_h.sample(num_samples)
    return sample_from_bernoulli(self.h2v(hidden)).numpy()
def get_h_hard(self, x):
    """Hard Bernoulli sample of the hidden units given input x."""
    hidden_probs = self.inference(x)
    return sample_from_bernoulli(hidden_probs)
def gibbs_vhv(self, v_0):
    """One v -> h -> v Gibbs sweep; the decode is conditioned on v_0."""
    hid = sample_from_bernoulli(self.vis2hid(v_0))
    # hid2vis returns a tuple here; element 1 is used as the new visibles
    # (presumably the sample component — confirm against hid2vis).
    new_v = self.hid2vis(hid, v_0)[1]
    return hid, new_v
def gibbs_hvh(self, h1):
    """DBM-style sweep: sample v and h2 given h1, then resample h1 given (v, h2)."""
    vis = sample_from_bernoulli(self.h1_to_v(h1))
    top = sample_from_bernoulli(self.h1_to_h2(h1))
    middle = sample_from_bernoulli(self.vh2_to_h1(vis, top))
    return vis, middle, top
def gibbs_vhv(self, v_0):
    """Gaussian-visible sweep v -> h -> v, blocking gradients at the new v."""
    hid = sample_from_bernoulli(self.vis2hid(v_0))
    # New visibles are Gaussian with log-variance 2*log(sigma).
    new_v = sample_from_gaussian(self.hid2vis(hid), 2 * tf.log(self.sigma))
    new_v = tf.stop_gradient(new_v)
    return hid, new_v
def gibbs_hvh(self, h_0, v_0):
    """Sweep h -> v -> h where the decode is conditioned on the previous v_0."""
    # Element 1 of the conditioned decode is used as the new visibles
    # (presumably the sample component — confirm against hid2vis).
    new_v = self.hid2vis(h_0, v_0)[1]
    new_h = sample_from_bernoulli(self.vis2hid(new_v))
    return new_v, new_h
def _gibbs_vhv(self, v, h2):
    """Sample h1 from (v, h2); return the h1_to_v output, h1, and a new h2 sample."""
    middle = sample_from_bernoulli(self.vh2_to_h1(v, h2))
    # h1_to_v output is returned without sampling (presumably the means —
    # confirm against callers).
    vis_out = self.h1_to_v(middle)
    top_out = sample_from_bernoulli(self.h1_to_h2(middle))
    return vis_out, middle, top_out