Example 1
 def train_step(images, labels):
     # Stochastically binarize the inputs via Bernoulli sampling.
     images = u.sample_bernoulli(images)
     with tf.GradientTape() as tape:
         # Forward pass; model.losses collects any regularization terms
         # added by the model's layers.
         loss = model(images, training=True, iw_k=p.training.train_iw_k)
         loss_train = loss + tf.reduce_sum(model.losses)
     # Backpropagate the total loss and update the model's weights.
     gradients = tape.gradient(loss_train, model.trainable_variables)
     optimizer.apply_gradients(zip(gradients, model.trainable_variables))
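
The step above closes over model, optimizer, the helper module u, and a config object p, which are defined elsewhere in the source. As a minimal sketch only, assuming a tf.data pipeline named train_ds that yields (images, labels) batches and a hypothetical NUM_EPOCHS constant, it could be driven like this:

 # Hypothetical driver loop; train_ds and NUM_EPOCHS are assumptions,
 # not part of the original example.
 train_step_fn = tf.function(train_step)  # optional graph compilation
 for epoch in range(NUM_EPOCHS):
     for images, labels in train_ds:
         train_step_fn(images, labels)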
Example 2
 def _z_space_reconstruction_loss(self, z):
     """Mean squared reconstruction error of z after one up/down pass."""
     assert z.get_shape().ndims == 2, 'z must be a batch of 1-D vectors'
     # Up pass: hidden unit probabilities and a Bernoulli sample of them.
     hprob = tf.nn.sigmoid(tf.matmul(z, self.weights) + self.hbias)
     hsample = utils.sample_bernoulli(hprob)
     # Down pass reconstructs z from the mean activations (hprob).
     zprob = tf.nn.sigmoid(
         tf.matmul(hprob, self.weights, transpose_b=True) + self.zbias)
     # Per-instance squared error, averaged over the batch.
     instance_losses = tf.reduce_sum(tf.square(z - zprob), 1)
     return tf.reduce_mean(instance_losses)
Example 3
 def test_step(images, labels, iw_k=1):
     # Binarize the inputs, evaluate the model, and accumulate the test
     # loss in the running-mean metric.
     images = u.sample_bernoulli(images)
     loss_test = model(images, iw_k=iw_k)
     metric_mean(loss_test)
Example 4
 def _sample_probs(self, states, offset):
     """In-place Bernoulli sampling over a list of probability tensors."""
     # Replace every other layer's probabilities, starting at `offset`,
     # with binary samples drawn from them.
     for i in range(offset, self.num_layers, 2):
         states[i] = utils.sample_bernoulli(states[i])
Example 5
 def vhv(self, vis_samples):
     # One Gibbs step: visible -> hidden -> visible.
     hid_samples = utils.sample_bernoulli(self._compute_up(vis_samples))
     vprob = self._compute_down(hid_samples)
     vis_samples = utils.sample_bernoulli(vprob)
     # Return both the visible probabilities and their binary samples.
     return vprob, vis_samples
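
All of the examples above rely on a sample_bernoulli helper (imported as u or utils) that is not shown here. A common implementation, sketched below purely as an assumption about what the helper does, treats its input as element-wise probabilities and thresholds uniform noise against them:

 import tensorflow as tf

 def sample_bernoulli(probs):
     # Draw u ~ Uniform(0, 1) per element; emit 1.0 where u < p, else 0.0.
     noise = tf.random.uniform(tf.shape(probs), dtype=probs.dtype)
     return tf.cast(noise < probs, probs.dtype)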