def _print_verbose1(self, epoch, fd, sess, acc_train, acc_test, avg_var=None):
    """Print per-epoch diagnostics: log p(x) and KL(z) on train/test plus accuracies.

    Args:
        epoch: current epoch number, used only for display.
        fd: feed_dict handed to ``sess.run``.
        sess: active TF session.
        acc_train, acc_test: pre-computed accuracies to report.
        avg_var: unused; retained for signature compatibility with callers.
    """
    # Draw a single sample from q(a|x) and then q(z|x,y,a) for each split.
    am_test, alv_test, a_test = self._sample_a(self.x_test, 1)
    am_train, alv_train, a_train = self._sample_a(self.x_train, 1)
    zm_test, zlv_test, z_test = self._sample_Z(self.x_test, self.y_test, a_test, 1)
    zm_train, zlv_train, z_train = self._sample_Z(self.x_train, self.y_train, a_train, 1)
    # NOTE(review): _gauss_kl receives exp(log_var) here but raw log_var in
    # _compute_ELBO/_labeled_loss — confirm which argument convention dgm expects.
    lpx_test, lpx_train, klz_test, klz_train = sess.run([
        self._compute_logpx(self.x_test, z_test, self.y_test),
        self._compute_logpx(self.x_train, z_train, self.y_train),
        dgm._gauss_kl(zm_test, tf.exp(zlv_test)),
        dgm._gauss_kl(zm_train, tf.exp(zlv_train))
    ], feed_dict=fd)
    # The test-split terms were previously evaluated but never reported;
    # include them (parenthesized) so the already-paid-for computation is visible.
    print(
        'Epoch: {}, logpx: {:5.3f} ({:5.3f}), klz: {:5.3f} ({:5.3f}), '
        'Train: {:5.3f}, Test: {:5.3f}'.format(
            epoch, np.mean(lpx_train), np.mean(lpx_test),
            np.mean(klz_train), np.mean(klz_test), acc_train, acc_test))
def _compute_ELBO(self, x):
    """Build the (beta-weighted) ELBO for a batch x.

    Returns:
        Tuple of scalar tensors:
        (mean ELBO, mean log p(x|z), mean KL(q(z|x) || p(z))).
    """
    z_mean, z_log_var, z = self._sample_Z(x)
    # KL between the amortized posterior q(z|x) and the prior.
    KLz = dgm._gauss_kl(z_mean, z_log_var)
    l_px = self._compute_logpx(x, z)
    # Removed dead code: l_qz and l_pz were computed here (adding graph nodes
    # on every call) but never used in the bound below.
    # beta = 1 recovers the standard ELBO.
    total_elbo = l_px - self.beta * KLz
    return tf.reduce_mean(total_elbo), tf.reduce_mean(l_px), tf.reduce_mean(KLz)
def _labeled_loss(self, x, y):
    """Labeled-data loss (per data point).

    Returns the tensor
        log p(x|z) + log p(y|x,z) + beta * (log p(z) - log q(z|x,y)),
    with z drawn from the amortized posterior.
    """
    q_mean, q_log_var, z = self._sample_Z(x, y, self.Z_SAMPLES)
    l_px = self._compute_logpx(x, z)
    l_py = self._compute_logpy(y, x, z)
    # Standard-normal prior: zero mean, log-variance log(1) = 0.
    l_pz = dgm._gauss_logp(z, tf.zeros_like(z), tf.log(tf.ones_like(z)))
    l_qz = dgm._gauss_logp(z, q_mean, q_log_var)
    # Removed dead code: klz = dgm._gauss_kl(q_mean, q_log_var) was built but
    # never used in the returned loss.
    return l_px + l_py + self.beta * (l_pz - l_qz)
def _print_verbose1(self, epoch, fd, sess):
    """Print per-epoch diagnostics: log p(x), KL(z) and accuracies on both splits.

    Args:
        epoch: current epoch number, used only for display.
        fd: feed_dict handed to ``sess.run``.
        sess: active TF session.
    """
    # NOTE(review): presumably disables training-mode behavior (e.g. dropout /
    # batch-norm) before evaluation — confirm how self.phase is consumed.
    self.phase = False
    zm_test, zlv_test, z_test = self._sample_Z(self.x_test, self.y_test, 1)
    zm_train, zlv_train, z_train = self._sample_Z(self.x_train, self.y_train, 1)
    # NOTE(review): _gauss_kl receives exp(log_var) here but raw log_var in
    # other methods of this file — confirm which convention dgm expects.
    lpx_test, lpx_train, klz_test, klz_train, acc_train, acc_test = sess.run(
        [
            self._compute_logpx(self.x_test, z_test, self.y_test),
            self._compute_logpx(self.x_train, z_train, self.y_train),
            dgm._gauss_kl(zm_test, tf.exp(zlv_test)),
            dgm._gauss_kl(zm_train, tf.exp(zlv_train)),
            self.train_acc, self.test_acc
        ],
        feed_dict=fd)
    # The test-split terms were previously evaluated but never reported;
    # include them (parenthesized) so the already-paid-for computation is visible.
    print(
        'Epoch: {}, logpx: {:5.3f} ({:5.3f}), klz: {:5.3f} ({:5.3f}), '
        'Train: {:5.3f}, Test: {:5.3f}'.format(
            epoch, np.mean(lpx_train), np.mean(lpx_test),
            np.mean(klz_train), np.mean(klz_test), acc_train, acc_test))
def _kl_W(self):
    """Total KL divergence of the variational weight/bias posteriors in Pzx_y.

    Sums dgm._gauss_kl over every hidden layer's weight matrix and bias
    vector, plus the output layer's weight and bias.
    """
    def _summed_kl(mean, logvar):
        # Collapse the per-parameter KL terms to a single scalar.
        return tf.reduce_sum(dgm._gauss_kl(mean, logvar))

    total = 0
    for layer in range(len(self.NUM_HIDDEN)):
        total += _summed_kl(self.Pzx_y['W{}_mean'.format(layer)],
                            self.Pzx_y['W{}_logvar'.format(layer)])
        # Bias vectors get a singleton trailing dimension before the KL call.
        total += _summed_kl(
            tf.expand_dims(self.Pzx_y['b{}_mean'.format(layer)], 1),
            tf.expand_dims(self.Pzx_y['b{}_logvar'.format(layer)], 1))
    # Output-layer parameters.
    total += _summed_kl(self.Pzx_y['Wout_mean'], self.Pzx_y['Wout_logvar'])
    total += _summed_kl(tf.expand_dims(self.Pzx_y['bout_mean'], 1),
                        tf.expand_dims(self.Pzx_y['bout_logvar'], 1))
    return total
def _labeled_loss_W(self, x, y):
    """Labeled-data loss (per data point) with a Bayesian-weight KL penalty.

    Returns the tensor
        log p(x|z) + log p(y|x,z) + beta * (log p(z) - log q(z|x,y)) - KL(W)/d,
    where d is the training-set size so the weight KL is amortized per example.
    """
    d = tf.cast(self.TRAINING_SIZE, tf.float32)
    q_mean, q_logvar, z = self._sample_Z(x, y, self.Z_SAMPLES)
    l_px = self._compute_logpx(x, z)
    l_py = self._compute_logpy(y, x, z)
    # Standard-normal prior: zero mean, log-variance log(1) = 0.
    l_pz = dgm._gauss_logp(z, tf.zeros_like(z), tf.log(tf.ones_like(z)))
    l_qz = dgm._gauss_logp(z, q_mean, q_logvar)
    # Removed dead code: klz = dgm._gauss_kl(q_mean, q_logvar) was built but
    # never used in the returned loss.
    klw = self._kl_W() / d
    return l_px + l_py + self.beta * (l_pz - l_qz) - klw