def create_loss_optimizer(self):
    """Bayesian autoencoder variant: reconstruction + prior-sample reconstruction
    + L2 weight decay on the posterior ('post_') variables, with a KL term
    between posterior and prior."""
    print('[*] Defining Loss Functions and Optimizer...')
    with tf.name_scope('reconstruct'):
        self.reconstruction = losses.get_ell(self.x_batch_flat, self.x_recons_flat)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

    with tf.name_scope('prior_recons'):
        self.prior_recons = losses.get_ell(self.sample_flat, self.sample_recons_flat)
        self.prior_recons_m = tf.reduce_mean(self.prior_recons)

    with tf.variable_scope('L2_loss', reuse=self.reuse):
        tv = tf.trainable_variables()
        # Weight decay is applied only to the posterior ('post_') variables.
        self.L2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in tv if 'post_' in v.name])

    with tf.variable_scope('ae_loss', reuse=self.reuse):
        self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction),
                              self.l2 * self.L2_loss, name='ae_loss') + self.prior_recons_m

    with tf.variable_scope('bayae_loss', reuse=self.reuse):
        if self.isConv:
            self.bay_kl = -1 * losses.get_QP_kl(
                self.post_mean, self.post_var,
                tf.reshape(self.prior_mean, [self.MC_samples, self.batch_size, self.latent_dim]),
                tf.reshape(self.prior_var, [self.MC_samples, self.batch_size, self.latent_dim]))
        else:
            self.bay_kl = -1 * losses.get_QP_kl(self.post_mean, self.post_var,
                                                self.prior_mean, self.prior_var)
        self.bayae_loss = tf.add(tf.cast(self.num_batches, 'float32') * self.ae_loss,
                                 self.bay_kl, name='bayae_loss')

    with tf.variable_scope('optimizer', reuse=self.reuse):
        self.optimizer = tf.train.AdamOptimizer(self.lr)
        # Note: Adam minimizes ae_loss; bayae_loss is built above but is not
        # the tensor passed to minimize().
        self.train_step = self.optimizer.minimize(self.ae_loss,
                                                  global_step=self.global_step_tensor)

    self.losses = ['Total', 'AE', 'reconstruction', 'L2', 'prior_reconst', 'bayesian_KL']
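# The helpers from the `losses` module are not shown in this section. For
# reference, below is a minimal sketch of what `losses.get_QP_kl` could look
# like, assuming it returns the per-sample KL divergence between two diagonal
# Gaussians Q = N(post_mean, post_var) and P = N(prior_mean, prior_var), with
# the *_var arguments being variances (not log-variances). The signature and
# the `eps` stabilizer are assumptions of this sketch, not the repository's
# actual implementation.
import tensorflow as tf

def get_QP_kl(post_mean, post_var, prior_mean, prior_var, eps=1e-8):
    """KL( N(post_mean, post_var) || N(prior_mean, prior_var) ), summed over
    the latent (last) axis."""
    kl = 0.5 * (tf.math.log(prior_var + eps) - tf.math.log(post_var + eps)
                + (post_var + tf.square(post_mean - prior_mean)) / (prior_var + eps)
                - 1.0)
    return tf.reduce_sum(kl, axis=-1)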
def create_loss_optimizer(self):
    print('[*] Defining Loss Functions and Optimizer...')
    with tf.name_scope('reconstruct'):
        self.reconstruction = losses.get_ell(self.x_batch_flat, self.x_recons_flat)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

    with tf.variable_scope('L2_loss', reuse=self.reuse):
        tv = tf.trainable_variables()
        self.L2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in tv])

    with tf.variable_scope('ae_loss', reuse=self.reuse):
        self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction),
                              self.l2 * self.L2_loss, name='ae_loss')

    with tf.variable_scope('kl_loss', reuse=self.reuse):
        self.kl_loss = losses.get_kl(self.encoder_mean, self.encoder_logvar)
        self.kl_loss_m = tf.reduce_mean(self.kl_loss)

    with tf.variable_scope('vae_loss', reuse=self.reuse):
        self.vae_loss = tf.add(self.ae_loss, self.kl_loss_m)

    with tf.variable_scope('annvae_loss', reuse=self.reuse):
        c = losses.anneal(self.c_max, self.global_step_tensor, self.itr_thd)
        regularizer = self.ann_gamma * tf.math.abs(self.kl_loss_m - c)
        self.annvae_loss = tf.add(self.ae_loss, regularizer)

    with tf.variable_scope('optimizer', reuse=self.reuse):
        self.optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_step = self.optimizer.minimize(self.annvae_loss,
                                                  global_step=self.global_step_tensor)

    self.losses = ['AnnVAE', 'VAE', 'AE', 'reconstruction', 'L2']
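# Minimal sketches of the two helpers assumed above, `losses.get_kl` and
# `losses.anneal`. These are illustrative assumptions, not the repository's
# actual implementations: `get_kl` is the standard KL between the diagonal
# Gaussian posterior N(mean, exp(logvar)) and a standard normal prior, and
# `anneal` is taken to grow the capacity target linearly from 0 to c_max over
# `itr_thd` steps (capacity annealing); the exact schedule is an assumption.
import tensorflow as tf

def get_kl(mean, logvar):
    """Per-sample KL( N(mean, exp(logvar)) || N(0, I) ), summed over latents."""
    return -0.5 * tf.reduce_sum(1.0 + logvar - tf.square(mean) - tf.exp(logvar), axis=-1)

def anneal(c_max, step, itr_thd):
    """Capacity target that increases linearly from 0 to c_max over itr_thd steps."""
    ratio = tf.cast(step, tf.float32) / float(itr_thd)
    return c_max * tf.minimum(ratio, 1.0)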
def create_loss_optimizer(self):
    print('[*] Defining Loss Functions and Optimizer...')
    with tf.name_scope('reconstruct'):
        self.reconstruction = losses.get_ell(self.x_batch_flat, self.x_recons_flat)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

    with tf.variable_scope('L2_loss', reuse=self.reuse):
        tv = tf.trainable_variables()
        self.L2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in tv])

    with tf.variable_scope('ae_loss', reuse=self.reuse):
        self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction),
                              self.l2 * self.L2_loss, name='ae_loss')

    with tf.variable_scope('kl_loss', reuse=self.reuse):
        self.kl_loss = losses.get_kl(self.encoder_mean, self.encoder_logvar)
        self.kl_loss_m = tf.reduce_mean(self.kl_loss)

    with tf.variable_scope('vae_loss', reuse=self.reuse):
        self.vae_loss = tf.add(self.ae_loss, self.kl_loss_m)

    with tf.variable_scope('bvae_loss', reuse=self.reuse):
        regularizer = tf.multiply(self.beta, self.kl_loss_m)
        self.bvae_loss = tf.add(self.ae_loss, regularizer)

    with tf.variable_scope('btcvae_loss', reuse=self.reuse):
        # Based on Equation 4 with alpha = gamma = 1 of "Isolating Sources of
        # Disentanglement in Variational Autoencoders"
        # (https://arxiv.org/pdf/1802.04942). With alpha = gamma = 1, Eq. 4 can
        # be written as ELBO + (1 - beta) * TC.
        tc = tf.multiply(1 - self.beta,
                         self.total_correlation(self.latent_batch,
                                                self.encoder_mean,
                                                self.encoder_logvar))
        regularizer = tf.add(self.kl_loss_m, tc)
        self.btcvae_loss = tf.add(self.ae_loss, regularizer)

    with tf.variable_scope('optimizer', reuse=self.reuse):
        self.optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_step = self.optimizer.minimize(self.btcvae_loss,
                                                  global_step=self.global_step_tensor)

    self.losses = ['Beta-TCVAE', 'Beta-VAE', 'VAE', 'AE', 'reconstruction', 'L2']
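# `self.total_correlation` is defined elsewhere in the class. For reference, a
# minimal sketch of a minibatch total-correlation estimator in the spirit of
# the minibatch-weighted-sampling estimator from Chen et al. (2018) is shown
# below; the `gaussian_log_density` helper and the omission of the constant
# log(N*M) normalization terms are assumptions of this sketch, not necessarily
# what the class method does.
import numpy as np
import tensorflow as tf

def gaussian_log_density(samples, mean, logvar):
    """Element-wise log density of a diagonal Gaussian N(mean, exp(logvar))."""
    normalization = tf.math.log(2.0 * np.pi)
    inv_var = tf.exp(-logvar)
    return -0.5 * (tf.square(samples - mean) * inv_var + logvar + normalization)

def total_correlation(z, z_mean, z_logvar):
    """Estimate E_q(z)[ log q(z) - sum_j log q(z_j) ] over a minibatch."""
    # log q(z^(i)_j | x^(k)) for every sample pair (i, k): [batch, batch, latent_dim].
    log_qz_prob = gaussian_log_density(tf.expand_dims(z, 1),
                                       tf.expand_dims(z_mean, 0),
                                       tf.expand_dims(z_logvar, 0))
    # log prod_j q(z_j): sum over latents of a logsumexp over the batch.
    log_qz_product = tf.reduce_sum(tf.reduce_logsumexp(log_qz_prob, axis=1), axis=1)
    # log q(z): logsumexp over the batch of the joint (summed over latents) density.
    log_qz = tf.reduce_logsumexp(tf.reduce_sum(log_qz_prob, axis=2), axis=1)
    return tf.reduce_mean(log_qz - log_qz_product)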
def create_loss_optimizer(self): print('[*] Defining Loss Functions and Optimizer...') with tf.name_scope('reconstruct'): self.reconstruction = losses.get_ell(self.x_batch_flat, self.x_recons_flat) self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction) with tf.variable_scope("L2_loss", reuse=self.reuse): tv = tf.trainable_variables() self.L2_loss = tf.reduce_sum([ tf.nn.l2_loss(v) for v in tv ]) with tf.variable_scope('ae_loss', reuse=self.reuse): self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction), self.l2*self.L2_loss, name='ae_loss') with tf.variable_scope("optimizer" ,reuse=self.reuse): self.optimizer = tf.train.AdamOptimizer(self.lr) self.train_step = self.optimizer.minimize(self.ae_loss, global_step=self.global_step_tensor) self.losses = ['AE', 'reconstruction', 'L2']
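# All four variants above rely on `losses.get_ell` for the reconstruction term.
# A minimal sketch is given below, assuming a Bernoulli (cross-entropy)
# reconstruction likelihood over flattened inputs in [0, 1]; the actual
# `losses` module may use a different likelihood (e.g. a squared-error /
# Gaussian term), so the exact form and the `eps` stabilizer are assumptions.
import tensorflow as tf

def get_ell(x_flat, x_recons_flat, eps=1e-8):
    """Per-sample negative Bernoulli log-likelihood (reconstruction loss),
    summed over the flattened pixel dimension."""
    x_recons_flat = tf.clip_by_value(x_recons_flat, eps, 1.0 - eps)
    ell = (x_flat * tf.math.log(x_recons_flat)
           + (1.0 - x_flat) * tf.math.log(1.0 - x_recons_flat))
    return -tf.reduce_sum(ell, axis=-1)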