def create_loss_optimizer(self):
    print('[*] Defining Loss Functions and Optimizer...')

    # Reconstruction loss for samples drawn from the prior.
    with tf.name_scope('prior_recons'):
        self.prior_recons = losses.get_reconst_loss(
            self.sample_flat, self.sample_recons_flat, self.config.prior_reconst_loss)
        self.prior_recons_m = tf.reduce_mean(self.prior_recons)

    # Reconstruction loss for the input batch.
    with tf.name_scope('reconstruct'):
        self.reconstruction = losses.get_reconst_loss(
            self.x_batch_flat, self.x_recons_flat, self.config.reconst_loss)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

    # L2 weight decay, restricted to the posterior ('post_') variables.
    with tf.variable_scope('L2_loss', reuse=self.config.reuse):
        tv = tf.trainable_variables()
        self.L2_loss = tf.reduce_sum(
            [tf.nn.l2_loss(v) for v in tv if 'post_' in v.name])

    with tf.variable_scope('encoder_loss', reuse=self.config.reuse):
        self.ae_loss = tf.add(
            tf.reduce_mean(self.reconstruction),
            self.config.l2 * self.L2_loss, name='encoder_loss')

    # Divergence of the approximate posterior from the standard Gaussian.
    with tf.variable_scope('divergence_cost', reuse=self.config.reuse):
        self.divergence_cost = losses.get_self_divergence(
            self.encoder_mean, self.encoder_logvar, self.config.div_cost)
        self.div_cost_m = tf.reduce_mean(self.divergence_cost)

    with tf.variable_scope('vae_loss', reuse=self.config.reuse):
        self.vae_loss = tf.add(self.ae_loss, self.div_cost_m)

    # Capacity-annealed VAE loss: penalize |KL - c|, with the capacity c
    # ramped up over training.
    with tf.variable_scope('annvae_loss', reuse=self.config.reuse):
        c = self.anneal(self.config.c_max, self.global_step_tensor, self.config.itr_thd)
        self.anneal_reg = self.config.ann_gamma * tf.math.abs(self.div_cost_m - c)
        self.annvae_loss = tf.add(self.ae_loss, self.anneal_reg)

    # Bayesian divergence between the encoder posterior and the learned prior.
    with tf.variable_scope('bayae_loss', reuse=self.config.reuse):
        if self.config.isConv:
            self.bay_div = -1 * losses.get_divergence(
                self.encoder_mean, self.encoder_var,
                tf.reshape(self.prior_mean,
                           [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]),
                tf.reshape(self.prior_var,
                           [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]),
                self.config.prior_div_cost)
        else:
            self.bay_div = -1 * losses.get_divergence(
                self.encoder_mean, self.encoder_var,
                self.prior_mean, self.prior_var, self.config.prior_div_cost)
        num_batches = tf.cast(self.config.num_batches, 'float32')
        self.bayae_loss = tf.add(num_batches * self.ae_loss, self.bay_div,
                                 name='bayae_loss')
        self.bayvae_loss = tf.add(num_batches * self.vae_loss, self.bay_div,
                                  name='bayvae_loss')
        self.annbayvae_loss = tf.add(num_batches * self.annvae_loss, self.bay_div,
                                     name='annbayvae_loss')

    with tf.variable_scope('optimizer', reuse=self.config.reuse):
        self.optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_step = self.optimizer.minimize(
            self.annbayvae_loss, global_step=self.global_step_tensor)

    self.losses = ['ELBO_AnnBayVAE', 'BayVAE', 'BayAE', 'AE',
                   'Recons_{}'.format(self.config.reconst_loss),
                   'Div_{}'.format(self.config.div_cost),
                   'Regul_anneal_reg', 'Regul_L2',
                   'prior_recons_{}'.format(self.config.prior_reconst_loss),
                   'bayesian_div_{}'.format(self.config.prior_div_cost)]
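# --- Hedged sketches (not from this file) ------------------------------------
# `self.anneal` and `losses.get_self_divergence` are called above but not
# defined in this section. The two functions below are minimal sketches of
# plausible implementations: a linear capacity schedule in the spirit of
# capacity-annealed beta-VAE, and the closed-form KL of a diagonal Gaussian
# posterior from N(0, I). The names, the linear ramp, and the reduction axis
# are assumptions, not the repository's actual code.

def anneal_sketch(c_max, global_step, itr_thd):
    # Ramp the capacity c linearly from 0 to c_max over itr_thd steps,
    # then hold it at c_max for the rest of training.
    ratio = tf.cast(global_step, tf.float32) / float(itr_thd)
    return c_max * tf.minimum(ratio, 1.0)

def kl_to_unit_gaussian_sketch(mean, logvar):
    # KL( N(mean, exp(logvar)) || N(0, I) ) per example, summed over the
    # latent dimension (last axis).
    kl = 0.5 * (tf.exp(logvar) + tf.square(mean) - 1.0 - logvar)
    return tf.reduce_sum(kl, axis=-1)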
def create_loss_optimizer(self):
    print('[*] Defining Loss Functions and Optimizer...')

    # Reconstruction loss for the input batch.
    with tf.name_scope('reconstruct'):
        self.reconstruction = losses.get_reconst_loss(
            self.x_batch_flat, self.x_recons_flat, self.config.reconst_loss)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

    # Reconstruction loss for samples drawn from the prior.
    with tf.name_scope('prior_recons'):
        self.prior_recons = losses.get_reconst_loss(
            self.sample_flat, self.sample_recons_flat, self.config.prior_reconst_loss)
        self.prior_recons_m = tf.reduce_mean(self.prior_recons)

    # L2 weight decay, restricted to the posterior ('post_') variables.
    with tf.variable_scope('L2_loss', reuse=self.config.reuse):
        tv = tf.trainable_variables()
        self.L2_loss = tf.reduce_sum(
            [tf.nn.l2_loss(v) for v in tv if 'post_' in v.name])

    # Note: the tensor named 'encoder_loss' excludes the prior reconstruction
    # term; it is added afterwards so self.ae_loss carries all three terms.
    with tf.variable_scope('encoder_loss', reuse=self.config.reuse):
        self.ae_loss = tf.add(
            tf.reduce_mean(self.reconstruction),
            self.config.l2 * self.L2_loss,
            name='encoder_loss') + self.prior_recons_m

    # Bayesian divergence between the posterior and the learned prior.
    with tf.variable_scope('bayae_loss', reuse=self.config.reuse):
        if self.config.isConv:
            self.bay_div = -1 * losses.get_divergence(
                self.post_mean, self.post_var,
                tf.reshape(self.prior_mean,
                           [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]),
                tf.reshape(self.prior_var,
                           [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]),
                self.config.prior_div_cost)
        else:
            self.bay_div = -1 * losses.get_divergence(
                self.post_mean, self.post_var,
                self.prior_mean, self.prior_var, self.config.prior_div_cost)
        self.bayae_loss = tf.add(
            tf.cast(self.config.num_batches, 'float32') * self.ae_loss,
            self.bay_div, name='bayae_loss')

    with tf.variable_scope('optimizer', reuse=self.config.reuse):
        self.optimizer = tf.train.AdamOptimizer(self.lr)
        self.train_step = self.optimizer.minimize(
            self.bayae_loss, global_step=self.global_step_tensor)

    self.losses = ['ELBO_BayAE', 'AE',
                   'Recons_{}'.format(self.config.reconst_loss),
                   'Regul_L2',
                   'prior_recons_{}'.format(self.config.prior_reconst_loss),
                   'bayesian_div_{}'.format(self.config.prior_div_cost)]
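# --- Hedged sketch (not from this file) ---------------------------------------
# `losses.get_divergence` is dispatched on `prior_div_cost` and is not shown in
# this section. For the common KL case it plausibly reduces to the closed-form
# KL divergence between two diagonal Gaussians, sketched below; the function
# name, the epsilon guard, and the reduction axis are assumptions.

def kl_two_gaussians_sketch(mean_q, var_q, mean_p, var_p, eps=1e-8):
    # KL( N(mean_q, var_q) || N(mean_p, var_p) ) per example, summed over the
    # latent dimension (last axis). eps guards against zero variances.
    var_q = var_q + eps
    var_p = var_p + eps
    kl = 0.5 * (tf.log(var_p / var_q)
                + (var_q + tf.square(mean_q - mean_p)) / var_p
                - 1.0)
    return tf.reduce_sum(kl, axis=-1)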