Example #1
    def create_loss_optimizer(self):
        print('[*] Defining Loss Functions and Optimizer...')
        with tf.name_scope('prior_recons'):
            self.prior_recons = losses.get_reconst_loss(self.sample_flat, self.sample_recons_flat, self.config.prior_reconst_loss)
        self.prior_recons_m = tf.reduce_mean(self.prior_recons)

        with tf.name_scope('reconstruct'):
            self.reconstruction = losses.get_reconst_loss(self.x_batch_flat, self.x_recons_flat, self.config.reconst_loss)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

        with tf.variable_scope('L2_loss', reuse=self.config.reuse):
            tv = tf.trainable_variables()
            self.L2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in tv if 'post_' in v.name])
        
        with tf.variable_scope('encoder_loss', reuse=self.config.reuse):
            self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction), self.config.l2 * self.L2_loss, name='encoder_loss')

        with tf.variable_scope('divergence_cost', reuse=self.config.reuse):
            self.divergence_cost = losses.get_self_divergence(self.encoder_mean, self.encoder_logvar, self.config.div_cost)
        self.div_cost_m = tf.reduce_mean(self.divergence_cost)

        with tf.variable_scope('vae_loss', reuse=self.config.reuse):
            self.vae_loss = tf.add(self.ae_loss, self.div_cost_m)

        with tf.variable_scope('annvae_loss', reuse=self.config.reuse):
            c = self.anneal(self.config.c_max, self.global_step_tensor, self.config.itr_thd)
            self.anneal_reg = self.config.ann_gamma * tf.math.abs(self.div_cost_m - c)
            self.annvae_loss = tf.add(self.ae_loss, self.anneal_reg)

        with tf.variable_scope('bayae_loss', reuse=self.config.reuse):
            if self.config.isConv:
                self.bay_div = -1 * losses.get_divergence(
                    self.encoder_mean, self.encoder_var,
                    tf.reshape(self.prior_mean, [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]),
                    tf.reshape(self.prior_var, [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]),
                    self.config.prior_div_cost)
            else:
                self.bay_div = -1 * losses.get_divergence(
                    self.encoder_mean, self.encoder_var,
                    self.prior_mean, self.prior_var,
                    self.config.prior_div_cost)
            self.bayae_loss = tf.add(tf.cast(self.config.num_batches, 'float32') * self.ae_loss, self.bay_div, name='bayae_loss')
            self.bayvae_loss = tf.add(tf.cast(self.config.num_batches, 'float32') * self.vae_loss, self.bay_div, name='bayvae_loss')
            self.annbayvae_loss = tf.add(tf.cast(self.config.num_batches, 'float32') * self.annvae_loss, self.bay_div,
                                         name='annbayvae_loss')

        with tf.variable_scope('optimizer', reuse=self.config.reuse):
            self.optimizer = tf.train.AdamOptimizer(self.lr)
            self.train_step = self.optimizer.minimize(self.annbayvae_loss, global_step=self.global_step_tensor)

        self.losses = ['ELBO_AnnBayVAE', 'BayVAE', 'BayAE', 'AE', 'Recons_{}'.format(self.config.reconst_loss),
                       'Div_{}'.format(self.config.div_cost),
                       'Regul_anneal_reg', 'Regul_L2', 'prior_recons_{}'.format(self.config.prior_reconst_loss),
                       'bayesian_div_{}'.format(self.config.prior_div_cost)]
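The anneal helper called in annvae_loss is not shown in these snippets. A minimal sketch of what it plausibly computes, assuming the linear capacity schedule from Burgess et al., "Understanding disentangling in beta-VAE" (the argument order comes from the call site; the linear ramp and the free-function form are assumptions):

import tensorflow as tf

def anneal(c_max, step, iteration_threshold):
    # Assumed schedule: the capacity c rises linearly from 0 to c_max over
    # iteration_threshold steps, then stays constant.
    return c_max * tf.clip_by_value(
        tf.cast(step, tf.float32) / iteration_threshold, 0., 1.)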
Example #2
    def create_loss_optimizer(self):
        print('[*] Defining Loss Functions and Optimizer...')
        with tf.name_scope('reconstruct'):
            self.reconstruction = losses.get_reconst_loss(
                self.x_batch_flat, self.x_recons_flat,
                self.config.reconst_loss)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

        with tf.name_scope('prior_recons'):
            self.prior_recons = losses.get_reconst_loss(
                self.sample_flat, self.sample_recons_flat,
                self.config.prior_reconst_loss)
        self.prior_recons_m = tf.reduce_mean(self.prior_recons)

        with tf.variable_scope("L2_loss", reuse=self.config.reuse):
            tv = tf.trainable_variables()
            self.L2_loss = tf.reduce_sum(
                [tf.nn.l2_loss(v) for v in tv if 'post_' in v.name])

        with tf.variable_scope('encoder_loss', reuse=self.config.reuse):
            self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction),
                                  self.config.l2 * self.L2_loss,
                                  name='encoder_loss') + self.prior_recons_m

        with tf.variable_scope('bayae_loss', reuse=self.config.reuse):
            if self.config.isConv:
                self.bay_div = -1 * losses.get_divergence(
                    self.post_mean, self.post_var,
                    tf.reshape(self.prior_mean, [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]),
                    tf.reshape(self.prior_var, [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]),
                    self.config.prior_div_cost)
            else:
                self.bay_div = -1 * losses.get_divergence(
                    self.post_mean, self.post_var,
                    self.prior_mean, self.prior_var,
                    self.config.prior_div_cost)

            self.bayae_loss = tf.add(
                tf.cast(self.config.num_batches, 'float32') * self.ae_loss,
                self.bay_div,
                name='bayae_loss')

        with tf.variable_scope("optimizer", reuse=self.config.reuse):
            self.optimizer = tf.train.AdamOptimizer(self.lr)
            self.train_step = self.optimizer.minimize(
                self.bayae_loss, global_step=self.global_step_tensor)

        self.losses = [
            'ELBO_BayAE', 'AE', 'Recons_{}'.format(self.config.reconst_loss),
            'Regul_L2',
            'prior_recons_{}'.format(self.config.prior_reconst_loss),
            'bayesian_div_{}'.format(self.config.prior_div_cost)
        ]
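The tf.cast(self.config.num_batches, 'float32') factor on the reconstruction term reflects the usual minibatch weighting for a weight-space divergence (as in Bayes by Backprop): the divergence is paid once per dataset while the likelihood is computed per batch. A small hypothetical check of that equivalence, with made-up numbers:

M = 10           # num_batches
kl = 4.0         # divergence over the weight posterior, paid once per epoch
nll = [0.5] * M  # per-batch reconstruction terms

spread_kl = sum(n + kl / M for n in nll)        # KL split across batches
scaled_nll = sum(M * n + kl for n in nll) / M   # likelihood scaled, as above
assert abs(spread_kl - scaled_nll) < 1e-9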
Example #3
    def create_loss_optimizer(self):
        print('[*] Defining Loss Functions and Optimizer...')
        with tf.name_scope('reconstruct'):
            self.reconstruction = losses.get_reconst_loss(
                self.x_batch_flat, self.x_recons_flat,
                self.config.reconst_loss)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

        with tf.variable_scope('L2_loss', reuse=self.config.reuse):
            tv = tf.trainable_variables()
            self.L2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in tv])

        with tf.variable_scope('encoder_loss', reuse=self.config.reuse):
            self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction),
                                  self.config.l2 * self.L2_loss,
                                  name='encoder_loss')

        with tf.variable_scope('dipae_loss', reuse=self.config.reuse):
            self.covar_reg = self.regularizer(self.encoder_mean,
                                              self.encoder_var)
            self.dipae_loss = tf.add(self.ae_loss, self.covar_reg)

        with tf.variable_scope("optimizer", reuse=self.config.reuse):
            self.optimizer = RAdamOptimizer(self.lr)
            self.train_step = self.optimizer.minimize(
                self.dipae_loss, global_step=self.global_step_tensor)

        self.losses = [
            'ELBO_DIPcovAE', 'Regul_Covariance_Prior', 'AE',
            'Recons_{}'.format(self.config.reconst_loss), 'Regul_L2'
        ]
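The self.regularizer method behind dipae_loss is defined elsewhere; given the 'ELBO_DIPcovAE' and 'Regul_Covariance_Prior' labels it presumably implements the DIP-VAE covariance penalty of Kumar et al., "Variational Inference of Disentangled Latent Concepts" (2018). A sketch under that assumption (the free-function form, the DIP-VAE-II flavour, and the lambda values are illustrative, not taken from this repository):

import tensorflow as tf

def regularizer(z_mean, z_var, lambda_od=10., lambda_d=100.):
    # Push Cov_q(z) toward the identity: penalize off-diagonal entries and
    # the deviation of the diagonal entries from 1.
    mu = z_mean - tf.reduce_mean(z_mean, axis=0, keepdims=True)
    cov_mu = tf.matmul(mu, mu, transpose_a=True) / tf.cast(
        tf.shape(z_mean)[0], tf.float32)
    # DIP-VAE-II: add the expected conditional covariance of the encoder.
    cov = cov_mu + tf.matrix_diag(tf.reduce_mean(z_var, axis=0))
    diag = tf.matrix_diag_part(cov)
    off_diag = cov - tf.matrix_diag(diag)
    return (lambda_od * tf.reduce_sum(tf.square(off_diag))
            + lambda_d * tf.reduce_sum(tf.square(diag - 1.)))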
Example #4
    def create_loss_optimizer(self):
        print('[*] Defining Loss Functions and Optimizer...')
        with tf.name_scope('reconstruct'):
            self.reconstruction = losses.get_reconst_loss(
                self.x_batch_flat, self.x_recons_flat,
                self.config.reconst_loss)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

        with tf.variable_scope('L2_loss', reuse=self.config.reuse):
            tv = tf.trainable_variables()
            self.L2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in tv])

        with tf.variable_scope('encoder_loss', reuse=self.config.reuse):
            self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction),
                                  self.config.l2 * self.L2_loss,
                                  name='encoder_loss')

        with tf.variable_scope('divergence_cost', reuse=self.config.reuse):
            self.divergence_cost = losses.get_self_divergence(
                self.encoder_mean, self.encoder_logvar, self.config.div_cost)
        self.div_cost_m = tf.reduce_mean(self.divergence_cost)

        with tf.variable_scope('vae_loss', reuse=self.config.reuse):
            self.vae_loss = tf.add(self.ae_loss, self.div_cost_m)

        with tf.variable_scope('optimizer', reuse=self.config.reuse):
            self.optimizer = tf.train.AdamOptimizer(self.lr)
            self.train_step = self.optimizer.minimize(
                self.vae_loss, global_step=self.global_step_tensor)

        self.losses = [
            'ELBO_VAE', 'AE', 'Recons_{}'.format(self.config.reconst_loss),
            'Div_{}'.format(self.config.div_cost), 'Regul_L2'
        ]
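losses.get_self_divergence is also defined outside these snippets; for the default KL case it presumably reduces to the closed-form divergence between the diagonal Gaussian posterior and a standard normal prior. A sketch of that standard term (the name matches the call sites, but the 'kl' switch is an assumption):

import tensorflow as tf

def get_self_divergence(mean, logvar, div_cost='kl'):
    # Per-sample KL(q(z|x) || N(0, I)) for a diagonal Gaussian posterior.
    if div_cost != 'kl':
        raise NotImplementedError(div_cost)
    return -0.5 * tf.reduce_sum(
        1. + logvar - tf.square(mean) - tf.exp(logvar), axis=-1)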
Example #5
    def create_loss_optimizer(self):
        print('[*] Defining Loss Functions and Optimizer...')
        with tf.name_scope('reconstruct'):
            self.reconstruction = losses.get_reconst_loss(
                self.x_batch_flat, self.x_recons_flat,
                self.config.reconst_loss)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

        with tf.variable_scope('L2_loss', reuse=self.config.reuse):
            tv = tf.trainable_variables()
            self.L2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in tv])

        with tf.variable_scope('encoder_loss', reuse=self.config.reuse):
            self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction),
                                  self.config.l2 * self.L2_loss,
                                  name='encoder_loss')

        with tf.variable_scope('divergence_cost', reuse=self.config.reuse):
            self.divergence_cost = losses.get_self_divergence(
                self.encoder_mean, self.encoder_logvar, self.config.div_cost)
        self.div_cost_m = tf.reduce_mean(self.divergence_cost)

        with tf.variable_scope('vae_loss', reuse=self.config.reuse):
            self.vae_loss = tf.add(self.ae_loss, self.div_cost_m)

        with tf.variable_scope('bvae_loss', reuse=self.config.reuse):
            self.beta_reg = tf.multiply(self.config.beta, self.div_cost_m)
            self.bvae_loss = tf.add(self.ae_loss, self.beta_reg)

        with tf.variable_scope('btcvae_loss', reuse=self.config.reuse):
            """
            Based on Equation 4 with alpha = gamma = 1 of "Isolating Sources of Disentanglement in Variational
            Autoencoders"
            (https: // arxiv.org / pdf / 1802.04942).
            If alpha = gamma = 1, Eq 4 can be
            written as ELBO + (1 - beta) * TC.
            """
            tc = tf.multiply(1 - self.config.beta,
                             self.total_correlation(self.latent_batch, self.encoder_mean, self.encoder_logvar))
            self.tc_beta_reg = tf.add(self.div_cost_m, tc)
            self.btcvae_loss = tf.add(self.ae_loss, self.tc_beta_reg)

        with tf.variable_scope("optimizer", reuse=self.config.reuse):
            self.optimizer = RAdamOptimizer(self.lr)
            self.train_step = self.optimizer.minimize(
                self.btcvae_loss, global_step=self.global_step_tensor)

        self.losses = [
            'ELBO_Beta-TC-VAE', 'Beta-VAE', 'VAE', 'AE',
            'Recons_{}'.format(self.config.reconst_loss), 'Regul_tc_beta_reg',
            'Regul_beta_reg', 'Div_{}'.format(self.config.div_cost), 'Regul_L2'
        ]
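self.total_correlation is not shown either; the docstring points at the beta-TC-VAE paper, whose minibatch-weighted estimator (as implemented, e.g., in Google's disentanglement_lib) is sketched below. The free-function form is an assumption; the call site passes the sampled latents plus the encoder mean and log-variance:

import math
import tensorflow as tf

def gaussian_log_density(samples, mean, log_var):
    # Element-wise log density of a diagonal Gaussian.
    normalization = tf.log(2. * math.pi)
    inv_sigma = tf.exp(-log_var)
    tmp = samples - mean
    return -0.5 * (tmp * tmp * inv_sigma + log_var + normalization)

def total_correlation(z, z_mean, z_logvar):
    # Minibatch-weighted estimate of E_q[log q(z) - sum_k log q(z_k)].
    # log_qz_prob[i, j, k] = log q(z_i[k] | x_j), shape [batch, batch, latent].
    log_qz_prob = gaussian_log_density(
        tf.expand_dims(z, 1), tf.expand_dims(z_mean, 0),
        tf.expand_dims(z_logvar, 0))
    # log prod_k q(z[k]): logsumexp over the batch per dimension, then sum.
    log_qz_product = tf.reduce_sum(
        tf.reduce_logsumexp(log_qz_prob, axis=1), axis=1)
    # log q(z): sum over dimensions first, then logsumexp over the batch.
    log_qz = tf.reduce_logsumexp(tf.reduce_sum(log_qz_prob, axis=2), axis=1)
    return tf.reduce_mean(log_qz - log_qz_product)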
Example #6
    def create_loss_optimizer(self):
        print('[*] Defining Loss Functions and Optimizer...')
        with tf.name_scope('reconstruct'):
            self.reconstruction = losses.get_reconst_loss(self.x_batch_flat, self.x_recons_flat, self.config.reconst_loss)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

        with tf.variable_scope('L2_loss', reuse=self.config.reuse):
            tv = tf.trainable_variables()
            self.L2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in tv])
        
        with tf.variable_scope('encoder_loss', reuse=self.config.reuse):
            self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction), self.config.l2 * self.L2_loss, name='encoder_loss')

        with tf.variable_scope('divergence_cost', reuse=self.config.reuse):
            self.divergence_cost = losses.get_self_divergence(self.encoder_mean, self.encoder_logvar, self.config.div_cost)
        self.div_cost_m = tf.reduce_mean(self.divergence_cost)

        with tf.variable_scope('vae_loss', reuse=self.config.reuse):
            self.vae_loss = tf.add(self.ae_loss, self.div_cost_m)

        with tf.variable_scope('bvae_loss', reuse=self.config.reuse):
            self.beta_reg = tf.multiply(self.config.beta, self.div_cost_m)
            self.bvae_loss = tf.add(self.ae_loss, self.beta_reg)

        with tf.variable_scope('annvae_loss', reuse=self.config.reuse):
            c = self.anneal(self.config.c_max, self.global_step_tensor, self.config.itr_thd)
            self.anneal_reg = self.config.ann_gamma * tf.math.abs(self.div_cost_m - c)
            self.annvae_loss = tf.add(self.ae_loss, self.anneal_reg)
            self.annbvae_loss = tf.add(self.bvae_loss, self.anneal_reg)

        with tf.variable_scope("optimizer" ,reuse=self.config.reuse):
            self.optimizer = tf.train.AdamOptimizer(self.lr)
            self.train_step = self.optimizer.minimize(self.annbvae_loss, global_step=self.global_step_tensor)

        self.losses = ['ELBO_AnnBeta-VAE', 'Beta-VAE', 'annVAE', 'VAE', 'AE',
                       'Recons_{}'.format(self.config.reconst_loss),
                       'Div_{}'.format(self.config.div_cost),
                       'Regul_beta_reg', 'Regul_anneal_reg',
                       'Regul_L2']
Example #7
    def create_loss_optimizer(self):
        print('[*] Defining Loss Functions and Optimizer...')
        with tf.name_scope('prior_recons'):
            self.prior_recons = losses.get_reconst_loss(
                self.sample_flat, self.sample_recons_flat,
                self.config.prior_reconst_loss)
        self.prior_recons_m = tf.reduce_mean(self.prior_recons)

        with tf.name_scope('reconstruct'):
            self.reconstruction = losses.get_reconst_loss(
                self.x_batch_flat, self.x_recons_flat,
                self.config.reconst_loss)
        self.loss_reconstruction_m = tf.reduce_mean(self.reconstruction)

        with tf.variable_scope('L2_loss', reuse=self.config.reuse):
            tv = tf.trainable_variables()
            self.L2_loss = tf.reduce_sum([tf.nn.l2_loss(v) for v in tv])

        with tf.variable_scope('encoder_loss', reuse=self.config.reuse):
            self.ae_loss = tf.add(tf.reduce_mean(self.reconstruction),
                                  self.config.l2 * self.L2_loss,
                                  name='encoder_loss')

        with tf.variable_scope('divergence_cost', reuse=self.config.reuse):
            self.divergence_cost = losses.get_self_divergence(
                self.encoder_mean, self.encoder_logvar, self.config.div_cost)
        self.div_cost_m = tf.reduce_mean(self.divergence_cost)

        with tf.variable_scope('vae_loss', reuse=self.config.reuse):
            self.vae_loss = tf.add(self.ae_loss, self.div_cost_m)

        with tf.variable_scope('bvae_loss', reuse=self.config.reuse):
            self.beta_reg = tf.multiply(self.config.beta, self.div_cost_m)
            self.bvae_loss = tf.add(self.ae_loss, self.beta_reg)

        with tf.variable_scope('bayae_loss', reuse=self.config.reuse):
            if self.config.isConv:
                self.bay_div = -1 * losses.get_QP_kl(
                    self.encoder_mean, self.encoder_var,
                    tf.reshape(self.prior_mean, [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]),
                    tf.reshape(self.prior_var, [self.config.MC_samples, self.config.batch_size, self.config.latent_dim]))
            else:
                self.bay_div = -1 * losses.get_QP_kl(
                    self.encoder_mean, self.encoder_var,
                    self.prior_mean, self.prior_var)
            self.bayae_loss = tf.add(
                tf.cast(self.config.ntrain_batches, 'float32') * self.ae_loss,
                self.bay_div,
                name='bayae_loss')
            self.bayvae_loss = tf.add(
                tf.cast(self.config.ntrain_batches, 'float32') * self.vae_loss,
                self.bay_div,
                name='bayvae_loss')
            self.baybvae_loss = tf.add(
                tf.cast(self.config.ntrain_batches, 'float32') *
                self.bvae_loss,
                self.bay_div,
                name='baybvae_loss')

        with tf.variable_scope("optimizer", reuse=self.config.reuse):
            self.optimizer = RAdamOptimizer(self.lr)
            self.train_step = self.optimizer.minimize(
                self.baybvae_loss, global_step=self.global_step_tensor)

        self.losses = ['ELBO_BayBeta-VAE',
                       'BayVAE', 'BayAE', 'prior_recons_{}'.format(self.config.prior_reconst_loss),
                       'Beta-VAE', 'VAE', 'AE', 'Recons_{}'.format(self.config.reconst_loss),
                       'bayesian_div_{}'.format(self.config.prior_div_cost),
                       'Regul_beta_reg', 'Div_{}'.format(self.config.div_cost),
                       'Regul_L2']
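Finally, losses.get_QP_kl is presumably the closed-form KL between two diagonal Gaussians, with the prior broadcast over the MC_samples axis in the convolutional branch. A sketch of that standard formula (signature from the call site; the reduction axis is an assumption):

import tensorflow as tf

def get_QP_kl(q_mean, q_var, p_mean, p_var):
    # KL(q || p) for diagonal Gaussians, summed over the latent dimension.
    # Variances (not log-variances) are expected, matching the call sites.
    return 0.5 * tf.reduce_sum(
        tf.log(p_var) - tf.log(q_var)
        + (q_var + tf.square(q_mean - p_mean)) / p_var - 1.,
        axis=-1)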