Example No. 1
0
    def get_energy1(self, z):
        """Return the energy of latent codes ``z``: reconstruction error
        plus the negative log-density of a standard-normal prior."""
        reconstruction = self.decoder(z)
        # Reconstruction term for the decoded samples against self.x.
        energy = get_reconstr_err(reconstruction, self.x, self.config)
        # Standard-normal prior: -log N(z; 0, I), summed over latent dims.
        prior_nll = tf.reduce_sum(
            0.5 * tf.square(z) + 0.5 * np.log(2 * np.pi), [1])
        return energy + prior_nll
Example No. 2
0
    def __init__(self,
                 encoder,
                 decoder,
                 x_real,
                 z_sampled,
                 config,
                 beta=1,
                 is_training=True):
        """Build the VAE training graph: reparameterized posterior sample,
        reconstruction error, KL divergence and the beta-weighted loss.

        Args:
            encoder: callable, x -> (z_mean, log_z_std).
            decoder: callable, z -> decoder output parameters.
            x_real: input data tensor.
            z_sampled: noise tensor used for the reparameterization trick
                (presumably standard normal — supplied by the caller).
            config: dict with at least 'output_size', 'c_dim' and 'z_dist'.
            beta: weight on the KL term (beta-VAE style).
            is_training: forwarded to encoder/decoder (e.g. for batch norm).
        """
        self.encoder = encoder
        self.decoder = decoder
        self.config = config
        self.x_real = x_real
        self.z_sampled = z_sampled
        self.beta = beta

        # NOTE: the original also read config['cond_dist'] here but never
        # used it; the unused lookup has been removed.
        output_size = config['output_size']
        c_dim = config['c_dim']
        z_dist = config['z_dist']

        # Normalize the loss per pixel/channel so magnitudes are comparable
        # across image sizes.
        factor = 1. / (output_size * output_size * c_dim)

        # Reparameterization trick: z = mu + eps * sigma.
        self.z_mean, self.log_z_std = encoder(x_real, is_training=is_training)
        self.z_std = tf.exp(self.log_z_std)
        self.z_real = self.z_mean + z_sampled * self.z_std
        self.decoder_out = decoder(self.z_real, is_training=is_training)

        # Primal loss: negative ELBO with beta-weighted KL.
        self.reconst_err = get_reconstr_err(self.decoder_out,
                                            self.x_real,
                                            config=config)
        self.KL = get_KL(self.z_mean, self.log_z_std, z_dist)
        self.ELBO = -self.reconst_err - self.KL
        self.loss = factor * tf.reduce_mean(self.reconst_err + beta * self.KL)

        # Batch-mean scalars for monitoring.
        self.ELBO_mean = tf.reduce_mean(self.ELBO)
        self.KL_mean = tf.reduce_mean(self.KL)
        self.reconst_err_mean = tf.reduce_mean(self.reconst_err)
Example No. 3
0
    def __init__(self,
                 encoder,
                 decoder,
                 encoder_aux,
                 decoder_aux,
                 x_real,
                 z_eps,
                 x_eps,
                 a_eps1,
                 a_eps2,
                 config,
                 beta=1,
                 is_training=True):
        """Build an auxiliary-variable VAE graph.

        An auxiliary latent ``a`` is sampled from encoder_aux(x), the main
        latent ``z`` from encoder(x, a), and decoder_aux provides the
        reverse-model density for ``a``. The KL term is estimated from
        log-density differences for both z and a.

        Args:
            encoder: callable, (x, a) -> (z_mean, log_z_std).
            decoder: callable, z -> decoder output parameters.
            encoder_aux: callable, x -> (a_mean, log_a_std).
            decoder_aux: callable, (x_sample, z) -> (a_mean, log_a_std).
            x_real: input data tensor.
            z_eps, a_eps1, a_eps2: reparameterization noise tensors.
            x_eps: noise tensor; not used by this graph (kept for
                interface compatibility).
            config: dict with at least 'output_size', 'c_dim' and 'z_dist'.
            beta: weight on the KL term.
            is_training: forwarded to the networks (e.g. for batch norm).
        """
        self.encoder = encoder
        self.decoder = decoder
        self.encoder_aux = encoder_aux
        self.decoder_aux = decoder_aux
        self.config = config
        self.x_real = x_real
        self.z_sampled = z_eps
        self.beta = beta

        # NOTE: the original also read config['cond_dist'] here but never
        # used it; the unused lookup has been removed.
        output_size = config['output_size']
        c_dim = config['c_dim']
        z_dist = config['z_dist']

        # Per-pixel/channel normalization of the loss.
        factor = 1. / (output_size * output_size * c_dim)

        # Auxiliary latent a ~ q(a|x), via reparameterization.
        self.a_mean1, self.log_a_std1 = encoder_aux(x_real,
                                                    is_training=is_training)
        self.a_std1 = tf.exp(self.log_a_std1)
        self.a1 = self.a_mean1 + a_eps1 * self.a_std1

        # Main latent z ~ q(z|x, a), via reparameterization.
        self.z_mean, self.log_z_std = encoder(x_real,
                                              self.a1,
                                              is_training=is_training)
        self.z_std = tf.exp(self.log_z_std)
        self.z_real = self.z_mean + z_eps * self.z_std

        self.decoder_out = decoder(self.z_real, is_training=is_training)
        self.x_reconstr_sample = get_decoder_samples(self.decoder_out, config)

        # Reverse model r(a | x_sample, z) used to score a1 under p.
        self.a_mean2, self.log_a_std2 = decoder_aux(self.x_reconstr_sample,
                                                    self.z_real,
                                                    is_training=is_training)
        self.a_std2 = tf.exp(self.log_a_std2)
        self.a2 = self.a_mean2 + a_eps2 * self.a_std2

        # Primal loss
        self.reconst_err = get_reconstr_err(self.decoder_out,
                                            self.x_real,
                                            config=config)

        # Single-sample KL estimate for z: log q(z|x,a) - log p(z).
        self.z_logq = get_pdf_gauss(self.z_mean, self.log_z_std, self.z_real)
        self.z_logp = get_pdf_stdgauss(self.z_real)
        self.KL_z = -self.z_logp + self.z_logq

        # Single-sample KL estimate for a: log q(a|x) - log r(a|x_sample,z).
        self.a_logq = get_pdf_gauss(self.a_mean1, self.log_a_std1, self.a1)
        self.a_logp = get_pdf_gauss(self.a_mean2, self.log_a_std2, self.a1)
        self.KL_a = -self.a_logp + self.a_logq

        self.KL = self.KL_z + self.KL_a

        self.ELBO = -self.reconst_err - self.KL
        self.loss = factor * tf.reduce_mean(self.reconst_err + beta * self.KL)

        # Batch-mean scalars for monitoring.
        self.ELBO_mean = tf.reduce_mean(self.ELBO)
        self.KL_mean = tf.reduce_mean(self.KL)
        self.reconst_err_mean = tf.reduce_mean(self.reconst_err)
Example No. 4
0
    def __init__(self,
                 encoder,
                 decoder,
                 adversary,
                 x_real,
                 z_sampled,
                 config,
                 beta=1,
                 is_training=True):
        """Build an adversarial VAE graph (AVB-style).

        The intractable KL term is estimated with a discriminator
        (``adversary``) trained to distinguish encoder samples from prior
        samples, plus an analytic correction ``logr - logz`` for a Gaussian
        reference distribution.

        Args:
            encoder: callable; returns z (is_ac=False) or (z, z_mean, z_var)
                (is_ac=True, adaptive-contrast variant).
            decoder: callable, z -> decoder output parameters.
            adversary: callable, (z, x) -> logit T(z, x).
            x_real: input data tensor.
            z_sampled: prior samples fed to the adversary as negatives.
            config: dict with at least 'is_ac', 'output_size', 'c_dim',
                'z_dist'.
            beta: weight on the KL term.
            is_training: forwarded to the networks (e.g. for batch norm).
        """
        self.encoder = encoder
        self.decoder = decoder
        self.adversary = adversary
        self.config = config
        self.x_real = x_real
        self.z_sampled = z_sampled
        self.beta = beta

        # NOTE: the original also read config['cond_dist'] here but never
        # used it; the unused lookup has been removed.
        is_ac = config['is_ac']
        output_size = config['output_size']
        c_dim = config['c_dim']
        z_dist = config['z_dist']

        # Per-pixel/channel normalization of the loss.
        factor = 1. / (output_size * output_size * c_dim)

        # Set up adversary and contrasting distribution.
        if not is_ac:
            # Plain variant: standard-normal reference (mean 0, var 1).
            self.z_real = encoder(x_real, is_training=is_training)
            self.z_mean, self.z_var = tf.zeros_like(self.z_real), tf.ones_like(
                self.z_real)
            self.z_std = tf.ones_like(self.z_real)
        else:
            # Adaptive contrast: reference moments come from the encoder,
            # detached so gradients don't flow through the statistics.
            self.z_real, z_mean, z_var = encoder(x_real,
                                                 is_training=is_training)
            self.z_mean, self.z_var = tf.stop_gradient(
                z_mean), tf.stop_gradient(z_var)
            # Epsilon guards against sqrt(0) / division by zero.
            self.z_std = tf.sqrt(self.z_var + 1e-4)

        # Normalized codes; adversary compares them against prior samples.
        self.z_norm = (self.z_real - self.z_mean) / self.z_std
        Td = adversary(self.z_norm, x_real, is_training=is_training)
        Ti = adversary(self.z_sampled, x_real, is_training=is_training)
        logz = get_zlogprob(self.z_real, z_dist)
        # log-density of z_real under the Gaussian reference N(mean, var).
        logr = -0.5 * tf.reduce_sum(
            self.z_norm * self.z_norm + tf.log(self.z_var) + np.log(2 * np.pi),
            [1])

        self.decoder_out = decoder(self.z_real, is_training=is_training)

        # Primal loss: adversarial KL estimate Td + logr - logz.
        self.reconst_err = get_reconstr_err(self.decoder_out,
                                            self.x_real,
                                            config=config)
        self.KL = Td + logr - logz
        self.ELBO = -self.reconst_err - self.KL
        self.loss_primal = factor * tf.reduce_mean(self.reconst_err +
                                                   self.beta * self.KL)

        # Batch-mean scalars for monitoring.
        self.ELBO_mean = tf.reduce_mean(self.ELBO)
        self.KL_mean = tf.reduce_mean(self.KL)
        self.reconst_err_mean = tf.reduce_mean(self.reconst_err)

        # Dual loss: binary cross-entropy training the adversary to output
        # 1 on encoder samples (Td) and 0 on prior samples (Ti).
        d_loss_d = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=Td,
                                                    labels=tf.ones_like(Td)))
        d_loss_i = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=Ti,
                                                    labels=tf.zeros_like(Ti)))

        self.loss_dual = d_loss_i + d_loss_d