Code example #1
    def train_critic(self, batch):
        # Unpack the batch; per-sample weights are optional.
        if self.params['data']['drop_weights']:
            x, dlls = batch
            # No weights in the batch: fall back to uniform weights so the
            # weighted loss below stays defined (assumes the critic returns
            # one score per sample).
            weight = torch.ones(x.shape[0], 1)
        else:
            x, dlls, weight = batch
        x = x.to(self.params["device"]).type(torch.float)
        dlls = dlls.to(self.params["device"]).type(torch.float)
        weight = weight.to(self.params["device"]).type(torch.float)

        # Condition the generator on the input features plus fresh noise.
        noized_x = torch.cat(
            [
                x,
                get_noise(x.shape[0], self.params['noise_dim']).to(
                    self.params["device"])
            ],
            dim=1,
        )

        # The critic scores (DLLs, features) pairs: real targets vs. generated.
        real_full = torch.cat([dlls, x], dim=1)
        generated = torch.cat([self.generator_model(noized_x), x], dim=1)

        crit_fake_pred = self.critic_model(generated.detach())
        crit_real_pred = self.critic_model(real_full)

        # WGAN-GP: penalize the critic's gradient norm on random
        # interpolates between real and generated samples.
        epsilon = torch.rand(real_full.size(0),
                             1,
                             device=self.params["device"],
                             requires_grad=True)
        gradient = get_gradient(self.critic_model,
                                real_full,
                                generated.detach(),
                                epsilon=epsilon)
        gp = gradient_penalty(gradient)
        # Weighted Wasserstein estimate plus the gradient penalty term.
        critic_loss = torch.mean((crit_fake_pred - crit_real_pred) *
                                 weight) + self.params['c_lambda'] * gp

        critic_result = {'C/loss': critic_loss, 'gradient_penalty': gp}

        return critic_result
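
Example #1 calls three helpers that are not shown: get_noise, get_gradient, and gradient_penalty. A minimal sketch of what they might look like, assuming the standard WGAN-GP formulation; the names match the call sites above, but the bodies are assumptions, not the original implementation:

import torch

def get_noise(n_samples, noise_dim):
    # Latent vectors drawn from a standard normal distribution.
    return torch.randn(n_samples, noise_dim)

def get_gradient(critic, real, fake, epsilon):
    # Score random interpolates of real and fake samples, then take
    # the gradient of the scores with respect to the interpolates.
    mixed = real * epsilon + fake * (1 - epsilon)
    mixed_scores = critic(mixed)
    gradient = torch.autograd.grad(
        outputs=mixed_scores,
        inputs=mixed,
        grad_outputs=torch.ones_like(mixed_scores),
        create_graph=True,
        retain_graph=True,
    )[0]
    return gradient

def gradient_penalty(gradient):
    # Penalize the deviation of each sample's gradient norm from 1.
    gradient = gradient.view(gradient.shape[0], -1)
    gradient_norm = gradient.norm(2, dim=1)
    return torch.mean((gradient_norm - 1) ** 2)

With epsilon shaped (batch, 1) and created with requires_grad=True, as in train_critic above, the interpolates stay in the autograd graph, so the penalty gradients flow back into the critic parameters.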
Code example #2
# Test-time placeholders; the training tensors used below (xa, a, b, _a, _b,
# lr) are assumed to be defined elsewhere in the graph.
xa_sample = tf.placeholder(tf.float32, shape=[None, img_size, img_size, 3])
_b_sample = tf.placeholder(tf.float32, shape=[None, n_att])

# generate: encode the input, decode with the target attributes (_b) for
# translation and with the source attributes (_a) for reconstruction
z = Genc(xa)
xb_ = Gdec(z, _b)
with tf.control_dependencies([xb_]):
    xa_ = Gdec(z, _a)

# discriminate
xa_logit_wgan, xa_logit_att = D(xa)
xb__logit_wgan, xb__logit_att = D(xb_)

# x discriminator losses: Wasserstein distance estimate (negated in the
# loss), gradient penalty, and attribute classification on real images
x_gp = models.gradient_penalty(xa, xb_, D)
x_wd = tf.reduce_mean(xa_logit_wgan) - tf.reduce_mean(xb__logit_wgan)
xa_loss_att = tf.losses.sigmoid_cross_entropy(a, xa_logit_att)
d_loss = -x_wd + x_gp * 10.0 + xa_loss_att

# x generator losses
xa__loss_rec = tf.losses.absolute_difference(xa, xa_)
xb__loss_att = tf.losses.sigmoid_cross_entropy(b, xb__logit_att)
xb__loss_wgan = -tf.reduce_mean(xb__logit_wgan)
g_loss = xb__loss_wgan + xb__loss_att * 10.0 + xa__loss_rec * 100.0

# optim
d_var = tl.trainable_variables('D')
d_step = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(d_loss, var_list=d_var)

g_var = tl.trainable_variables('G')
# the snippet cut off here; the generator step presumably mirrors d_step
g_step = tf.train.AdamOptimizer(lr, beta1=0.5).minimize(g_loss, var_list=g_var)
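
With both optimizer steps defined, the usual WGAN-GP schedule runs several discriminator updates per generator update. A minimal sketch of that loop, assuming a TF1 session, that xa, a, and b are placeholders with _a/_b derived from them in-graph, and that lr is a placeholder too; next_batch, sample_target_atts, and max_steps are hypothetical stand-ins, not part of the snippet above:

n_critic = 5  # discriminator updates per generator update (WGAN-GP convention)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(max_steps):
        for _ in range(n_critic):
            xa_ipt, a_ipt = next_batch()       # hypothetical data helper
            b_ipt = sample_target_atts(a_ipt)  # hypothetical attribute shuffle
            feed = {xa: xa_ipt, a: a_ipt, b: b_ipt, lr: 2e-4}
            sess.run(d_step, feed_dict=feed)
        sess.run(g_step, feed_dict=feed)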
Code example #3
# generate: translate with target attributes (_b), reconstruct with source (_a)
z = Genc(xa)
xb_ = Gdec(z, _b)
with tf.control_dependencies([xb_]):
    xa_ = Gdec(z, _a)

# discriminate
xa_logit_gan, xa_logit_att = D(xa)
xb__logit_gan, xb__logit_att = D(xb_)

# discriminator losses: pick the adversarial loss and the matching
# gradient-penalty variant for the chosen GAN mode
if mode == 'wgan':  # wgan-gp
    wd = tf.reduce_mean(xa_logit_gan) - tf.reduce_mean(xb__logit_gan)
    d_loss_gan = -wd
    gp = models.gradient_penalty(D, xa, xb_)
elif mode == 'lsgan':  # lsgan-gp
    xa_gan_loss = tf.losses.mean_squared_error(tf.ones_like(xa_logit_gan), xa_logit_gan)
    xb__gan_loss = tf.losses.mean_squared_error(tf.zeros_like(xb__logit_gan), xb__logit_gan)
    d_loss_gan = xa_gan_loss + xb__gan_loss
    gp = models.gradient_penalty(D, xa)
elif mode == 'dcgan':  # dcgan-gp
    xa_gan_loss = tf.losses.sigmoid_cross_entropy(tf.ones_like(xa_logit_gan), xa_logit_gan)
    xb__gan_loss = tf.losses.sigmoid_cross_entropy(tf.zeros_like(xb__logit_gan), xb__logit_gan)
    d_loss_gan = xa_gan_loss + xb__gan_loss
    gp = models.gradient_penalty(D, xa)
else:
    raise ValueError('unknown GAN mode: %s' % mode)

xa_loss_att = tf.losses.sigmoid_cross_entropy(a, xa_logit_att)

d_loss = d_loss_gan + gp * 10.0 + xa_loss_att
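
Example #3 calls models.gradient_penalty both with a fake batch (wgan) and with the real batch alone (lsgan, dcgan). One way to reconcile the two call forms, assuming the one-argument case perturbs the real batch DRAGAN-style; this is a sketch under those assumptions, not the repository's actual helper:

def gradient_penalty(f, real, fake=None):
    # Without a fake batch, perturb the real one (DRAGAN-style) so that
    # lsgan/dcgan modes still get a penalty around the data manifold.
    if fake is None:
        beta = tf.random_uniform(tf.shape(real), 0., 1.)
        _, var = tf.nn.moments(real, axes=list(range(real.shape.ndims)))
        fake = real + 0.5 * tf.sqrt(var) * beta
    # Score random interpolates between real and fake (WGAN-GP).
    shape = [tf.shape(real)[0]] + [1] * (real.shape.ndims - 1)
    alpha = tf.random_uniform(shape, 0., 1.)
    inter = real + alpha * (fake - real)
    pred = f(inter)
    if isinstance(pred, tuple):  # D returns (gan_logit, att_logit) above
        pred = pred[0]
    grad = tf.gradients(pred, inter)[0]
    norm = tf.norm(tf.reshape(grad, [tf.shape(grad)[0], -1]), axis=1)
    return tf.reduce_mean((norm - 1.) ** 2)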