Example #1
def make_lie_group_norm_loss(group_feats_G,
                             lie_alg_feats,
                             lie_alg_basis_norm,
                             minibatch_size,
                             hy_hes,
                             hy_commute=0):
    '''
    lie_alg_basis_norm: [1, lat_dim, mat_dim, mat_dim]
    '''
    _, lat_dim, mat_dim, _ = lie_alg_basis_norm.get_shape().as_list()
    lie_alg_basis_norm_col = tf.reshape(lie_alg_basis_norm,
                                        [lat_dim, 1, mat_dim, mat_dim])
    lie_alg_basis_outer_mul = tf.matmul(
        lie_alg_basis_norm,
        lie_alg_basis_norm_col)  # [lat_dim, lat_dim, mat_dim, mat_dim]
    hessian_mask = 1. - tf.eye(
        lat_dim,
        dtype=lie_alg_basis_outer_mul.dtype)[:, :, tf.newaxis, tf.newaxis]
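    # Zero out the diagonal (i == j) pairs so only cross-term products X_i X_j enter the penalties below.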
    lie_alg_basis_mul_ij = lie_alg_basis_outer_mul * hessian_mask  # XY
    lie_alg_commutator = lie_alg_basis_mul_ij - tf.transpose(
        lie_alg_basis_mul_ij, [0, 1, 3, 2])
    loss = 0.
    hessian_loss = tf.reduce_mean(
        tf.reduce_sum(tf.square(lie_alg_basis_mul_ij), axis=[2, 3]))
    hessian_loss = autosummary('Loss/hessian', hessian_loss)
    hessian_loss *= hy_hes
    loss += hessian_loss
    if hy_commute > 0:
        print('using commute loss')
        commute_loss = tf.reduce_mean(
            tf.reduce_sum(tf.square(lie_alg_commutator), axis=[2, 3]))
        commute_loss = autosummary('Loss/commute', commute_loss)
        commute_loss *= hy_commute
        loss += commute_loss
    return loss
Example #2
def Reconstruction_loss(fake_image, landmark_label, coeff_label, FaceRender):
    landmark_label = landmark_label * 224. / 256.
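    # Rescale the landmark labels from 256x256 coordinates to the 224x224 resolution expected by R_Net.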

    fake_image = (fake_image + 1) * 127.5
    fake_image = tf.clip_by_value(fake_image, 0, 255)
    fake_image = tf.transpose(fake_image, perm=[0, 2, 3, 1])
    fake_image = tf.reverse(fake_image, [3])  # RGB to BGR
    fake_image = tf.image.resize_images(fake_image,
                                        size=[224, 224],
                                        method=tf.image.ResizeMethod.BILINEAR)

    # input to R_Net should have a shape of [batchsize,224,224,3], color range from 0-255 in BGR order.
    with tf.variable_scope(tf.get_variable_scope(), reuse=tf.AUTO_REUSE):
        coeff = R_Net(fake_image, is_training=False, reuse=tf.AUTO_REUSE)
    landmark_p = FaceRender.Get_landmark(coeff)  #224*224

    landmark_weight = tf.ones([1, 68])
    landmark_weight = tf.reshape(landmark_weight, [1, 68, 1])
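    # Uniform per-landmark weights; all 68 landmarks contribute equally to the loss.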
    lm_loss = tf.reduce_mean(
        tf.square((landmark_p - landmark_label) / 224) * landmark_weight)

    fake_gamma = coeff[:, 227:254]
    render_gamma = coeff_label[:, 227:254]
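    # coeff[:, 227:254] holds the 27 illumination (gamma) coefficients of the face model.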

    gamma_loss = tf.reduce_mean(tf.abs(fake_gamma - render_gamma))

    lm_loss = autosummary('Loss/lm_loss', lm_loss)
    gamma_loss = autosummary('Loss/gamma_loss', gamma_loss)

    return lm_loss, gamma_loss
Example #3
def make_lie_group_loss_all(group_feats_E, group_feats_G, lie_alg_feats,
                            lie_alg_basis, minibatch_size, hy_rec, hy_dcp,
                            hy_hes, hy_lin, hy_ncut):
    mat_dim = group_feats_G.get_shape().as_list()[1]

    # [1, lat_dim, mat_dim, mat_dim]
    _, lat_dim, mat_dim, _ = lie_alg_basis.get_shape().as_list()
    lie_alg_basis_col = tf.reshape(lie_alg_basis,
                                   [lat_dim, 1, mat_dim, mat_dim])
    lie_alg_basis_mul = tf.matmul(lie_alg_basis, lie_alg_basis_col)
    lie_alg_basis_mask = 1. - tf.eye(
        lat_dim, dtype=lie_alg_basis_mul.dtype)[:, :, tf.newaxis, tf.newaxis]
    lie_alg_basis_mul = lie_alg_basis_mul * lie_alg_basis_mask

    lie_alg_basis_linear = lie_alg_basis * lie_alg_basis_col
    lie_alg_basis_linear = lie_alg_basis_linear * (1. - lie_alg_basis_mask)

    if group_feats_E is None:
        rec_loss = 0
    else:
        rec_loss = tf.reduce_mean(
            tf.reduce_sum(tf.square(group_feats_E - group_feats_G),
                          axis=[1, 2]))
    rec_loss = autosummary('Loss/lie_vae_rec_loss', rec_loss)
    spl_loss = tf.reduce_sum(
        tf.square(lie_alg_basis_mul -
                  tf.transpose(lie_alg_basis_mul, perm=[1, 0, 2, 3])))
    spl_loss = autosummary('Loss/lie_vae_spl_loss', spl_loss)
    hessian_loss = tf.reduce_sum(tf.square(lie_alg_basis_mul))
    hessian_loss = autosummary('Loss/lie_vae_hessian_loss', hessian_loss)
    linear_loss = tf.reduce_sum(tf.square(lie_alg_basis_linear))
    linear_loss = autosummary('Loss/lie_vae_linear_loss', linear_loss)
    loss = hy_rec * rec_loss + hy_dcp * spl_loss + \
        hy_hes * hessian_loss + hy_lin * linear_loss
    return loss
Example #4
def betatc_vae(E,
               G,
               opt,
               training_set,
               minibatch_size,
               reals,
               labels,
               latent_type='normal',
               hy_beta=1,
               recons_type='bernoulli_loss'):
    _ = opt, training_set
    means, log_var = get_return_v(
        E.get_output_for(reals, labels, is_training=True), 2)
    kl_loss = compute_gaussian_kl(means, log_var)
    kl_loss = autosummary('Loss/kl_loss', kl_loss)
    sampled = sample_from_latent_distribution(means, log_var)
    reconstructions = get_return_v(
        G.get_output_for(sampled, labels, is_training=True), 1)
    reconstruction_loss = make_reconstruction_loss(reals,
                                                   reconstructions,
                                                   recons_type=recons_type)
    # reconstruction_loss = tf.reduce_mean(reconstruction_loss)
    reconstruction_loss = autosummary('Loss/recons_loss', reconstruction_loss)

    tc = (hy_beta - 1.) * total_correlation(sampled, means, log_var)
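    # beta-TC-VAE: only the total-correlation part of the KL is up-weighted, by a factor of (hy_beta - 1).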
    # return tc + kl_loss
    elbo = reconstruction_loss + kl_loss
    elbo = autosummary('Loss/betatc_vae_elbo', elbo)
    loss = elbo + tc
    loss = autosummary('Loss/betatc_vae_loss', loss)
    return loss
Example #5
def D_logistic_simplegp(G, D, opt, training_set, minibatch_size, reals, labels, r1_gamma=10.0, r2_gamma=0.0): # pylint: disable=unused-argument
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.nn.softplus(fake_scores_out)  # -log(1 - logistic(fake_scores_out))
    loss += tf.nn.softplus(-real_scores_out)  # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type

    if r1_gamma != 0.0:
        with tf.name_scope('R1Penalty'):
            real_loss = opt.apply_loss_scaling(tf.reduce_sum(real_scores_out))
            real_grads = opt.undo_loss_scaling(fp32(tf.gradients(real_loss, [reals])[0]))
            r1_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1,2,3])
            r1_penalty = autosummary('Loss/r1_penalty', r1_penalty)
        loss += r1_penalty * (r1_gamma * 0.5)

    if r2_gamma != 0.0:
        with tf.name_scope('R2Penalty'):
            fake_loss = opt.apply_loss_scaling(tf.reduce_sum(fake_scores_out))
            fake_grads = opt.undo_loss_scaling(fp32(tf.gradients(fake_loss, [fake_images_out])[0]))
            r2_penalty = tf.reduce_sum(tf.square(fake_grads), axis=[1,2,3])
            r2_penalty = autosummary('Loss/r2_penalty', r2_penalty)
        loss += r2_penalty * (r2_gamma * 0.5)
    return loss
Example #6
def D_wgan_gp(G, D, opt, training_set, minibatch_size, reals, labels, wgan_lambda=10.0, wgan_epsilon=0.001, wgan_target=1.0, augment=False):
    _ = opt, training_set
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
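    # Optionally apply random augmentations to the fakes (NCHW -> NHWC for tf.map_fn, then back).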
    if augment:
        fake_images_out_pre_augment = tf.transpose(fake_images_out, [0, 2, 3, 1])
        fake_images_out_post_augment = tf.map_fn(misc.apply_random_aug, fake_images_out_pre_augment)
        fake_images_out = tf.transpose(fake_images_out_post_augment, [0, 3, 1, 2])
    real_scores_out = D.get_output_for(reals, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = fake_scores_out - real_scores_out
    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
        mixed_scores_out = D.get_output_for(mixed_images_out, labels, is_training=True)
        mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
        mixed_grads = tf.gradients(tf.reduce_sum(mixed_scores_out), [mixed_images_out])[0]
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
        reg = gradient_penalty * (wgan_lambda / (wgan_target**2))
    return loss, reg
Example #7
File: loss.py  Project: habout632/GAN.pth
def D_logistic_r2(G,
                  D,
                  opt,
                  training_set,
                  minibatch_size,
                  reals,
                  labels,
                  gamma=10.0):
    _ = opt, training_set
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = D.get_output_for(reals, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out,
                                       labels,
                                       is_training=True)
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.nn.softplus(fake_scores_out)  # -log(1-sigmoid(fake_scores_out))
    loss += tf.nn.softplus(-real_scores_out)  # -log(sigmoid(real_scores_out)) # pylint: disable=invalid-unary-operand-type

    with tf.name_scope('GradientPenalty'):
        fake_grads = tf.gradients(tf.reduce_sum(fake_scores_out),
                                  [fake_images_out])[0]
        gradient_penalty = tf.reduce_sum(tf.square(fake_grads), axis=[1, 2, 3])
        gradient_penalty = autosummary('Loss/gradient_penalty',
                                       gradient_penalty)
        reg = gradient_penalty * (gamma * 0.5)
    return loss, reg
Example #8
File: loss.py  Project: ak9250/few-shot-gan
def D_ns_diffaug_r1(G,
                    D,
                    training_set,
                    minibatch_size,
                    reals,
                    gamma=10,
                    policy='color,translation,cutout',
                    **kwargs):
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    labels = training_set.get_random_labels_tf(minibatch_size)
    rho = np.array([1])
    fakes = G.get_output_for(latents, labels, rho, is_training=True)
    real_scores = D.get_output_for(DiffAugment(reals,
                                               policy=policy,
                                               channels_first=True),
                                   labels,
                                   is_training=True)
    fake_scores = D.get_output_for(DiffAugment(fakes,
                                               policy=policy,
                                               channels_first=True),
                                   labels,
                                   is_training=True)
    real_scores = autosummary('Loss/scores/real', real_scores)
    fake_scores = autosummary('Loss/scores/fake', fake_scores)
    D_loss = tf.nn.softplus(fake_scores) + tf.nn.softplus(-real_scores)
    D_loss = autosummary('Loss/D_loss', D_loss)
    with tf.name_scope('GradientPenalty'):
        real_grads = tf.gradients(tf.reduce_sum(real_scores), [reals])[0]
        gradient_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1, 2, 3])
        gradient_penalty = autosummary('Loss/gradient_penalty',
                                       gradient_penalty)
        D_reg = gradient_penalty * (gamma * 0.5)
    return D_loss, D_reg
Example #9
def D_wgan_gp(G, D, opt, training_set, minibatch_size, reals, labels, # pylint: disable=unused-argument
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_epsilon    = 0.001,    # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target     = 1.0):     # Target value for gradient magnitudes.

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
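        # Straight-line interpolation between real and fake images; the gradient penalty is evaluated at these points.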
        mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True))
        mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon
    return loss
Example #10
def D_wgan(
    G,
    D,
    opt,
    training_set,
    minibatch_size,
    reals,
    labels,  # pylint: disable=unused-argument
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
):

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary("Loss/scores/real", real_scores_out)
    fake_scores_out = autosummary("Loss/scores/fake", fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope("EpsilonPenalty"):
        epsilon_penalty = autosummary(
            "Loss/epsilon_penalty", tf.square(real_scores_out)
        )
    loss += epsilon_penalty * wgan_epsilon
    return loss
Example #11
def factor_vae_G(E,
                 G,
                 D,
                 opt,
                 training_set,
                 minibatch_size,
                 reals,
                 labels,
                 latent_type='normal',
                 hy_gamma=1,
                 recons_type='bernoulli_loss'):
    _ = opt, training_set
    means, log_var = get_return_v(
        E.get_output_for(reals, labels, is_training=True), 2)
    kl_loss = compute_gaussian_kl(means, log_var)
    kl_loss = autosummary('Loss/kl_loss', kl_loss)
    sampled = sample_from_latent_distribution(means, log_var)
    reconstructions = get_return_v(
        G.get_output_for(sampled, labels, is_training=True), 1)

    logits, probs = get_return_v(D.get_output_for(sampled, is_training=True),
                                 2)
    # tc = E[log(p_real)-log(p_fake)] = E[logit_real - logit_fake]
    tc_loss = logits[:, 0] - logits[:, 1]
    # tc_loss = tf.reduce_mean(tc_loss, axis=0)
    reconstruction_loss = make_reconstruction_loss(reals,
                                                   reconstructions,
                                                   recons_type=recons_type)
    # reconstruction_loss = tf.reduce_mean(reconstruction_loss)
    reconstruction_loss = autosummary('Loss/recons_loss', reconstruction_loss)
    elbo = reconstruction_loss + kl_loss
    elbo = autosummary('Loss/fac_vae_elbo', elbo)
    loss = elbo + hy_gamma * tc_loss
    loss = autosummary('Loss/fac_vae_loss', loss)
    return loss
Example #12
def so_vae(E,
           G,
           opt,
           training_set,
           minibatch_size,
           reals,
           labels,
           hy_1p=0,
           hy_beta=1,
           latent_type='normal',
           recons_type='bernoulli_loss'):
    _ = opt, training_set
    means, log_var = get_return_v(
        E.get_output_for(reals, labels, is_training=True), 2)
    kl_loss = compute_gaussian_kl(means, log_var)
    kl_loss = autosummary('Loss/kl_loss', kl_loss)
    sampled = sample_from_latent_distribution(means, log_var)

    reconstructions, lie_groups_as_fm, _, _, lie_algs, lie_alg_basis, _, lie_vars = get_return_v(
        G.get_output_for(sampled, labels, is_training=True), 8)
    # lie_groups_as_fm: [b, lat_dim, mat_dim, mat_dim]
    # lie_algs: [b, lat_dim, mat_dim, mat_dim]
    # lie_alg_basis: [1, lat_dim, mat_dim, mat_dim]

    reconstruction_loss = make_reconstruction_loss(reals,
                                                   reconstructions,
                                                   recons_type=recons_type)
    # reconstruction_loss = tf.reduce_mean(reconstruction_loss)
    reconstruction_loss = autosummary('Loss/recons_loss', reconstruction_loss)

    elbo = reconstruction_loss + hy_beta * kl_loss
    elbo = autosummary('Loss/so_vae_elbo', elbo)
    loss = elbo + hy_1p * tf.reduce_sum(lie_vars * lie_vars)
    loss = autosummary('Loss/so_vae_loss', loss)
    return loss
Example #13
def G_logistic_ns_pathreg(G, D, opt, training_set, minibatch_size, pl_minibatch_shrink=2, pl_decay=0.01, pl_weight=2.0, augment=False):
    _ = opt
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out, fake_dlatents_out = G.get_output_for(latents, labels, is_training=True, return_dlatents=True)
    if augment:
        fake_images_out_pre_augment = tf.transpose(fake_images_out, [0, 2, 3, 1])
        fake_images_out_post_augment = tf.map_fn(misc.apply_random_aug, fake_images_out_pre_augment)
        fake_images_out = tf.transpose(fake_images_out_post_augment, [0, 3, 1, 2])
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))

    # Path length regularization.
    with tf.name_scope('PathReg'):

        # Evaluate the regularization term using a smaller minibatch to conserve memory.
        if pl_minibatch_shrink > 1:
            pl_minibatch = minibatch_size // pl_minibatch_shrink
            pl_latents = tf.random_normal([pl_minibatch] + G.input_shapes[0][1:])
            pl_labels = training_set.get_random_labels_tf(pl_minibatch)
            fake_images_out, fake_dlatents_out = G.get_output_for(pl_latents, pl_labels, is_training=True, return_dlatents=True)
            # TODO: applying augmentations here fails with the following error:
            # TypeError: Second-order gradient for while loops not supported.
            # setting pl_minibatch_shrink to 1 would work - but will have a higher memory usage
            # if augment:
            #     fake_images_out_pre_augment = tf.transpose(fake_images_out, [0, 2, 3, 1])
            #     fake_images_out_post_augment = tf.map_fn(misc.apply_random_aug, fake_images_out_pre_augment)
            #     fake_images_out = tf.transpose(fake_images_out_post_augment, [0, 3, 1, 2])

        # Compute |J*y|.
        pl_noise = tf.random_normal(tf.shape(fake_images_out)) / np.sqrt(np.prod(G.output_shape[2:]))
        pl_grads = tf.gradients(tf.reduce_sum(fake_images_out * pl_noise), [fake_dlatents_out])[0]
        pl_lengths = tf.sqrt(tf.reduce_mean(tf.reduce_sum(tf.square(pl_grads), axis=2), axis=1))
        pl_lengths = autosummary('Loss/pl_lengths', pl_lengths)

        # Track exponential moving average of |J*y|.
        with tf.control_dependencies(None):
            pl_mean_var = tf.Variable(name='pl_mean', trainable=False, initial_value=0.0, dtype=tf.float32)
        pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
        pl_update = tf.assign(pl_mean_var, pl_mean)

        # Calculate (|J*y|-a)^2.
        with tf.control_dependencies([pl_update]):
            pl_penalty = tf.square(pl_lengths - pl_mean)
            pl_penalty = autosummary('Loss/pl_penalty', pl_penalty)

        # Apply weight.
        #
        # Note: The division in pl_noise decreases the weight by num_pixels, and the reduce_mean
        # in pl_lengths decreases it by num_affine_layers. The effective weight then becomes:
        #
        # gamma_pl = pl_weight / num_pixels / num_affine_layers
        # = 2 / (r^2) / (log2(r) * 2 - 2)
        # = 1 / (r^2 * (log2(r) - 1))
        # = ln(2) / (r^2 * (ln(r) - ln(2)))
        #
        reg = pl_penalty * pl_weight

    return loss, reg
Example #14
def D_logistic_r1_vc2_info_gan(G,
                               D,
                               opt,
                               training_set,
                               minibatch_size,
                               reals,
                               labels,
                               gamma=10.0,
                               latent_type='uniform',
                               D_global_size=0):
    _ = opt, training_set
    discrete_latents = None
    if D_global_size > 0:
        discrete_latents = tf.random.uniform([minibatch_size],
                                             minval=0,
                                             maxval=D_global_size,
                                             dtype=tf.int32)
        discrete_latents = tf.one_hot(discrete_latents, D_global_size)

    if latent_type == 'uniform':
        latents = tf.random.uniform([minibatch_size] +
                                    [G.input_shapes[0][1] - D_global_size],
                                    minval=-2,
                                    maxval=2)
    elif latent_type == 'normal':
        latents = tf.random_normal([minibatch_size] +
                                   [G.input_shapes[0][1] - D_global_size])
    elif latent_type == 'trunc_normal':
        latents = tf.random.truncated_normal(
            [minibatch_size] + [G.input_shapes[0][1] - D_global_size])
    else:
        raise ValueError('Latent type not supported: ' + latent_type)
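    # Prepend the one-hot discrete latents to the continuous latents when discrete dimensions are used.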
    if D_global_size > 0:
        latents = tf.concat([discrete_latents, latents], axis=1)

    fake_images_out, atts = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = D.get_output_for(reals,
                                       labels,
                                       atts,
                                       is_training=True,
                                       return_preds=False)
    fake_scores_out = D.get_output_for(fake_images_out,
                                       labels,
                                       atts,
                                       is_training=True,
                                       return_preds=False)
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.nn.softplus(fake_scores_out)  # -log(1-sigmoid(fake_scores_out))
    loss += tf.nn.softplus(-real_scores_out)  # -log(sigmoid(real_scores_out)) # pylint: disable=invalid-unary-operand-type

    with tf.name_scope('GradientPenalty'):
        real_grads = tf.gradients(tf.reduce_sum(real_scores_out), [reals])[0]
        gradient_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1, 2, 3])
        gradient_penalty = autosummary('Loss/gradient_penalty',
                                       gradient_penalty)
        reg = gradient_penalty * (gamma * 0.5)
    return loss, reg
Example #15
def D_hinge(G, D, opt, training_set, minibatch_size, reals, labels):  # pylint: disable=unused-argument
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
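    # Hinge loss for D: max(0, 1 + D(fake)) + max(0, 1 - D(real)).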
    loss = tf.maximum(0., 1. + fake_scores_out) + tf.maximum(0., 1. - real_scores_out)
    return loss
Example #16
def G_wgan(G, D, opt, training_set, minibatch_size):
    _ = opt
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    loss = -fake_scores_out
    loss = autosummary('G_wgan_00/total_loss', loss)
    return loss, None
Example #17
def G_logistic_ns(G, D, opt, training_set, minibatch_size):
    _ = opt
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))
    loss = autosummary('G_logistic_ns_00/total_loss', loss)
    return loss, None
Example #18
def G_logistic(G, D, opt, training_set, minibatch_size):
    _ = opt
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    loss = -tf.nn.softplus(fake_scores_out) # log(1-sigmoid(fake_scores_out)) # pylint: disable=invalid-unary-operand-type
    loss = autosummary('G_logistic_00/total_loss', loss)
    return loss, None
Example #19
def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels):  # pylint: disable=unused-argument
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.nn.softplus(fake_scores_out)  # -log(1 - logistic(fake_scores_out))
    loss += tf.nn.softplus(-real_scores_out)  # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type
    return loss
Example #20
File: loss.py  Project: RuiLiFeng/noise
def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels, **kwargs):
    _ = opt, training_set
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = D.get_output_for(reals, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.nn.softplus(fake_scores_out) # -log(1-sigmoid(fake_scores_out))
    loss += tf.nn.softplus(-real_scores_out) # -log(sigmoid(real_scores_out)) # pylint: disable=invalid-unary-operand-type
    return loss, None
Example #21
def D_logistic(G, D, opt, training_set, minibatch_size, reals, labels):  # pylint: disable=unused-argument
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(
        D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = tf.nn.softplus(fake_scores_out)
    loss += tf.nn.softplus(-real_scores_out)
    return loss  # Loss_D = log(exp(D(G(z))) + 1) + log(exp(-D(x)) + 1)
Example #22
def G_masked_logistic_ns_l1(G, D, opt, training_set, minibatch_size, reals, masks, l1_weight=0):
    _ = opt
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out = G.get_output_for(latents, labels, reals, masks, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, masks, is_training=True)
    logistic_loss = tf.nn.softplus(-fake_scores_out) # -log(sigmoid(fake_scores_out))
    logistic_loss = autosummary('Loss/logistic_loss', logistic_loss)
    l1_loss = tf.reduce_mean(tf.abs(fake_images_out - reals), axis=[1,2,3])
    l1_loss = autosummary('Loss/l1_loss', l1_loss)
    loss = logistic_loss + l1_loss * l1_weight
    return loss, None
Example #23
def lie_vae_with_split(E,
                       G,
                       opt,
                       training_set,
                       minibatch_size,
                       reals,
                       labels,
                       latent_type='normal',
                       hy_dcp=1,
                       hy_hes=0,
                       hy_lin=0,
                       hy_ncut=1,
                       hy_rec=1,
                       recons_type='bernoulli_loss'):
    _ = opt, training_set
    means, log_var, group_feats_E = get_return_v(
        E.get_output_for(reals, labels, is_training=True), 3)
    kl_loss = compute_gaussian_kl(means, log_var)
    kl_loss = autosummary('Loss/kl_loss', kl_loss)

    mat_dim = int(math.sqrt(group_feats_E.get_shape().as_list()[1]))
    assert mat_dim * mat_dim == group_feats_E.get_shape().as_list()[1]
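    # The encoder emits flattened group features; reshape them into mat_dim x mat_dim matrices.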
    group_feats_E = tf.reshape(group_feats_E,
                               [minibatch_size, mat_dim, mat_dim])

    sampled = sample_from_latent_distribution(means, log_var)

    sampled_split_ls = split_latents(sampled, minibatch_size, hy_ncut=hy_ncut)
    sampled_split = tf.concat(sampled_split_ls, axis=0)
    labels_split = tf.concat([labels] * len(sampled_split_ls), axis=0)

    sampled_all = tf.concat([sampled, sampled_split], axis=0)
    labels_all = tf.concat([labels, labels_split], axis=0)

    reconstructions, group_feats_G, _, _, lie_alg_feats, lie_alg_basis = get_return_v(
        G.get_output_for(sampled_all, labels_all, is_training=True), 6)
    lie_group_loss = make_lie_group_loss_with_split(
        group_feats_E, group_feats_G, lie_alg_feats, lie_alg_basis,
        minibatch_size, hy_rec, hy_dcp, hy_hes, hy_lin, hy_ncut)
    lie_group_loss = autosummary('Loss/lie_group_loss', lie_group_loss)

    reconstruction_loss = make_reconstruction_loss(
        reals, reconstructions[:minibatch_size], recons_type=recons_type)
    # reconstruction_loss = tf.reduce_mean(reconstruction_loss)
    reconstruction_loss = autosummary('Loss/recons_loss', reconstruction_loss)

    elbo = reconstruction_loss + kl_loss
    elbo = autosummary('Loss/lie_vae_elbo', elbo)
    loss = elbo + lie_group_loss

    loss = autosummary('Loss/lie_vae_loss', loss)
    return loss
Example #24
File: loss.py  Project: RuiLiFeng/noise
def D_wgan(G, D, opt, training_set, minibatch_size, reals, labels, wgan_epsilon=0.001, **kwargs):
    _ = opt, training_set
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = D.get_output_for(reals, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = fake_scores_out - real_scores_out
    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
        loss += epsilon_penalty * wgan_epsilon
    return loss, None
Example #25
def D_logistic_simplegp(E,
                        G,
                        D,
                        Inv,
                        real_portraits,
                        shuffled_portraits,
                        real_landmarks,
                        training_flag,
                        r1_gamma=10.0):

    with tf.device("/cpu:0"):
        appearance_flag = tf.math.equal(training_flag, "appearance")

    portraits = tf.cond(appearance_flag, lambda: feedthrough(real_portraits),
                        lambda: feedthrough(shuffled_portraits))
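    # In the "appearance" phase the encoder consumes the real portraits; otherwise it sees the shuffled ones.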

    num_layers, latent_dim = G.components.synthesis.input_shape[1:3]
    embedded_w = Inv.get_output_for(portraits, phase=True)
    embedded_w_tensor = tf.reshape(
        embedded_w, [portraits.shape[0], num_layers, latent_dim])

    latent_w = E.get_output_for(embedded_w_tensor, real_landmarks, phase=True)
    latent_wp = tf.reshape(latent_w,
                           [portraits.shape[0], num_layers, latent_dim
                            ])  # make synthetic from shuffled ones!
    fake_X = G.components.synthesis.get_output_for(latent_wp,
                                                   randomize_noise=False)
    real_scores_out = fp32(
        D.get_output_for(real_portraits, real_landmarks,
                         None))  # real portraits, real landmarks
    fake_scores_out = fp32(D.get_output_for(
        fake_X, real_landmarks, None))  # synthetic portraits, real landmarks

    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)

    loss_fake = tf.reduce_mean(tf.nn.softplus(fake_scores_out))
    loss_real = tf.reduce_mean(tf.nn.softplus(-real_scores_out))

    loss_fake = autosummary('Loss/scores/loss_fake', loss_fake)
    loss_real = autosummary('Loss/scores/loss_real', loss_real)

    with tf.name_scope('R1Penalty'):
        real_grads = fp32(tf.gradients(real_scores_out, [real_portraits])[0])
        r1_penalty = tf.reduce_mean(
            tf.reduce_sum(tf.square(real_grads), axis=[1, 2, 3]))
        r1_penalty = autosummary('Loss/r1_penalty', r1_penalty)
        loss_gp = r1_penalty * (r1_gamma * 0.5)

    loss = loss_fake + loss_real + loss_gp
    return loss, loss_fake, loss_real, loss_gp
Example #26
def G_logistic_ns_pathreg_face(G, D, Df, opt, training_set, minibatch_size, pl_minibatch_shrink=2, pl_decay=0.01, pl_weight=2.0):
    _ = opt
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out, fake_dlatents_out = G.get_output_for(latents, labels, is_training=True, return_dlatents=True)
    fake_scores_out = D.get_output_for(fake_images_out, labels, is_training=True)
    fake_faces_out = get_faces_score(Df, fake_images_out)
    print('fake_scores_out:', fake_scores_out, 'fake_faces_out:', fake_faces_out)
    loss = tf.nn.softplus(-fake_scores_out + fake_faces_out) # -log(sigmoid(fake_scores_out - fake_faces_out))

    # Path length regularization.
    with tf.name_scope('PathReg'):

        # Evaluate the regularization term using a smaller minibatch to conserve memory.
        if pl_minibatch_shrink > 1:
            pl_minibatch = minibatch_size // pl_minibatch_shrink
            pl_latents = tf.random_normal([pl_minibatch] + G.input_shapes[0][1:])
            pl_labels = training_set.get_random_labels_tf(pl_minibatch)
            fake_images_out, fake_dlatents_out = G.get_output_for(pl_latents, pl_labels, is_training=True, return_dlatents=True)

        # Compute |J*y|.
        pl_noise = tf.random_normal(tf.shape(fake_images_out)) / np.sqrt(np.prod(G.output_shape[2:]))
        pl_grads = tf.gradients(tf.reduce_sum(fake_images_out * pl_noise), [fake_dlatents_out])[0]
        pl_lengths = tf.sqrt(tf.reduce_mean(tf.reduce_sum(tf.square(pl_grads), axis=2), axis=1))
        pl_lengths = autosummary('Loss/pl_lengths', pl_lengths)

        # Track exponential moving average of |J*y|.
        with tf.control_dependencies(None):
            pl_mean_var = tf.Variable(name='pl_mean', trainable=False, initial_value=0.0, dtype=tf.float32)
        pl_mean = pl_mean_var + pl_decay * (tf.reduce_mean(pl_lengths) - pl_mean_var)
        pl_update = tf.assign(pl_mean_var, pl_mean)

        # Calculate (|J*y|-a)^2.
        with tf.control_dependencies([pl_update]):
            pl_penalty = tf.square(pl_lengths - pl_mean)
            pl_penalty = autosummary('Loss/pl_penalty', pl_penalty)

        # Apply weight.
        #
        # Note: The division in pl_noise decreases the weight by num_pixels, and the reduce_mean
        # in pl_lengths decreases it by num_affine_layers. The effective weight then becomes:
        #
        # gamma_pl = pl_weight / num_pixels / num_affine_layers
        # = 2 / (r^2) / (log2(r) * 2 - 2)
        # = 1 / (r^2 * (log2(r) - 1))
        # = ln(2) / (r^2 * (ln(r) - ln(2)))
        #
        reg = pl_penalty * pl_weight

    return loss, reg
Example #27
File: loss.py  Project: Lumaceon/stylegan2
def D_logistic_r1(G,
                  D,
                  opt,
                  training_set,
                  minibatch_size,
                  reals,
                  labels,
                  gamma=10.0):
    _ = opt, training_set
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = D.get_output_for(reals, labels, is_training=True)
    fake_scores_out = D.get_output_for(fake_images_out,
                                       labels,
                                       is_training=True)
    ppl_real, ppl_fake = None, None
    if isinstance(real_scores_out, tuple):
        real_scores_out, real_quant_loss, ppl_real = real_scores_out[
            0], real_scores_out[1], real_scores_out[2]
        fake_scores_out, fake_quant_loss, ppl_fake = fake_scores_out[
            0], fake_scores_out[1], fake_scores_out[2]
        real_scores_out = autosummary('Loss/scores/real', real_scores_out)
        fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
        loss = tf.nn.softplus(
            fake_scores_out)  # -log(1-sigmoid(fake_scores_out))
        loss += tf.nn.softplus(
            -real_scores_out
        ) + real_quant_loss + fake_quant_loss  # -log(sigmoid(real_scores_out)) # pylint: disable=invalid-unary-operand-type
    else:
        real_scores_out = autosummary('Loss/scores/real', real_scores_out)
        fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
        loss = tf.nn.softplus(
            fake_scores_out)  # -log(1 - logistic(fake_scores_out))
        loss += tf.nn.softplus(
            -real_scores_out
        )  # -log(logistic(real_scores_out)) # temporary pylint workaround # pylint: disable=invalid-unary-operand-type

    with tf.name_scope('GradientPenalty'):
        real_grads = tf.gradients(tf.reduce_sum(real_scores_out), [reals])[0]
        gradient_penalty = tf.reduce_sum(tf.square(real_grads), axis=[1, 2, 3])
        gradient_penalty = autosummary('Loss/gradient_penalty',
                                       gradient_penalty)
        reg = gradient_penalty * (gamma * 0.5)
    if ppl_fake is not None:
        ppl = (ppl_fake + ppl_real) / 2
    else:
        ppl = tf.zeros(1)
    return loss, reg, ppl
Example #28
def group_subspace_vae(E,
                       G,
                       opt,
                       training_set,
                       minibatch_size,
                       reals,
                       labels,
                       subgroup_sizes_ls,
                       subspace_sizes_ls,
                       latent_type='normal',
                       hy_beta=1,
                       hy_hes=0,
                       hy_rec=1,
                       hy_commute=0,
                       forward_eg=False,
                       recons_type='bernoulli_loss'):
    _ = opt, training_set
    means, log_var, group_feats_E = get_return_v(
        E.get_output_for(reals, labels, is_training=True), 3)
    kl_loss = compute_gaussian_kl(means, log_var)
    kl_loss = autosummary('Loss/kl_loss', kl_loss)

    sampled = sample_from_latent_distribution(means, log_var)

    reconstructions, group_feats_G, _, _, _, lie_alg_basis_flattened, _, _ = get_return_v(
        G.get_output_for(tf.concat([sampled, group_feats_E], axis=1)
                         if forward_eg else sampled,
                         labels,
                         is_training=True), 8)
    lie_group_loss = make_group_subspace_loss(
        minibatch_size=minibatch_size,
        group_feats_E=group_feats_E,
        group_feats_G=group_feats_G,
        subgroup_sizes_ls=subgroup_sizes_ls,
        subspace_sizes_ls=subspace_sizes_ls,
        lie_alg_basis_flattened=lie_alg_basis_flattened,
        hy_hes=hy_hes,
        hy_rec=hy_rec,
        hy_commute=hy_commute)

    reconstruction_loss = make_reconstruction_loss(reals,
                                                   reconstructions,
                                                   recons_type=recons_type)
    reconstruction_loss = autosummary('Loss/recons_loss', reconstruction_loss)

    elbo = reconstruction_loss + hy_beta * kl_loss
    elbo = autosummary('Loss/elbo', elbo)
    loss = elbo + lie_group_loss
    loss = autosummary('Loss/loss', loss)
    return loss
Example #29
def cross_entropy_multiple(classifier, images, labels):
    model_pred, color_pred, manufacturer_pred, body_pred, rotation_pred, ratio_pred, background_pred = classifier.get_output_for(
        images, is_training=True)

    offsets = [1, 67, 12, 18, 10, 8, 5, 6]
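    # One-hot label layout: the first column is skipped, then 67 model, 12 color, 18 manufacturer,
    # 10 body, 8 rotation, 5 ratio and 6 background classes.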
    current_offset = offsets[0]
    next_offset = current_offset + offsets[1]
    model_label = labels[:, current_offset:next_offset]
    model_loss = model_label * -tf.log(model_pred)
    model_loss = autosummary('Classifier_multiple/model_loss', model_loss)
    loss = tf.reduce_sum(model_loss)

    current_offset = next_offset
    next_offset = current_offset + offsets[2]
    color_label = labels[:, current_offset:next_offset]
    color_loss = color_label * -tf.log(color_pred)
    color_loss = autosummary('Classifier_multiple/color_loss', color_loss)
    loss += tf.reduce_sum(color_loss)

    current_offset = next_offset
    next_offset = current_offset + offsets[3]
    manufacturer_label = labels[:, current_offset:next_offset]
    manufacturer_loss = manufacturer_label * -tf.log(manufacturer_pred)
    manufacturer_loss = autosummary('Classifier_multiple/manufacturer_loss',
                                    manufacturer_loss)
    loss += tf.reduce_sum(manufacturer_loss)

    current_offset = next_offset
    next_offset = current_offset + offsets[4]
    body_label = labels[:, current_offset:next_offset]
    body_loss = body_label * -tf.log(body_pred)
    body_loss = autosummary('Classifier_multiple/body_loss', body_loss)
    loss += tf.reduce_sum(body_loss)

    current_offset = next_offset
    next_offset = current_offset + offsets[5]
    rotation_label = labels[:, current_offset:next_offset]
    rotation_loss = rotation_label * -tf.log(rotation_pred)
    rotation_loss = autosummary('Classifier_multiple/rotation_loss',
                                rotation_loss)
    loss += tf.reduce_sum(rotation_loss)

    current_offset = next_offset
    next_offset = current_offset + offsets[6]
    ratio_label = labels[:, current_offset:next_offset]
    ratio_loss = ratio_label * -tf.log(ratio_pred)
    ratio_loss = autosummary('Classifier_multiple/ratio_loss', ratio_loss)
    loss += tf.reduce_sum(ratio_loss)

    current_offset = next_offset
    next_offset = current_offset + offsets[7]
    background_label = labels[:, current_offset:next_offset]
    background_loss = background_label * -tf.log(background_pred)
    background_loss = autosummary('Classifier_multiple/background_loss',
                                  background_loss)
    loss += tf.reduce_sum(background_loss)

    loss = autosummary('Classifier/loss', loss)
    return loss, labels
Example #30
def D_wgan_gp(
    G,
    D,
    opt,
    training_set,
    minibatch_size,
    reals,
    labels,  # pylint: disable=unused-argument
    wgan_lambda=10.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=1.0):  # Target gradient magnitude: the 1-Lipschitz constraint asks for gradient norms at (or below) 1.

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(
        D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = autosummary('Loss/scores/real', real_scores_out)
    fake_scores_out = autosummary('Loss/scores/fake', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform(
            [minibatch_size, 1, 1, 1], 0.0, 1.0,
            dtype=fake_images_out.dtype)  # alpha
        mixed_images_out = tflib.lerp(tf.cast(reals, fake_images_out.dtype),
                                      fake_images_out, mixing_factors)
        # Penalty samples are interpolations xp = (1 - alpha) * x + alpha * G(z).
        mixed_scores_out = fp32(
            D.get_output_for(mixed_images_out, labels,
                             is_training=True))  # Discriminator score D(xp) on the interpolated samples
        mixed_scores_out = autosummary('Loss/scores/mixed', mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss,
                              [mixed_images_out])[0]))  # Gradient of D(xp) w.r.t. the interpolated samples
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads),
                          axis=[1, 2, 3]))  # Gradient norm ||∇D(xp)||
        mixed_norms = autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms -
                                     wgan_target)  # Penalty term (||∇D(xp)|| - wgan_target)^2
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = autosummary('Loss/epsilon_penalty',
                                      tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon
    return loss  # Loss_D = D(G(z)) - D(x) + lambda * (||∇D(xp)|| - 1)^2 + epsilon * D(x)^2