def D_wgangp_acgan(G, D, opt, training_set, minibatch_size, reals, labels,
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_epsilon    = 0.001,    # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target     = 1.0,      # Target value for gradient magnitudes.
    cond_weight     = 1.0,      # Weight of the conditioning terms.
    shared          = False):   # Whether reals/fakes are passed to D as tuples of tensors.

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    if shared:
        real_scores_out, real_labels_out = fp32(D.get_output_for(*reals, is_training=True))
        fake_scores_out, fake_labels_out = fp32(D.get_output_for(*fake_images_out, is_training=True))
        out_dtype = fake_images_out[0].dtype
    else:
        real_scores_out, real_labels_out = fp32(D.get_output_for(reals, is_training=True))
        fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True))
        out_dtype = fake_images_out.dtype
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=out_dtype)
        if shared:
            mixed_images_out = tuple(tfutil.lerp(tf.cast(_reals, out_dtype), _fake_images_out, mixing_factors) for _reals, _fake_images_out in zip(reals, fake_images_out))
            mixed_scores_out, mixed_labels_out = fp32(D.get_output_for(*mixed_images_out, is_training=True))
        else:
            mixed_images_out = tfutil.lerp(tf.cast(reals, out_dtype), fake_images_out, mixing_factors)
            mixed_scores_out, mixed_labels_out = fp32(D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        if shared:
            mixed_grads = opt.undo_loss_scaling(fp32(tf.concat([tf.reshape(x, (minibatch_size, -1)) for x in tf.gradients([mixed_loss], list(mixed_images_out)) if x is not None], axis=1)))
            mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1]))
        else:
            mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
            mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out)
            label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
            label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
    return loss
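
For reference, the per-sample objective assembled above is the WGAN-GP discriminator loss with a drift term and ACGAN conditioning (notation taken from the comments above, with t = wgan_target):

$$ L_D = \mathbb{E}[D(x_{fake})] - \mathbb{E}[D(x_{real})] + \frac{\lambda}{t^2}\,\mathbb{E}\big[(\lVert \nabla_{\hat{x}} D(\hat{x}) \rVert_2 - t)^2\big] + \epsilon_{drift}\,\mathbb{E}[D(x_{real})^2] + w_{cond}\,(\mathrm{CE}_{real} + \mathrm{CE}_{fake}) $$

where \hat{x} is a per-sample random interpolation between a real and a fake image.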
Example #2
def D_wgangp(G, D, opt, training_set, minibatch_size, reals, labels,
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_epsilon    = 0.001,    # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target     = 1.0):     # Target value for gradient magnitudes.

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out = fp32(D.get_output_for(reals, labels, is_training=True))
    fake_scores_out = fp32(D.get_output_for(fake_images_out, labels, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
        mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    return loss
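
All of these gradient-penalty blocks assume tfutil.lerp is plain linear interpolation, so mixed_images_out lies on the segment between each real image and its paired fake. A minimal sketch of that assumption:

    def lerp(a, b, t):
        # Linear interpolation: returns a at t == 0 and b at t == 1.
        return a + (b - a) * t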
Example #3
def process_reals(x, lod, mirror_augment, drange_data, drange_net):
    with tf.name_scope('ProcessReals'):
        with tf.name_scope('DynamicRange'):
            x = tf.cast(x, tf.float32)
            x = misc.adjust_dynamic_range(x, drange_data, drange_net)
        if mirror_augment:
            with tf.name_scope('MirrorAugment'):
                s = tf.shape(x)
                mask = tf.random_uniform([s[0], 1, 1, 1], 0.0, 1.0)
                mask = tf.tile(mask, [1, s[1], s[2], s[3]])
                x = tf.where(mask < 0.5, x, tf.reverse(x, axis=[3]))
        with tf.name_scope('FadeLOD'):  # Smooth crossfade between consecutive levels-of-detail.
            s = tf.shape(x)
            y = tf.reshape(x, [-1, s[1], s[2] // 2, 2, s[3] // 2, 2])
            y = tf.reduce_mean(y, axis=[3, 5], keepdims=True)
            y = tf.tile(y, [1, 1, 1, 2, 1, 2])
            y = tf.reshape(y, [-1, s[1], s[2], s[3]])
            x = tfutil.lerp(x, y, lod - tf.floor(lod))
        with tf.name_scope('UpscaleLOD'):  # Upscale to match the expected input/output size of the networks.
            s = tf.shape(x)
            factor = tf.cast(2**tf.floor(lod), tf.int32)
            x = tf.reshape(x, [-1, s[1], s[2], 1, s[3], 1])
            x = tf.tile(x, [1, 1, 1, factor, 1, factor])
            x = tf.reshape(x, [-1, s[1], s[2] * factor, s[3] * factor])
        return x
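
A minimal NumPy sketch of the FadeLOD step above (assuming NCHW layout and even spatial dimensions, as in the TF code), handy for checking the crossfade by hand:

    import numpy as np

    def fade_lod(x, lod):
        # x: float array of shape [N, C, H, W] with H and W even.
        n, c, h, w = x.shape
        y = x.reshape(n, c, h // 2, 2, w // 2, 2).mean(axis=(3, 5), keepdims=True)
        y = np.tile(y, (1, 1, 1, 2, 1, 2)).reshape(n, c, h, w)
        return x + (y - x) * (lod - np.floor(lod))  # lerp by frac(lod)

At integer lod the input passes through unchanged; as the fractional part grows, the image fades toward its 2x box-downsampled version.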
Example #5
def D_wgangp_acgan(G, D, opt, training_set, minibatch_size, reals, labels, embeddings,
    use_embedding   = True,
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_epsilon    = 0.001,    # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target     = 1.0,      # Target value for gradient magnitudes.
    cond_weight     = 1.0):     # Weight of the conditioning terms.

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, embeddings, is_training=True)
    if use_embedding:
        real_scores_out, real_labels_out, real_embeddings_out = fp32(D.get_output_for(reals, labels, embeddings, is_training=True))
        fake_scores_out, fake_labels_out, fake_embeddings_out = fp32(D.get_output_for(fake_images_out, labels, embeddings, is_training=True))
    else:
        real_scores_out, real_labels_out = fp32(D.get_output_for(reals, labels, embeddings, is_training=True))
        fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, labels, embeddings, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
        if use_embedding:
            mixed_scores_out, mixed_labels_out, mixed_embeddings_out = fp32(D.get_output_for(mixed_images_out, labels, embeddings, is_training=True))
        else:
            mixed_scores_out = fp32(D.get_output_for(mixed_images_out, labels, embeddings, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        if use_embedding:
            with tf.name_scope('LabelPenalty'):
                #label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out)
                #label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
                label_penalty_reals = tf.losses.mean_squared_error(labels, real_labels_out)
                label_penalty_fakes = tf.losses.mean_squared_error(labels, fake_labels_out)
                label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals)
                label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes)
            loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
            with tf.name_scope('EmbeddingPenalty'):
                embedding_penalty_reals = tf.losses.mean_squared_error(embeddings, real_embeddings_out)
                embedding_penalty_fakes = tf.losses.mean_squared_error(embeddings, fake_embeddings_out)
                embedding_penalty_reals = tfutil.autosummary('Loss/embedding_penalty_reals', embedding_penalty_reals)
                embedding_penalty_fakes = tfutil.autosummary('Loss/embedding_penalty_fakes', embedding_penalty_fakes)
            loss += (embedding_penalty_reals + embedding_penalty_fakes) * cond_weight

    return loss
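
Unlike the per-sample softmax penalties in the other examples, tf.losses.mean_squared_error reduces to a single scalar over the whole minibatch, so the label and embedding penalties above are added uniformly to every sample's loss rather than per sample. A per-sample sketch, if that were wanted (hypothetical, not from the source project):

    label_penalty_reals = tf.reduce_mean(tf.square(labels - real_labels_out), axis=1)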
Example #6
def D_wgangp_acgan(G, D, opt, training_set, minibatch_size, reals, labels,
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_epsilon    = 0.001,    # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target     = 1.0,      # Target value for gradient magnitudes.
    cond_weight     = 1.0):     # Weight of the conditioning terms.
    
    total_latent_size = G.input_shapes[0][1]
    c_size = 1 + 10 + 1
    random_latent_size = total_latent_size - c_size
    c_3_ind = tf.random_normal([minibatch_size], 0, 1, dtype=tf.float32)
    c_4_ind = tf.random_uniform([minibatch_size], 0, 10, dtype=tf.int32)
    c_4 = tf.one_hot(c_4_ind, 10)
    c_5_ind = tf.random_normal([minibatch_size], 0, 1, dtype=tf.float32)
    c_3 = tf.reshape(c_3_ind, [minibatch_size, 1])
    c_5 = tf.reshape(c_5_ind, [minibatch_size, 1])

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
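    # Note: the InfoGAN-style codes built above (c_3, c_4, c_5) are never
    # concatenated into `latents` here, and the q* heads returned by D below
    # are likewise unused by this loss; the generator input is pure noise.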
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out, real_labels_out, qr3, qr4, qr5, lod_in = fp32(D.get_output_for(reals, is_training=True))
    fake_scores_out, fake_labels_out, qf3, qf4, qf5, lod_in = fp32(D.get_output_for(fake_images_out, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out, q3, q4, q5, lod_in = fp32(D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out)
            label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
            label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
    loss = tfutil.autosummary('Loss/DFinalLoss', loss)    
    return loss
Example #7
def D_gen_wgangp(
    E_zg,
    E_zl,
    G,
    D_gen,
    D_gen_opt,
    minibatch_size,
    reals_fade,
    wgan_lambda=10.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=1.0):  # Target value for gradient magnitudes.

    # random generated realism
    zg_latents = tf.random_normal([minibatch_size] + E_zg.output_shapes[0][1:])
    zl_latents = tf.random_normal([minibatch_size] + E_zl.output_shapes[0][1:])
    fake_images_out = G.get_output_for(
        tf.tile(zg_latents, [1, 1] + E_zl.output_shapes[0][2:]), zl_latents)
    fake_scores_out = fp32(D_gen.get_output_for(fake_images_out))
    real_scores_out = fp32(D_gen.get_output_for(reals_fade))
    gen_D_loss = tf.reduce_mean(fake_scores_out - real_scores_out,
                                axis=[1, 2, 3])
    gen_D_loss = tfutil.autosummary('Loss/gen_D_loss', gen_D_loss)
    loss = tf.identity(gen_D_loss)

    # gradient penalty
    with tf.name_scope('gen_GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(
            tf.cast(reals_fade, fake_images_out.dtype), fake_images_out,
            mixing_factors)
        mixed_scores_out = fp32(D_gen.get_output_for(mixed_images_out))
        mixed_loss = D_gen_opt.apply_loss_scaling(
            tf.reduce_sum(mixed_scores_out))
        mixed_grads = D_gen_opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        gen_gradient_penalty = tf.square(mixed_norms - wgan_target)
        gen_gradient_penalty *= (wgan_lambda / (wgan_target**2))
        gen_gradient_penalty = tfutil.autosummary('Loss/gen_gradient_penalty',
                                                  gen_gradient_penalty)
    loss += gen_gradient_penalty

    # calibration penalty
    with tf.name_scope('gen_EpsilonPenalty'):
        gen_epsilon_penalty = tf.reduce_mean(tf.square(real_scores_out),
                                             axis=[1, 2, 3]) * wgan_epsilon
        gen_epsilon_penalty = tfutil.autosummary('Loss/gen_epsilon_penalty',
                                                 gen_epsilon_penalty)
    loss += gen_epsilon_penalty

    return loss
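
Note that D_gen above evidently returns a spatial score map rather than one scalar per image (every reduction runs over axes [1, 2, 3], patch-discriminator style), so gen_D_loss is the per-sample mean over height, width, and channels of D(G(z)) - D(x_real); the gradient and epsilon penalties are otherwise the same WGAN-GP construction as in the earlier examples.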
Example #8
def D_rec_wgangp(
    EG,
    D_rec,
    D_rec_opt,
    minibatch_size,
    reals_orig,
    wgan_lambda=10.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=1.0):  # Target value for gradient magnitudes.

    # reconstructed realism
    recs_out, fingerprints_out, logits_out = EG.get_output_for(reals_orig)
    rec_scores_out = fp32(D_rec.get_output_for(recs_out))
    real_scores_out = fp32(D_rec.get_output_for(reals_orig))
    rec_D_loss = tf.reduce_mean(rec_scores_out - real_scores_out,
                                axis=[1, 2, 3])
    rec_D_loss = tfutil.autosummary('Loss/rec_D_loss', rec_D_loss)
    loss = tf.identity(rec_D_loss)

    # gradient penalty
    with tf.name_scope('rec_GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=recs_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals_orig, recs_out.dtype),
                                       recs_out, mixing_factors)
        mixed_scores_out = fp32(D_rec.get_output_for(mixed_images_out))
        mixed_loss = D_rec_opt.apply_loss_scaling(
            tf.reduce_sum(mixed_scores_out))
        mixed_grads = D_rec_opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        rec_gradient_penalty = tf.square(mixed_norms - wgan_target)
        rec_gradient_penalty *= (wgan_lambda / (wgan_target**2))
        rec_gradient_penalty = tfutil.autosummary('Loss/rec_gradient_penalty',
                                                  rec_gradient_penalty)
    loss += rec_gradient_penalty

    # calibration penalty
    with tf.name_scope('rec_EpsilonPenalty'):
        rec_epsilon_penalty = tf.reduce_mean(tf.square(real_scores_out),
                                             axis=[1, 2, 3]) * wgan_epsilon
        rec_epsilon_penalty = tfutil.autosummary('Loss/rec_epsilon_penalty',
                                                 rec_epsilon_penalty)
    loss += rec_epsilon_penalty

    return loss
Example #9
def D_wgangp_acgan(G, D, opt, training_set, minibatch_size, faceB, labels, faceA, generate_y, varforgauss,
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_epsilon    = 0.001,    # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target     = 1.0,      # Target value for gradient magnitudes.
    cond_weight     = 1.0):     # Weight of the conditioning terms.

    tmpreals = tf.concat([faceB, faceA], axis=1)

    real_scores_out, real_labels_out = fp32(D.get_output_for(tmpreals, is_training=True))
    fake_scores_out, fake_labels_out = fp32(D.get_output_for(generate_y, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=generate_y.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(tmpreals, generate_y.dtype), generate_y, mixing_factors)
        mixed_scores_out, mixed_labels_out = fp32(D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)

    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out)
            label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
            label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes)

        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight

    return loss
Example #10
def D_wgangp_acgan(G, D, opt, training_set, minibatch_size, reals, labels,
    wgan_lambda     = 10.0,     # Weight for the gradient penalty term.
    wgan_epsilon    = 0.001,    # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target     = 1.0,      # Target value for gradient magnitudes.
    cond_weight     = 1.0):     # Weight of the conditioning terms.

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out, real_labels_out = fp32(D.get_output_for(reals, is_training=True))
    fake_scores_out, fake_labels_out = fp32(D.get_output_for(fake_images_out, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1], 0.0, 1.0, dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype), fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out = fp32(D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(tf.reduce_sum(tf.square(mixed_grads), axis=[1,2,3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty', tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out)
            label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
            label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
    return loss
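
A hypothetical wiring sketch for these discriminator losses, following the Progressive GAN tfutil conventions (the optimizer method names below are assumptions from that codebase, not part of this file):

    # D_loss = D_wgangp_acgan(G, D, opt=D_opt, training_set=training_set,
    #                         minibatch_size=minibatch_size, reals=reals, labels=labels)
    # D_opt.register_gradients(tf.reduce_mean(D_loss), D.trainables)
    # D_train_op = D_opt.apply_updates()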
Example #11
def D_wgangp_acgan(
    G,
    D,
    opt,
    training_set,
    minibatch_size,
    reals,
    labels,
    wgan_lambda=10.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=1.0,  # Target value for gradient magnitudes.
    cond_weight=1.0):  # Weight of the conditioning terms.
    print('Mini-batch size D: ' + str(minibatch_size))
    size = 128
    print('real shape: ' + str(reals.shape))

    real1 = reals[:, :, :(size), :(size)]
    real2 = reals[:, :, (size):, :(size)]
    real3 = reals[:, :, :(size), (size):]
    real4 = reals[:, :, :(size), :(size)]
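    # Note: real4 duplicates real1 (the top-left quadrant) and is never used
    # below; the bottom-right quadrant would presumably be
    # reals[:, :, (size):, (size):].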

    latents = tf.random_normal([minibatch_size, 3, size, size])
    left = tf.concat([real1, real2], axis=2)
    right = tf.concat([real3, latents], axis=2)
    lat_and_cond = tf.concat([left, right], axis=3)

    labels = training_set.get_random_labels_tf(minibatch_size)
    fake_images_out_small = G.get_output_for(lat_and_cond,
                                             labels,
                                             is_training=True)
    fake_image_out_right = tf.concat([real3, fake_images_out_small], axis=2)
    fake_image_out_left = tf.concat([real1, real2], axis=2)
    fake_images_out = tf.concat([fake_image_out_left, fake_image_out_right],
                                axis=3)

    real_scores_out, real_labels_out = fp32(
        D.get_output_for(reals, is_training=True))
    fake_scores_out, fake_labels_out = fp32(
        D.get_output_for(fake_images_out, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype),
                                       fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out = fp32(
            D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores',
                                              mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty',
                                             tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=real_labels_out)
            label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=fake_labels_out)
            label_penalty_reals = tfutil.autosummary(
                'Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary(
                'Loss/label_penalty_fakes', label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
    return loss
Example #12
File: loss.py Project: notem/SSL-PG-GAN
def D_wgangp_acgan(
    G,
    D,
    opt,
    training_set,
    minibatch_size,
    reals,
    labels,
    unlabeled_reals,
    wgan_lambda=0.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.0,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=0.1,  # Target value for gradient magnitudes.
    cond_weight=0.0):  # Weight of the conditioning terms.

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)

    output_before_softmax_lab, real_flogit_out, _ = fp32(
        D.get_output_for(reals, is_training=True))
    output_before_softmax_unl, _, _ = fp32(
        D.get_output_for(unlabeled_reals, is_training=True))
    output_before_softmax_fake, fake_flogit_out, _ = fp32(
        D.get_output_for(fake_images_out, is_training=True))

    # Direct port of the labeled loss from Salimans et al.; TF lacks this kind of
    # tensor indexing, so the port below does not work as written:
    #simple_labels = tf.argmax(labels, axis=1)
    #z_exp_lab = tf.math.reduce_mean(tf.math.reduce_logsumexp(output_before_softmax_lab, axis=1))
    #l_lab = output_before_softmax_lab[tf.range(minibatch_size), simple_labels]
    #loss_lab = -tf.math.reduce_mean(l_lab) + tf.math.reduce_mean(z_exp_lab)

    # The labeled-sample loss is equivalent to softmax cross-entropy (I think?)
    loss_lab = tf.math.reduce_sum(
        tf.nn.softmax_cross_entropy_with_logits(
            labels=labels, logits=output_before_softmax_lab))

    # Direct port of unlabeled loss and fake loss.
    #z_exp_unl = tf.math.reduce_mean(tf.math.reduce_logsumexp(output_before_softmax_unl, axis=1))
    loss_unl = -0.5*tf.math.reduce_mean(tf.math.reduce_logsumexp(output_before_softmax_unl, axis=1)) + \
               0.5*tf.math.reduce_mean(tf.math.softplus(tf.math.reduce_logsumexp(output_before_softmax_unl, axis=1)))
    loss_fake = 0.5 * tf.math.reduce_mean(
        tf.math.softplus(
            tf.math.reduce_logsumexp(output_before_softmax_fake, axis=1)))

    # combine losses
    loss = loss_lab + loss_unl + loss_fake

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype),
                                       fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out, _ = fp32(
            D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores',
                                              mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty',
                                             tf.square(real_flogit_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=output_before_softmax_lab)
            label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=output_before_softmax_fake)
            label_penalty_reals = tfutil.autosummary(
                'Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary(
                'Loss/label_penalty_fakes', label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
    return loss
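
In expectation, the direct port above computes the unlabeled and fake losses of Salimans et al. (2016), with LSE denoting logsumexp over the class logits f(x):

$$ L_{unl} = -\tfrac{1}{2}\,\mathbb{E}_{x \sim p_{data}}\big[\mathrm{LSE}(f(x))\big] + \tfrac{1}{2}\,\mathbb{E}_{x \sim p_{data}}\big[\mathrm{softplus}(\mathrm{LSE}(f(x)))\big], \quad L_{fake} = \tfrac{1}{2}\,\mathbb{E}_{z}\big[\mathrm{softplus}(\mathrm{LSE}(f(G(z))))\big] $$

which corresponds to an implicit discriminator D(x) = Z(x) / (Z(x) + 1) with Z(x) = exp(LSE(f(x))).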
Example #13
def D_wgangp_acgan(
        G,
        D,
        opt,
        training_set,
        minibatch_size,
        reals,
        labels,
        scale,
        wgan_lambda=10.0,  # Weight for the gradient penalty term.
        wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
        wgan_target=1.0,  # Target value for gradient magnitudes.
        cond_weight=1.0,  # Weight of the conditioning terms.
):

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)

    # add the blurring:
    reals.set_shape([None, 3, 128, 128])
    reals_before = reals
    fake_images_out.set_shape([None, 3, 128, 128])
    fakes_before = fake_images_out

    reals = image_at_scale(reals, scale)
    fake_images_out = image_at_scale(fake_images_out, scale)

    with tf.device("cpu:0"):
        _reals_before = tf.transpose(reals_before, [0, 2, 3, 1])
        _fakes_before = tf.transpose(fakes_before, [0, 2, 3, 1])
        _reals = tf.transpose(reals, [0, 2, 3, 1])
        _fakes = tf.transpose(fake_images_out, [0, 2, 3, 1])
        tf.summary.image("smoothed_reals", _reals)
        tf.summary.image("smoothed_fakes", _fakes)
        tf.summary.image("raw_reals", _reals_before)
        tf.summary.image("raw_fakes", _fakes_before)

    real_scores_out, real_labels_out = fp32(
        D.get_output_for(reals, is_training=True))
    fake_scores_out, fake_labels_out = fp32(
        D.get_output_for(fake_images_out, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype),
                                       fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out = fp32(
            D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores',
                                              mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty',
                                             tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=real_labels_out)
            label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=fake_labels_out)
            label_penalty_reals = tfutil.autosummary(
                'Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary(
                'Loss/label_penalty_fakes', label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
    return loss
Example #14
def EG_wgan(
    E_zg,
    E_zl,
    G,
    D_rec,
    G_fcn,
    D_interp,
    D_blend,
    minibatch_size,
    reals_fade,
    reals_orig,
    labels,
    permutation_matrix_h_forward,
    permutation_matrix_w_forward,
    permutation_matrix_h_backward,
    permutation_matrix_w_backward,
    scale_h,  # Height scale of interpolated size
    scale_w,  # Width scale of interpolated size
    zg_interp_variational,  # zg interpolation mode: 'hard', 'variational', 'learned', or 'random'.
    zl_interp_variational,  # zl interpolation mode: 'hard', 'variational', 'learned', or 'random'.
    rec_G_weight,  # Weight of the reconstructed realism loss term.
    pixel_weight,  # Weight of the L1-based loss term in the image domain.
    gram_weight,  # Weight of the Gram matrix loss term.
    latent_weight,  # Weight of the L1-based pixel loss term in the latent domain
    kl_weight,  # Weight of the KL divergence term
    interp_G_weight,  # Weight of the interpolated realism loss term.
    blend_interp_G_weight  # Weight of the blended interpolated realism loss term.
):

    # zg encoding
    enc_zg_mu, enc_zg_log_sigma = E_zg.get_output_for(reals_orig)
    if config.zg_enabled:
        enc_zg_latents = tf.identity(enc_zg_mu)
    else:
        enc_zg_latents = tf.zeros(tf.shape(enc_zg_mu))

    # zl encoding
    enc_zl_mu, enc_zl_log_sigma = E_zl.get_output_for(reals_orig)
    enc_zl_latents = tf.identity(enc_zl_mu)

    # generating
    rec_images_out = G.get_output_for(
        tf.tile(enc_zg_latents, [1, 1] + E_zl.output_shapes[0][2:]),
        enc_zl_latents)
    loss = None

    # reconstructed realism
    if rec_G_weight > 0.0:
        rec_scores_out = fp32(D_rec.get_output_for(rec_images_out))
        rec_G_loss = tf.reduce_mean(-rec_scores_out, axis=[1, 2, 3])
        rec_G_loss *= rec_G_weight
        rec_G_loss = tfutil.autosummary('Loss/rec_G_loss', rec_G_loss)
        loss = loss_addup(loss, rec_G_loss)

    # L1 pixel loss
    if pixel_weight > 0.0:
        rec_pixel_loss = tf.reduce_mean(
            tf.abs(rec_images_out - tf.cast(reals_fade, rec_images_out.dtype)),
            axis=[1, 2, 3])
        rec_pixel_loss *= pixel_weight
        rec_pixel_loss = tfutil.autosummary('Loss/rec_pixel_loss',
                                            rec_pixel_loss)
        loss = loss_addup(loss, rec_pixel_loss)

    # gram matrix loss
    if gram_weight > 0.0:
        data_dict = loadWeightsData('tensorflow_vgg/vgg19.npy')
        rec_vgg = custom_Vgg19(rec_images_out, data_dict=data_dict)
        real_vgg = custom_Vgg19(reals_fade, data_dict=data_dict)
        rec_feature = [
            rec_vgg.conv1_1, rec_vgg.conv2_1, rec_vgg.conv3_1, rec_vgg.conv4_1,
            rec_vgg.conv5_1
        ]
        real_feature = [
            real_vgg.conv1_1, real_vgg.conv2_1, real_vgg.conv3_1,
            real_vgg.conv4_1, real_vgg.conv5_1
        ]
        rec_gram = [gram_matrix(l, data_format='NHWC') for l in rec_feature]
        real_gram = [gram_matrix(l, data_format='NHWC') for l in real_feature]
        rec_gram_loss = multi_layer_diff(rec_gram,
                                         real_gram,
                                         dtype=rec_images_out.dtype)
        rec_gram_loss *= gram_weight
        rec_gram_loss = tfutil.autosummary('Loss/rec_gram_loss', rec_gram_loss)
        loss = loss_addup(loss, rec_gram_loss)

    # KL divergence regularization
    if kl_weight > 0.0:
        KL_zg = -0.5 * tf.reduce_mean(1 + 2 * enc_zg_log_sigma - enc_zg_mu**2 -
                                      tf.exp(2 * enc_zg_log_sigma),
                                      axis=[1, 2, 3])
        KL_zg *= kl_weight
        KL_zg = tfutil.autosummary('Loss/KL_zg', KL_zg)
        loss = loss_addup(loss, KL_zg)
        KL_zl = -0.5 * tf.reduce_mean(1 + 2 * enc_zl_log_sigma - enc_zl_mu**2 -
                                      tf.exp(2 * enc_zl_log_sigma),
                                      axis=[1, 2, 3])
        KL_zl *= kl_weight
        KL_zl = tfutil.autosummary('Loss/KL_zl', KL_zl)
        loss = loss_addup(loss, KL_zl)
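        # The two terms above are the closed-form KL divergence to a unit
        # Gaussian, KL(N(mu, sigma^2) || N(0, 1)) = -0.5 * (1 + 2*log(sigma)
        # - mu^2 - sigma^2) per element (sigma^2 = exp(2 * log_sigma)),
        # averaged over the latent dimensions.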

    # interpolated realism and global/local gram matrix losses
    if interp_G_weight > 0.0 or blend_interp_G_weight > 0.0:
        if zg_interp_variational == 'hard':
            interp_enc_zg_latents = tf.tile(enc_zg_latents, [
                1, 1, E_zl.output_shapes[0][2] * scale_h,
                E_zl.output_shapes[0][3] * scale_w
            ])
        elif zg_interp_variational == 'variational':
            interp_enc_zg_latents = tf.random_normal([minibatch_size] +
                                                     E_zg.output_shapes[0][1:])
            interp_enc_zg_latents = interp_enc_zg_latents * tf.exp(
                enc_zg_log_sigma) + enc_zg_mu
            interp_enc_zg_latents = tf.tile(interp_enc_zg_latents, [
                1, 1, E_zl.output_shapes[0][2] * scale_h,
                E_zl.output_shapes[0][3] * scale_w
            ])
        if zl_interp_variational == 'hard':
            interp_enc_zl_latents = tf.tile(enc_zl_latents,
                                            [1, 1, scale_h, scale_w])
        elif zl_interp_variational == 'variational':
            interp_enc_zl_mu = tf.tile(enc_zl_mu, [1, 1, scale_h, scale_w])
            interp_enc_zl_log_sigma = tf.tile(enc_zl_log_sigma,
                                              [1, 1, scale_h, scale_w])
            interp_enc_zl_latents = tf.random_normal([minibatch_size] +
                                                     G_fcn.input_shapes[1][1:])
            interp_enc_zl_latents = interp_enc_zl_latents * tf.exp(
                interp_enc_zl_log_sigma) + interp_enc_zl_mu
        elif zl_interp_variational == 'random':
            interp_enc_zl_latents_1 = tf.concat([
                enc_zl_latents,
                tf.random_normal([
                    minibatch_size, G_fcn.input_shapes[1][1],
                    E_zl.output_shapes[0][2],
                    G_fcn.input_shapes[1][3] - 2 * E_zl.output_shapes[0][3]
                ]), enc_zl_latents
            ],
                                                axis=3)
            interp_enc_zl_latents_2 = tf.random_normal([
                minibatch_size, G_fcn.input_shapes[1][1],
                G_fcn.input_shapes[1][2] - 2 * E_zl.output_shapes[0][2],
                G_fcn.input_shapes[1][3]
            ])
            interp_enc_zl_latents_3 = tf.concat([
                enc_zl_latents,
                tf.random_normal([
                    minibatch_size, G_fcn.input_shapes[1][1],
                    E_zl.output_shapes[0][2],
                    G_fcn.input_shapes[1][3] - 2 * E_zl.output_shapes[0][3]
                ]), enc_zl_latents
            ],
                                                axis=3)
            interp_enc_zl_latents = tf.concat([
                interp_enc_zl_latents_1, interp_enc_zl_latents_2,
                interp_enc_zl_latents_3
            ],
                                              axis=2)
        elif zl_interp_variational == 'permutational':
            interp_enc_zl_latents = tiling_permutation(
                enc_zl_latents, scale_h, scale_w, permutation_matrix_h_forward,
                permutation_matrix_w_forward)

        if interp_G_weight > 0.0:
            interp_images_out = G_fcn.get_output_for(interp_enc_zg_latents,
                                                     interp_enc_zl_latents)
            crop_interp_images_out = random_crop(interp_images_out,
                                                 minibatch_size,
                                                 G_fcn.output_shape,
                                                 D_interp.input_shape)
            # interpolated realism
            crop_interp_scores_out = fp32(
                D_interp.get_output_for(crop_interp_images_out))
            crop_interp_G_loss = tf.reduce_mean(-crop_interp_scores_out,
                                                axis=[1, 2, 3])
            crop_interp_G_loss *= interp_G_weight
            crop_interp_G_loss = tfutil.autosummary('Loss/crop_interp_G_loss',
                                                    crop_interp_G_loss)
            loss = loss_addup(loss, crop_interp_G_loss)
            # interpolated local gram matrix loss
            if gram_weight > 0.0:
                crop_interp_vgg = custom_Vgg19(crop_interp_images_out,
                                               data_dict=data_dict)
                crop_interp_feature = [
                    crop_interp_vgg.conv1_1, crop_interp_vgg.conv2_1,
                    crop_interp_vgg.conv3_1, crop_interp_vgg.conv4_1,
                    crop_interp_vgg.conv5_1
                ]
                crop_interp_gram = [
                    gram_matrix(l, data_format='NHWC')
                    for l in crop_interp_feature
                ]
                crop_interp_gram_loss = multi_layer_diff(
                    crop_interp_gram,
                    real_gram,
                    dtype=crop_interp_images_out.dtype)
                crop_interp_gram_loss *= gram_weight
                crop_interp_gram_loss = tfutil.autosummary(
                    'Loss/crop_interp_gram_loss', crop_interp_gram_loss)
                loss = loss_addup(loss, crop_interp_gram_loss)

        # multi-texture interpolated realism
        if blend_interp_G_weight > 0.0:
            if zg_interp_variational == 'hard':
                interp_enc_zg_latents_reverse = tf.tile(
                    tf.reverse(enc_zg_latents, axis=[0]), [
                        1, 1, E_zl.output_shapes[0][2] * scale_h,
                        E_zl.output_shapes[0][3] * scale_w
                    ])
            elif zg_interp_variational == 'variational':
                interp_enc_zg_latents_reverse = tf.random_normal(
                    [minibatch_size] + E_zg.output_shapes[0][1:])
                interp_enc_zg_latents_reverse = interp_enc_zg_latents_reverse * tf.exp(
                    tf.reverse(enc_zg_log_sigma, axis=[0])) + tf.reverse(
                        enc_zg_mu, axis=[0])
                interp_enc_zg_latents_reverse = tf.tile(
                    interp_enc_zg_latents_reverse, [
                        1, 1, E_zl.output_shapes[0][2] * scale_h,
                        E_zl.output_shapes[0][3] * scale_w
                    ])
            if zl_interp_variational == 'hard':
                interp_enc_zl_latents_reverse = tf.tile(
                    tf.reverse(enc_zl_latents, axis=[0]),
                    [1, 1, scale_h, scale_w])
            elif zl_interp_variational == 'variational':
                interp_enc_zl_mu_reverse = tf.tile(
                    tf.reverse(enc_zl_mu, axis=[0]), [1, 1, scale_h, scale_w])
                interp_enc_zl_log_sigma_reverse = tf.tile(
                    tf.reverse(enc_zl_log_sigma, axis=[0]),
                    [1, 1, scale_h, scale_w])
                interp_enc_zl_latents_reverse = tf.random_normal(
                    [minibatch_size] + G_fcn.input_shapes[1][1:])
                interp_enc_zl_latents_reverse = interp_enc_zl_latents_reverse * tf.exp(
                    interp_enc_zl_log_sigma_reverse) + interp_enc_zl_mu_reverse
            elif zl_interp_variational == 'random':
                interp_enc_zl_latents_1_reverse = tf.concat([
                    tf.reverse(enc_zl_latents, axis=[0]),
                    tf.random_normal([
                        minibatch_size, G_fcn.input_shapes[1][1],
                        E_zl.output_shapes[0][2],
                        G_fcn.input_shapes[1][3] - 2 * E_zl.output_shapes[0][3]
                    ]),
                    tf.reverse(enc_zl_latents, axis=[0])
                ],
                                                            axis=3)
                interp_enc_zl_latents_2_reverse = tf.random_normal([
                    minibatch_size, G_fcn.input_shapes[1][1],
                    G_fcn.input_shapes[1][2] - 2 * E_zl.output_shapes[0][2],
                    G_fcn.input_shapes[1][3]
                ])
                interp_enc_zl_latents_3_reverse = tf.concat([
                    tf.reverse(enc_zl_latents, axis=[0]),
                    tf.random_normal([
                        minibatch_size, G_fcn.input_shapes[1][1],
                        E_zl.output_shapes[0][2],
                        G_fcn.input_shapes[1][3] - 2 * E_zl.output_shapes[0][3]
                    ]),
                    tf.reverse(enc_zl_latents, axis=[0])
                ],
                                                            axis=3)
                interp_enc_zl_latents_reverse = tf.concat([
                    interp_enc_zl_latents_1_reverse,
                    interp_enc_zl_latents_2_reverse,
                    interp_enc_zl_latents_3_reverse
                ],
                                                          axis=2)
            elif zl_interp_variational == 'permutational':
                interp_enc_zl_latents_reverse = tiling_permutation(
                    tf.reverse(enc_zl_latents, axis=[0]), scale_h, scale_w,
                    permutation_matrix_h_backward,
                    permutation_matrix_w_backward)
            mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                               0.0,
                                               1.0,
                                               dtype=enc_zg_latents.dtype)
            blend_interp_enc_zg_latents = tfutil.lerp(
                interp_enc_zg_latents_reverse, interp_enc_zg_latents,
                mixing_factors)
            blend_interp_enc_zl_latents = tfutil.lerp(
                interp_enc_zl_latents_reverse, interp_enc_zl_latents,
                mixing_factors)
            blend_interp_images_out = G_fcn.get_output_for(
                blend_interp_enc_zg_latents, blend_interp_enc_zl_latents)
            crop_blend_interp_images_out = random_crop(blend_interp_images_out,
                                                       minibatch_size,
                                                       G_fcn.output_shape,
                                                       D_blend.input_shape)
            crop_blend_interp_scores_out = fp32(
                D_blend.get_output_for(crop_blend_interp_images_out))
            crop_blend_interp_G_loss = tf.reduce_mean(
                -crop_blend_interp_scores_out, axis=[1, 2, 3])
            crop_blend_interp_G_loss *= blend_interp_G_weight
            crop_blend_interp_G_loss = tfutil.autosummary(
                'Loss/crop_blend_interp_G_loss', crop_blend_interp_G_loss)
            loss = loss_addup(loss, crop_blend_interp_G_loss)
            # multi-texture interpolated local gram matrix loss
            if gram_weight > 0.0:
                crop_blend_interp_vgg = custom_Vgg19(
                    crop_blend_interp_images_out, data_dict=data_dict)
                crop_blend_interp_feature = [
                    crop_blend_interp_vgg.conv1_1,
                    crop_blend_interp_vgg.conv2_1,
                    crop_blend_interp_vgg.conv3_1,
                    crop_blend_interp_vgg.conv4_1,
                    crop_blend_interp_vgg.conv5_1
                ]
                crop_blend_interp_gram = [
                    gram_matrix(l, data_format='NHWC')
                    for l in crop_blend_interp_feature
                ]
                real_gram_2 = [tf.reverse(mat, axis=[0]) for mat in real_gram]
                alpha = tf.random_uniform([minibatch_size, 1, 1, 1],
                                          0.0,
                                          1.0,
                                          dtype=interp_enc_zg_latents.dtype)
                crop_blend_interp_gram_loss = (1.0 - alpha) * multi_layer_diff(
                    crop_blend_interp_gram,
                    real_gram_2,
                    dtype=crop_blend_interp_images_out.dtype
                ) + alpha * multi_layer_diff(
                    crop_blend_interp_gram,
                    real_gram,
                    dtype=crop_blend_interp_images_out.dtype)
                crop_blend_interp_gram_loss *= gram_weight
                crop_blend_interp_gram_loss = tfutil.autosummary(
                    'Loss/crop_blend_interp_gram_loss',
                    crop_blend_interp_gram_loss)
                loss = loss_addup(loss, crop_blend_interp_gram_loss)

    return loss
Example #15
File: loss.py Project: keepgallop/GANFP
def D_wgangp_acgan(
        G,
        D,
        opt,
        training_set,
        minibatch_size,
        reals,
        labels,
        wgan_lambda=10.0,  # Weight for the gradient penalty term.
        wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
        wgan_target=1.0,  # Target value for gradient magnitudes.
        cond_weight=1.0,  # Weight of the conditioning terms.
        fingerprint_weight=20.0,  # Weight of the fingerprint terms.
):
    #print("#training_set.shape: ", training_set.shape) #training_set.shape:  [3, 128, 128]
    #print("#reals.shape: ", reals.shape)               #reals.shape:  (?, ?, ?, ?)

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out, real_labels_out, real_features_out = fp32(
        D.get_output_for(reals, is_training=True))
    fake_scores_out, fake_labels_out, fake_features_out = fp32(
        D.get_output_for(fake_images_out, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out
    #print("#loss.shape: ", loss.shape) #loss.shape:  (?, 1)

    # Add Fingerprints loss
    with tf.name_scope('FingerprintPenalty'):
        #real_features_out = feature_extractor(image_batch=reals, image_shape=training_set.shape, batch_size=minibatch_size_np, extractor_dir=extractor_dir)
        #fake_features_out = feature_extractor(image_batch=fake_images_out, image_shape=training_set.shape, batch_size=minibatch_size_np, extractor_dir=extractor_dir)
        fingerprints_penalty = tf.reduce_mean(tf.abs(real_features_out -
                                                     fake_features_out),
                                              axis=1,
                                              keepdims=False)
        fingerprints_penalty = tfutil.autosummary('Loss/fingerprints_scores',
                                                  fingerprints_penalty)
        #print("#fingerprints_penalty.shape: ", fingerprints_penalty.shape) #fingerprints_penalty.shape:  (minibatch_size,)
    fingerprints_penalty *= fingerprint_weight
    loss += fingerprints_penalty

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype),
                                       fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out, mixed_features_out = fp32(
            D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores',
                                              mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty',
                                             tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=real_labels_out)
            label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=fake_labels_out)
            label_penalty_reals = tfutil.autosummary(
                'Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary(
                'Loss/label_penalty_fakes', label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
    return loss  #, fingerprints_penalty
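
A toy check of the fingerprint term above: the per-sample L1 distance between D's real and fake feature vectors, scaled by fingerprint_weight. The feature values here are made up purely for illustration:

import tensorflow as tf

real_f = tf.constant([[1.0, 2.0], [0.0, 0.0]])   # [N=2, F=2] dummy features
fake_f = tf.constant([[1.5, 1.0], [1.0, -1.0]])
penalty = tf.reduce_mean(tf.abs(real_f - fake_f), axis=1) * 20.0  # fingerprint_weight

with tf.Session() as sess:
    print(sess.run(penalty))  # [15. 20.] -- one scalar per sample, added to the critic loss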
Example #16
def D_blend_wgangp(
    E_zg,
    E_zl,
    G_fcn,
    D_blend,
    D_blend_opt,
    minibatch_size,
    reals_fade,
    reals_orig,
    permutation_matrix_h_forward,
    permutation_matrix_w_forward,
    permutation_matrix_h_backward,
    permutation_matrix_w_backward,
    scale_h,  # Height scale of interpolated size
    scale_w,  # Width scale of interpolated size
    zg_interp_variational,  # Enable hard or variational or learned or random zg interpolation?
    zl_interp_variational,  # Enable hard or variational or learned or random zl interpolation?
    wgan_lambda=10.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=1.0):  # Target value for gradient magnitudes.

    # zg encoding
    enc_zg_mu, enc_zg_log_sigma = E_zg.get_output_for(reals_orig)
    if config.zg_enabled:
        enc_zg_latents = tf.identity(enc_zg_mu)
    else:
        enc_zg_latents = tf.zeros(tf.shape(enc_zg_mu))

    # zl encoding
    enc_zl_mu, enc_zl_log_sigma = E_zl.get_output_for(reals_orig)
    enc_zl_latents = tf.identity(enc_zl_mu)

    # interpolating in latent space
    if zg_interp_variational == 'hard':
        interp_enc_zg_latents = tf.tile(enc_zg_latents, [
            1, 1, E_zl.output_shapes[0][2] * scale_h,
            E_zl.output_shapes[0][3] * scale_w
        ])
    elif zg_interp_variational == 'variational':
        interp_enc_zg_latents = tf.random_normal([minibatch_size] +
                                                 E_zg.output_shapes[0][1:])
        interp_enc_zg_latents = interp_enc_zg_latents * tf.exp(
            enc_zg_log_sigma) + enc_zg_mu
        interp_enc_zg_latents = tf.tile(interp_enc_zg_latents, [
            1, 1, E_zl.output_shapes[0][2] * scale_h,
            E_zl.output_shapes[0][3] * scale_w
        ])
    if zl_interp_variational == 'hard':
        interp_enc_zl_latents = tf.tile(enc_zl_latents,
                                        [1, 1, scale_h, scale_w])
    elif zl_interp_variational == 'variational':
        interp_enc_zl_mu = tf.tile(enc_zl_mu, [1, 1, scale_h, scale_w])
        interp_enc_zl_log_sigma = tf.tile(enc_zl_log_sigma,
                                          [1, 1, scale_h, scale_w])
        interp_enc_zl_latents = tf.random_normal(
            [minibatch_size] + G_fcn.input_shapes[1][1:])  # zl input shape, as in the reverse-order branch
        interp_enc_zl_latents = interp_enc_zl_latents * tf.exp(
            interp_enc_zl_log_sigma) + interp_enc_zl_mu
    elif zl_interp_variational == 'random':
        # Place the encoded zl latents at the four corners of the enlarged
        # latent canvas and fill the remainder with fresh noise. Shapes follow
        # G_fcn's zl input (input_shapes[1]), matching the reverse-order
        # branch below.
        interp_enc_zl_latents_1 = tf.concat([
            enc_zl_latents,
            tf.random_normal([
                minibatch_size, G_fcn.input_shapes[1][1],
                E_zl.output_shapes[0][2],
                G_fcn.input_shapes[1][3] - 2 * E_zl.output_shapes[0][3]
            ]),
            enc_zl_latents
        ], axis=3)
        interp_enc_zl_latents_2 = tf.random_normal([
            minibatch_size, G_fcn.input_shapes[1][1],
            G_fcn.input_shapes[1][2] - 2 * E_zl.output_shapes[0][2],
            G_fcn.input_shapes[1][3]
        ])
        interp_enc_zl_latents_3 = tf.concat([
            enc_zl_latents,
            tf.random_normal([
                minibatch_size, G_fcn.input_shapes[1][1],
                E_zl.output_shapes[0][2],
                G_fcn.input_shapes[1][3] - 2 * E_zl.output_shapes[0][3]
            ]),
            enc_zl_latents
        ], axis=3)
        interp_enc_zl_latents = tf.concat([
            interp_enc_zl_latents_1, interp_enc_zl_latents_2,
            interp_enc_zl_latents_3
        ], axis=2)
    elif zl_interp_variational == 'permutational':
        interp_enc_zl_latents = tiling_permutation(
            enc_zl_latents, scale_h, scale_w, permutation_matrix_h_forward,
            permutation_matrix_w_forward)

    # interpolating in latent space in reverse order
    if zg_interp_variational == 'hard':
        interp_enc_zg_latents_reverse = tf.tile(
            tf.reverse(enc_zg_latents, axis=[0]), [
                1, 1, E_zl.output_shapes[0][2] * scale_h,
                E_zl.output_shapes[0][3] * scale_w
            ])
    elif zg_interp_variational == 'variational':
        interp_enc_zg_latents_reverse = tf.random_normal(
            [minibatch_size] + E_zg.output_shapes[0][1:])
        interp_enc_zg_latents_reverse = interp_enc_zg_latents_reverse * tf.exp(
            tf.reverse(enc_zg_log_sigma, axis=[0])) + tf.reverse(enc_zg_mu,
                                                                 axis=[0])
        interp_enc_zg_latents_reverse = tf.tile(
            interp_enc_zg_latents_reverse, [
                1, 1, E_zl.output_shapes[0][2] * scale_h,
                E_zl.output_shapes[0][3] * scale_w
            ])
    if zl_interp_variational == 'hard':
        interp_enc_zl_latents_reverse = tf.tile(
            tf.reverse(enc_zl_latents, axis=[0]), [1, 1, scale_h, scale_w])
    elif zl_interp_variational == 'variational':
        interp_enc_zl_mu_reverse = tf.tile(tf.reverse(enc_zl_mu, axis=[0]),
                                           [1, 1, scale_h, scale_w])
        interp_enc_zl_log_sigma_reverse = tf.tile(
            tf.reverse(enc_zl_log_sigma, axis=[0]), [1, 1, scale_h, scale_w])
        interp_enc_zl_latents_reverse = tf.random_normal(
            [minibatch_size] + G_fcn.input_shapes[1][1:])
        interp_enc_zl_latents_reverse = interp_enc_zl_latents_reverse * tf.exp(
            interp_enc_zl_log_sigma_reverse) + interp_enc_zl_mu_reverse
    elif zl_interp_variational == 'random':
        interp_enc_zl_latents_1_reverse = tf.concat([
            tf.reverse(enc_zl_latents, axis=[0]),
            tf.random_normal([
                minibatch_size, G_fcn.input_shapes[1][1],
                E_zl.output_shapes[0][2],
                G_fcn.input_shapes[1][3] - 2 * E_zl.output_shapes[0][3]
            ]),
            tf.reverse(enc_zl_latents, axis=[0])
        ], axis=3)
        interp_enc_zl_latents_2_reverse = tf.random_normal([
            minibatch_size, G_fcn.input_shapes[1][1],
            G_fcn.input_shapes[1][2] - 2 * E_zl.output_shapes[0][2],
            G_fcn.input_shapes[1][3]
        ])
        interp_enc_zl_latents_3_reverse = tf.concat([
            tf.reverse(enc_zl_latents, axis=[0]),
            tf.random_normal([
                minibatch_size, G_fcn.input_shapes[1][1],
                E_zl.output_shapes[0][2],
                G_fcn.input_shapes[1][3] - 2 * E_zl.output_shapes[0][3]
            ]),
            tf.reverse(enc_zl_latents, axis=[0])
        ], axis=3)
        interp_enc_zl_latents_reverse = tf.concat([
            interp_enc_zl_latents_1_reverse, interp_enc_zl_latents_2_reverse,
            interp_enc_zl_latents_3_reverse
        ], axis=2)
    elif zl_interp_variational == 'permutational':
        interp_enc_zl_latents_reverse = tiling_permutation(
            tf.reverse(enc_zl_latents, axis=[0]), scale_h, scale_w,
            permutation_matrix_h_backward, permutation_matrix_w_backward)
    mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                       0.0,
                                       1.0,
                                       dtype=enc_zg_latents.dtype)
    blend_interp_enc_zg_latents = tfutil.lerp(interp_enc_zg_latents_reverse,
                                              interp_enc_zg_latents,
                                              mixing_factors)
    blend_interp_enc_zl_latents = tfutil.lerp(interp_enc_zl_latents_reverse,
                                              interp_enc_zl_latents,
                                              mixing_factors)

    # generating and cropping
    blend_interp_images_out = G_fcn.get_output_for(
        blend_interp_enc_zg_latents, blend_interp_enc_zl_latents)
    crop_blend_interp_images_out = random_crop(blend_interp_images_out,
                                               minibatch_size,
                                               G_fcn.output_shape,
                                               D_blend.input_shape)

    # interpolated realism
    crop_blend_interp_scores_out = fp32(
        D_blend.get_output_for(crop_blend_interp_images_out))
    real_scores_out = fp32(D_blend.get_output_for(reals_fade))
    crop_blend_interp_D_loss = tf.reduce_mean(crop_blend_interp_scores_out -
                                              real_scores_out,
                                              axis=[1, 2, 3])
    crop_blend_interp_D_loss = tfutil.autosummary(
        'Loss/crop_blend_interp_D_loss', crop_blend_interp_D_loss)
    loss = tf.identity(crop_blend_interp_D_loss)

    with tf.name_scope('blend_GradientPenalty'):
        mixing_factors = tf.random_uniform(
            [minibatch_size, 1, 1, 1],
            0.0,
            1.0,
            dtype=crop_blend_interp_images_out.dtype)
        mixed_images_out = tfutil.lerp(
            tf.cast(reals_fade, crop_blend_interp_images_out.dtype),
            crop_blend_interp_images_out, mixing_factors)
        mixed_scores_out = fp32(D_blend.get_output_for(mixed_images_out))
        mixed_loss = D_blend_opt.apply_loss_scaling(
            tf.reduce_sum(mixed_scores_out))
        mixed_grads = D_blend_opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        crop_blend_interp_gradient_penalty = tf.square(mixed_norms -
                                                       wgan_target)
        crop_blend_interp_gradient_penalty *= (wgan_lambda / (wgan_target**2))
        crop_blend_interp_gradient_penalty = tfutil.autosummary(
            'Loss/crop_blend_interp_gradient_penalty',
            crop_blend_interp_gradient_penalty)
    loss += crop_blend_interp_gradient_penalty

    with tf.name_scope('blend_EpsilonPenalty'):
        crop_blend_interp_epsilon_penalty = tf.reduce_mean(
            tf.square(real_scores_out), axis=[1, 2, 3]) * wgan_epsilon
        crop_blend_interp_epsilon_penalty = tfutil.autosummary(
            'Loss/crop_blend_interp_epsilon_penalty',
            crop_blend_interp_epsilon_penalty)
    loss += crop_blend_interp_epsilon_penalty

    return loss
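
random_crop is defined elsewhere in the project; a minimal sketch of the idea, assuming NCHW tensors and the shape convention [N?, C, H, W] used above (the real helper may differ, e.g. by cropping a different window per sample):

import tensorflow as tf

def random_crop(images, minibatch_size, src_shape, dst_shape):
    # Crop a random [C, h, w] window out of the larger generated canvas so
    # D_blend scores patches rather than the full interpolated image.
    # Note: tf.random_crop draws a single offset for the whole batch,
    # which is a simplification.
    size = [minibatch_size, dst_shape[1], dst_shape[2], dst_shape[3]]
    return tf.random_crop(images, size)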
Example #17
def D_rec_wgangp(
    E_zg,
    E_zl,
    G,
    D_rec,
    D_rec_opt,
    minibatch_size,
    reals_fade,
    reals_orig,
    wgan_lambda=10.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=1.0):  # Target value for gradient magnitudes.

    # zg encoding
    enc_zg_mu, enc_zg_log_sigma = E_zg.get_output_for(reals_orig)
    if config.zg_enabled:
        enc_zg_latents = tf.identity(enc_zg_mu)
    else:
        enc_zg_latents = tf.zeros(tf.shape(enc_zg_mu))

    # zl encoding
    enc_zl_mu, enc_zl_log_sigma = E_zl.get_output_for(reals_orig)
    enc_zl_latents = tf.identity(enc_zl_mu)

    # reconstructed realism
    rec_images_out = G.get_output_for(
        tf.tile(enc_zg_latents, [1, 1] + E_zl.output_shapes[0][2:]),
        enc_zl_latents)
    rec_scores_out = fp32(D_rec.get_output_for(rec_images_out))
    real_scores_out = fp32(D_rec.get_output_for(reals_fade))
    rec_D_loss = tf.reduce_mean(rec_scores_out - real_scores_out,
                                axis=[1, 2, 3])
    rec_D_loss = tfutil.autosummary('Loss/rec_D_loss', rec_D_loss)
    loss = tf.identity(rec_D_loss)

    # gradient penalty
    with tf.name_scope('rec_GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=rec_images_out.dtype)
        mixed_images_out = tfutil.lerp(
            tf.cast(reals_fade, rec_images_out.dtype), rec_images_out,
            mixing_factors)
        mixed_scores_out = fp32(D_rec.get_output_for(mixed_images_out))
        mixed_loss = D_rec_opt.apply_loss_scaling(
            tf.reduce_sum(mixed_scores_out))
        mixed_grads = D_rec_opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        rec_gradient_penalty = tf.square(mixed_norms - wgan_target)
        rec_gradient_penalty *= (wgan_lambda / (wgan_target**2))
        rec_gradient_penalty = tfutil.autosummary('Loss/rec_gradient_penalty',
                                                  rec_gradient_penalty)
    loss += rec_gradient_penalty

    # calibration penalty
    with tf.name_scope('rec_EpsilonPenalty'):
        rec_epsilon_penalty = tf.reduce_mean(tf.square(real_scores_out),
                                             axis=[1, 2, 3]) * wgan_epsilon
        rec_epsilon_penalty = tfutil.autosummary('Loss/rec_epsilon_penalty',
                                                 rec_epsilon_penalty)
    loss += rec_epsilon_penalty

    return loss
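
The gradient-penalty blocks in these losses all follow the same pattern: scale the summed critic output, differentiate with respect to the mixed images, undo the scaling, and penalise the per-sample gradient norm. The loss scaling cancels out mathematically (it only guards against fp16 underflow), so a toy check can omit it. For f(x) = 3*sum(x) over 1x4x4 inputs, each per-sample gradient entry is 3, so the norm is sqrt(16*9) = 12:

import tensorflow as tf

x = tf.ones([2, 1, 4, 4])                 # two dummy "mixed images"
score = 3.0 * tf.reduce_sum(x)            # critic with constant gradient 3
grads = tf.gradients(score, [x])[0]
norms = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))
penalty = tf.square(norms - 1.0)          # wgan_target = 1.0

with tf.Session() as sess:
    print(sess.run(penalty))              # [121. 121.] since (12 - 1)^2 = 121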
Example #18
def D_lo_gan(
    G,
    D,
    E,
    opt,
    training_set,
    minibatch_size,
    reals,
    labels,
    wgan_lambda=10.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=1.0,  # Target value for gradient magnitudes.
    cond_weight=1.0):  # Weight of the conditioning terms.

    Z = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    X = reals
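    # Note: the incoming `labels` argument is overwritten with freshly sampled labels.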
    labels = training_set.get_random_labels_tf(minibatch_size)

    G_out = G.get_output_for(Z, labels, is_training=True)
    L = E.get_output_for(X, is_training=True)

    # Critic scores for the (fake image, latent) and (real image, encoding) pairs.
    fake_scores_out, _ = fp32(D.get_output_for(G_out, Z, is_training=True))
    real_scores_out, _ = fp32(D.get_output_for(X, L, is_training=True))

    X_hat = G.get_output_for(L, labels, is_training=True)  # reconstruction from the encoded latents

    loss = tf.square(tf.nn.sigmoid(fake_scores_out)) + tf.square(
        1 - tf.nn.sigmoid(real_scores_out))
    loss = tf.reduce_mean(loss)

    fake_images_out = X_hat
    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype),
                                       fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out = fp32(
            D.get_output_for(mixed_images_out, L, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores',
                                              mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty',
                                             tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    #     if D.output_shapes[1][1] > 0:
    #         with tf.name_scope('LabelPenalty'):
    #             label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=real_labels_out)
    #             label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=fake_labels_out)
    #             label_penalty_reals = tfutil.autosummary('Loss/label_penalty_reals', label_penalty_reals)
    #             label_penalty_fakes = tfutil.autosummary('Loss/label_penalty_fakes', label_penalty_fakes)
    #         loss += (label_penalty_reals + label_penalty_fakes) * cond_weight

    return loss
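
Unlike the WGAN losses above, the D_lo_gan scores pass through a sigmoid and are penalised quadratically: sigmoid(fake_score)^2 pushes fake scores down while (1 - sigmoid(real_score))^2 pushes real scores up. A toy evaluation at the undecided point (score 0, sigmoid 0.5) gives 0.25 + 0.25:

import tensorflow as tf

fake_score = tf.constant([0.0])
real_score = tf.constant([0.0])
loss = tf.square(tf.nn.sigmoid(fake_score)) + tf.square(1.0 - tf.nn.sigmoid(real_score))

with tf.Session() as sess:
    print(sess.run(loss))  # [0.5]; driving fake scores down and real scores up reduces it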
Example #19
def D_wgangp_acgan(
        G,
        D,
        opt,
        minibatch_size,
        reals,
        labels,
        well_facies,
        prob_images,
        wgan_lambda=10.0,  # Weight for the gradient penalty term.
        wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
        wgan_target=1.0,  # Target value for gradient magnitudes.
        label_weight=10,  # Weight of the conditioning terms.
        batch_multiplier=1):

    latents = tf.random_normal([minibatch_size * batch_multiplier] +
                               G.input_shapes[0][1:])
    prob_images = tf.reshape(
        tf.tile(tf.expand_dims(prob_images, 1),
                [1, batch_multiplier, 1, 1, 1]),
        ([-1] + G.input_shapes[3][1:]))
    well_facies = tf.reshape(
        tf.tile(tf.expand_dims(well_facies, 1),
                [1, batch_multiplier, 1, 1, 1]),
        ([-1] + G.input_shapes[2][1:]))
    labels = tf.reshape(
        tf.tile(tf.expand_dims(labels, 1), [1, batch_multiplier, 1]),
        ([-1] + G.input_shapes[1][1:]))

    fake_images_out = G.get_output_for(latents,
                                       labels,
                                       well_facies,
                                       prob_images,
                                       is_training=True)

    reals = tf.reshape(
        tf.tile(tf.expand_dims(reals, 1), [1, batch_multiplier, 1, 1, 1]),
        ([-1] + G.input_shapes[3][1:]))
    real_scores_out, real_labels_out = fp32(
        D.get_output_for(reals, is_training=True))
    fake_scores_out, fake_labels_out = fp32(
        D.get_output_for(fake_images_out, is_training=True))
    real_scores_out = tfutil.autosummary('Loss_D/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss_D/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform(
            [minibatch_size * batch_multiplier, 1, 1, 1],
            0.0,
            1.0,
            dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype),
                                       fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out = fp32(
            D.get_output_for(mixed_images_out, is_training=True))
        #mixed_scores_out = tfutil.autosummary('Loss/mixed_scores', mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
    loss = tfutil.autosummary('Loss_D/WGAN_GP_loss', loss)

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss_D/epsilon_penalty',
                                             tf.square(real_scores_out))
        loss += epsilon_penalty * wgan_epsilon

    with tf.name_scope('LabelPenalty'):
        label_penalty_reals = tf.nn.l2_loss(labels - real_labels_out)
        label_penalty_fakes = tf.nn.l2_loss(labels - fake_labels_out)
        label_penalty_reals = tfutil.autosummary('Loss_D/label_penalty_reals',
                                                 label_penalty_reals)
        label_penalty_fakes = tfutil.autosummary('Loss_D/label_penalty_fakes',
                                                 label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * label_weight
        loss = tfutil.autosummary('Loss_D/Total_loss', loss)
    return loss
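
The batch_multiplier reshaping above repeats each conditioning tensor so that several latent draws share the same well facies and probability maps. A small shape check with a toy 2x1x2x2 conditioning tensor and multiplier 3:

import numpy as np
import tensorflow as tf

batch_multiplier = 3
cond = tf.constant(np.arange(8, dtype=np.float32).reshape(2, 1, 2, 2))  # [N, C, H, W]
tiled = tf.reshape(
    tf.tile(tf.expand_dims(cond, 1), [1, batch_multiplier, 1, 1, 1]),
    [-1, 1, 2, 2])

with tf.Session() as sess:
    print(sess.run(tiled).shape)  # (6, 1, 2, 2): rows 0-2 repeat cond[0], rows 3-5 cond[1]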
Example #20
def D_swgan(
    G,
    D,
    R,
    opt,
    training_set,
    minibatch_size,
    reals,
    labels,
    wgan_lambda=10.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=1.0,  # Target value for gradient magnitudes.
    cond_weight=1.0):  # Weight of the conditioning terms.
    #wgan_target = 0.0
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out, real_labels_out = fp32(
        D.get_output_for(reals, is_training=True))
    fake_scores_out, fake_labels_out = fp32(
        D.get_output_for(fake_images_out, is_training=True))
    real_proj_out, _ = R.get_output_for(real_scores_out)
    real_proj_out = tf.reduce_mean(real_proj_out, [1])
    fake_proj_out, _ = R.get_output_for(fake_scores_out)
    fake_proj_out = tf.reduce_mean(fake_proj_out, [1])
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = real_proj_out - fake_proj_out
    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype),
                                       fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out = fp32(
            D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores',
                                              mixed_scores_out)
        mixed_proj_out, _ = R.get_output_for(mixed_scores_out)
        mixed_proj_out = tf.reduce_mean(mixed_proj_out, [1])
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_proj_out))
        mixed_grads = (1 - mixing_factors) * opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3, 4]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms)
    with tf.name_scope('ProjectionPenalty'):
        mixing_factors2 = tf.random_uniform([minibatch_size, 1],
                                            0.0,
                                            1.0,
                                            dtype=fake_images_out.dtype)
        mixed_features = (1 - mixing_factors2) * real_scores_out \
            + mixing_factors2 * fake_scores_out
        mixed_feat_proj, scale = R.get_output_for(mixed_features)
        features_gradients = tf.multiply(
            scale, 0.1 * (tf.abs(mixed_feat_proj) - mixed_feat_proj) + 0.5 *
            (tf.abs(mixed_feat_proj) + mixed_feat_proj))
        features_penalty = tf.reduce_mean(tf.square(features_gradients - 0.01))
    loss += (5 * gradient_penalty + features_penalty) * (wgan_lambda /
                                                         (wgan_target**2))
    return loss
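
Note that D_swgan penalises tf.square(mixed_norms) directly, a zero-target variant of the gradient penalty, rather than the usual (||grad|| - wgan_target)^2 used elsewhere in this file. Side by side:

import tensorflow as tf

norms = tf.constant([0.0, 0.5, 1.0, 2.0])
one_centered = tf.square(norms - 1.0)  # standard WGAN-GP: optimum at ||grad|| = 1
zero_centered = tf.square(norms)       # D_swgan variant: optimum at ||grad|| = 0

with tf.Session() as sess:
    print(sess.run([one_centered, zero_centered]))
    # [1. 0.25 0. 1.] vs [0. 0.25 1. 4.]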
Example #21
def D_wgangp_acgan_can(
        G,
        D,
        opt,
        training_set,
        minibatch_size,
        reals,
        labels,
        wgan_lambda=10.0,  # Weight for the gradient penalty term.
        wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
        wgan_target=1.0,  # Target value for gradient magnitudes.
        cond_weight=1.0,  # Weight of the conditioning terms.
        can_level=0,  # Weight of the CAN creativity term.
        smoothing=0.9):  # Label-smoothing factor for the cross-entropy targets.

    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out, real_labels_out, real_class_out, real_class_logits = fp32(
        D.get_output_for(reals, is_training=True))
    fake_scores_out, fake_labels_out, fake_class_out, fake_class_logits = fp32(
        D.get_output_for(fake_images_out, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    d_loss_class_real = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(logits=real_class_logits,
                                                   labels=smoothing * labels))
    loss += d_loss_class_real * can_level

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype),
                                       fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out, mixed_class_out, mixed_class_logits = fp32(
            D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores',
                                              mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty',
                                             tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=real_labels_out)
            label_penalty_fakes = tf.nn.softmax_cross_entropy_with_logits_v2(
                labels=labels, logits=fake_labels_out)
            label_penalty_reals = tfutil.autosummary(
                'Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary(
                'Loss/label_penalty_fakes', label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight

    return loss
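
The CAN term feeds smoothing * labels as the cross-entropy target; since softmax_cross_entropy_with_logits_v2 is linear in the labels, this simply scales the real-class penalty by the smoothing factor:

import tensorflow as tf

labels = tf.constant([[0.0, 1.0, 0.0]])
logits = tf.constant([[0.0, 2.0, 0.0]])
hard = tf.nn.softmax_cross_entropy_with_logits_v2(labels=labels, logits=logits)
soft = tf.nn.softmax_cross_entropy_with_logits_v2(labels=0.9 * labels, logits=logits)

with tf.Session() as sess:
    h, s = sess.run([hard, soft])
    print(h, s, s / h)  # the smoothed penalty is exactly 0.9x the hard one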
Example #22
def D_wgangp_acgan_weighted(
    G,
    D,
    opt,
    training_set,
    minibatch_size,
    reals,
    labels,
    turn,
    turn_threshold_for_label=0,  # Turn after which the label penalty would apply (see commented line below).
    wgan_lambda=10.0,  # Weight for the gradient penalty term.
    wgan_epsilon=0.001,  # Weight for the epsilon term, \epsilon_{drift}.
    wgan_target=1.0,  # Target value for gradient magnitudes.
    cond_weight=1.0):  # Weight of the conditioning terms.

    #cond_weight = 6 ##
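    # Ramp the conditioning weight up in steps as training progresses (by `turn`):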
    cond_weight = tf.cond(tf.greater_equal(turn, 10), lambda: 2.,
                          lambda: cond_weight)
    cond_weight = tf.cond(tf.greater_equal(turn, 15), lambda: 3.,
                          lambda: cond_weight)
    cond_weight = tf.cond(tf.greater_equal(turn, 20), lambda: 4.,
                          lambda: cond_weight)
    cond_weight = tf.cond(tf.greater_equal(turn, 25), lambda: 5.,
                          lambda: cond_weight)
    cond_weight = tf.cond(tf.greater_equal(turn, 30), lambda: 6.,
                          lambda: cond_weight)
    latents = tf.random_normal([minibatch_size] + G.input_shapes[0][1:])
    fake_images_out = G.get_output_for(latents, labels, is_training=True)
    real_scores_out, real_labels_out = fp32(
        D.get_output_for(reals, is_training=True))
    fake_scores_out, fake_labels_out = fp32(
        D.get_output_for(fake_images_out, is_training=True))
    real_scores_out = tfutil.autosummary('Loss/real_scores', real_scores_out)
    fake_scores_out = tfutil.autosummary('Loss/fake_scores', fake_scores_out)
    loss = fake_scores_out - real_scores_out

    with tf.name_scope('GradientPenalty'):
        mixing_factors = tf.random_uniform([minibatch_size, 1, 1, 1],
                                           0.0,
                                           1.0,
                                           dtype=fake_images_out.dtype)
        mixed_images_out = tfutil.lerp(tf.cast(reals, fake_images_out.dtype),
                                       fake_images_out, mixing_factors)
        mixed_scores_out, mixed_labels_out = fp32(
            D.get_output_for(mixed_images_out, is_training=True))
        mixed_scores_out = tfutil.autosummary('Loss/mixed_scores',
                                              mixed_scores_out)
        mixed_loss = opt.apply_loss_scaling(tf.reduce_sum(mixed_scores_out))
        mixed_grads = opt.undo_loss_scaling(
            fp32(tf.gradients(mixed_loss, [mixed_images_out])[0]))
        mixed_norms = tf.sqrt(
            tf.reduce_sum(tf.square(mixed_grads), axis=[1, 2, 3]))
        mixed_norms = tfutil.autosummary('Loss/mixed_norms', mixed_norms)
        gradient_penalty = tf.square(mixed_norms - wgan_target)
    loss += gradient_penalty * (wgan_lambda / (wgan_target**2))
    loss = tfutil.autosummary('Loss/old_loss', loss)

    with tf.name_scope('EpsilonPenalty'):
        epsilon_penalty = tfutil.autosummary('Loss/epsilon_penalty',
                                             tf.square(real_scores_out))
    loss += epsilon_penalty * wgan_epsilon

    if D.output_shapes[1][1] > 0:
        with tf.name_scope('LabelPenalty'):
            label_penalty_reals = tf.nn.weighted_cross_entropy_with_logits(
                targets=labels, logits=real_labels_out, pos_weight=2)
            label_penalty_reals = tf.reduce_mean(label_penalty_reals, 1)
            #label_penalty_reals = label_penalty_reals[:,20]
            label_penalty_fakes = tf.nn.weighted_cross_entropy_with_logits(
                targets=labels, logits=fake_labels_out, pos_weight=2)
            label_penalty_fakes = tf.reduce_mean(label_penalty_fakes, 1)
            #label_penalty_fakes = label_penalty_fakes[:,20]
            label_penalty_reals = tfutil.autosummary(
                'Loss/label_penalty_reals', label_penalty_reals)
            label_penalty_fakes = tfutil.autosummary(
                'Loss/label_penalty_fakes', label_penalty_fakes)
        loss += (label_penalty_reals + label_penalty_fakes) * cond_weight
        #loss = tf.cond(tf.greater_equal(turn,turn_threshold_for_label), lambda: loss + (label_penalty_reals + label_penalty_fakes) * cond_weight, lambda: loss) ####
        loss = tfutil.autosummary('Loss/new_loss', loss)

    return loss
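
The tf.cond chain above implements a step schedule for cond_weight. A compact, hypothetical rewrite of the same schedule (ramp_cond_weight is not part of the original code; the lambda default arguments pin each loop iteration's values):

import tensorflow as tf

def ramp_cond_weight(turn, base=1.0):
    # Step schedule: evaluates to the largest weight whose `turn`
    # threshold has been reached, else `base`.
    w = tf.constant(base)
    for threshold, value in [(10, 2.0), (15, 3.0), (20, 4.0), (25, 5.0), (30, 6.0)]:
        w = tf.cond(tf.greater_equal(turn, threshold),
                    lambda v=value: tf.constant(v),
                    lambda w=w: w)
    return w

turn = tf.placeholder(tf.int32, [])
weight = ramp_cond_weight(turn)
with tf.Session() as sess:
    print([sess.run(weight, {turn: t}) for t in (0, 12, 17, 40)])  # [1.0, 2.0, 3.0, 6.0]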