Example #1
import tensorflow as tf

def gradient_penalty(inter_data, theta):
    # Critic output (logit) evaluated at the interpolated points
    _, inter = mlp(inter_data, theta)
    # Gradient of the critic output with respect to its input
    gradients = tf.gradients([inter], [inter_data])[0]
    # Per-sample RMS of the gradient components (a scaled gradient norm)
    slopes = tf.sqrt(tf.reduce_mean(tf.square(gradients), axis=[1]))
    # Penalize deviation of the slope from 1, as in WGAN-GP
    gradient_penalty = tf.reduce_mean((slopes - 1) ** 2)
    return gradient_penalty
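
In WGAN-GP (Gulrajani et al., 2017), this penalty is evaluated at points sampled on the straight lines between real and generated data. A minimal usage sketch, assuming a real batch X, a generated batch _X_prob, and critic parameters theta_D from the surrounding script; D_loss_w is a hypothetical name for the Wasserstein critic loss, and the weight of 10 is the paper's default:

# Random point on the line between each real and generated sample
eps = tf.random_uniform([tf.shape(X)[0], 1], minval=0., maxval=1.)
X_inter = eps * X + (1. - eps) * _X_prob

# Add the penalty to the critic loss with the usual weight of 10
D_loss = D_loss_w + 10. * gradient_penalty(X_inter, theta_D)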
Example #2
     var] = normalize_toydata(toydata, testcase, var)

    # ------------------ Network architecture ------------------------------
    X = tf.placeholder(tf.float32, shape=[None, X_dim])
    z = tf.placeholder(tf.float32, shape=[None, z_dim])
    """ Generator: G(z) """
    theta_G = theta_def([z_dim, 128, 128, 128, X_dim])
    """ Encoder: E(X) """
    theta_E = theta_def([X_dim, 128, 128, 128, z_dim])
    """ Discriminator: D(x) """
    theta_D = theta_def([X_dim, 128, 128, 128, 1])

    # ------------------ Setup criteria/loss functions ---------------------

    # Encode and decode data
    _, _ze = mlp(X, theta_E)
    _Xr_prob, _Xr_logit = mlp(_ze, theta_G)

    # Latent interpolation: convex combination of the random code z
    # and the encoded code E(X), here with equal weights (alpha = 0.5)
    alpha = 0.5
    zi = (1. - alpha) * z + alpha * _ze

    # Sample from random z
    _X_prob, _X_logit = mlp(z, theta_G)
    _Xi_prob, _Xi_logit = mlp(zi, theta_G)

    D_real, D_real_logit, f_real = mlp_feat(X, theta_D)
    D_recon, D_recon_logit, f_recon = mlp_feat(_Xr_prob, theta_D)
    D_fake, D_fake_logit, f_fake = mlp_feat(_X_prob, theta_D)
    D_inter, D_inter_logit, f_inter = mlp_feat(_Xi_prob, theta_D)
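
The snippet relies on the repo-local helpers theta_def, mlp, and mlp_feat, whose definitions are not shown. Below is a minimal sketch consistent with how they are called above: theta_def builds a flat list of (W, b) variables from a list of layer widths, mlp returns a sigmoid output together with its pre-activation logit, and mlp_feat additionally exposes the last hidden layer as a feature vector. The internals (ReLU activations, the initializer) are assumptions, not the repository's actual code.

def theta_def(layer_sizes):
    # One (W, b) pair per consecutive pair of layer widths
    theta = []
    for n_in, n_out in zip(layer_sizes[:-1], layer_sizes[1:]):
        theta.append(tf.Variable(tf.random_normal([n_in, n_out], stddev=0.01)))
        theta.append(tf.Variable(tf.zeros([n_out])))
    return theta

def mlp(x, theta):
    # ReLU hidden layers, linear output layer
    h = x
    for i in range(0, len(theta) - 2, 2):
        h = tf.nn.relu(tf.matmul(h, theta[i]) + theta[i + 1])
    logit = tf.matmul(h, theta[-2]) + theta[-1]
    return tf.nn.sigmoid(logit), logit

def mlp_feat(x, theta):
    # Like mlp, but also returns the last hidden layer as a feature vector
    h = x
    for i in range(0, len(theta) - 2, 2):
        h = tf.nn.relu(tf.matmul(h, theta[i]) + theta[i + 1])
    logit = tf.matmul(h, theta[-2]) + theta[-1]
    return tf.nn.sigmoid(logit), logit, h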