Example #1
# The discriminator returns a feature/critic output together with class logits.
disc_real, real_logit = Discriminator(real_data)
disc_fake, fake_logit = Discriminator(fake_data)

# Collect the trainable variables of each sub-network.
gen_params = lib.params_with_name('Generator')
disc_params = lib.params_with_name('Discriminator')

# Auxiliary classification losses: the real batch and the fake batch (presumably
# generated conditioned on the same labels) are both scored against real_label.
class_loss_real = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real_label,
                                                   logits=real_logit))
class_loss_fake = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(labels=real_label,
                                                   logits=fake_logit))

#******************************************
# Mixture-of-RBF-kernels MMD^2 between real and fake discriminator outputs.
bandwidths = [1.0, 2.0, 5.0, 10.0, 20.0, 40.0, 80.0]
kernel_cost = mmd.mix_rbf_mmd2(disc_real, disc_fake, sigmas=bandwidths)

ind_t = tf.placeholder(tf.int32, [11])
cum = tf.placeholder(tf.float32)
# Conditional (per-class) MMD: for each of the 10 classes, gather the
# discriminator outputs of real and fake samples with that label and
# accumulate their MMD^2.
con_kernel_cost = 0
for i in range(10):
    find_index = tf.where(tf.equal(real_label, i))
    Image_c = tf.gather(disc_real, find_index)
    Gimage_c = tf.gather(disc_fake, find_index)
    Image_c_s = tf.reshape(Image_c, [-1, 1])
    Gimage_c_s = tf.reshape(Gimage_c, [-1, 1])
    con_kernel_cost += mmd.mix_rbf_mmd2(Image_c_s,
                                        Gimage_c_s,
                                        sigmas=bandwidths,
                                        id=ind_t[i])
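
The helper mmd.mix_rbf_mmd2 used throughout these examples is not shown. The sketch below is a minimal stand-in, assuming it computes the standard biased MMD^2 estimate under a sum of RBF kernels with the listed bandwidths; the extra id keyword that appears in the snippets is specific to the author's library and is not reproduced here.

import tensorflow as tf

# Minimal sketch of a mixture-of-RBF-kernels MMD^2 estimator (biased estimate).
# This is an assumption about what mmd.mix_rbf_mmd2 computes, not the library code.
def mix_rbf_mmd2_sketch(X, Y, sigmas=(1.0, 2.0, 5.0, 10.0, 20.0, 40.0, 80.0)):
    Z = tf.concat([X, Y], axis=0)                       # stack both sample sets
    sq = tf.reshape(tf.reduce_sum(tf.square(Z), 1), [-1, 1])
    dists = sq - 2.0 * tf.matmul(Z, Z, transpose_b=True) + tf.transpose(sq)
    K = 0.0
    for sigma in sigmas:                                # sum of RBF kernels
        K += tf.exp(-dists / (2.0 * sigma ** 2))
    m = tf.shape(X)[0]
    K_XX, K_XY, K_YY = K[:m, :m], K[:m, m:], K[m:, m:]
    return (tf.reduce_mean(K_XX)
            - 2.0 * tf.reduce_mean(K_XY)
            + tf.reduce_mean(K_YY))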
Example #2
# Predicted class of each fake sample; soft_class_fake comes from code not shown here.
class_fake = tf.argmax(soft_class_fake, 1)

gen_params = lib.params_with_name('Generator')
disc_params = lib.params_with_name('Discriminator')

# Classification losses, here with sigmoid cross-entropy; as in Example #1, both
# the real and the fake logits are scored against real_label.
class_loss_real = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=real_label,
                                            logits=disc_real_logit))
class_loss_fake = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=real_label,
                                            logits=disc_fake_logit))

bandwidths = [1.0, 2.0, 5.0, 10.0, 20.0, 40.0, 80.0]
#bandwidths = [1.0, 2.0, 4.0, 8.0, 16.0]
# MMD^2 between real and fake discriminator outputs; this variant uses the MMD
# itself (the square root) as the cost.
kernel_cost = mmd.mix_rbf_mmd2(disc_real,
                               disc_fake,
                               sigmas=bandwidths,
                               id=BATCH_SIZE)
kernel_cost = tf.sqrt(kernel_cost)
ind_t = tf.placeholder(tf.int32, [11])
# Conditional (per-class) MMD, as in Example #1, but accumulating the square
# root of each class-wise MMD^2 term.
con_kernel_cost = 0
for i in range(10):
    find_index = tf.where(tf.equal(real_label, i))
    Image_c = tf.gather(disc_real, find_index)
    Gimage_c = tf.gather(disc_fake, find_index)
    Image_c_s = tf.reshape(Image_c, [-1, 1])
    Gimage_c_s = tf.reshape(Gimage_c, [-1, 1])
    con_kernel_cost += tf.sqrt(
        mmd.mix_rbf_mmd2(Image_c_s, Gimage_c_s, sigmas=bandwidths,
                         id=ind_t[i]))

# Interpolation coefficients, presumably for a WGAN-GP-style gradient penalty;
# the rest of that block is not shown in this snippet.
alpha = tf.random_uniform(shape=[BATCH_SIZE, 1], minval=0., maxval=1.)
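
One caveat about the tf.sqrt calls in this example: an MMD^2 estimate can be numerically zero or, with an unbiased estimator, slightly negative, and the square root then yields NaNs in the gradients. A common guard, sketched below with an assumed epsilon that is not part of the original code, is to clamp before taking the root.

# Sketch only: clamp the MMD^2 estimate before the square root (1e-8 is assumed).
safe_kernel_cost = tf.sqrt(tf.maximum(kernel_cost, 1e-8))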
Example #3
    # Tail of the discriminator/critic definition: one scalar output per sample.
    return tf.reshape(output, [BATCH_SIZE, 1])


# Real images arrive as 8-bit integers; rescale them from [0, 255] to [-1, 1].
real_data_int = tf.placeholder(tf.int32, shape=[BATCH_SIZE, OUTPUT_DIM])
real_data = 2 * ((tf.cast(real_data_int, tf.float32) / 255.) - .5)
fake_data = Generator(BATCH_SIZE)

disc_real = Discriminator(real_data)
disc_fake = Discriminator(fake_data)

gen_params = lib.params_with_name('Generator')
disc_params = lib.params_with_name('Discriminator')

#******************************************
# Mixture-of-RBF-kernels MMD^2 between real and fake critic outputs.
bandwidths = [2.0, 5.0, 10.0, 20.0, 40.0, 80.0]
kernel_cost = mmd.mix_rbf_mmd2(disc_real, disc_fake, sigmas=bandwidths)

# The generator minimizes the MMD while the discriminator maximizes it.
gen_cost = kernel_cost
disc_cost = -1 * kernel_cost

# gen_cost = -tf.reduce_mean(disc_fake)
# disc_cost = tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real)

# WGAN-GP gradient penalty: push the critic's gradient norm towards 1 on
# random interpolations between real and fake samples.
alpha = tf.random_uniform(shape=[BATCH_SIZE, 1], minval=0., maxval=1.)
differences = fake_data - real_data
interpolates = real_data + (alpha * differences)
gradients = tf.gradients(Discriminator(interpolates), [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1]))
gradient_penalty = tf.reduce_mean((slopes - 1.)**2)
disc_cost += LAMBDA * gradient_penalty
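
The snippet ends with the penalized critic cost; how the two costs are attached to optimizers is not shown. A minimal sketch follows, assuming the Adam settings commonly used with WGAN-GP (the learning rate and betas are assumptions, not taken from the original).

# Minimal sketch, assuming the usual WGAN-GP Adam settings (not from the original).
gen_train_op = tf.train.AdamOptimizer(
    learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(gen_cost,
                                                       var_list=gen_params)
disc_train_op = tf.train.AdamOptimizer(
    learning_rate=1e-4, beta1=0.5, beta2=0.9).minimize(disc_cost,
                                                       var_list=disc_params)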