Example #1
import tensorflow as tf
import tflib as lib
import tflib.ops.conv2d

def UpsampleConv(name, input_dim, output_dim, filter_size, inputs, he_init=True, biases=True):
    output = inputs
    output = lib.concat([output, output, output, output], axis=1)  # replicate channels 4x (NCHW)
    output = tf.transpose(output, [0,2,3,1])  # NCHW -> NHWC, as depth_to_space expects
    output = tf.depth_to_space(output, 2)  # rearrange the 4 copies into a 2x spatial upsample
    output = tf.transpose(output, [0,3,1,2])  # back to NCHW
    output = lib.ops.conv2d.Conv2D(name, input_dim, output_dim, filter_size, output, he_init=he_init, biases=biases)
    return output
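
The concat/transpose/depth_to_space sandwich implements a 2x nearest-neighbor upsample before the convolution: replicating the NCHW channel axis four times and running depth_to_space with block size 2 copies each pixel into a 2x2 block. A minimal self-contained sketch of just that trick, assuming TF 1.x and illustrative shapes:

import numpy as np
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 3, 8, 8])  # NCHW input
up = tf.concat([x, x, x, x], axis=1)             # (N, 12, 8, 8): four copies of each channel
up = tf.transpose(up, [0, 2, 3, 1])              # to NHWC, which depth_to_space expects
up = tf.depth_to_space(up, 2)                    # (N, 16, 16, 3): each pixel becomes a 2x2 block
up = tf.transpose(up, [0, 3, 1, 2])              # back to NCHW

with tf.Session() as sess:
    out = sess.run(up, {x: np.random.rand(1, 3, 8, 8).astype(np.float32)})
    print(out.shape)  # (1, 3, 16, 16)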
Example #2
        # fake_data = tf.concat(fake_data_splits, axis=0)
        # fake_data_splits = tf.split(fake_data, len(DEVICES))

        all_real_data = tf.reshape(
            2 * ((tf.cast(all_real_data_conv, tf.float32) / 255.) - .5),
            [BATCH_SIZE, OUTPUT_DIM])
        all_real_data_splits = tf.split(all_real_data, len(DEVICES) // 2)  # // keeps these ints under Python 3

        DEVICES_B = DEVICES[:len(DEVICES) // 2]
        DEVICES_A = DEVICES[len(DEVICES) // 2:]

        disc_costs = []
        for i, device in enumerate(DEVICES_A):
            with tf.device(device):
                real_and_fake_data = lib.concat(
                    [all_real_data_splits[i]] + [fake_data_splits[i]] +
                    [fake_data_splits[len(DEVICES_A) + i]],
                    axis=0)
                disc_all = Discriminator(real_and_fake_data)
                disc_real = disc_all[:BATCH_SIZE // len(DEVICES_A)]
                disc_fake = disc_all[BATCH_SIZE // len(DEVICES_A):]
                disc_costs.append(
                    tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real))

        for i, device in enumerate(DEVICES_B):
            with tf.device(device):
                real_data = tf.identity(
                    all_real_data_splits[i])  # transfer from gpu0
                fake_data__ = lib.concat(
                    [fake_data_splits[i], fake_data_splits[len(DEVICES_A) + i]],
                    axis=0)
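
A toy NumPy reading of the slicing on each DEVICES_A shard, with hypothetical sizes: the critic scores one real split followed by two fake splits of half that size, so the first BATCH_SIZE // len(DEVICES_A) scores are real and the rest fake, and the per-device critic cost is mean(D(fake)) - mean(D(real)).

import numpy as np

BATCH_SIZE = 64
DEVICES = ['/gpu:0', '/gpu:1', '/gpu:2', '/gpu:3']   # hypothetical device list
DEVICES_A = DEVICES[len(DEVICES) // 2:]
shard = BATCH_SIZE // len(DEVICES_A)                 # real scores per device
disc_all = np.random.randn(2 * shard)                # [real | fake, fake] critic scores
disc_real, disc_fake = disc_all[:shard], disc_all[shard:]
disc_cost = disc_fake.mean() - disc_real.mean()      # WGAN critic estimate for this shard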
Example #3
                                           minval=0.,
                                           maxval=1. / 128)  # dequantize
        all_real_data_splits = lib.split(all_real_data, len(DEVICES), axis=0)

        DEVICES_B = DEVICES[:len(DEVICES) // 2]
        DEVICES_A = DEVICES[len(DEVICES) // 2:]

        disc_costs = []
        disc_acgan_costs = []
        disc_acgan_accs = []
        disc_acgan_fake_accs = []
        for i, device in enumerate(DEVICES_A):
            with tf.device(device):
                real_and_fake_data = lib.concat([
                    all_real_data_splits[i],
                    all_real_data_splits[len(DEVICES_A) + i],
                    fake_data_splits[i],
                    fake_data_splits[len(DEVICES_A) + i]
                ], axis=0)
                real_and_fake_labels = lib.concat([
                    labels_splits[i],
                    labels_splits[len(DEVICES_A) + i],
                    labels_splits[i],
                    labels_splits[len(DEVICES_A) + i]
                ], axis=0)
                disc_all, disc_all_acgan = Discriminator(
                    real_and_fake_data, real_and_fake_labels)
                disc_real = disc_all[:BATCH_SIZE // len(DEVICES_A)]
                disc_fake = disc_all[BATCH_SIZE // len(DEVICES_A):]
                disc_costs.append(
                    tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real))
                if ACGAN:
                    disc_acgan_costs.append(
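
The snippet is cut off inside the append; judging from Example #8 below, the appended term is a sparse softmax cross-entropy between the auxiliary classifier's logits for the real half of the batch and the corresponding labels. A hedged TF 1.x sketch with placeholder shapes (the class count is assumed):

import tensorflow as tf

shard = 32                                            # stands in for BATCH_SIZE // len(DEVICES_A)
disc_all_acgan = tf.placeholder(tf.float32, [2 * shard, 10])  # classifier logits, 10 classes assumed
real_and_fake_labels = tf.placeholder(tf.int32, [2 * shard])
acgan_cost = tf.reduce_mean(
    tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=disc_all_acgan[:shard],
        labels=real_and_fake_labels[:shard]))         # classify the real examples only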
Example #4
        fake_data_splits = []
        for device in DEVICES:
            with tf.device(device):
                fake_data_splits.append(Generator(BATCH_SIZE // len(DEVICES)))

        all_real_data = tf.reshape(2*((tf.cast(all_real_data_int, tf.float32)/255.)-.5), [BATCH_SIZE, OUTPUT_DIM])
        all_real_data_splits = lib.split(all_real_data, len(DEVICES) // 2, axis=0)

        DEVICES_B = DEVICES[:len(DEVICES) // 2]
        DEVICES_A = DEVICES[len(DEVICES) // 2:]

        disc_costs = []
        for i, device in enumerate(DEVICES_A):
            with tf.device(device):
                real_and_fake_data = lib.concat([all_real_data_splits[i]] + [fake_data_splits[i]] + [fake_data_splits[len(DEVICES_A)+i]], axis=0)
                disc_all = Discriminator(real_and_fake_data)
                disc_real = disc_all[:BATCH_SIZE // len(DEVICES_A)]
                disc_fake = disc_all[BATCH_SIZE // len(DEVICES_A):]
                disc_costs.append(tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real))

        for i, device in enumerate(DEVICES_B):
            with tf.device(device):
                real_data = tf.identity(all_real_data_splits[i]) # transfer from gpu0
                fake_data = lib.concat([fake_data_splits[i], fake_data_splits[len(DEVICES_A)+i]], axis=0)
                alpha = tf.random_uniform(
                    shape=[BATCH_SIZE // len(DEVICES_A), 1],
                    minval=0.,
                    maxval=1.
                )
                differences = fake_data - real_data
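
These lines set up the WGAN-GP gradient penalty: alpha interpolates between real and fake samples, and the critic's gradient norm at the interpolates is pushed toward 1. A minimal sketch of the rest of that computation, assuming the standard WGAN-GP formulation this codebase uses, with a toy critic standing in for Discriminator:

import tensorflow as tf

def toy_critic(x):
    # stand-in for Discriminator, just so the sketch runs
    return tf.reduce_sum(tf.square(x), axis=1, keepdims=True)

LAMBDA = 10.                                          # penalty weight from the WGAN-GP paper
real_data = tf.placeholder(tf.float32, [16, 128])
fake_data = tf.placeholder(tf.float32, [16, 128])
alpha = tf.random_uniform(shape=[16, 1], minval=0., maxval=1.)
differences = fake_data - real_data
interpolates = real_data + alpha * differences
gradients = tf.gradients(toy_critic(interpolates), [interpolates])[0]
slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=1))
gradient_penalty = tf.reduce_mean((slopes - 1.) ** 2)
disc_cost = LAMBDA * gradient_penalty                 # added to the Wasserstein estimate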
Example #5
#             outputs.append(ngram_output)
#             features.append(ngram_features)
#     if SETTINGS['rnn_discrim']:
#         outputs.append(output)
#     return tf.concat(0, outputs), language_model_output, features # we apply the sigmoid later

_iteration = tf.placeholder(tf.int32, shape=None)
real_inputs_discrete = tf.placeholder(tf.int32, shape=[BATCH_SIZE, SEQ_LEN])
real_inputs = tf.one_hot(real_inputs_discrete, len(charmap))
fake_inputs, _ = Generator(BATCH_SIZE)
fake_inputs_discrete = tf.argmax(fake_inputs, fake_inputs.get_shape().ndims-1)

disc_real, disc_real_lm, disc_real_features = Discriminator(real_inputs) 
disc_fake, disc_fake_lm, disc_fake_features = Discriminator(fake_inputs)
# disc_out = Discriminator(tf.concat([real_inputs, fake_inputs], 0))[0]
disc_out = Discriminator(lib.concat([real_inputs, fake_inputs], 0))[0]
disc_real = disc_out[:BATCH_SIZE]
disc_fake = disc_out[BATCH_SIZE:]

# Gen objective:  push D(fake) to one
if SETTINGS['feature_matching']:
    gen_costs = [
        tf.reduce_mean(
            (tf.reduce_mean(real_features, reduction_indices=[0]) -
             tf.reduce_mean(fake_features, reduction_indices=[0])) ** 2)
        for real_features, fake_features in zip(disc_real_features,
                                                disc_fake_features)
    ]
    gen_cost = 0.
    for gc in gen_costs:
        gen_cost = gen_cost + gc
elif SETTINGS['wgan']:
    gen_cost = -tf.reduce_mean(
        Discriminator(Generator(GEN_BS_MULTIPLE * BATCH_SIZE)[0])[0])
else:
    if SETTINGS['one_sided_label_smoothing']:
        raise Exception('check this implementation')
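
A toy NumPy reading of the feature-matching cost above, with hypothetical feature shapes: for each discriminator feature map, match the batch-mean feature vectors of real and fake data and sum the squared differences across layers.

import numpy as np

disc_real_features = [np.random.randn(16, 64), np.random.randn(16, 32)]  # per-layer features
disc_fake_features = [np.random.randn(16, 64), np.random.randn(16, 32)]
gen_cost = sum(
    np.mean((real.mean(axis=0) - fake.mean(axis=0)) ** 2)
    for real, fake in zip(disc_real_features, disc_fake_features))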
Example #6
        for device in DEVICES:
            with tf.device(device):
                fake_data_splits.append(Generator(BATCH_SIZE // len(DEVICES)))
        # fake_data = tf.concat(fake_data_splits, axis=0)
        # fake_data_splits = tf.split(fake_data, len(DEVICES))

        all_real_data = tf.reshape(2*((tf.cast(all_real_data_conv, tf.float32)/255.)-.5), [BATCH_SIZE, OUTPUT_DIM])
        all_real_data_splits = tf.split(all_real_data, len(DEVICES) // 2)

        DEVICES_B = DEVICES[:len(DEVICES) // 2]
        DEVICES_A = DEVICES[len(DEVICES) // 2:]

        disc_costs = []
        for i, device in enumerate(DEVICES_A):
            with tf.device(device):
                real_and_fake_data = lib.concat([all_real_data_splits[i]] + [fake_data_splits[i]] + [fake_data_splits[len(DEVICES_A)+i]], axis=0)
                disc_all = Discriminator(real_and_fake_data)
                disc_real = disc_all[:BATCH_SIZE // len(DEVICES_A)]
                disc_fake = disc_all[BATCH_SIZE // len(DEVICES_A):]
                disc_costs.append(tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real))

        for i, device in enumerate(DEVICES_B):
            with tf.device(device):
                real_data = tf.identity(all_real_data_splits[i]) # transfer from gpu0
                fake_data__ = lib.concat([fake_data_splits[i], fake_data_splits[len(DEVICES_A)+i]], axis=0)
                alpha = tf.random_uniform(
                    shape=[BATCH_SIZE // len(DEVICES_A), 1],
                    minval=0.,
                    maxval=1.
                )
                differences = fake_data__ - real_data
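
Why alpha has shape [BATCH_SIZE // len(DEVICES_A), 1] rather than a full matrix: one uniform scalar is drawn per interpolated example and broadcast across all OUTPUT_DIM features. A tiny NumPy check of that broadcasting, with toy sizes:

import numpy as np

n, dim = 4, 8                                    # toy batch and feature sizes
real = np.zeros((n, dim))
fake = np.ones((n, dim))
alpha = np.random.rand(n, 1)                     # one scalar per example
interp = real + alpha * (fake - real)            # broadcasts over the feature axis
assert np.allclose(interp, np.broadcast_to(alpha, (n, dim)))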
Example #7
#             features.append(ngram_features)
#     if SETTINGS['rnn_discrim']:
#         outputs.append(output)
#     return tf.concat(0, outputs), language_model_output, features # we apply the sigmoid later

_iteration = tf.placeholder(tf.int32, shape=None)
real_inputs_discrete = tf.placeholder(tf.int32, shape=[BATCH_SIZE, SEQ_LEN])
real_inputs = tf.one_hot(real_inputs_discrete, len(charmap))
fake_inputs, _ = Generator(BATCH_SIZE)
fake_inputs_discrete = tf.argmax(fake_inputs,
                                 fake_inputs.get_shape().ndims - 1)

disc_real, disc_real_lm, disc_real_features = Discriminator(real_inputs)
disc_fake, disc_fake_lm, disc_fake_features = Discriminator(fake_inputs)
# disc_out = Discriminator(tf.concat([real_inputs, fake_inputs], 0))[0]
disc_out = Discriminator(lib.concat([real_inputs, fake_inputs], 0))[0]
disc_real = disc_out[:BATCH_SIZE]
disc_fake = disc_out[BATCH_SIZE:]

# Gen objective:  push D(fake) to one
if SETTINGS['feature_matching']:
    gen_costs = [
        tf.reduce_mean(
            (tf.reduce_mean(real_features, reduction_indices=[0]) -
             tf.reduce_mean(fake_features, reduction_indices=[0]))**2)
        for real_features, fake_features in zip(disc_real_features,
                                                disc_fake_features)
    ]
    gen_cost = 0.
    for gc in gen_costs:
        gen_cost = gen_cost + gc
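
The discrete/continuous round trip near the top of this example is worth spelling out: real characters enter as one-hot vectors over the character map, while generated distributions are decoded back to indices with an argmax over the last axis. A small TF 1.x sketch with toy sizes (VOCAB stands in for len(charmap)):

import tensorflow as tf

BATCH_SIZE, SEQ_LEN, VOCAB = 2, 5, 4
real_inputs_discrete = tf.placeholder(tf.int32, [BATCH_SIZE, SEQ_LEN])
real_inputs = tf.one_hot(real_inputs_discrete, VOCAB)           # (2, 5, 4) float one-hots
fake_inputs = tf.nn.softmax(tf.random_normal([BATCH_SIZE, SEQ_LEN, VOCAB]))
fake_inputs_discrete = tf.argmax(fake_inputs,
                                 fake_inputs.get_shape().ndims - 1)  # (2, 5) int indices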
Example #8
        all_real_data = tf.reshape(2*((tf.cast(all_real_data_int, tf.float32)/256.)-.5), [BATCH_SIZE, OUTPUT_DIM])
        all_real_data += tf.random_uniform(shape=[BATCH_SIZE,OUTPUT_DIM],minval=0.,maxval=1./128) # dequantize
        all_real_data_splits = lib.split(all_real_data, len(DEVICES), axis=0)

        DEVICES_B = DEVICES[:len(DEVICES) // 2]
        DEVICES_A = DEVICES[len(DEVICES) // 2:]

        disc_costs = []
        disc_acgan_costs = []
        disc_acgan_accs = []
        disc_acgan_fake_accs = []
        for i, device in enumerate(DEVICES_A):
            with tf.device(device):
                real_and_fake_data = lib.concat([
                    all_real_data_splits[i], 
                    all_real_data_splits[len(DEVICES_A)+i], 
                    fake_data_splits[i], 
                    fake_data_splits[len(DEVICES_A)+i]
                ], axis=0)
                real_and_fake_labels = lib.concat([
                    labels_splits[i], 
                    labels_splits[len(DEVICES_A)+i],
                    labels_splits[i],
                    labels_splits[len(DEVICES_A)+i]
                ], axis=0)
                disc_all, disc_all_acgan = Discriminator(real_and_fake_data, real_and_fake_labels)
                disc_real = disc_all[:BATCH_SIZE // len(DEVICES_A)]
                disc_fake = disc_all[BATCH_SIZE // len(DEVICES_A):]
                disc_costs.append(tf.reduce_mean(disc_fake) - tf.reduce_mean(disc_real))
                if ACGAN:
                    disc_acgan_costs.append(tf.reduce_mean(
                        tf.nn.sparse_softmax_cross_entropy_with_logits(
                            logits=disc_all_acgan[:BATCH_SIZE // len(DEVICES_A)],
                            labels=real_and_fake_labels[:BATCH_SIZE // len(DEVICES_A)])
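
Stepping back to the preprocessing at the top of this example: pixels are mapped from uint8 to roughly [-1, 1] with a 256 divisor, then uniform noise of one quantization step (1/128 on this scale) is added to dequantize. A toy NumPy check that the result stays inside [-1, 1]:

import numpy as np

ints = np.random.randint(0, 256, size=(4, 8)).astype(np.float32)   # fake uint8 pixels
real = 2. * ((ints / 256.) - .5)                                   # in [-1, 1 - 1/128]
real += np.random.uniform(0., 1. / 128, size=real.shape)           # dequantize
assert real.min() >= -1. and real.max() <= 1.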