Example #1
def msg_gan(image_inputs, noise_inputs, latent_size, res_building,
            minibatch_size):
    # Multi-scaled input images
    real_inputs = []
    for factor in [2**res for res in range(3, -1, -1)]:
        real_input = layers.downscale2d(image_inputs, factor=factor)
        real_input = layers.upscale2d(real_input, factor=factor)
        real_inputs += [real_input]
    # Define networks
    generator = network.Network('generator',
                                msg_generator,
                                noise_inputs,
                                res_building=res_building,
                                latent_size=latent_size)
    discriminator = network.Network('discriminator',
                                    msg_discriminator,
                                    real_inputs,
                                    res_building=res_building,
                                    latent_size=latent_size)
    # Retrieve network outputs
    fake_images = generator(noise_inputs)
    fake_outputs = discriminator(fake_images)
    real_outputs = discriminator(real_inputs)
    # Losses
    gen_loss, disc_loss = losses.RelativisticAverageBCE(
        real_outputs, fake_outputs)
    disc_loss += losses.GradientPenaltyMSG(discriminator, real_inputs,
                                           fake_images, minibatch_size)
    # disc_loss += losses.EpsilonPenalty(real_outputs)
    return gen_loss, disc_loss, fake_images
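msg_gan only builds the loss tensors; wiring them into training ops is left to the caller. Below is a minimal sketch of that step, assuming Adam optimizers over the 'generator' and 'discriminator' variable scopes created above; the optimizer choice and learning rate are illustrative, not taken from the original code.

import tensorflow as tf

def build_train_ops(gen_loss, disc_loss, learning_rate=1e-3):
    # Collect the trainable variables created under each network's scope.
    gen_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
    disc_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
    # One optimizer per network, each updating only its own variables.
    gen_op = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(
        gen_loss, var_list=gen_vars)
    disc_op = tf.compat.v1.train.AdamOptimizer(learning_rate).minimize(
        disc_loss, var_list=disc_vars)
    return gen_op, disc_op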
Example #2
def pro_gan(image_inputs, noise_inputs, latent_size, res_building,
            res_training, minibatch_size):
    # Define networks
    generator = network.Network('generator',
                                pro_generator,
                                noise_inputs,
                                res_building=res_building,
                                res_training=res_training,
                                latent_size=latent_size)
    discriminator = network.Network('discriminator',
                                    pro_discriminator,
                                    image_inputs,
                                    res_building=res_building,
                                    res_training=res_training,
                                    latent_size=latent_size)
    # Retrieve network outputs
    fake_images = generator(noise_inputs)
    fake_outputs = discriminator(fake_images)
    real_outputs = discriminator(image_inputs)
    # Losses
    gen_loss, disc_loss = losses.RelativisticAverageBCE(
        real_outputs, fake_outputs)
    disc_loss += losses.GradientPenalty(discriminator, image_inputs,
                                        fake_images, minibatch_size)
    disc_loss += losses.EpsilonPenalty(real_outputs)
    return gen_loss, disc_loss, fake_images
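Both examples delegate the adversarial loss to losses.RelativisticAverageBCE, whose implementation is not shown here. As a reference, here is a minimal sketch of the standard relativistic average BCE (RaGAN) formulation it presumably follows; the function name and exact reductions are assumptions.

import tensorflow as tf

def relativistic_average_bce(real_outputs, fake_outputs):
    # Score each sample relative to the average score of the opposite class.
    rf = real_outputs - tf.reduce_mean(fake_outputs)
    fr = fake_outputs - tf.reduce_mean(real_outputs)
    bce = tf.nn.sigmoid_cross_entropy_with_logits
    # Discriminator: push real-relative scores towards 1, fake-relative towards 0.
    disc_loss = (tf.reduce_mean(bce(labels=tf.ones_like(rf), logits=rf)) +
                 tf.reduce_mean(bce(labels=tf.zeros_like(fr), logits=fr)))
    # Generator: the same terms with the labels swapped.
    gen_loss = (tf.reduce_mean(bce(labels=tf.zeros_like(rf), logits=rf)) +
                tf.reduce_mean(bce(labels=tf.ones_like(fr), logits=fr)))
    return gen_loss, disc_loss

Example #6 below recomputes the same rf and fr terms by hand to estimate a win rate for a restored model.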
Example #3
def infer_pro_disc():
    """
    Run inference with the trained discriminator and report the fraction of real images it classifies as real.
    """
    # Graph definition: inputs, model, initializer
    print('Graph building...')
    # Inputs
    image_inputs = tf.compat.v1.placeholder(tf.compat.v1.float32,
                                            shape=[None, 256, 256, 1],
                                            name='image_inputs')
    res_training = tf.compat.v1.placeholder(tf.compat.v1.float32,
                                            shape=[],
                                            name='res_training')
    # Network
    discriminator = network.Network('discriminator',
                                    pro_discriminator,
                                    image_inputs,
                                    res_building=8,
                                    res_training=res_training,
                                    latent_size=latent_size)
    disc_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
    outputs = discriminator(image_inputs)
    outputs = tf.nn.sigmoid(outputs)

    # Initializer
    init = tf.compat.v1.global_variables_initializer()
    print('Done: graph built.')

    inputs = getattr(dataset,
                     config.data_initilize)(**config.data_initilize_kwargs)
    select_minibatch = partial(getattr(dataset, config.data_selector), inputs)

    # Saver
    saver = tf.compat.v1.train.Saver(var_list=disc_vars)

    # Inference --> use the configuration file
    print('Inference...')
    with tf.compat.v1.Session() as sess:
        # Initialize
        init.run()
        # Restore parameters
        saver.restore(sess, restore)
        # Run
        n = 20
        name = 'logs/' + restore[restore.rfind('/') + 1:] + '.npy'
        out_tot = np.load(name)
        win = []

        for s in range(5):
            minibatch = select_minibatch(crt_img=n * s,
                                         res=8,
                                         minibatch_size=n)
            # minibatch = out_tot[n*s:n*(s+1),:,:]
            feed_dict = {image_inputs: minibatch, res_training: 8}
            out = sess.run(outputs, feed_dict)
            win += [out]
        print(np.mean((np.array(win).flatten() > 0.5).astype(np.float32)))
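infer_pro_disc (like the other infer_* helpers in these examples) reads several module-level names rather than taking arguments. A rough sketch of the setup it expects follows; the imports mirror what the code references, while the concrete values and the checkpoint path are purely illustrative.

from functools import partial

import numpy as np
import tensorflow as tf

import config    # project configuration (data_initilize, data_selector, ...)
import dataset   # minibatch selection helpers used via config.data_selector
import network   # Network wrapper; pro_generator / pro_discriminator come from the project's models

latent_size = 256                                   # illustrative latent-vector size
restore = 'logs/some_run/models/model-000.ckpt'     # hypothetical checkpoint prefix to restore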
Example #4
def classification(
    inputs,                 # Input images
    labels,                 # Input labels
    nbof_labels,            # Number of different labels (number of identities)
    training,               # Whether the network is in training mode (enables dropout)
    regularizer_rate=0):    # Weight-regularization rate (0 disables regularization)
    net = network.Network('classification',
                          wideresnet_se,
                          inputs,
                          training,
                          nbof_labels,
                          regularizer_rate=regularizer_rate)
    logit = net(inputs)
    # with tf.compat.v1.variable_scope('classification'):
    #     emb = deep_cnn_v1(inputs, training, 128, regularizer_rate=regularizer_rate)
    #     emb = tf.nn.l2_normalize(emb)
    #     logit = losses.cosineface_losses(emb, labels, nbof_labels, regularizer_rate=regularizer_rate)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logit,
                                                       labels=labels))
    reg_losses = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
    reg_losses = tf.compat.v1.add_n(reg_losses, name='reg_loss')
    return logit, loss, reg_losses
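A minimal sketch of how the classification graph above could be driven; the CIFAR-10-style input shape, label count, learning rate, and the availability of wideresnet_se in scope are assumptions.

import tensorflow as tf

inputs = tf.compat.v1.placeholder(tf.float32, shape=[None, 32, 32, 3], name='inputs')
labels = tf.compat.v1.placeholder(tf.int64, shape=[None], name='labels')
training = tf.compat.v1.placeholder(tf.bool, shape=[], name='training')

logit, loss, reg_losses = classification(inputs, labels, nbof_labels=10,
                                          training=training, regularizer_rate=1e-4)
# Minimize the data loss plus the collected regularization losses.
train_op = tf.compat.v1.train.AdamOptimizer(1e-3).minimize(loss + reg_losses)
accuracy = tf.reduce_mean(
    tf.cast(tf.equal(tf.argmax(logit, axis=1), labels), tf.float32))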
Example #5
def infer_pro_gan():
    """
    Run inference with the trained generator and save the generated samples to a .npy file.
    """
    # Graph definition: inputs, model, initializer
    print('Graph building...')
    # Inputs
    noise_inputs = tf.compat.v1.placeholder(tf.compat.v1.float32,
                                            shape=[None, latent_size],
                                            name='noise_inputs')
    res_training = tf.compat.v1.placeholder(tf.compat.v1.float32,
                                            shape=[],
                                            name='res_training')
    # Network
    generator = network.Network('generator',
                                pro_generator,
                                noise_inputs,
                                res_building=8,
                                res_training=res_training,
                                latent_size=latent_size)
    gen_vars = tf.compat.v1.get_collection(
        tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
    outputs = generator(noise_inputs)
    # outputs             = tf.math.reduce_mean(outputs, axis=-1)
    # outputs             = tf.clip_by_value(outputs, -1., 1.)
    # outputs             = outputs/2 + 0.5
    # outputs            *= 128

    # discriminator = network.Network('discriminator', pro_discriminator, image_inputs, res_building=res_building, res_training=res_training, latent_size=latent_size)

    # Initializer
    init = tf.compat.v1.global_variables_initializer()
    print('Done: graph built.')

    # Saver
    saver = tf.compat.v1.train.Saver(var_list=gen_vars)

    # Inference --> use the configuration file
    print('Inference...')
    with tf.compat.v1.Session() as sess:
        # Initialize
        init.run()
        # Restore parameters
        saver.restore(sess, restore)
        # Run
        n = 5
        out_tot = np.empty((0, 256, 256, 1))
        for s in range(4):
            feed_dict = {
                noise_inputs: 2 * np.random.random((n * n, latent_size)) - 1,
                res_training: 8
            }
            out = sess.run(outputs, feed_dict)
            out_tot = np.append(out_tot, out, axis=0)
            # for i in range(len(out)):
            #     im = out[i]*255
            #     im = im.astype(np.uint8)
            #     sk.io.imsave('logs/images/{}.jpg'.format(s*len(out)+i), im)
            # print(out.shape)
            # # fig = plt.figure()
            # # for i in range(n):
            # #     for j in range(n):
            # #         plt.subplot(n,n,i*n+j+1)
            # #         plt.imshow(out[i*n+j])
            # # plt.show()
            # # print(out[0,:5,:32])
            # for i in range(len(out)):
            #     # keyed_pianoroll2midi(out[i], file_path='../../../data/music/test_{}.mid'.format(i))
            #     # im = out[i]*255/np.max(out[i])
            #     # im = im.astype(np.uint8)
            #     keyed_pianoroll2midi(out[i]*100, file_path='logs/musics/{}.mid'.format(s*len(out)+i))
        name = 'logs/' + restore[restore.rfind('/') + 1:] + '.npy'
        print(name)
        print(out_tot.shape)
        np.save(name, np.array(out_tot))
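The generated samples are saved as a logs/<checkpoint>.npy tensor. Below is a small sketch of turning such a file back into 8-bit images, assuming the generator outputs lie in [-1, 1] (the commented-out code above suggests other scalings were also tried) and using a hypothetical file name and output directory.

import numpy as np
import skimage.io as skio

out_tot = np.load('logs/some_checkpoint.npy')            # hypothetical file name
# Map from the assumed [-1, 1] range to [0, 255] and drop the channel axis.
images = np.clip((out_tot + 1.0) * 127.5, 0, 255).astype(np.uint8)
for i, im in enumerate(images):
    skio.imsave('logs/images/{}.jpg'.format(i), im.squeeze(-1))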
Example #6
def infer_pro_win(res=2):
    """
    Evaluate the relativistic discriminator's win rate on real and generated samples.
    """
    # Graph definition: inputs, model, initializer
    print('Graph building...')
    # Inputs
    noise_inputs = tf.compat.v1.placeholder(tf.compat.v1.float32,
                                            shape=[None, latent_size],
                                            name='noise_inputs')
    res_training = tf.compat.v1.placeholder(tf.compat.v1.float32,
                                            shape=[],
                                            name='res_training')
    image_inputs = tf.compat.v1.placeholder(tf.compat.v1.float32,
                                            shape=[None, 256, 256, 1],
                                            name='image_inputs')
    # Network
    generator = network.Network('generator',
                                pro_generator,
                                noise_inputs,
                                res_building=8,
                                res_training=res_training,
                                latent_size=latent_size)
    discriminator = network.Network('discriminator',
                                    pro_discriminator,
                                    image_inputs,
                                    res_building=8,
                                    res_training=res_training,
                                    latent_size=latent_size)
    var_list = tf.compat.v1.global_variables()
    gen_outputs = generator(noise_inputs)
    fake_outputs = discriminator(gen_outputs)
    real_outputs = discriminator(image_inputs)
    rf = real_outputs - tf.compat.v1.math.reduce_mean(fake_outputs)
    fr = fake_outputs - tf.compat.v1.math.reduce_mean(real_outputs)
    # outputs             = tf.math.reduce_mean(outputs, axis=-1)
    # outputs             = tf.clip_by_value(outputs, -1., 1.)
    # outputs             = outputs/2 + 0.5
    # outputs            *= 128

    # Initializer
    init = tf.compat.v1.global_variables_initializer()
    print('Done: graph built.')

    inputs = getattr(dataset,
                     config.data_initilize)(**config.data_initilize_kwargs)
    select_minibatch = partial(getattr(dataset, config.data_selector), inputs)

    # Saver
    saver = tf.compat.v1.train.Saver(var_list=var_list)

    # Inference --> use the configuration file
    print('Inference...')
    with tf.compat.v1.Session() as sess:
        # Initialize
        init.run()
        # Restore parameters
        saver.restore(sess, restore)
        # Run
        n = 4
        win = []
        for s in range(4):
            minibatch = select_minibatch(crt_img=n * n * s,
                                         res=8,
                                         minibatch_size=n * n)
            feed_dict = {
                noise_inputs: 2 * np.random.random((n * n, latent_size)) - 1,
                res_training: res,
                image_inputs: minibatch,
            }
            rf_, fr_ = sess.run([rf, fr], feed_dict)

            print(rf_)
            print(fr_)
            win = np.append(win, (rf_ < .0).astype(np.float32))
            win = np.append(win, (fr_ > .0).astype(np.float32))
            # out_tot += [out]
    print(np.mean(win))
Example #7
# plt.show()
# Load graph

# Network parameters
image_inputs = tf.compat.v1.placeholder(
    tf.float32,
    shape=[None, config.image_size, config.image_size, 3],
    name='image_inputs')
image_normed = image_inputs / 127.5
image_normed = image_normed - 1
label_inputs = tf.compat.v1.placeholder(tf.int64, shape=[None],
                                        name='label_inputs')
nbof_labels = config.nbof_labels
emb_size = config.latent_size
training = tf.compat.v1.placeholder(tf.bool, shape=[], name='training')
# Network
net = network.Network('classification', network_classification.v3,
                      image_normed, label_inputs, emb_size, training,
                      nbof_labels)
logit = net(image_normed)
var_list = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.GLOBAL_VARIABLES,
                                       scope='classification')
pred = tf.nn.softmax(logit)
pred = tf.argmax(pred, axis=1)
# Load network
# saver_path = 'logs/Cifar10-classification_v1-run_20191026172524/models/'
# saver_path = 'logs/Cifar10-classification_v1-run_20191028120938/models/' # without batch norm (1568)
saver_path = 'logs/Cifar10-classification_v1-run_20191028144237/models/'  # with custom normalization
# saver_path = 'logs/Cifar10-classification_v1-run_20191028151112/models/' # with keras batch norm

saver = tf.compat.v1.train.Saver(var_list=var_list)
# Run session on samples
with tf.compat.v1.Session() as sess:
    saver.restore(sess, saver_path + 'model-336.ckpt')
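    # The snippet ends after restoring the checkpoint. A purely illustrative
    # continuation (test_images and test_labels are hypothetical arrays, not
    # defined in the original code) would run the `pred` op and measure accuracy:
    predictions = sess.run(pred, feed_dict={image_inputs: test_images,
                                            training: False})
    print('accuracy:', np.mean((predictions == test_labels).astype(np.float32)))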