Example #1
import pickle as pkl

import numpy as np
import tensorflow as tf  # TensorFlow 1.x graph-mode API (tf.Session, tf.train.Saver)
import matplotlib.pyplot as plt


# Assumes z_size, generator and view_samples are defined elsewhere
# (Example #3 imports them from model, dataset and constants)
def train(net, dataset, epochs, batch_size, print_every=10, show_every=100, figsize=(5, 5)):
    saver = tf.train.Saver()
    # Fixed noise: 72 vectors to fill the 6x12 preview grid below
    sample_z = np.random.uniform(-1, 1, size=(72, z_size))

    samples, losses = [], []
    steps = 0

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for e in range(epochs):
            for x in dataset.batches(batch_size):
                steps += 1

                # Sample random noise for G
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))

                # Run optimizers
                _ = sess.run(net.d_opt, feed_dict={net.input_real: x, net.input_z: batch_z})
                _ = sess.run(net.g_opt, feed_dict={net.input_z: batch_z, net.input_real: x})

                if steps % print_every == 0:
                    # Every print_every steps, evaluate the losses and print them
                    train_loss_d = net.d_loss.eval({net.input_z: batch_z, net.input_real: x})
                    train_loss_g = net.g_loss.eval({net.input_z: batch_z})

                    print("Epoch {}/{}...".format(e+1, epochs),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss: {:.4f}".format(train_loss_g))
                    # Save losses to view after training
                    losses.append((train_loss_d, train_loss_g))

                if steps % show_every == 0:
                    # reuse=True shares the trained generator variables;
                    # training=False switches layers such as batch norm to inference mode
                    gen_samples = sess.run(
                                   generator(net.input_z, 3, reuse=True, training=False),
                                   feed_dict={net.input_z: sample_z})
                    samples.append(gen_samples)
                    _ = view_samples(-1, samples, 6, 12, figsize=figsize)
                    plt.show()

        # Note: tf.train.Saver does not create ./checkpoints; the directory must exist
        saver.save(sess, './checkpoints/generator.ckpt')

    with open('samples.pkl', 'wb') as f:
        pkl.dump(samples, f)
    
    return losses, samples
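
The view_samples helper these examples rely on is not shown anywhere in the listing (Example #3 imports it from dataset). A minimal sketch of what it might look like, assuming each entry of samples is a batch of tanh-scaled images in [-1, 1]; the save/saveCount arguments used in Example #4 are omitted:

import matplotlib.pyplot as plt

def view_samples(epoch, samples, nrows, ncols, figsize=(5, 5)):
    # Plot samples[epoch] on an nrows x ncols grid and return the figure
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=figsize,
                             sharex=True, sharey=True)
    for ax, img in zip(axes.flatten(), samples[epoch]):
        ax.axis('off')
        # Rescale generator output from [-1, 1] to [0, 1] for imshow
        ax.imshow((img + 1) / 2, aspect='equal')
    plt.subplots_adjust(wspace=0, hspace=0)
    return fig, axes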
Example #2



# Create the network (real_size, z_size, learning_rate, alpha, beta1 and the
# train/test sets are defined elsewhere; Example #5 shows typical values)
net = GAN(real_size, z_size, learning_rate, alpha=alpha, beta1=beta1)

dataset = Dataset(trainset, testset)

losses, samples = train(net, dataset, epochs, batch_size, figsize=(10,5))

_ = view_samples(0, samples, 4, 4, figsize=(10,5))
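
Dataset is likewise external; train() only depends on its batches() generator. A minimal sketch, assuming trainset and testset are NumPy arrays of images already scaled to [-1, 1]:

import numpy as np

class Dataset:
    def __init__(self, trainset, testset, shuffle=True):
        self.train_x = np.asarray(trainset)
        self.test_x = np.asarray(testset)
        self.shuffle = shuffle

    def batches(self, batch_size):
        # Reshuffle once per epoch so D and G see batches in a new order
        if self.shuffle:
            idx = np.random.permutation(len(self.train_x))
            self.train_x = self.train_x[idx]
        # Drop the last partial batch so every feed matches batch_size
        n = len(self.train_x) // batch_size * batch_size
        for i in range(0, n, batch_size):
            yield self.train_x[i:i + batch_size]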




Example #3
from os.path import isfile, isdir
from time import process_time
from tqdm import tqdm
import os
import cv2

import numpy as np
import tensorflow as tf  # TensorFlow 1.x graph-mode API

print('Libraries imported %f' % process_time())

from model import GAN
from model import generator
from dataset import view_samples
from constants import *

print('Model and constants imported %f' % process_time())

# Create the network
net = GAN(real_size, z_size, learning_rate, alpha=alpha, beta1=beta1)
print('GAN created %f' % process_time())

saver = tf.train.Saver()
with tf.Session() as sess:
    saver.restore(sess, tf.train.latest_checkpoint('checkpoints'))
    print('Session restored %f' % process_time())

    # real_size[0] // 2 noise vectors (16 for the 4x4 grid when real_size = (32, 32, 3))
    sample_z = np.random.uniform(-1, 1, size=(int(real_size[0] / 2), z_size))
    gen_samples = sess.run(generator(net.input_z,
                                     3,
                                     reuse=True,
                                     training=False),
                           feed_dict={net.input_z: sample_z})
    _ = view_samples(0, [gen_samples], 4, 4, figsize=(5, 5))
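
Example #3 pulls its hyperparameters from a constants module (from constants import *). The real_size, z_size and learning_rate values below come from Example #5; alpha and beta1 are assumed DCGAN-style defaults, not values confirmed by this listing:

# constants.py (illustrative)
real_size = (32, 32, 3)   # height, width, channels of the training images
z_size = 100              # length of the noise vector fed to the generator
learning_rate = 0.0003    # Adam learning rate for both networks
alpha = 0.2               # leaky ReLU slope (assumed, common DCGAN default)
beta1 = 0.5               # Adam beta1 (assumed, per the DCGAN paper)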
Example #4
# Variant of train() that checkpoints after every epoch, times epochs with
# process_time(), and saves each preview grid to disk via view_samples
def train(net,
          dataset,
          epochs,
          batch_size,
          print_every=10,
          show_every=100,
          figsize=(5, 5)):
    print('Training started %f' % process_time())
    saver = tf.train.Saver()
    # Fixed noise: real_size[0] * 2 vectors (64 for the 8x8 grid when
    # real_size = (32, 32, 3))
    sample_z = np.random.uniform(-1, 1, size=(real_size[0] * 2, z_size))

    samples, losses = [], []
    steps = 0

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        last = process_time()
        for e in range(epochs):
            for x in dataset.batches(batch_size):
                steps += 1
                # Sample random noise for G
                batch_z = np.random.uniform(-1, 1, size=(batch_size, z_size))
                # Run optimizers
                _ = sess.run(net.d_opt,
                             feed_dict={
                                 net.input_real: x,
                                 net.input_z: batch_z
                             })
                _ = sess.run(net.g_opt,
                             feed_dict={
                                 net.input_z: batch_z,
                                 net.input_real: x
                             })

                if steps % print_every == 0:
                    # Every print_every steps, evaluate the losses and print them
                    train_loss_d = net.d_loss.eval({
                        net.input_z: batch_z,
                        net.input_real: x
                    })
                    train_loss_g = net.g_loss.eval({net.input_z: batch_z})

                    print("Epoch {}/{}...".format(e + 1, epochs),
                          "Discriminator Loss: {:.4f}...".format(train_loss_d),
                          "Generator Loss: {:.4f}".format(train_loss_g))
                    # Save losses to view after training
                    losses.append((train_loss_d, train_loss_g))

                if steps % show_every == 0:
                    gen_samples = sess.run(generator(net.input_z,
                                                     3,
                                                     reuse=True,
                                                     training=False),
                                           feed_dict={net.input_z: sample_z})
                    samples.append(gen_samples)
                    _ = view_samples(-1,
                                     samples,
                                     8,
                                     8,
                                     figsize=figsize,
                                     save=True,
                                     saveCount=len(samples))
            saver.save(sess, './checkpoints/generator {}.ckpt'.format(e + 1))
            last = process_time() - last
            print('Epoch time is %f' % last)
            last = process_time()

        saver.save(sess, './checkpoints/generator.ckpt')
        print('Model trained %f' % process_time())
    return losses, samples
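
Since this train() returns the (d_loss, g_loss) pairs logged every print_every steps, a quick post-training check is to plot the two curves, for example:

import numpy as np
import matplotlib.pyplot as plt

losses, samples = train(net, dataset, epochs, batch_size)

loss_arr = np.array(losses)  # shape: (num_logged_steps, 2)
fig, ax = plt.subplots()
ax.plot(loss_arr[:, 0], label='Discriminator', alpha=0.5)
ax.plot(loss_arr[:, 1], label='Generator', alpha=0.5)
ax.set_title('Training Losses')
ax.legend()
plt.show()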
Example #5
    # Tail of a train() variant that also tracks accuracy on the train and
    # test sets (the loop body is omitted in this example)
    return train_accuracies, test_accuracies, samples


real_size = (32, 32, 3)
z_size = 100
learning_rate = 0.0003

net = GAN(real_size, z_size, learning_rate)
dataset = Dataset(trainset, testset)
batch_size = 128
epochs = 25
train_accuracies, test_accuracies, samples = train(net,
                                                   dataset,
                                                   epochs,
                                                   batch_size,
                                                   figsize=(10, 5))

fig, ax = plt.subplots()
plt.plot(train_accuracies, label='Train', alpha=0.5)
plt.plot(test_accuracies, label='Test', alpha=0.5)
plt.title("Accuracy")
plt.legend()

_ = view_samples(-1, samples, 5, 10, figsize=(10, 5))

# Save every collected preview grid as a numbered PNG
# (assumes the images/ directory already exists)
for ii in range(len(samples)):
    fig, ax = view_samples(ii, samples, 5, 10, figsize=(10, 5))
    fig.savefig('images/samples_{:03d}.png'.format(ii))
    plt.close(fig)
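
The loop above leaves one numbered PNG per snapshot in images/, which can be stitched into an animation of training progress. A sketch using imageio (not part of the original examples; keyword details vary slightly between imageio versions):

import imageio

# Collect the numbered frames written by the loop above
frames = [imageio.imread('images/samples_{:03d}.png'.format(ii))
          for ii in range(len(samples))]
# duration is seconds per frame; writes an animated GIF of training progress
imageio.mimsave('images/training_progress.gif', frames, duration=0.5)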