Example #1
File: AGAN.py Project: Kyanji/MAGNETO
def test_generator(generator, class_label=None):
    # sample 16 latent vectors from a uniform distribution
    noise_input = np.random.uniform(-1.0, 1.0, size=[16, 100])
    step = 0
    if class_label is None:
        # no class requested: draw random one-hot labels for the 2 classes
        num_labels = 2
        noise_label = np.eye(num_labels)[np.random.choice(num_labels, 16)]
    else:
        # condition every generated image on the requested class
        noise_label = np.zeros((16, 2))
        noise_label[:, class_label] = 1
        step = class_label

    gan.plot_images(generator,
                    noise_input=noise_input,
                    noise_label=noise_label,
                    show=True,
                    step=step,
                    model_name="test_outputs")
Example #2
def test_generator(generator, params, latent_size=100):
    # params: (class label, latent code 1, latent code 2,
    #          sweep code 1 over a range?, sweep code 2 over a range?)
    label, code1, code2, p1, p2 = params
    noise_input = np.random.uniform(-1.0, 1.0, size=[16, latent_size])
    step = 0
    if label is None:
        num_labels = 10
        noise_label = np.eye(num_labels)[np.random.choice(num_labels, 16)]
    else:
        noise_label = np.zeros((16, 10))
        noise_label[:, label] = 1
        step = label

    if code1 is None:
        # no value given: sample the first latent code from a narrow Gaussian
        noise_code1 = np.random.normal(scale=0.5, size=[16, 1])
    else:
        if p1:
            # sweep the first latent code linearly from -2 to 2 across the 16 samples
            a = np.linspace(-2, 2, 16)
            a = np.reshape(a, [16, 1])
            noise_code1 = np.ones((16, 1)) * a
        else:
            # hold the first latent code fixed at the given value
            noise_code1 = np.ones((16, 1)) * code1
        print(noise_code1)

    # the second latent code is handled the same way
    if code2 is None:
        noise_code2 = np.random.normal(scale=0.5, size=[16, 1])
    else:
        if p2:
            a = np.linspace(-2, 2, 16)
            a = np.reshape(a, [16, 1])
            noise_code2 = np.ones((16, 1)) * a
        else:
            noise_code2 = np.ones((16, 1)) * code2
        print(noise_code2)

    gan.plot_images(generator,
                    noise_input=noise_input,
                    noise_label=noise_label,
                    noise_codes=[noise_code1, noise_code2],
                    show=True,
                    step=step,
                    model_name="test_outputs")
Example #3
def train_for_n(epochs=1, batch_size=32):

    for epoch in range(epochs):

        # Plot some fake images
        noise = np.random.uniform(0., 1., size=[16, latent_dim])
        generated_images = generator.predict(noise)
        plot_images(generated_images,
                    fname=log_dir + '/generated_images_' + str(epoch))

        iterations_per_epoch = 60000 // batch_size  # number of training steps per epoch
        perm = np.random.choice(60000, size=60000, replace=False)  # random permutation of the training indices

        for i in range(iterations_per_epoch):

            # Create a mini-batch of data (X: real images + fake images, y: corresponding class vectors)
            image_batch = X_train[perm[i * batch_size:(i + 1) *
                                       batch_size], :, :, :]  # real images
            noise_gen = np.random.uniform(0.,
                                          1.,
                                          size=[batch_size, latent_dim])
            generated_images = generator.predict(noise_gen)  # generated images
            X = np.concatenate((image_batch, generated_images))
            y = np.zeros([2 * batch_size, 2])  # class vector
            y[0:batch_size, 1] = 1
            y[batch_size:, 0] = 1

            # Train the discriminator on the mini-batch
            d_loss, d_acc = discriminator.train_on_batch(X, y)
            losses["d"].append(d_loss)
            discriminator_acc.append(d_acc)

            # Create a mini-batch of data (X: noise, y: class vectors pretending that these produce real images)
            noise_tr = np.random.uniform(0., 1., size=[batch_size, latent_dim])
            y2 = np.zeros([batch_size, 2])
            y2[:, 1] = 1

            # Train the generator part of the GAN on the mini-batch
            g_loss = GAN.train_on_batch(noise_tr, y2)
            losses["g"].append(g_loss)
Example #4
import numpy as np
import matplotlib.pyplot as plt
import keras.backend.tensorflow_backend as KTF
from gan import build_generator, build_discriminator, plot_images, make_trainable, get_session

log_dir = "."
# Allows 2 jobs per GPU; please do not change this during the tutorial
KTF.set_session(get_session())

# prepare the MNIST dataset (the `mnist` helper exposing train_images/test_images
# is part of the tutorial code and is not imported in this snippet)
data = mnist.load_data()
X_train = data.train_images.reshape(-1, 28, 28, 1) / 255.
X_test = data.test_images.reshape(-1, 28, 28, 1) / 255.

# plot some real images
idx = np.random.choice(len(X_train), 16)
plot_images(X_train[idx], fname=log_dir + '/real_images.png')

# --------------------------------------------------
# Set up generator, discriminator and GAN (stacked generator + discriminator)
# Feel free to modify eg. :
# - the provided models (see gan.py)
# - the learning rate
# - the batchsize
# --------------------------------------------------

# Set up generator
print('\nGenerator')
latent_dim = 100
generator = build_generator(latent_dim)
print(generator.summary())
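After training (for example with train_for_n from Example #3), the matplotlib import above can be used to inspect the loss curves. A minimal sketch, assuming the losses dictionary from Example #3:

# hypothetical plot of the per-batch training losses
plt.figure(figsize=(10, 4))
plt.plot(losses["d"], label="discriminator loss")
plt.plot(losses["g"], label="generator loss")
plt.legend()
plt.savefig(log_dir + '/loss_curves.png')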
Example #5
File: AGAN.py Project: Kyanji/MAGNETO
def train(models, data, params):
    """Train the discriminator and adversarial Networks

    Alternately train discriminator and adversarial 
    networks by batch.
    Discriminator is trained first with real and fake 
    images and corresponding one-hot labels.
    Adversarial is trained next with fake images pretending 
    to be real and corresponding one-hot labels.
    Generate sample images per save_interval.

    # Arguments
        models (list): Generator, Discriminator,
            Adversarial models
        data (list): x_train, y_train data
        params (list): Network parameters

    """
    # the GAN models
    generator, discriminator, adversarial = models
    # images and their one-hot labels
    x_train, y_train = data
    # network parameters
    batch_size, latent_size, train_steps, num_labels, model_name \
            = params
    # the generator image is saved every 500 steps
    save_interval = 500
    # noise vector to see how the generator
    # output evolves during training
    noise_input = np.random.uniform(-1.0, 1.0, size=[16, latent_size])
    # class labels are 0, 1;
    # the generator must produce these classes
    noise_label = np.eye(num_labels)[np.arange(0, 16) % num_labels]
    # number of elements in train dataset
    train_size = x_train.shape[0]
    print(model_name, "Labels for generated images: ",
          np.argmax(noise_label, axis=1))

    for i in range(train_steps):
        # train the discriminator for 1 batch
        # 1 batch of real (label=1.0) and fake images (label=0.0)
        # randomly pick real images and
        # corresponding labels from dataset
        rand_indexes = np.random.randint(0, train_size, size=batch_size)
        real_images = x_train[rand_indexes]
        real_labels = y_train[rand_indexes]
        # generate fake images from noise using generator
        # generate noise using uniform distribution
        noise = np.random.uniform(-1.0, 1.0, size=[batch_size, latent_size])
        # randomly pick one-hot labels
        fake_labels = np.eye(num_labels)[np.random.choice(
            num_labels, batch_size)]
        # generate fake images
        fake_images = generator.predict([noise, fake_labels])
        # real + fake images = 1 batch of train data
        x = np.concatenate((real_images, fake_images))
        # real + fake labels = 1 batch of train data labels
        labels = np.concatenate((real_labels, fake_labels))

        # label real and fake images
        # real images label is 1.0
        y = np.ones([2 * batch_size, 1])
        # fake images label is 0.0
        y[batch_size:, :] = 0
        # train discriminator network, log the loss and accuracy
        # ['loss', 'activation_1_loss',
        # 'label_loss', 'activation_1_acc', 'label_acc']
        metrics = discriminator.train_on_batch(x, [y, labels])
        fmt = "%d: [disc loss: %f, srcloss: %f,"
        fmt += "lblloss: %f, srcacc: %f, lblacc: %f]"
        log = fmt % (i, metrics[0], metrics[1], \
                metrics[2], metrics[3], metrics[4])

        # train the adversarial network for 1 batch
        # 1 batch of fake images with label=1.0 and
        # corresponding one-hot label or class
        # since the discriminator weights are frozen
        # in adversarial network only the generator is trained
        # generate noise using uniform distribution
        noise = np.random.uniform(-1.0, 1.0, size=[batch_size, latent_size])
        # randomly pick one-hot labels
        fake_labels = np.eye(num_labels)[np.random.choice(
            num_labels, batch_size)]
        # label fake images as real
        y = np.ones([batch_size, 1])
        # train the adversarial network
        # note that unlike in discriminator training,
        # we do not save the fake images in a variable
        # the fake images go to the discriminator input
        # of the adversarial for classification
        # log the loss and accuracy
        metrics = adversarial.train_on_batch([noise, fake_labels],
                                             [y, fake_labels])
        fmt = "%s [advr loss: %f, srcloss: %f,"
        fmt += "lblloss: %f, srcacc: %f, lblacc: %f]"
        log = fmt % (log, metrics[0], metrics[1],\
                metrics[2], metrics[3], metrics[4])
        print(log)
        if (i + 1) % save_interval == 0:
            # plot generator images on a periodic basis
            gan.plot_images(generator,
                            noise_input=noise_input,
                            noise_label=noise_label,
                            show=False,
                            step=(i + 1),
                            model_name=model_name)

    # save the model after training the generator
    # the trained generator can be reloaded
    # for future MNIST digit generation
    generator.save(model_name + ".h5")
Example #6
def train(models, data, params):
    # InfoGAN-style training: besides the class label, the generator and
    # discriminator also handle two continuous latent codes
    generator, discriminator, adversarial = models

    x_train, y_train = data

    batch_size, latent_size, train_steps, num_labels, model_name = params
    save_interval = 100
    # fixed noise, labels and codes used to monitor the generator during training
    noise_input = np.random.uniform(-1.0, 1.0, size=[16, latent_size])
    noise_label = np.eye(num_labels)[np.arange(0, 16) % num_labels]
    noise_code1 = np.random.normal(scale=0.5, size=[16, 1])
    noise_code2 = np.random.normal(scale=0.5, size=[16, 1])
    train_size = x_train.shape[0]
    print(model_name, "Labels for generated images: ",
          np.argmax(noise_label, axis=1))

    for i in range(train_steps):
        # one batch of real images with their labels and randomly drawn codes
        rand_indexes = np.random.randint(0, train_size, size=batch_size)
        real_images = x_train[rand_indexes]
        real_labels = y_train[rand_indexes]

        real_code1 = np.random.normal(scale=0.5, size=[batch_size, 1])
        real_code2 = np.random.normal(scale=0.5, size=[batch_size, 1])

        # one batch of fake images generated from noise, random labels and codes
        noise = np.random.uniform(-1.0, 1.0, size=[batch_size, latent_size])
        fake_labels = np.eye(num_labels)[np.random.choice(
            num_labels, batch_size)]
        fake_code1 = np.random.normal(scale=0.5, size=[batch_size, 1])
        fake_code2 = np.random.normal(scale=0.5, size=[batch_size, 1])
        inputs = [noise, fake_labels, fake_code1, fake_code2]
        fake_images = generator.predict(inputs)

        x = np.concatenate((real_images, fake_images))
        labels = np.concatenate((real_labels, fake_labels))
        codes1 = np.concatenate((real_code1, fake_code1))
        codes2 = np.concatenate((real_code2, fake_code2))

        # real images are labeled 1.0, fake images 0.0
        y = np.ones([2 * batch_size, 1])
        y[batch_size:, :] = 0

        outputs = [y, labels, codes1, codes2]

        # train the discriminator on the combined real+fake batch
        metrics = discriminator.train_on_batch(x, outputs)
        fmt = "%d: [discriminator loss: %f, label_acc: %f]"
        log = fmt % (i, metrics[0], metrics[6])

        # train the adversarial network: fake inputs labeled as real
        # (the discriminator weights are frozen, so only the generator learns)
        noise = np.random.uniform(-1.0, 1.0, size=[batch_size, latent_size])
        fake_labels = np.eye(num_labels)[np.random.choice(
            num_labels, batch_size)]
        fake_code1 = np.random.normal(scale=0.5, size=[batch_size, 1])
        fake_code2 = np.random.normal(scale=0.5, size=[batch_size, 1])

        y = np.ones([batch_size, 1])

        inputs = [noise, fake_labels, fake_code1, fake_code2]
        outputs = [y, fake_labels, fake_code1, fake_code2]
        metrics = adversarial.train_on_batch(inputs, outputs)
        fmt = "%s [adversarial loss: %f, label_acc: %f]"
        log = fmt % (log, metrics[0], metrics[6])

        print(log)
        if (i + 1) % save_interval == 0:
            # show the plots interactively only on the final training step
            show = (i + 1) == train_steps

            gan.plot_images(generator,
                            noise_input=noise_input,
                            noise_label=noise_label,
                            noise_codes=[noise_code1, noise_code2],
                            show=show,
                            step=(i + 1),
                            model_name=model_name)

    generator.save(model_name + ".h5")
Example #7
def test_generator(generator):
    # sample 16 random latent vectors and plot the corresponding generated images
    noise_input = np.random.uniform(-1.0, 1.0, size=[16, 100])
    gan.plot_images(generator,
                    noise_input=noise_input,
                    show=True,
                    model_name="test_outputs")