Ejemplo n.º 1
0
 def __init__(self,
              root="../data/mnist.npz",
              train=True,
              transform=None,
              target_transform=None):
     """Store dataset options and load the MNIST arrays from *root*.

     ``reader.read_mnist`` returns ((train_data, train_labels),
     (test_data, test_labels)); both splits are kept on the instance.
     """
     self.root = root
     self.train = train
     self.transform = transform
     self.target_transform = target_transform
     train_split, test_split = reader.read_mnist(root)
     self.train_data, self.train_labels = train_split
     self.test_data, self.test_labels = test_split
Ejemplo n.º 2
0
from keras import models as KModels
from keras import layers as KLayers
from keras import initializers as KInits
from keras import optimizers as KOpts
import numpy as np
import pickle
from matplotlib import pyplot as plt

# Execution-mode switch; only the "TRAIN" branch is present in this excerpt.
PHRASE = "TRAIN"

# Project-level MNIST configuration object (``config`` is a project-local
# module imported elsewhere in the file — not visible in this excerpt).
cfg = config.MNISTConfig()

if PHRASE == "TRAIN":
    np.random.seed(1000)  # fixed seed for reproducible runs
    randomDim = 100  # dimensionality of the generator's noise input
    # ``reader`` is a project-local module; read_mnist returns
    # ((train_images, train_labels), (test_images, test_labels)).
    (X_train, y_train), (X_test, y_test) = reader.read_mnist("../data/mnist.npz")
    # Rescale pixel values from [0, 255] to [-1, 1].
    X_train = (X_train.astype(np.float32) - 127.5)/127.5
    X_train = X_train.reshape(60000, 784)  # flatten 28x28 images to 784-vectors

    # Adam with a low learning rate and beta_1=0.5 (common GAN settings).
    adam = KOpts.Adam(lr=0.0002, beta_1=0.5)

    def build_generator(randomDim):
        """Build the convolutional generator network.

        NOTE(review): this definition is truncated in this excerpt — the
        remaining layers and the return statement are not visible here.
        """
        model = KModels.Sequential()
        model.add(KLayers.Dense(128 * 7 * 7, input_dim=randomDim)) # ?,100 => ?, 128*7*7
        model.add(KLayers.LeakyReLU(alpha=0.2))
        model.add(KLayers.Reshape((7,7,128))) # ?, 128*7*7 => ?,7,7,128
        model.add(KLayers.BatchNormalization(momentum=0.8))
        model.add(KLayers.UpSampling2D()) # ?,7,7,128 => ?,14,14,128

        model.add(KLayers.Conv2D(128, kernel_size=3, padding='same')) # ?,14,14,128 => ?,14,14,128
        model.add(KLayers.LeakyReLU(alpha=0.2))
Ejemplo n.º 3
0
# Fixed evaluation inputs for the generator: the same 10 noise rows are
# repeated once per digit class, and each repetition is paired with the
# one-hot label of that class (10 samples per class, 100 rows total).
onehot = np.eye(10)
temp_z_ = np.random.normal(0, 1, (10, 100))
fixed_z_ = np.tile(temp_z_, (10, 1))  # (100, 100): 10 vertical copies of temp_z_
# Class index column [0]*10, [1]*10, ..., [9]*10, then one-hot encode it.
fixed_y_ = np.repeat(np.arange(10), 10).reshape(-1, 1)
fixed_y_ = onehot[fixed_y_.astype(np.int32)].squeeze()  # (100, 10)
# Training hyper-parameters.
batch_size = 100
lr = 0.0002
train_epoch = 10
adam = KOpts.Adam(lr=0.0002, beta_1=0.5)
# ``reader`` is project-local; returns ((x_train, y_train), (x_test, y_test)).
(x_train, y_train), (x_test, y_test) = reader.read_mnist()
# NOTE(review): this rescaling assumes the images are already in [0, 1];
# other snippets in this file rescale from [0, 255] — verify the loader.
train_set = (x_train - 0.5) / 0.5
train_label = y_train

# Freeze the discriminator's weights so that compiling the combined model
# below trains only the generator (build_discriminator/build_generator are
# defined elsewhere in the file).
discriminator = build_discriminator()
discriminator.trainable = False

# Conditional-GAN inputs: a 100-dim noise vector and a 10-dim label vector.
ganInput = KLayers.Input(shape=(100, ))
ganInputlabel = KLayers.Input(shape=(10, ))

generator = build_generator()
x = generator([ganInput, ganInputlabel])

# Combined model: noise + label -> generated image -> discriminator verdict.
ganOutput = discriminator([x, ganInputlabel])
gan = KModels.Model(inputs=[ganInput, ganInputlabel], outputs=ganOutput)
gan.compile(loss='binary_crossentropy', optimizer=adam)
Ejemplo n.º 4
0
            'binary_crossentropy', 'categorical_crossentropy', gaussian_loss
        ],
                      optimizer=adam)
        return model

    def sample_generator_input(batch_size, noise_dim=62, num_classes=10, cont_dim=2):
        """Draw one batch of generator inputs: noise, one-hot labels, continuous codes.

        The previously hard-coded dimensions (62 / 10 / 2) are now keyword
        parameters whose defaults preserve the original behavior; together
        they match the 74-dim generator input used elsewhere in this file.

        Args:
            batch_size: number of samples to draw.
            noise_dim: size of the noise vector per sample (default 62).
            num_classes: number of categorical label classes (default 10).
            cont_dim: number of continuous latent codes per sample (default 2).

        Returns:
            Tuple ``(sampled_noise, sampled_labels, sampled_cont)`` where
            noise is N(0, 1), labels are one-hot encoded, and the continuous
            codes are uniform in [-1, 1).
        """
        sampled_noise = np.random.normal(0, 1, (batch_size, noise_dim))
        sampled_labels = np.random.randint(0, num_classes, batch_size).reshape(-1, 1)
        sampled_labels = KUtils.to_categorical(sampled_labels, num_classes=num_classes)
        sampled_cont = np.random.uniform(-1, 1, size=(batch_size, cont_dim))
        return sampled_noise, sampled_labels, sampled_cont

    adam = KOpts.Adam(lr=0.0002, beta_1=0.5)
    epochs = 6000
    batch_size = 100
    # ``reader`` is project-local; only the training split is used here.
    (x_train, y_train), (_, _) = reader.read_mnist()

    # Rescale pixel values from [0, 255] to [-1, 1] and add a channel axis.
    x_train = (x_train.astype(np.float32) - 127.5) / 127.5
    x_train = np.expand_dims(x_train, axis=3)
    y_train = y_train.reshape(-1, 1)

    # Freeze discriminator weights so the combined model trains only the
    # generator (build_discriminator/build_generator defined elsewhere).
    discriminator = build_discriminator()
    discriminator.trainable = False

    # 74-dim input = 62 noise + 10 one-hot label + 2 continuous codes
    # (matches sample_generator_input above).
    ganInput = KLayers.Input(shape=(74, ))
    generator = build_generator()
    x = generator(ganInput)
    # Discriminator has three heads: validity, predicted label, latent codes.
    valid, target_label, target_count = discriminator(x)
    gan = KModels.Model(inputs=ganInput,
                        outputs=[valid, target_label, target_count])
    # NOTE(review): this compile call is truncated in this excerpt; the loss
    # list continues on lines not visible here.
    gan.compile(loss=[
Ejemplo n.º 5
0
# Adversarial autoencoder setup: the discriminator judges latent codes,
# the generator is an encoder/decoder pair (both built elsewhere in the file).
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

# The generator returns both the latent code and the reconstruction.
encoded_repr, reconstructed_img = generator(img)
# Freeze the discriminator inside the combined model so that compiling it
# below trains only the encoder/decoder.
discriminator.trainable = False
validity = discriminator(encoded_repr)

# Joint objective: reconstruction (mse) heavily weighted over the
# adversarial term on the latent code.
adversarial_autoencoder = Model(img, [reconstructed_img, validity])
adversarial_autoencoder.compile(loss=['mse', 'binary_crossentropy'],
                                loss_weights=[0.999, 0.001],
                                optimizer=optimizer)

(x_train, y_train), (x_test, y_test) = reader.read_mnist('../data/mnist.npz')
# Rescale pixel values from [0, 255] to [-1, 1] and add a channel axis.
x_train = (x_train.astype(np.float32) - 127.5) / 127.5
x_train = np.expand_dims(x_train, axis=3)

half_batch = int(cfg.BATCH_SIZE / 2)

samples_image = []
# ``bar.ProgressBarGAN`` is a project-local progress reporter.
progressBar = bar.ProgressBarGAN(
    1, cfg.EPOCH_NUM, "D loss: %.3f, acc: %.2f%% - G loss: %.3f, mse: %.2f")
# NOTE(review): this training loop is truncated in this excerpt.
for epoch in range(cfg.EPOCH_NUM):
    # Select a random half batch of images
    idx = np.random.randint(0, x_train.shape[0], half_batch)
    imgs = x_train[idx]

    # Generate a half batch of new images
    # (generator.predict returns (latent_code, reconstruction), matching
    # the encoded_repr/reconstructed_img unpacking above).
    latent_fake, gen_imgs = generator.predict(imgs)