import matplotlib.pyplot as plt
import numpy as np
from sklearn.datasets import fetch_openml

# Building blocks from the accompanying from-scratch deep learning library.
# The import paths below are assumptions; adjust them to your project layout.
from mlfromscratch.deep_learning import NeuralNetwork
from mlfromscratch.deep_learning.layers import (Activation, BatchNormalization,
                                                Conv2D, Dense, Dropout, Flatten,
                                                Reshape, UpSampling2D,
                                                ZeroPadding2D)
from mlfromscratch.deep_learning.loss_functions import CrossEntropy, SquareLoss
from mlfromscratch.deep_learning.optimizers import Adam


class GAN():
    """A Generative Adversarial Network with deep fully-connected neural nets
    as Generator and Discriminator.

    Training Data: MNIST Handwritten Digits (28x28 images)
    """
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.img_dim = self.img_rows * self.img_cols
        self.latent_dim = 100

        optimizer = Adam(learning_rate=0.0002, b1=0.5)
        loss_function = CrossEntropy

        # Build the discriminator
        self.discriminator = self.build_discriminator(optimizer, loss_function)

        # Build the generator
        self.generator = self.build_generator(optimizer, loss_function)

        # Build the combined model: generator followed by discriminator
        self.combined = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        self.combined.layers.extend(self.generator.layers)
        self.combined.layers.extend(self.discriminator.layers)

        print()
        self.generator.summary(name="Generator")
        self.discriminator.summary(name="Discriminator")

    def build_generator(self, optimizer, loss_function):
        model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        model.add(Dense(256, input_shape=(self.latent_dim,)))
        model.add(Activation('leaky_relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(512))
        model.add(Activation('leaky_relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(1024))
        model.add(Activation('leaky_relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Dense(self.img_dim))
        model.add(Activation('tanh'))
        return model

    def build_discriminator(self, optimizer, loss_function):
        model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        model.add(Dense(512, input_shape=(self.img_dim,)))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.5))
        model.add(Dense(256))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.5))
        model.add(Dense(2))
        model.add(Activation('softmax'))
        return model

    def train(self, n_epochs, batch_size=128, save_interval=50):
        # fetch_mldata was removed from scikit-learn; fetch_openml is its
        # replacement for loading MNIST
        mnist = fetch_openml('mnist_784', version=1, as_frame=False)
        X = mnist.data
        y = mnist.target

        # Rescale to [-1, 1] to match the generator's tanh output
        X = (X.astype(np.float32) - 127.5) / 127.5

        half_batch = batch_size // 2

        for epoch in range(n_epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------
            self.discriminator.set_trainable(True)

            # Select a random half batch of images
            idx = np.random.randint(0, X.shape[0], half_batch)
            imgs = X[idx]

            # Sample noise to use as generator input
            noise = np.random.normal(0, 1, (half_batch, self.latent_dim))

            # Generate a half batch of images
            gen_imgs = self.generator.predict(noise)

            # One-hot targets: valid = [1, 0], fake = [0, 1]
            valid = np.concatenate((np.ones((half_batch, 1)),
                                    np.zeros((half_batch, 1))), axis=1)
            fake = np.concatenate((np.zeros((half_batch, 1)),
                                   np.ones((half_batch, 1))), axis=1)

            # Train the discriminator on real and generated images
            d_loss_real, d_acc_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake, d_acc_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * (d_loss_real + d_loss_fake)
            d_acc = 0.5 * (d_acc_real + d_acc_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # We only want to train the generator for the combined model
            self.discriminator.set_trainable(False)

            # Sample noise and use as generator input
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # The generator wants the discriminator to label the generated
            # samples as valid
            valid = np.concatenate((np.ones((batch_size, 1)),
                                    np.zeros((batch_size, 1))), axis=1)

            # Train the generator
            g_loss, g_acc = self.combined.train_on_batch(noise, valid)

            # Display the progress
            print("%d [D loss: %f, acc: %.2f%%] [G loss: %f, acc: %.2f%%]" %
                  (epoch, d_loss, 100 * d_acc, g_loss, 100 * g_acc))

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch)

    def save_imgs(self, epoch):
        r, c = 5, 5  # Grid size
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        # Generate images and reshape to image shape
        gen_imgs = self.generator.predict(noise).reshape(
            (-1, self.img_rows, self.img_cols))

        # Rescale images from [-1, 1] to [0, 1]
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        plt.suptitle("Generative Adversarial Network")
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("mnist_%d.png" % epoch)
        plt.close()
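A minimal training sketch for the fully-connected GAN above, assuming the class is defined as shown. The epoch count, batch size, and save interval are illustrative values, not prescribed by the source:

if __name__ == '__main__':
    gan = GAN()
    # Illustrative hyperparameters; adjust for your hardware and patience
    gan.train(n_epochs=200000, batch_size=64, save_interval=400)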
class DCGAN():
    """A Deep Convolutional Generative Adversarial Network with convolutional
    neural nets as Generator and Discriminator.

    Training Data: MNIST Handwritten Digits (28x28 images)
    """
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.channels = 1
        self.img_shape = (self.channels, self.img_rows, self.img_cols)
        self.latent_dim = 100

        optimizer = Adam(learning_rate=0.0002, b1=0.5)
        loss_function = CrossEntropy

        # Build the discriminator
        self.discriminator = self.build_discriminator(optimizer, loss_function)

        # Build the generator
        self.generator = self.build_generator(optimizer, loss_function)

        # Build the combined model: generator followed by discriminator
        self.combined = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        self.combined.layers.extend(self.generator.layers)
        self.combined.layers.extend(self.discriminator.layers)

        print()
        self.generator.summary(name="Generator")
        self.discriminator.summary(name="Discriminator")

    def build_generator(self, optimizer, loss_function):
        model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        # Project the latent vector to a 7x7 feature map with 128 channels,
        # then upsample twice (7 -> 14 -> 28)
        model.add(Dense(128 * 7 * 7, input_shape=(self.latent_dim,)))
        model.add(Activation('leaky_relu'))
        model.add(Reshape((128, 7, 7)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(128, filter_shape=(3, 3), padding='same'))
        model.add(Activation('leaky_relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(UpSampling2D())
        model.add(Conv2D(64, filter_shape=(3, 3), padding='same'))
        model.add(Activation('leaky_relu'))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(1, filter_shape=(3, 3), padding='same'))
        model.add(Activation('tanh'))
        return model

    def build_discriminator(self, optimizer, loss_function):
        model = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        model.add(Conv2D(32, filter_shape=(3, 3), stride=2,
                         input_shape=self.img_shape, padding='same'))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.25))
        model.add(Conv2D(64, filter_shape=(3, 3), stride=2, padding='same'))
        model.add(ZeroPadding2D(padding=((0, 1), (0, 1))))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(128, filter_shape=(3, 3), stride=2, padding='same'))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.25))
        model.add(BatchNormalization(momentum=0.8))
        model.add(Conv2D(256, filter_shape=(3, 3), stride=1, padding='same'))
        model.add(Activation('leaky_relu'))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(2))
        model.add(Activation('softmax'))
        return model

    def train(self, n_epochs, batch_size=128, save_interval=50):
        mnist = fetch_openml('mnist_784', version=1, as_frame=False)
        X = mnist.data.reshape((-1,) + self.img_shape)
        y = mnist.target

        # Rescale to [-1, 1] to match the generator's tanh output
        X = (X.astype(np.float32) - 127.5) / 127.5

        half_batch = batch_size // 2

        for epoch in range(n_epochs):

            # ---------------------
            #  Train Discriminator
            # ---------------------
            self.discriminator.set_trainable(True)

            # Select a random half batch of images
            idx = np.random.randint(0, X.shape[0], half_batch)
            imgs = X[idx]

            # Sample noise to use as generator input
            noise = np.random.normal(0, 1, (half_batch, self.latent_dim))

            # Generate a half batch of images
            gen_imgs = self.generator.predict(noise)

            # One-hot targets: valid = [1, 0], fake = [0, 1]
            valid = np.concatenate((np.ones((half_batch, 1)),
                                    np.zeros((half_batch, 1))), axis=1)
            fake = np.concatenate((np.zeros((half_batch, 1)),
                                   np.ones((half_batch, 1))), axis=1)

            # Train the discriminator on real and generated images
            d_loss_real, d_acc_real = self.discriminator.train_on_batch(imgs, valid)
            d_loss_fake, d_acc_fake = self.discriminator.train_on_batch(gen_imgs, fake)
            d_loss = 0.5 * (d_loss_real + d_loss_fake)
            d_acc = 0.5 * (d_acc_real + d_acc_fake)

            # ---------------------
            #  Train Generator
            # ---------------------

            # We only want to train the generator for the combined model
            self.discriminator.set_trainable(False)

            # Sample noise and use as generator input
            noise = np.random.normal(0, 1, (batch_size, self.latent_dim))

            # The generator wants the discriminator to label the generated
            # samples as valid
            valid = np.concatenate((np.ones((batch_size, 1)),
                                    np.zeros((batch_size, 1))), axis=1)

            # Train the generator
            g_loss, g_acc = self.combined.train_on_batch(noise, valid)

            # Display the progress
            print("%d [D loss: %f, acc: %.2f%%] [G loss: %f, acc: %.2f%%]" %
                  (epoch, d_loss, 100 * d_acc, g_loss, 100 * g_acc))

            # If at save interval => save generated image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch)

    def save_imgs(self, epoch):
        r, c = 5, 5  # Grid size
        noise = np.random.normal(0, 1, (r * c, self.latent_dim))
        gen_imgs = self.generator.predict(noise)

        # Rescale images from [-1, 1] to [0, 1]
        gen_imgs = 0.5 * (gen_imgs + 1)

        fig, axs = plt.subplots(r, c)
        plt.suptitle("Deep Convolutional Generative Adversarial Network")
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, 0, :, :], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("mnist_%d.png" % epoch)
        plt.close()
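The DCGAN is driven the same way as the fully-connected GAN; again, the hyperparameters below are illustrative assumptions rather than values from the source:

if __name__ == '__main__':
    dcgan = DCGAN()
    # Convolutional layers make each step slower; a smaller batch may help
    dcgan.train(n_epochs=200000, batch_size=64, save_interval=50)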
class Autoencoder():
    """An Autoencoder with deep fully-connected neural nets.

    Training Data: MNIST Handwritten Digits (28x28 images)
    """
    def __init__(self):
        self.img_rows = 28
        self.img_cols = 28
        self.img_dim = self.img_rows * self.img_cols
        self.latent_dim = 128  # The dimension of the data embedding

        optimizer = Adam(learning_rate=0.0002, b1=0.5)
        loss_function = SquareLoss

        self.encoder = self.build_encoder(optimizer, loss_function)
        self.decoder = self.build_decoder(optimizer, loss_function)

        # Chain encoder and decoder into a single network
        self.autoencoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        self.autoencoder.layers.extend(self.encoder.layers)
        self.autoencoder.layers.extend(self.decoder.layers)

        print()
        self.autoencoder.summary(name="Autoencoder")

    def build_encoder(self, optimizer, loss_function):
        encoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        encoder.add(Dense(512, input_shape=(self.img_dim,)))
        encoder.add(Activation('leaky_relu'))
        encoder.add(BatchNormalization(momentum=0.8))
        encoder.add(Dense(256))
        encoder.add(Activation('leaky_relu'))
        encoder.add(BatchNormalization(momentum=0.8))
        encoder.add(Dense(self.latent_dim))
        return encoder

    def build_decoder(self, optimizer, loss_function):
        decoder = NeuralNetwork(optimizer=optimizer, loss=loss_function)
        decoder.add(Dense(256, input_shape=(self.latent_dim,)))
        decoder.add(Activation('leaky_relu'))
        decoder.add(BatchNormalization(momentum=0.8))
        decoder.add(Dense(512))
        decoder.add(Activation('leaky_relu'))
        decoder.add(BatchNormalization(momentum=0.8))
        decoder.add(Dense(self.img_dim))
        decoder.add(Activation('tanh'))
        return decoder

    def train(self, n_epochs, batch_size=128, save_interval=50):
        mnist = fetch_openml('mnist_784', version=1, as_frame=False)
        X = mnist.data
        y = mnist.target

        # Rescale to [-1, 1] to match the decoder's tanh output
        X = (X.astype(np.float32) - 127.5) / 127.5

        for epoch in range(n_epochs):
            # Select a random batch of images
            idx = np.random.randint(0, X.shape[0], batch_size)
            imgs = X[idx]

            # Train the autoencoder to reconstruct its input
            loss, _ = self.autoencoder.train_on_batch(imgs, imgs)

            # Display the progress
            print("%d [Autoencoder loss: %f]" % (epoch, loss))

            # If at save interval => save reconstructed image samples
            if epoch % save_interval == 0:
                self.save_imgs(epoch, X)

    def save_imgs(self, epoch, X):
        r, c = 5, 5  # Grid size
        # Select a random sample of images to reconstruct
        idx = np.random.randint(0, X.shape[0], r * c)
        imgs = X[idx]

        # Reconstruct images and reshape to image shape
        gen_imgs = self.autoencoder.predict(imgs).reshape(
            (-1, self.img_rows, self.img_cols))

        # Rescale images from [-1, 1] to [0, 1]
        gen_imgs = 0.5 * gen_imgs + 0.5

        fig, axs = plt.subplots(r, c)
        plt.suptitle("Autoencoder")
        cnt = 0
        for i in range(r):
            for j in range(c):
                axs[i, j].imshow(gen_imgs[cnt, :, :], cmap='gray')
                axs[i, j].axis('off')
                cnt += 1
        fig.savefig("ae_%d.png" % epoch)
        plt.close()
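And a corresponding sketch for the autoencoder; as with the models above, the values passed to train are illustrative:

if __name__ == '__main__':
    ae = Autoencoder()
    # Reconstruction training: the inputs double as targets inside train()
    ae.train(n_epochs=200000, batch_size=64, save_interval=400)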