def train(self,
           X_train,
           nb_epoch=10,
           nb_iter=250,
           bs=128,
           y_train=None,
           save_path='../models/'):
     """ Train InfoGAN:
         - Train D to discriminate G results, conditioned on label
         - Train G to fool D, conditioned on label
     """
     for e in range(nb_epoch):
         print("Epoch " + str(e + 1) + "/" + str(nb_epoch))
         for i in tqdm(range(nb_iter)):
             # Retrieve discriminator and auxiliary network training data
             X, y, random_z, random_c = self.mixed_data(
                 bs // 2, X_train, y_train, self.G)
             # Train discriminator
             self.D.train_on_batch(X, y)
             # Freeze discriminator
             make_trainable(self.D, False)
             make_trainable(self.Q, False)
             # Train generator i.e. whole model (G + frozen D)
             self.G_and_D.train_on_batch(
                 [z_noise(bs), c_noise(bs)], np.zeros([bs]))
             # Unfreeze discriminator
             make_trainable(self.D, True)
             make_trainable(self.Q, True)
             # Train Auxiliary Network
             self.G_and_Q.train_on_batch([random_z, random_c],
                                         np.zeros([bs // 2]))
         self.G_and_Q.save_weights(save_path + 'InfoGAN_Q' + str(e + 1) +
                                   '.h5')
         self.G_and_D.save_weights(save_path + 'InfoGAN_D' + str(e + 1) +
                                   '.h5')
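The snippets in this listing call a few helpers that are not shown here: make_trainable, z_noise and c_noise. A minimal sketch of what they might look like, assuming Keras models, a 100-dimensional uniform noise vector and 10 one-hot classes (the dimensions are assumptions, not taken from the original code):

import numpy as np

def make_trainable(model, flag):
    # Toggle the trainable flag on a Keras model and all of its layers
    model.trainable = flag
    for layer in model.layers:
        layer.trainable = flag

def z_noise(bs, z_dim=100):
    # Uniform noise in [-1, 1] fed to the generator (z_dim is an assumption)
    return np.random.uniform(-1.0, 1.0, size=(bs, z_dim))

def c_noise(bs, n_classes=10):
    # Random one-hot condition / latent code (n_classes is an assumption)
    return np.eye(n_classes)[np.random.randint(0, n_classes, size=bs)]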
Example #2
 def train(self,
           X_train,
           nb_epoch=10,
           nb_iter=250,
           bs=128,
           y_train=None,
           save_path='../models/'):
     """ Train CGAN:
         - Train D to discriminate G results, conditioned on label
         - Train G to fool D, conditioned on label
     """
     for e in range(nb_epoch):
         print("Epoch " + str(e + 1) + "/" + str(nb_epoch))
         for i in tqdm(range(nb_iter)):
             # Get real and fake data + labels
             X, y, labels = self.mixed_data(bs // 2, X_train, y_train)
             # Train discriminator
             self.D.train_on_batch([X, labels], y)
             # Freeze discriminator
             make_trainable(self.D, False)
             # Train generator i.e. whole model (G + frozen D)
             self.m.train_on_batch([z_noise(bs), c_noise(bs)],
                                   np.zeros([bs]))
             # Unfreeze discriminator
             make_trainable(self.D, True)
         self.m.save_weights(save_path + 'CGAN_' + str(e + 1) + '.h5')
 def train(self,
           X_train,
           nb_epoch=10,
           nb_iter=450,
           bs=128,
           y_train=None,
           save_path='../models/'):
     """ Train DCGAN:
         - Train D to discriminate G results
         - Train G to fool D (D is frozen)
     """
     for e in range(nb_epoch):
         print("Epoch " + str(e + 1) + "/" + str(nb_epoch))
         for i in tqdm(range(nb_iter)):
             # Get real and fake data + labels
             X, y = self.mixed_data(bs // 2, X_train)
             # Train D
             self.D.train_on_batch(X, y)
             # Freeze D
             make_trainable(self.D, False)
             # Train G
             self.m.train_on_batch(z_noise(bs), np.zeros([bs]))
             # Unfreeze D
             make_trainable(self.D, True)
         self.m.save_weights(save_path + 'DCGAN_' + str(e + 1) + '.h5')
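For context, a typical call sequence for one of these train methods might look as follows. The class name DCGAN and the exact preprocessing are assumptions; the plotting helpers later in this listing suggest the images live in the generator's tanh range [-1, 1].

from keras.datasets import mnist

(X_train, y_train), _ = mnist.load_data()
# Scale pixels to [-1, 1] to match a tanh generator output
X_train = X_train.astype('float32') / 127.5 - 1.0
X_train = X_train.reshape(-1, 28, 28, 1)   # channel layout is an assumption

gan = DCGAN()                    # hypothetical class exposing the methods above
gan.pre_train(X_train)           # warm up D on a small real/fake sample
gan.train(X_train, nb_epoch=10)  # alternate D and G updates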
Example #4
 def train(self, X_train, nb_epoch=10, nb_iter=450, bs=128, y_train=None, save_path='../models/'):
     """ Train WGAN:
         - Train D to discriminate fake from real
         - Clip D weights to [-0.01, 0.01]
         - Train G to fool D
     """
     for e in range(nb_epoch):
         print("Epoch " + str(e+1) + "/" + str(nb_epoch))
         for i in tqdm(range(nb_iter)):
             # Get real and fake data + labels
             X,y = self.mixed_data(bs//2, X_train)
             # Train D
             self.D.train_on_batch(X,y)
             # Clip discriminator weights
             for l in self.D.layers:
                 weights = l.get_weights()
                 weights = [np.clip(w, -0.01, 0.01) for w in weights]
                 l.set_weights(weights)
             # Freeze D
             make_trainable(self.D, False)
             # Train G
             self.m.train_on_batch(z_noise(bs), np.zeros([bs]))
             # Unfreeze D
             make_trainable(self.D, True)
         self.m.save_weights(save_path +'WGAN_' + str(e+1) + '.h5')
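The clipping loop above is the weight-clipping form of the WGAN Lipschitz constraint. Pulled out as a standalone helper it could look like this (a sketch, not part of the original listing):

import numpy as np

def clip_weights(model, clip_value=0.01):
    # Clip every weight of every layer to [-clip_value, clip_value]
    for layer in model.layers:
        weights = [np.clip(w, -clip_value, clip_value)
                   for w in layer.get_weights()]
        layer.set_weights(weights)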
 def mixed_data(self, sz, X_train):
     """ Generate fake and real data to train D
     """
     N = X_train.shape[0]
     real_img = X_train[np.random.randint(0, N, size=sz)]
     X = np.concatenate((real_img, self.G.predict(z_noise(sz))))
     return X, [0] * sz + [1] * sz
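Note the label convention used by mixed_data and the train loops above: real images come first and are labelled 0, generated images second and labelled 1, and G is then trained against an all-zero target so that the frozen D is pushed to call G's samples real. A small illustration (bs is just an example value):

import numpy as np

bs = 128
# Discriminator targets produced by mixed_data: real -> 0, fake -> 1
y_for_D = np.array([0] * (bs // 2) + [1] * (bs // 2))
# Generator target used in train_on_batch: all zeros, i.e. "make D say real"
y_for_G = np.zeros([bs])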
Example #6
 def pre_train(self, X_train, y_train=None):
     """ Pre-train D for a couple of iterations
     """
     print("Pre-training D for a couple of iterations...", end='')
     sz = X_train.shape[0]//200
     x1 = np.concatenate([np.random.permutation(X_train)[:sz], self.G.predict(z_noise(sz))])
     self.D.fit(x1, [0]*sz + [1]*sz, batch_size=128, nb_epoch=1, verbose=2)
     print("done.")
Example #7
def plot_results_GAN(G, n=5):
    """ Plots n x n windows from DCGAN and WGAN generator
    """
    img = np.zeros((n * 28, 1))
    for i in range(n):
        col = np.multiply(
            np.add(G.predict(z_noise(n)).reshape(n * 28, 28), 1.0),
            255.0 / 2.0)
        img = np.concatenate((img, col), axis=1)
    plot_large(img)
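plot_large is not defined in this listing; a minimal matplotlib-based sketch of such a helper (an assumption, not the original implementation):

import matplotlib.pyplot as plt

def plot_large(img, size=8):
    # Display a grayscale image grid at a readable size
    plt.figure(figsize=(size, size))
    plt.imshow(img, cmap='gray')
    plt.axis('off')
    plt.show()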
Example #8
 def mixed_data(self, sz, X_train, y_train):
     """ Generate fake and real data to train D. Both real and fake data
     are conditioned on a one-hot encoded vector c.
     """
     permutations = np.random.randint(0, X_train.shape[0], size=sz)
     real_images = X_train[permutations[:sz]]
     labels = to_categorical(y_train[permutations[:sz]], 10)
     X = np.concatenate((real_images, self.G.predict([z_noise(sz),
                                                      labels])))
     labels = np.concatenate((labels, labels))
     return X, [0] * sz + [1] * sz, labels
 def mixed_data(self, sz, X_train, y_train, G):
     """ Generate fake and real data to train D and Q
     """
     # Pre-compute random vectors
     permutations = np.random.randint(0, X_train.shape[0], size=sz)
     random_z = z_noise(sz)  # Noise input
     random_c = c_noise(sz)  # Latent code
     # Sample real images and fake images
     X = np.concatenate(
         (X_train[permutations[:sz]], G.predict([random_z, random_c])))
     return X, [0] * sz + [1] * sz, random_z, random_c
Example #10
def plot_results_CGAN(G):
    """ Plots n x n windows from CGAN generator
    """
    labels = np.arange(0, 10)

    n = len(labels)
    img = np.zeros((n * 28, 1))
    for i in range(n):
        # Remap from tanh range [-1, 1] to image range [0, 255]
        col = np.multiply(np.add(G.predict([z_noise(n), \
            to_categorical(labels,n)]).reshape(n * 28,28), 1.0), 255.0/2.0)
        img = np.concatenate((img, col), axis=1)
    plot_large(img)
 def pre_train(self, X_train, y_train):
     """ Pre-train D for a couple of iterations
     """
     print("Pre-training D for a couple of iterations...", end='')
     sz = X_train.shape[0] // 200
     # Concatenate real and fake images
     real_images = np.random.permutation(X_train)[:sz]
     fake_images = self.G.predict([z_noise(sz), c_noise(sz)])
     x1 = np.concatenate([real_images, fake_images])
     # Train D
     self.D.fit(x1, [0] * sz + [1] * sz,
                batch_size=128,
                nb_epoch=1,
                verbose=2)
     print("done.")
Example #12
 def pre_train(self, X_train, y_train):
      """ Pre-train D for a couple of iterations
      """
      print("Pre-training D for a couple of iterations...", end='')
      sz = X_train.shape[0] // 200
      # Sample real images and the labels to condition on
      permutations = np.random.randint(0, X_train.shape[0], size=sz)
      random_labels = to_categorical(y_train[permutations], 10)
      random_images = X_train[permutations]
      fake_pred = self.G.predict([z_noise(sz), random_labels])
      # Train D for a couple of iterations: real images first (label 0),
      # generated images second (label 1), matching mixed_data
      x1_D = np.concatenate([random_images, fake_pred])
      x2_D = np.concatenate([random_labels, random_labels])
      self.D.fit([x1_D, x2_D], [0] * sz + [1] * sz,
                 batch_size=128,
                 nb_epoch=1,
                 verbose=2)
      print("done.")
Example #13
 def eval_gen_loss(self, n_samples=1000):
     return self.m.evaluate(x=z_noise(n_samples), y=np.zeros([n_samples]))
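One possible way to use eval_gen_loss for monitoring during training (the gan object, X_train and the loop are assumptions, not part of the original listing):

for epoch in range(10):
    gan.train(X_train, nb_epoch=1)
    print('Generator loss vs. frozen D:', gan.eval_gen_loss())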