Example no. 1
0
def plotting_process(vae, decoder, vaegan, input_test, run_folder, latent_dim, w_filename):
    """Restore trained VAE-GAN weights and generate evaluation plots.

    Produces a 5x5 grid of decoder samples drawn from random latent vectors
    and a real-vs-reconstructed comparison figure, both saved under
    ``run_folder``.
    """
    # Restore the full model state from the given checkpoint file.
    vaegan.load_weights(w_filename)
    # 25 latent vectors (5x5 grid) sampled from a standard normal.
    latent_samples = np.random.normal(0, 1, (5 * 5, latent_dim))
    sample_images(decoder, os.path.join(run_folder, "sample-model"), latent_samples, True)
    # One row of 5 real test images next to their reconstructions.
    plotRealReconstruction(vae, input_test, 1, 5, os.path.join(run_folder, "realReconstructedSamples"), True)
Example no. 2
0
def run_training(train_dataset, encoder, decoder, discriminator, vaegan, optimizers, gamma, noise, run_folder, label_smooth_r, label_smooth_f):
    """Run the VAE-GAN training loop for NO_EPOCHS epochs.

    After every epoch: prints the loss components, appends the combined
    encoder/decoder/discriminator losses to their history arrays, writes
    decoder samples for the fixed ``noise`` grid, and checkpoints the model
    weights.

    Returns:
        Tuple of 1-D numpy arrays: (enc_loss_hist, dec_loss_hist, dis_loss_hist).
    """
    enc_loss_hist = np.array([])
    dec_loss_hist = np.array([])
    dis_loss_hist = np.array([])
    for epoch in range(NO_EPOCHS):
        # train() returns [kl_loss, rec_loss, gen_loss, dis_loss].
        kl_loss, rec_loss, gen_loss, dis_loss = (
            train(train_dataset, encoder, decoder, discriminator,
                  optimizers, gamma, label_smooth_r, label_smooth_f)[:4])
        # Original logged on every epoch ((epoch + 1) % 1 is always 0).
        print('epoch:', epoch + 1, 'kl_loss:', kl_loss, 'rec_loss:', rec_loss,
              'gen_loss:', gen_loss, 'dis_loss:', dis_loss)
        # Encoder loss = KL + reconstruction; decoder loss = rec - dis.
        enc_loss_hist = np.append(enc_loss_hist, kl_loss + rec_loss)
        dec_loss_hist = np.append(dec_loss_hist, rec_loss - dis_loss)
        dis_loss_hist = np.append(dis_loss_hist, dis_loss)
        sample_images(decoder,
                      os.path.join(run_folder, "epochSamples", "sample-%d" % (epoch + 1)),
                      noise, True)
        vaegan.save_weights(
            os.path.join(run_folder, "weights", "weights-%d.h5" % (epoch + 1)))
    return enc_loss_hist, dec_loss_hist, dis_loss_hist
Example no. 3
0
def train(gan_model, gen_model, dis_model, x_train, batch_size, no_epochs,
          run_folder, noise, z_dim, label_noise_pct, label_smooth_r,
          label_smooth_f):
    """Train the GAN for ``no_epochs`` epochs over mini-batches of ``x_train``.

    Each epoch shuffles the training indices, alternates discriminator and
    generator updates per batch, logs the epoch-mean losses, writes generator
    samples for the fixed ``noise`` grid, and checkpoints the GAN weights.

    Returns:
        [gen_loss_hist, dis_loss_hist, dis_loss_real_hist, dis_loss_fake_hist],
        each a 1-D numpy array with one mean-loss entry per epoch.
    """
    gen_loss_hist = np.array([])
    dis_loss_hist = np.array([])
    dis_loss_real_hist = np.array([])
    dis_loss_fake_hist = np.array([])
    nb_batch = x_train.shape[0] // batch_size  # drops any partial last batch
    idx_train = np.arange(len(x_train))
    for epoch in range(1, no_epochs + 1):
        np.random.shuffle(idx_train)
        genera_losses = np.array([])
        discri_losses = np.array([])
        discri_loss_r = np.array([])
        discri_loss_f = np.array([])
        for i in range(nb_batch):
            batch_x_train = x_train[idx_train[i * batch_size:(i + 1) *
                                              batch_size]]
            # Discriminator step on real + generated images, then a
            # generator step through the combined GAN model.
            d = train_discriminator(gen_model, dis_model, batch_x_train,
                                    len(batch_x_train), z_dim, label_noise_pct,
                                    label_smooth_r, label_smooth_f)
            g = train_generator(gan_model, batch_size, z_dim)
            genera_losses = np.append(genera_losses, g)
            discri_losses = np.append(discri_losses, d[0])
            discri_loss_r = np.append(discri_loss_r, d[1])
            discri_loss_f = np.append(discri_loss_f, d[2])
        # Compute each epoch mean once (originally recomputed for the print).
        gen_mean = np.mean(genera_losses)
        dis_mean = np.mean(discri_losses)
        dis_real_mean = np.mean(discri_loss_r)
        dis_fake_mean = np.mean(discri_loss_f)
        gen_loss_hist = np.append(gen_loss_hist, gen_mean)
        dis_loss_hist = np.append(dis_loss_hist, dis_mean)
        dis_loss_real_hist = np.append(dis_loss_real_hist, dis_real_mean)
        dis_loss_fake_hist = np.append(dis_loss_fake_hist, dis_fake_mean)
        print(
            "epoch: %d, [Dis_loss: %.3f (Dis_loss_real: %.3f, Dis_loss_fake: %.3f), Gen_loss: %.3f] "
            % (epoch, dis_mean, dis_real_mean, dis_fake_mean, gen_mean))
        sample_images(
            gen_model,
            os.path.join(run_folder, "epochSamples", "sample-%d" % (epoch)),
            noise)
        gan_model.save_weights(
            os.path.join(run_folder, "weights", "weights-%d.h5" % (epoch)))
        # NOTE: removed the original `epoch += 1` — reassigning a `for` loop
        # variable is a no-op (range() controls it) and only misleads readers.
    return [
        gen_loss_hist, dis_loss_hist, dis_loss_real_hist, dis_loss_fake_hist
    ]
Example no. 4
0
def run_training(train_dataset,
                 encoder,
                 decoder,
                 discriminator,
                 vaegan,
                 epoch_start,
                 epochs,
                 optimizers,
                 gamma,
                 noise,
                 run_folder,
                 label_smooth_r,
                 label_smooth_f,
                 history=None):
    """Run (or resume) the VAE-GAN training loop from ``epoch_start``.

    When resuming (``epoch_start != 0``) the three loss histories are taken
    from ``history``; otherwise they start empty. Each epoch logs the loss
    components, pickles the updated histories to ``run_folder/history``,
    writes decoder samples, and checkpoints the model weights.

    Returns:
        Tuple of 1-D numpy arrays: (enc_loss_hist, dec_loss_hist, dis_loss_hist).
    """
    if epoch_start != 0:
        # Resume: history is expected to hold the three prior loss arrays.
        enc_loss_hist = history[0]
        dec_loss_hist = history[1]
        dis_loss_hist = history[2]
    else:
        enc_loss_hist = np.array([])
        dec_loss_hist = np.array([])
        dis_loss_hist = np.array([])

    for epoch in range(epoch_start, epochs):
        # train() returns [kl_loss, rec_loss, gen_loss, dis_loss].
        l = train(train_dataset, encoder, decoder, discriminator, optimizers,
                  gamma, label_smooth_r, label_smooth_f)
        # Original gated on (epoch + 1) % 1 == 0, which is always true:
        # log/checkpoint every epoch.
        print('epoch:', epoch + 1, 'kl_loss:', l[0], 'rec_loss:', l[1],
              'gen_loss:', l[2], 'dis_loss:', l[3])
        enc_loss_hist = np.append(enc_loss_hist, l[0] + l[1])
        dec_loss_hist = np.append(dec_loss_hist, l[1] - l[3])
        dis_loss_hist = np.append(dis_loss_hist, l[3])
        # Persist the histories so training can be resumed after a crash.
        history = np.array([enc_loss_hist, dec_loss_hist, dis_loss_hist])
        with open(os.path.join(run_folder, "history"), "wb") as fp:
            pickle.dump(history, fp)
        sample_images(
            decoder,
            os.path.join(run_folder, "epochSamples",
                         "sample-%d" % (epoch + 1)), noise, True)
        vaegan.save_weights(
            os.path.join(run_folder, "weights",
                         "weights-%d.h5" % (epoch + 1)))
    return enc_loss_hist, dec_loss_hist, dis_loss_hist
Example no. 5
0
def train(vae_model, decoder, train_set, validation_set, batch_size, no_epochs,
          run_folder, noise):
    """Train the VAE and return loss/accuracy histories for train and validation.

    Each epoch shuffles the training indices, fits the model batch-by-batch
    with the input as its own target (autoencoder reconstruction), evaluates
    on ``validation_set``, logs the metrics, writes decoder samples for the
    fixed ``noise`` grid, and checkpoints the model weights.

    Returns:
        (train_loss_hist, train_acc_hist, validation_loss_hist,
        validation_acc_hist), each a 1-D numpy array with one entry per epoch.
    """
    train_loss_hist = np.array([])
    train_acc_hist = np.array([])
    validation_loss_hist = np.array([])
    validation_acc_hist = np.array([])
    nb_batch = train_set.shape[0] // batch_size  # drops any partial last batch
    idx_train = np.arange(len(train_set))
    for epoch in range(1, no_epochs + 1):
        np.random.shuffle(idx_train)
        batch_losses = np.array([])
        batch_acc = np.array([])
        for i in range(nb_batch):
            batch_x_train = train_set[idx_train[i * batch_size:(i + 1) *
                                                batch_size]]
            # Autoencoder target: reconstruct the input itself.
            vae_loss, vae_acc = vae_model.train_on_batch(
                batch_x_train, batch_x_train)
            batch_losses = np.append(batch_losses, vae_loss)
            batch_acc = np.append(batch_acc, vae_acc)
        train_epoch_loss, train_epoch_acc = np.mean(batch_losses), np.mean(
            batch_acc)
        validation_loss, validation_acc = vae_model.evaluate(validation_set,
                                                             validation_set,
                                                             verbose=0)
        train_loss_hist = np.append(train_loss_hist, train_epoch_loss)
        train_acc_hist = np.append(train_acc_hist, train_epoch_acc)
        validation_loss_hist = np.append(validation_loss_hist, validation_loss)
        validation_acc_hist = np.append(validation_acc_hist, validation_acc)

        print(
            "epoch: %d, [TrainSet loss: %.3f, TrainSet accuracy: %.3f] [ValidSet loss: %.3f, ValidSet accuracy: %.3f] "
            % (epoch, train_epoch_loss, train_epoch_acc, validation_loss,
               validation_acc))
        sample_images(
            decoder,
            os.path.join(run_folder, "epochSamples", "sample-%d" % (epoch)),
            noise, True)
        vae_model.save_weights(
            os.path.join(run_folder, "weights", "weights-%d.h5" % (epoch)))
        # NOTE: removed the original `epoch += 1` — reassigning a `for` loop
        # variable is a no-op (range() controls it) and only misleads readers.
    return train_loss_hist, train_acc_hist, validation_loss_hist, validation_acc_hist
Example no. 6
0
def plotting_process(dis, gen, gan, run_folder, latent_dim, w_filename):
    """Restore trained GAN weights and save a grid of generator samples.

    Loads the checkpoint from ``w_filename`` into ``gan``, then renders a
    5x5 grid of images generated from random latent vectors under
    ``run_folder``.
    """
    # Restore the full model state from the given checkpoint file.
    gan.load_weights(w_filename)
    # 25 latent vectors (5x5 grid) sampled from a standard normal.
    latent_samples = np.random.normal(0, 1, (5 * 5, latent_dim))
    sample_images(gen, os.path.join(run_folder, "sample-model"), latent_samples)