def train(self, d_model_A, d_model_B, g_model_AtoB, g_model_BtoA,
          c_model_AtoB, c_model_BtoA, dataset):
    '''Training step for the CycleGAN models.'''
    # define properties of the training run
    n_epochs, n_batch = 50, 1
    # determine the output square shape of the discriminator
    n_patch = d_model_A.output_shape[1]
    # unpack dataset
    trainA, trainB = dataset
    # prepare image pool for fakes
    poolA, poolB = list(), list()
    # calculate the number of batches per training epoch
    bat_per_epo = int(len(trainA) / n_batch)
    # calculate the number of training iterations
    n_steps = bat_per_epo * n_epochs
    # manually enumerate training steps
    for i in range(n_steps):
        # select a batch of real samples
        X_realA, y_realA = generate_real_samples(trainA, n_batch, n_patch)
        X_realB, y_realB = generate_real_samples(trainB, n_batch, n_patch)
        # generate a batch of fake samples
        X_fakeA, y_fakeA = generate_fake_samples(g_model_BtoA, X_realB,
                                                 n_patch)
        X_fakeB, y_fakeB = generate_fake_samples(g_model_AtoB, X_realA,
                                                 n_patch)
        # update fakes from the image pools
        X_fakeA = update_image_pool(poolA, X_fakeA)
        X_fakeB = update_image_pool(poolB, X_fakeB)
        # update generator B->A via adversarial, identity, and cycle loss
        g_loss2, _, _, _, _ = c_model_BtoA.train_on_batch(
            [X_realB, X_realA], [y_realA, X_realA, X_realB, X_realA])
        # update discriminator for A -> [real/fake]
        dA_loss1 = d_model_A.train_on_batch(X_realA, y_realA)
        dA_loss2 = d_model_A.train_on_batch(X_fakeA, y_fakeA)
        # update generator A->B via adversarial, identity, and cycle loss
        g_loss1, _, _, _, _ = c_model_AtoB.train_on_batch(
            [X_realA, X_realB], [y_realB, X_realB, X_realA, X_realB])
        # update discriminator for B -> [real/fake]
        dB_loss1 = d_model_B.train_on_batch(X_realB, y_realB)
        dB_loss2 = d_model_B.train_on_batch(X_fakeB, y_fakeB)
        # summarize performance
        print('>%d, dA[%.3f,%.3f] dB[%.3f,%.3f] g[%.3f,%.3f]' %
              (i + 1, dA_loss1, dA_loss2, dB_loss1, dB_loss2, g_loss1,
               g_loss2))
        # evaluate the model performance once per epoch
        if (i + 1) % bat_per_epo == 0:
            # plot A->B translation
            summarize_performance(i, g_model_AtoB, trainA, 'AtoB')
            # plot B->A translation
            summarize_performance(i, g_model_BtoA, trainB, 'BtoA')
        # save the models every five epochs
        if (i + 1) % (bat_per_epo * 5) == 0:
            save_models(i, g_model_AtoB, g_model_BtoA)
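This loop relies on an update_image_pool helper that is not shown. A minimal sketch, assuming the standard 50-image replay buffer from the CycleGAN paper (the pool size and 50/50 reuse rule are assumptions, not taken from the example):

import numpy as np
from numpy.random import randint, random

def update_image_pool(pool, images, max_size=50):
    # replay buffer of past fakes to stabilize discriminator updates
    selected = list()
    for image in images:
        if len(pool) < max_size:
            # pool not yet full: store the new image and use it directly
            pool.append(image)
            selected.append(image)
        elif random() < 0.5:
            # use the fresh image without touching the pool
            selected.append(image)
        else:
            # swap in a random pooled image and train on the older one
            ix = randint(0, len(pool))
            selected.append(pool[ix])
            pool[ix] = image
    return np.asarray(selected)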
Example #2
def train(g_model,
          d_model,
          gan_model,
          dataset,
          latent_dim,
          cat,
          epochs=100,
          batch=64):
    batch_per_epoch = int(dataset.shape[0] / batch)

    steps = batch_per_epoch * epochs

    half_batch = int(batch / 2)

    for i in range(steps):
        # update the discriminator on a half batch of real samples
        X_real, y_real = generate_real_samples(dataset, half_batch)
        d_loss1 = d_model.train_on_batch(X_real, y_real)

        # update the discriminator on a half batch of generated samples
        X_fake, y_fake = generate_fake_samples(g_model, latent_dim, cat,
                                               half_batch)
        d_loss2 = d_model.train_on_batch(X_fake, y_fake)

        # update the generator and auxiliary (category) head via the
        # composite model, with inverted labels for the fake samples
        z_input, cat_codes = generate_latent_points(latent_dim, cat, batch)
        y_gan = np.ones((batch, 1))
        _, g_1, g_2 = gan_model.train_on_batch(z_input, [y_gan, cat_codes])

        print("[INFO] {:d}, d[{:.3f}, {:.3f}], g[{:.3f}], q[{:.3f}]".format(
            i + 1, d_loss1, d_loss2, g_1, g_2))

        # evaluate the model and save a checkpoint every 10 epochs
        if (i + 1) % (batch_per_epoch * 10) == 0:
            summarize_performance(i, g_model, gan_model, latent_dim, cat)
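generate_latent_points here must return both the noise input and the one-hot category codes used as the auxiliary target. A sketch under that assumption (concatenating the codes into a single generator input follows the common InfoGAN setup and is not confirmed by the example):

import numpy as np
from tensorflow.keras.utils import to_categorical

def generate_latent_points(latent_dim, n_cat, n_samples):
    # standard-normal noise vectors
    z_latent = np.random.randn(n_samples, latent_dim)
    # random one-hot category codes, also returned as the auxiliary target
    cat_codes = to_categorical(np.random.randint(0, n_cat, n_samples),
                               num_classes=n_cat)
    # concatenate noise and codes into a single generator input
    z_input = np.hstack((z_latent, cat_codes))
    return [z_input, cat_codes]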
Example #3
def train(dataset, generator, discriminator, gan_model,
          latent_dim=100, n_epochs=20, n_batch=25):
    bat_per_epo = int(dataset.shape[0] / n_batch)
    half_batch = int(n_batch / 2)
    for i in range(n_epochs):
        # enumerate batches over the training set
        for j in range(bat_per_epo):
            start_time = time.time()
            # get randomly selected 'real' samples
            X_real, y_real = utils.generate_real_samples(dataset, half_batch)
            # update discriminator model weights
            d_loss1, _ = discriminator.train_on_batch(X_real, y_real)
            # generate 'fake' examples
            X_fake, y_fake = utils.generate_fake_samples(generator, latent_dim, half_batch)
            # update discriminator model weights
            d_loss2, _ = discriminator.train_on_batch(X_fake, y_fake)
            # prepare points in latent space as input for the generator
            X_gan = utils.generate_latent_points(latent_dim, n_batch)
            # create inverted labels for the fake samples
            y_gan = tf.ones((n_batch, 1))
            # update the generator via the discriminator's error
            g_loss = gan_model.train_on_batch(X_gan, y_gan)
            # summarize loss on this batch
            time_taken = time.time() - start_time
            print('>%d, %d/%d, d1=%.3f, d2=%.3f g=%.3f Time Taken:%.2f seconds' %
                  (i + 1, j + 1, bat_per_epo, d_loss1, d_loss2, g_loss, time_taken))
        # evaluate the model performance every 10 epochs
        if (i + 1) % 10 == 0:
            summarize_performance(i, generator, discriminator, dataset, latent_dim)
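Several of these examples call generate_real_samples and generate_fake_samples without showing them. A minimal sketch, assuming dataset is a NumPy array of preprocessed images and the usual 1 = real / 0 = fake labels:

import numpy as np

def generate_real_samples(dataset, n_samples):
    # draw a random batch of real images with 'real' labels
    ix = np.random.randint(0, dataset.shape[0], n_samples)
    return dataset[ix], np.ones((n_samples, 1))

def generate_fake_samples(g_model, latent_dim, n_samples):
    # sample latent points and pass them through the generator
    z_input = np.random.randn(n_samples, latent_dim)
    X = g_model.predict(z_input, verbose=0)
    return X, np.zeros((n_samples, 1))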
Example #4
def train(generator,
          discriminator,
          gan,
          dataset,
          latent_dim,
          epochs=100,
          batch=128):
    batch_per_epoch = int(dataset.shape[0] / batch)

    half_batch = int(batch / 2)

    for i in range(epochs):
        for j in range(batch_per_epoch):
            # update the discriminator on real, then fake, half batches
            X_real, y_real = generate_real_samples(dataset, half_batch)
            d_loss1, _ = discriminator.train_on_batch(X_real, y_real)

            X_fake, y_fake = generate_fake_samples(generator, latent_dim,
                                                   half_batch)
            d_loss2, _ = discriminator.train_on_batch(X_fake, y_fake)

            # update the generator through the combined model, labelling
            # the fake samples as real
            Xgan = generate_latent_points(latent_dim, batch)
            ygan = np.ones((batch, 1))
            g_loss = gan.train_on_batch(Xgan, ygan)

            print("{:d}, {:d}/{:d}, d1={:.3f}, d2={:.3f}, g={:.3f}".format(
                i + 1, j + 1, batch_per_epoch, d_loss1, d_loss2, g_loss))

    # save the final generator to disk
    generator.save("models/generator.h5")
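The unconditional generate_latent_points used in Examples #3 and #4 (no category codes) reduces to plain Gaussian sampling; a sketch:

import numpy as np

def generate_latent_points(latent_dim, n_samples):
    # draw standard-normal latent vectors, one row per sample
    return np.random.randn(n_samples, latent_dim)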
Example #5
def train_discriminator(model, dataset, n_iter=20, n_batch=25, latent_dim=100):
    half_batch = int(n_batch / 2)
    for i in range(n_iter):
        X_real, y_real = utils.generate_real_samples(dataset=dataset, num_samples=half_batch)
        _, real_acc = model.train_on_batch(X_real, y_real)
        X_fake, y_fake = utils.generate_fake_samples(model, latent_dim=latent_dim, num_samples=half_batch)
        _, fake_acc = model.train_on_batch(X_fake, y_fake)
        print('>%d real=%.0f%% fake=%.0f%%' % (i + 1, real_acc * 100, fake_acc * 100))
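A hypothetical smoke test for this helper; define_discriminator and load_real_samples are illustrative names, not part of the example:

# illustrative driver; the two build helpers are assumed, not shown
model = define_discriminator()
dataset = load_real_samples()
train_discriminator(model, dataset, n_iter=20, n_batch=25)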
Example #6
def summarize_performance(epoch, g_model, d_model, dataset, latent_dim, n_samples=150):
    # prepare real samples
    X_real, y_real = utils.generate_real_samples(dataset, n_samples)
    # evaluate discriminator on real examples
    _, acc_real = d_model.evaluate(X_real, y_real, verbose=0)
    # prepare fake examples
    x_fake, y_fake = utils.generate_fake_samples(g_model, latent_dim, n_samples)
    # evaluate discriminator on fake examples
    _, acc_fake = d_model.evaluate(x_fake, y_fake, verbose=0)
    # summarize discriminator performance
    print('>Accuracy real: %.0f%%, fake: %.0f%%' % (acc_real * 100, acc_fake * 100))
    # save plot
    save_plot(x_fake, epoch)
    # save the generator model to file
    filename = 'generator_model_%03d.h5' % (epoch + 1)
    g_model.save(filename)
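save_plot is not shown; a minimal matplotlib sketch, assuming square grayscale images scaled to [0, 1] and a 10x10 grid (both assumptions):

import matplotlib.pyplot as plt

def save_plot(examples, epoch, n=10):
    # plot an n x n grid of generated images
    for i in range(n * n):
        plt.subplot(n, n, i + 1)
        plt.axis('off')
        plt.imshow(examples[i, :, :, 0], cmap='gray_r')
    plt.savefig('generated_plot_e%03d.png' % (epoch + 1))
    plt.close()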
Example #7
def train(g_model,
          d_model,
          gan_model,
          dataset,
          latent_dim,
          epochs=20,
          batch=64):
    batch_per_epoch = int(dataset.shape[0] / batch)

    steps = batch_per_epoch * epochs

    half_batch = int(batch / 2)

    # loss history, recorded for plotting after the run
    d1_hist, d2_hist, g_hist = list(), list(), list()

    for i in range(steps):
        X_real, y_real = generate_real_samples(dataset, half_batch)

        X_fake, y_fake = generate_fake_samples(g_model, latent_dim, half_batch)

        # update discriminator model
        d_loss1 = d_model.train_on_batch(X_real, y_real)
        d_loss2 = d_model.train_on_batch(X_fake, y_fake)

        # update generator via discriminator's loss
        z_input = generate_latent_points(latent_dim, batch)
        y_real2 = np.ones((batch, 1))

        g_loss = gan_model.train_on_batch(z_input, y_real2)

        print("{:d}, d1={:.3f}, d2={:.3f}, g={:.3f}".format(
            i + 1, d_loss1, d_loss2, g_loss))

        d1_hist.append(d_loss1)
        d2_hist.append(d_loss2)
        g_hist.append(g_loss)

        # summarize performance once per epoch
        if (i + 1) % batch_per_epoch == 0:
            summarize_performance(i, g_model, latent_dim)

    plot_history(d1_hist, d2_hist, g_hist)
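plot_history is not defined in the snippet; a minimal sketch that just plots the three recorded loss series:

import matplotlib.pyplot as plt

def plot_history(d1_hist, d2_hist, g_hist):
    # one curve per recorded loss series
    plt.plot(d1_hist, label='d-real')
    plt.plot(d2_hist, label='d-fake')
    plt.plot(g_hist, label='gen')
    plt.legend()
    plt.savefig('plot_line_plot_loss.png')
    plt.close()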