Example #1
def train(train_datasetA, train_datasetB, epochs, lsgan=True, cyc_lambda=10):

    for epoch in range(epochs):

        start = time.time()

        with tf.GradientTape() as genA2B_tape, tf.GradientTape() as genB2A_tape, \
                tf.GradientTape() as discA_tape, tf.GradientTape() as discB_tape:

            try:
                # Next training minibatches, default size 1
                trainA = next(train_datasetA)
                trainB = next(train_datasetB)
            except tf.errors.OutOfRangeError:
                print("Error, run out of data")
                break

            genA2B_output = genA2B(trainA, training=True)
            genB2A_output = genB2A(trainB, training=True)

            discA_real_output = discA(trainA, training=True)
            discB_real_output = discB(trainB, training=True)

            discA_fake_output = discA(genB2A_output, training=True)
            discB_fake_output = discB(genA2B_output, training=True)

            reconstructedA = genB2A(genA2B_output, training=True)
            reconstructedB = genA2B(genB2A_output, training=True)

            # Discriminator losses (the CycleGAN paper feeds the discriminator from a 50-image history buffer here)
            discA_loss = discriminator_loss(discA_real_output, discA_fake_output, lsgan=lsgan)
            discB_loss = discriminator_loss(discB_real_output, discB_fake_output, lsgan=lsgan)

            genA2B_loss = generator_loss(discB_fake_output, lsgan=lsgan) + \
                cycle_consistency_loss(trainA, trainB, reconstructedA, reconstructedB,
                                       cyc_lambda=cyc_lambda)
            genB2A_loss = generator_loss(discA_fake_output, lsgan=lsgan) + \
                cycle_consistency_loss(trainA, trainB, reconstructedA, reconstructedB,
                                       cyc_lambda=cyc_lambda)

        genA2B_gradients = genA2B_tape.gradient(genA2B_loss, genA2B.trainable_variables)
        genB2A_gradients = genB2A_tape.gradient(genB2A_loss, genB2A.trainable_variables)

        discA_gradients = discA_tape.gradient(discA_loss, discA.trainable_variables)
        discB_gradients = discB_tape.gradient(discB_loss, discB.trainable_variables)

        genA2B_optimizer.apply_gradients(zip(genA2B_gradients, genA2B.trainable_variables))
        genB2A_optimizer.apply_gradients(zip(genB2A_gradients, genB2A.trainable_variables))

        discA_optimizer.apply_gradients(zip(discA_gradients, discA.trainable_variables))
        discB_optimizer.apply_gradients(zip(discB_gradients, discB.trainable_variables))

        if epoch % 40 == 0:
            generate_images(trainA, trainB, genB2A_output, genA2B_output, epoch)

            print('Time taken for epoch {} is {} sec'.format(epoch + 1, time.time() - start))
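The loss helpers this loop relies on (discriminator_loss, generator_loss, cycle_consistency_loss) are not shown. A minimal TensorFlow sketch consistent with the lsgan flag and cyc_lambda weight used above could look as follows; the exact formulation in the original repository may differ.

import tensorflow as tf

def discriminator_loss(real_output, fake_output, lsgan=True):
    if lsgan:
        # Least-squares GAN: real outputs pushed towards 1, fake outputs towards 0
        real_loss = tf.reduce_mean(tf.square(real_output - 1.0))
        fake_loss = tf.reduce_mean(tf.square(fake_output))
    else:
        bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
        real_loss = bce(tf.ones_like(real_output), real_output)
        fake_loss = bce(tf.zeros_like(fake_output), fake_output)
    return (real_loss + fake_loss) * 0.5

def generator_loss(disc_fake_output, lsgan=True):
    if lsgan:
        # The generator wants the discriminator to score its output as real (1)
        return tf.reduce_mean(tf.square(disc_fake_output - 1.0))
    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    return bce(tf.ones_like(disc_fake_output), disc_fake_output)

def cycle_consistency_loss(trainA, trainB, reconstructedA, reconstructedB, cyc_lambda=10):
    # L1 distance between the originals and their cycle reconstructions
    loss = tf.reduce_mean(tf.abs(trainA - reconstructedA)) + \
           tf.reduce_mean(tf.abs(trainB - reconstructedB))
    return cyc_lambda * loss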
Example #2
    def train_step(images):
        noise = tf.random.normal([args.batsize, noise_dim])

        # D and G learn separately
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            generated_images = generator(noise, training=True)

            real_output = discriminator(images, training=True)
            fake_output = discriminator(generated_images, training=True)

            gen_loss = generator_loss(fake_output)
            disc_loss = discriminator_loss(real_output, fake_output,
                                           args.alpha)

        gradients_of_generator = gen_tape.gradient(
            gen_loss, generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(
            disc_loss, discriminator.trainable_variables)

        generator_optimizer.apply_gradients(
            zip(gradients_of_generator, generator.trainable_variables))
        discriminator_optimizer.apply_gradients(
            zip(gradients_of_discriminator,
                discriminator.trainable_variables))

        return gen_loss.numpy(), disc_loss.numpy()
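A step like this is usually driven by an outer epoch loop over a batched tf.data.Dataset. The sketch below is illustrative only; the dataset object and the logging format are assumptions, not part of the original repository.

def train(dataset, epochs):
    for epoch in range(epochs):
        for image_batch in dataset:
            gen_loss, disc_loss = train_step(image_batch)
        # train_step already calls .numpy(), so the losses are plain floats here
        print('epoch {}: gen_loss={:.4f}  disc_loss={:.4f}'.format(
            epoch + 1, gen_loss, disc_loss))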
Example #3
def train_step(input_image, target, epoch):
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        gen_output = generator(input_image, training=True)

        disc_real_output = discriminator([input_image, target], training=True)
        disc_generated_output = discriminator([input_image, gen_output],
                                              training=True)

        gen_total_loss, gen_gan_loss, gen_l1_loss = generator_loss(
            disc_generated_output, gen_output, target)
        disc_loss = discriminator_loss(disc_real_output, disc_generated_output)

    generator_gradients = gen_tape.gradient(gen_total_loss,
                                            generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(
        disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(
        zip(generator_gradients, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(discriminator_gradients, discriminator.trainable_variables))

    with summary_writer.as_default():
        tf.summary.scalar('gen_total_loss', gen_total_loss, step=epoch)
        tf.summary.scalar('gen_gan_loss', gen_gan_loss, step=epoch)
        tf.summary.scalar('gen_l1_loss', gen_l1_loss, step=epoch)
        tf.summary.scalar('disc_loss', disc_loss, step=epoch)
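Example #3 follows the standard pix2pix recipe, where generator_loss returns the total loss together with its adversarial and L1 components. A sketch of that helper, assuming sigmoid cross-entropy on discriminator logits and an L1 weight LAMBDA (commonly set to 100):

loss_object = tf.keras.losses.BinaryCrossentropy(from_logits=True)
LAMBDA = 100  # weight of the L1 term, as in the pix2pix paper

def generator_loss(disc_generated_output, gen_output, target):
    # Adversarial term: the generator wants the discriminator to label its output as real
    gan_loss = loss_object(tf.ones_like(disc_generated_output),
                           disc_generated_output)
    # L1 term: pixel-wise distance to the ground-truth target
    l1_loss = tf.reduce_mean(tf.abs(target - gen_output))
    total_gen_loss = gan_loss + (LAMBDA * l1_loss)
    return total_gen_loss, gan_loss, l1_loss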
Example #4
    def train_step(self, input_image, target):
        # def train_step(self, input_image, target, meta):
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            gen_output = self.generator(input_image, training=True)
            # gen_output = self.generator([input_image, meta], training = True)

            # disc_real_output = self.discriminator([input_image, meta, target], training = True)
            # disc_gen_output = self.discriminator([input_image, meta, gen_output], training = True)
            disc_real_output = self.discriminator([input_image, target],
                                                  training=True)
            disc_gen_output = self.discriminator([input_image, gen_output],
                                                 training=True)

            gen_total_loss, _, _ = generator_loss(disc_gen_output, gen_output,
                                                  target)
            disc_loss = discriminator_loss(disc_real_output, disc_gen_output)

        generator_gradients = gen_tape.gradient(
            gen_total_loss, self.generator.trainable_variables)
        discriminator_gradients = disc_tape.gradient(
            disc_loss, self.discriminator.trainable_variables)

        tf.print(
            'XX:XX:XX     INFO              trainer > Generator Loss:     ',
            gen_total_loss)
        tf.print(
            'XX:XX:XX     INFO              trainer > Discriminator Loss: ',
            disc_loss)

        self.generator_optimizer.apply_gradients(
            zip(generator_gradients, self.generator.trainable_variables))
        self.discriminator_optimizer.apply_gradients(
            zip(discriminator_gradients,
                self.discriminator.trainable_variables))
Example #5
    def train_step(images):
        noise = tf.random.normal([BATCH_SIZE, noise_dim])

        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            generated_images = generator(noise, training=True)

            real_output = discriminator(images, training=True)
            fake_output = discriminator(generated_images, training=True)

            gen_loss = generator_loss(fake_output)
            disc_loss = discriminator_loss(real_output, fake_output)

        gradients_of_generator = gen_tape.gradient(gen_loss, generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, discriminator.trainable_variables)

        generator_optimizer.apply_gradients(zip(gradients_of_generator, generator.trainable_variables))
        discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, discriminator.trainable_variables))
Example #6
    def train_step(input_image, target):
        '''
        Perform one training step.

        Args:
          input_image : Input image
          target      : Output image (ground truth)

        Returns:
          gen_loss    : Generator loss
          disc_loss   : Discriminator loss
          gen_output  : Generator output
        '''
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:

            # Compute the Generator output
            gen_output = generator(input_image, training=True)

            # Compute the Discriminator output for real and generated inputs
            disc_real_output = discriminator([input_image, target],
                                             training=True)
            disc_generated_output = discriminator([input_image, gen_output],
                                                  training=True)

            # Computes the Generator and Discriminator losses
            gen_loss = generator_loss(disc_generated_output, gen_output,
                                      target)
            disc_loss = discriminator_loss(disc_real_output,
                                           disc_generated_output)

        # Apply Gradient Descent
        generator_gradients = gen_tape.gradient(gen_loss,
                                                generator.trainable_variables)
        discriminator_gradients = disc_tape.gradient(
            disc_loss, discriminator.trainable_variables)

        generator_optimizer.apply_gradients(
            zip(generator_gradients, generator.trainable_variables))
        discriminator_optimizer.apply_gradients(
            zip(discriminator_gradients, discriminator.trainable_variables))

        return gen_loss, disc_loss, gen_output
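The matching discriminator_loss assumed by these pix2pix-style steps is usually the sum of the cross-entropy on real and generated outputs; a minimal sketch under the same from_logits assumption:

bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def discriminator_loss(disc_real_output, disc_generated_output):
    # Real pairs should be classified as 1, generated pairs as 0
    real_loss = bce(tf.ones_like(disc_real_output), disc_real_output)
    generated_loss = bce(tf.zeros_like(disc_generated_output),
                         disc_generated_output)
    return real_loss + generated_loss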
Example #7
def train_step(input_image, target):
    with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
        gen_output = generator(input_image, training=True)

        disc_real_output = discriminator([input_image, target], training=True)
        disc_generated_output = discriminator([input_image, gen_output],
                                              training=True)

        gen_loss = generator_loss(disc_generated_output, gen_output, target)
        disc_loss = discriminator_loss(disc_real_output, disc_generated_output)

    generator_gradients = gen_tape.gradient(gen_loss,
                                            generator.trainable_variables)
    discriminator_gradients = disc_tape.gradient(
        disc_loss, discriminator.trainable_variables)

    generator_optimizer.apply_gradients(
        zip(generator_gradients, generator.trainable_variables))
    discriminator_optimizer.apply_gradients(
        zip(discriminator_gradients, discriminator.trainable_variables))
Example #8
def train_step(input_data, target):
    with tf.GradientTape() as gen_tape, tf.GradientTape() as dis_tape:
        gen_output = gen(input_data)

        dis_real_output = dis([input_data, target])
        dis_gene_output = dis([input_data, gen_output])

        tot_gen_loss, gen_loss, gen_l1_loss = model.generator_loss(dis_gene_output, gen_output, target)
        tot_dis_loss = model.discriminator_loss(dis_real_output, dis_gene_output)

    gen_gradients = gen_tape.gradient(tot_gen_loss, gen.trainable_variables)
    dis_gradients = dis_tape.gradient(tot_dis_loss, dis.trainable_variables)

    generator_optimizer.apply_gradients(
        zip(gen_gradients, gen.trainable_variables)
    )

    discriminator_optimizer.apply_gradients(
        zip(dis_gradients, dis.trainable_variables)
    )

    return tot_gen_loss, gen_loss, gen_l1_loss, tot_dis_loss
Example #9
def train_gan(D_net,
              G_net,
              D_optimizer,
              G_optimizer,
              discriminator_loss,
              generator_loss,
              noise_size=96,
              num_epochs=10):
    iter_count = 0
    for epoch in range(num_epochs):
        for x, _ in train_data:
            bs = x.shape[0]
            # Train the discriminator
            real_data = Variable(x)  # real data
            logits_real = D_net(real_data)  # discriminator scores on real data

            sample_noise = (torch.rand(bs, noise_size) -
                            0.5) / 0.5  # uniform noise in [-1, 1]
            g_fake_seed = Variable(sample_noise)
            fake_images = G_net(g_fake_seed)  # generated (fake) data
            logits_fake = D_net(fake_images)  # discriminator scores on fakes

            d_total_error = discriminator_loss(logits_real,
                                               logits_fake)  # discriminator loss
            D_optimizer.zero_grad()
            d_total_error.backward()
            D_optimizer.step()  # update the discriminator

            # Train the generator
            g_fake_seed = Variable(sample_noise)
            fake_images = G_net(g_fake_seed)  # generated (fake) data

            gen_logits_fake = D_net(fake_images)
            g_error = generator_loss(gen_logits_fake)  # generator loss
            G_optimizer.zero_grad()
            g_error.backward()
            G_optimizer.step()  # update the generator
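The discriminator_loss and generator_loss helpers used by this PyTorch loop are not shown either; a common vanilla-GAN formulation with binary cross-entropy on logits would be the following sketch (not necessarily the original implementation):

import torch
import torch.nn.functional as F

def discriminator_loss(logits_real, logits_fake):
    # Real samples should be scored as 1, generated samples as 0
    real_loss = F.binary_cross_entropy_with_logits(
        logits_real, torch.ones_like(logits_real))
    fake_loss = F.binary_cross_entropy_with_logits(
        logits_fake, torch.zeros_like(logits_fake))
    return real_loss + fake_loss

def generator_loss(gen_logits_fake):
    # The generator wants its fakes to be scored as real
    return F.binary_cross_entropy_with_logits(
        gen_logits_fake, torch.ones_like(gen_logits_fake))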
Example #10
  dataset_name = 'fashion_mnist'
  assert dataset_name in ['mnist', 'fashion_mnist']

  if dataset_name == 'mnist':
    mnist_dataset = MnistDataset()
    test_images, test_labels = mnist_dataset.get_test_data()
  if dataset_name == 'fashion_mnist':
    fashin_mnist_dataset = FashinMnistDataset()
    test_images, test_labels = fashin_mnist_dataset.get_test_data()
    random_noise_test_images, _ = fashin_mnist_dataset.get_random_noise_test_data()
    random_noise_test_images = random_noise_test_images.reshape(-1, 28, 28, 1).astype('float32')

  test_images = test_images.reshape(-1, 28, 28, 1).astype('float32')

  print("Compute anomaly scores!!")

  for idx, (test_image, test_label) in enumerate(zip(test_images, test_labels)):
    if dataset_name == 'fashion_mnist' and idx % 2 == 1:
      test_image = random_noise_test_images[idx]
    test_image = (test_image / 127.5) - 1
    test_image = np.expand_dims(test_image, axis=0)
    gen_output = generator(test_image, training=False)
    disc_real_output = discriminator([test_image, test_image], training=False)
    disc_generated_output = discriminator([test_image, gen_output], training=False)

    anomaly_score, con_loss, lat_loss, adv_loss = generator_loss(
        gen_output, test_image, disc_real_output, disc_generated_output)
    generate_images("./generate_image/{}_idx_{}_anomaly_score_{}.jpg".format(test_label, idx, anomaly_score), generator, test_image, test_image)
Example #11
def train_step(real_x, real_y, G_YtoX, G_XtoY, D_X, D_Y, G_YtoX_optimizer,
               G_XtoY_optimizer, D_X_optimizer, D_Y_optimizer, opt):
    # persistent is set to True because the tape is used more than once to calculate the gradients.
    with tf.GradientTape(persistent=True) as tape:
        # Generator G_XtoY translates X -> Y
        # Generator G_YtoX translates Y -> X.

        fake_y = G_XtoY(real_x, training=True)
        cycled_x = G_YtoX(fake_y, training=True)

        fake_x = G_YtoX(real_y, training=True)
        cycled_y = G_XtoY(fake_x, training=True)

        # same_x and same_y are used for identity loss.
        same_x = G_YtoX(real_x, training=True)
        same_y = G_XtoY(real_y, training=True)

        disc_real_x = D_X(real_x, training=True)
        disc_real_y = D_Y(real_y, training=True)

        disc_fake_x = D_X(fake_x, training=True)
        disc_fake_y = D_Y(fake_y, training=True)

        # calculate the loss
        G_XtoY_loss = model.generator_loss(disc_fake_y)
        G_YtoX_loss = model.generator_loss(disc_fake_x)

        if opt["use_cycle_consistency_loss"]:
            total_cycle_loss = model.calc_cycle_loss(
                real_x, cycled_x) + model.calc_cycle_loss(real_y, cycled_y)
        else:
            total_cycle_loss = 0

        # Total generator loss = adversarial loss + cycle loss
        total_G_XtoY_loss = G_XtoY_loss + total_cycle_loss + model.identity_loss(
            real_y, same_y)
        total_G_YtoX_loss = G_YtoX_loss + total_cycle_loss + model.identity_loss(
            real_x, same_x)

        disc_x_loss, update_D_X = model.discriminator_loss(
            disc_real_x, disc_fake_x)
        disc_y_loss, update_D_Y = model.discriminator_loss(
            disc_real_y, disc_fake_y)

        # total loss to be shown
        total_disc_loss = (disc_x_loss + disc_y_loss) / 2
        total_gen_loss = (total_G_XtoY_loss + total_G_YtoX_loss) / 2

    # Calculate the gradients for generator and discriminator

    G_XtoY_gradients = tape.gradient(total_G_XtoY_loss,
                                     G_XtoY.trainable_variables)
    G_YtoX_gradients = tape.gradient(total_G_YtoX_loss,
                                     G_YtoX.trainable_variables)
    if update_D_X:
        D_X_gradients = tape.gradient(disc_x_loss, D_X.trainable_variables)
    if update_D_Y:
        D_Y_gradients = tape.gradient(disc_y_loss, D_Y.trainable_variables)

    # Apply the gradients to the optimizer

    G_XtoY_optimizer.apply_gradients(
        zip(G_XtoY_gradients, G_XtoY.trainable_variables))

    G_YtoX_optimizer.apply_gradients(
        zip(G_YtoX_gradients, G_YtoX.trainable_variables))
    if update_D_X:
        D_X_optimizer.apply_gradients(
            zip(D_X_gradients, D_X.trainable_variables))
    if update_D_Y:
        D_Y_optimizer.apply_gradients(
            zip(D_Y_gradients, D_Y.trainable_variables))

    return total_disc_loss, total_gen_loss
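The calc_cycle_loss and identity_loss helpers assumed here typically follow the L1 formulation from the TensorFlow CycleGAN tutorial; a minimal sketch with a weight LAMBDA (an assumption, commonly 10):

LAMBDA = 10  # cycle-consistency weight

def calc_cycle_loss(real_image, cycled_image):
    # L1 distance between an image and its round-trip reconstruction
    loss = tf.reduce_mean(tf.abs(real_image - cycled_image))
    return LAMBDA * loss

def identity_loss(real_image, same_image):
    # Penalise a generator for changing images that are already in its output domain
    loss = tf.reduce_mean(tf.abs(real_image - same_image))
    return LAMBDA * 0.5 * loss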