Example #1
def train_step(real_x, real_y):
    # persistent is set to True because each tape is used more than
    # once to calculate the gradients.
    with tf.GradientTape(persistent=True) as gen_tape, tf.GradientTape(
            persistent=True) as disc_tape:
        # Generator G translates images from domain X to domain Y (X -> Y).
        # Generator F translates images from domain Y to domain X (Y -> X).

        fake_y = generator_g(real_x, training=True)
        cycled_x = generator_f(fake_y, training=True)

        fake_x = generator_f(real_y, training=True)
        cycled_y = generator_g(fake_x, training=True)

        # same_x and same_y are used for the identity loss.
        same_x = generator_f(real_x, training=True)
        same_y = generator_g(real_y, training=True)

        disc_real_x = discriminator_x(real_x, training=True)
        disc_real_y = discriminator_y(real_y, training=True)

        disc_fake_x = discriminator_x(fake_x, training=True)
        disc_fake_y = discriminator_y(fake_y, training=True)

        # Calculate the losses.
        gen_g_loss = generator_loss(disc_fake_y)
        gen_f_loss = generator_loss(disc_fake_x)

        # Total generator loss = adversarial loss + cycle loss + identity loss.
        total_gen_g_loss = gen_g_loss + calc_cycle_loss(
            real_x, cycled_x) + identity_loss(real_x, same_x)
        total_gen_f_loss = gen_f_loss + calc_cycle_loss(
            real_y, cycled_y) + identity_loss(real_y, same_y)

        disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x)
        disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y)

    # Calculate the gradients for the generators and discriminators.
    generator_g_gradients = gen_tape.gradient(
        total_gen_g_loss, generator_g.trainable_variables)
    generator_f_gradients = gen_tape.gradient(
        total_gen_f_loss, generator_f.trainable_variables)

    discriminator_x_gradients = disc_tape.gradient(
        disc_x_loss, discriminator_x.trainable_variables)
    discriminator_y_gradients = disc_tape.gradient(
        disc_y_loss, discriminator_y.trainable_variables)

    # Apply the gradients to the optimizers.
    generator_g_optimizer.apply_gradients(
        zip(generator_g_gradients, generator_g.trainable_variables))

    generator_f_optimizer.apply_gradients(
        zip(generator_f_gradients, generator_f.trainable_variables))

    discriminator_x_optimizer.apply_gradients(
        zip(discriminator_x_gradients,
            discriminator_x.trainable_variables))

    discriminator_y_optimizer.apply_gradients(
        zip(discriminator_y_gradients,
            discriminator_y.trainable_variables))
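
Example #1 calls four loss helpers it never defines. Below is a minimal sketch of them, assuming the definitions from the TensorFlow CycleGAN tutorial that this snippet closely mirrors; the LAMBDA weight of 10 is the tutorial's value and may have been tuned differently in the original project.

import tensorflow as tf

# Stand-ins for the undefined loss helpers, following the TensorFlow
# CycleGAN tutorial's definitions (an assumption about this snippet's intent).
loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)
LAMBDA = 10  # cycle/identity weight from the tutorial; an assumption here

def discriminator_loss(real, generated):
    # Real outputs should be classified as 1, generated outputs as 0.
    real_loss = loss_obj(tf.ones_like(real), real)
    generated_loss = loss_obj(tf.zeros_like(generated), generated)
    return (real_loss + generated_loss) * 0.5

def generator_loss(generated):
    # The generator wants the discriminator to score its output as real.
    return loss_obj(tf.ones_like(generated), generated)

def calc_cycle_loss(real_image, cycled_image):
    # L1 distance between an input and its round-trip reconstruction.
    return LAMBDA * tf.reduce_mean(tf.abs(real_image - cycled_image))

def identity_loss(real_image, same_image):
    # Penalize a generator for altering an image already in its target domain.
    return LAMBDA * 0.5 * tf.reduce_mean(tf.abs(real_image - same_image))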
Example #2
def train_step(real_us, real_label):
    # persistent is set to True because the tape is used more than
    # once to calculate the gradients.
    with tf.GradientTape(persistent=True) as tape:
        # The generator translates LABEL -> US.

        # Generate a fake US image from the real label.
        fake_us = generator(real_label, training=True)

        # Discriminate between the real and the generated US image,
        # conditioned on the real label.
        discriminator_real = discriminator([real_label, real_us], training=True)
        discriminator_fake = discriminator([real_label, fake_us], training=True)

        # Generator loss = adversarial loss + pixel-wise reconstruction loss.
        g_loss = generator_loss(discriminator_fake) + pixel_loss(real_us, fake_us)

        d_loss = discrimination_loss(discriminator_real, discriminator_fake)

    # Calculate the gradients outside the tape context, so the tape does
    # not record the gradient computation itself.
    generator_gradients = tape.gradient(g_loss,
                                        generator.trainable_variables)
    discriminator_gradients = tape.gradient(d_loss,
                                            discriminator.trainable_variables)

    # Apply the gradients to the optimizers.
    generator_optimizer.apply_gradients(zip(generator_gradients,
                                            generator.trainable_variables))
    discriminator_optimizer.apply_gradients(zip(discriminator_gradients,
                                                discriminator.trainable_variables))
    return g_loss, d_loss
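
Both variants of Example #2's train_step rely on two helpers the file never defines, pixel_loss and discrimination_loss. The sketch below fills them in under pix2pix-style assumptions (a weighted L1 pixel loss and binary cross-entropy for the discriminator); the bodies and the hypothetical LAMBDA weight are guesses, only the names come from the snippet.

import tensorflow as tf

# Hypothetical definitions for Example #2's undefined helpers.
bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
LAMBDA = 100  # L1 weight used by pix2pix; an assumption here

def pixel_loss(target, generated):
    # Weighted mean absolute error between target and generated images.
    return LAMBDA * tf.reduce_mean(tf.abs(target - generated))

def discrimination_loss(disc_real, disc_fake):
    # Real inputs should be classified as 1, generated inputs as 0.
    real_loss = bce(tf.ones_like(disc_real), disc_real)
    fake_loss = bce(tf.zeros_like(disc_fake), disc_fake)
    return real_loss + fake_loss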
def train_step(real_us, real_label):
    # persistent is set to True because the tape is used more than
    # once to calculate the gradients.
    with tf.GradientTape(persistent=True) as tape:
        # Generator gen_label translates US -> LABEL.
        # Generator gen_us translates LABEL -> US.

        # Generate a fake label from the real US image.
        fake_label = gen_label(real_us, training=True)
        # Generate a cycled US image from the generated fake label.
        cycled_us = gen_us(fake_label, training=True)

        # Generate a fake US image from the real label.
        fake_us = gen_us(real_label, training=True)
        # Generate a cycled label from the generated fake US image.
        cycled_label = gen_label(fake_us, training=True)

        # Discriminate between the real and the generated label,
        # given the real US image as condition.
        disc_us_real = dis_us([real_us, real_label], training=True)
        disc_us_fake = dis_us([real_us, fake_label], training=True)
        #disc_us_cycle = dis_us([real_us, cycled_label], training=True)

        # Discriminate between the real and the generated US image,
        # given the real label as condition.
        disc_label_real = dis_label([real_label, real_us], training=True)
        disc_label_fake = dis_label([real_label, fake_us], training=True)
        #disc_label_cycle = dis_label([real_label, cycled_us], training=True)

        # Adversarial loss for the generated label.
        gen_label_loss = generator_loss(disc_label_fake)
        # Adversarial loss for the recycled images (disabled).
        #gen_cycle_loss = generator_loss(disc_label_cycle) + generator_loss(disc_us_cycle)
        # Adversarial loss for the generated US image.
        gen_us_loss = generator_loss(disc_us_fake)

        total_cycle_loss = pixel_loss(real_label, cycled_label) + pixel_loss(real_us, cycled_us)

        # Total generator loss = adversarial loss + cycle loss
        # + paired pixel loss against the ground truth.
        total_gen_label_loss = gen_label_loss + total_cycle_loss + pixel_loss(real_label, fake_label)
        total_gen_us_loss = gen_us_loss + total_cycle_loss + pixel_loss(real_us, fake_us)

        disc_label_loss = discrimination_loss(disc_label_real, disc_label_fake)
        disc_us_loss = discrimination_loss(disc_us_real, disc_us_fake)

    # Calculate the gradients for the generators and discriminators
    # outside the tape context.
    gen_label_gradients = tape.gradient(total_gen_label_loss,
                                        gen_label.trainable_variables)
    gen_us_gradients = tape.gradient(total_gen_us_loss,
                                     gen_us.trainable_variables)

    dis_label_gradients = tape.gradient(disc_label_loss,
                                        dis_label.trainable_variables)
    dis_us_gradients = tape.gradient(disc_us_loss,
                                     dis_us.trainable_variables)

    # Apply the gradients to the optimizers.
    gen_label_optimizer.apply_gradients(zip(gen_label_gradients,
                                            gen_label.trainable_variables))
    gen_us_optimizer.apply_gradients(zip(gen_us_gradients,
                                         gen_us.trainable_variables))

    dis_label_optimizer.apply_gradients(zip(dis_label_gradients,
                                            dis_label.trainable_variables))
    dis_us_optimizer.apply_gradients(zip(dis_us_gradients,
                                         dis_us.trainable_variables))
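
Neither Example #2 function shows how train_step is driven. A hypothetical driver loop follows, assuming a paired tf.data.Dataset named paired_ds that yields (us, label) batches; the dataset name and epoch count are illustrative only.

EPOCHS = 40  # illustrative

for epoch in range(EPOCHS):
    # paired_ds is a hypothetical dataset of aligned (US, label) batches.
    for real_us, real_label in paired_ds:
        train_step(real_us, real_label)
    print(f'Finished epoch {epoch + 1}')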
def train_step(real_x, real_y):
    # persistent is set to True because gen_tape and disc_tape are used more
    # than once to calculate the gradients.
    with tf.GradientTape(persistent=True) as gen_tape, tf.GradientTape(
            persistent=True) as disc_tape:
        # Generator G translates X -> Y
        # Generator F translates Y -> X.

        fake_y = generator_g(real_x, training=True)
        cycled_x = generator_f(fake_y, training=True)

        fake_x = generator_f(real_y, training=True)
        cycled_y = generator_g(fake_x, training=True)

        # same_x and same_y are used for identity loss.
        same_x = generator_f(real_x, training=True)
        same_y = generator_g(real_y, training=True)

        disc_real_x = discriminator_x(real_x, training=True)
        disc_real_y = discriminator_y(real_y, training=True)

        disc_fake_x = discriminator_x(fake_x, training=True)
        disc_fake_y = discriminator_y(fake_y, training=True)

        # calculate the loss
        gen_g_loss = generator_loss(disc_fake_y)
        gen_f_loss = generator_loss(disc_fake_x)

        # Total generator loss = adversarial loss + cycle loss + identity loss
        total_gen_g_loss = gen_g_loss + calc_cycle_loss(
            real_x, cycled_x) + identity_loss(real_x, same_x)
        total_gen_f_loss = gen_f_loss + calc_cycle_loss(
            real_y, cycled_y) + identity_loss(real_y, same_y)

        disc_x_loss = discriminator_loss(disc_real_x, disc_fake_x)
        disc_y_loss = discriminator_loss(disc_real_y, disc_fake_y)

    # Calculate the gradients for generator and discriminator
    generator_g_gradients = gen_tape.gradient(
        total_gen_g_loss, generator_g.trainable_variables)
    generator_f_gradients = gen_tape.gradient(
        total_gen_f_loss, generator_f.trainable_variables)

    discriminator_x_gradients = disc_tape.gradient(
        disc_x_loss, discriminator_x.trainable_variables)
    discriminator_y_gradients = disc_tape.gradient(
        disc_y_loss, discriminator_y.trainable_variables)

    # Apply the gradients to the optimizers
    generator_g_optimizer.apply_gradients(
        zip(generator_g_gradients, generator_g.trainable_variables))

    generator_f_optimizer.apply_gradients(
        zip(generator_f_gradients, generator_f.trainable_variables))

    discriminator_x_optimizer.apply_gradients(
        zip(discriminator_x_gradients,
            discriminator_x.trainable_variables))

    discriminator_y_optimizer.apply_gradients(
        zip(discriminator_y_gradients,
            discriminator_y.trainable_variables))
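
The CycleGAN snippets also assume four optimizers already exist. A sketch of their setup, assuming the hyperparameters from the TensorFlow CycleGAN tutorial these snippets mirror (Adam, learning rate 2e-4, beta_1 = 0.5); the original projects may have chosen different values.

import tensorflow as tf

# Optimizer setup assumed by the CycleGAN train_step snippets,
# matching the TensorFlow CycleGAN tutorial's hyperparameters.
generator_g_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
generator_f_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_x_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)
discriminator_y_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)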