Example #1
    def train_D(self, real_images, real_labels):
        num_images = K.int_shape(real_images)[0]
        noise_sample = make_noise(num_images, self.latent_size)
        # sample random class labels in [0, 10) for the generated images
        fake_labels = tf.random.uniform((num_images, 1), 0, 10,
                                        tf.dtypes.int32)

        with tf.GradientTape() as disc_tape:
            fake_images = self.generator([noise_sample, fake_labels],
                                         training=True)

            real_logits, real_aux_logits = self.discriminator(real_images,
                                                              training=True)
            fake_logits, fake_aux_logits = self.discriminator(fake_images,
                                                              training=True)

            # adversarial (real/fake) loss plus auxiliary classification
            # loss on the real images' labels
            d_BC_loss = self.get_d_BC_loss(real_logits, fake_logits)
            d_SCC_loss = self.get_d_SCC_loss(real_labels, real_aux_logits)
            total_loss = d_BC_loss + d_SCC_loss

        disc_gradients = disc_tape.gradient(
            total_loss, self.discriminator.trainable_variables)
        self.d_opt.apply_gradients(
            zip(disc_gradients, self.discriminator.trainable_variables))

        return d_BC_loss, d_SCC_loss
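
The loss helpers are not shown in this example. A minimal sketch of what they could look like for an ACGAN-style discriminator, assuming both heads emit raw logits (these get_d_BC_loss and get_d_SCC_loss bodies are reconstructions, not the project's actual code):

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
scce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

def get_d_BC_loss(real_logits, fake_logits):
    # real images should be scored as 1, generated images as 0
    real_loss = bce(tf.ones_like(real_logits), real_logits)
    fake_loss = bce(tf.zeros_like(fake_logits), fake_logits)
    return real_loss + fake_loss

def get_d_SCC_loss(real_labels, real_aux_logits):
    # the auxiliary head should predict the true class of real images
    return scce(real_labels, real_aux_logits)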
Example #2
File: WGAN.py Project: leesc912/GAN
    def train_D(self, real_images):
        num_images = K.int_shape(real_images)[0]
        noise = make_noise(num_images, self.latent_size)

        with tf.GradientTape() as critic_tape:
            fake_images = self.generator(noise, training=True)

            # interpolate between real and fake samples for the penalty term
            alpha = tf.random.uniform((num_images, 1, 1, 1))  # [0, 1]
            other_samples = alpha * real_images + ((1 - alpha) * fake_images)

            fake_logits = self.critic(fake_images, training=True)
            real_logits = self.critic(real_images, training=True)

            critic_loss = K.mean(fake_logits) - K.mean(real_logits)

            # gradient penalty: K.gradients does not work under eager
            # execution, so differentiate w.r.t. the interpolates with a
            # nested tape instead
            with tf.GradientTape() as gp_tape:
                gp_tape.watch(other_samples)
                other_logits = self.critic(other_samples, training=True)
            sample_gradients = gp_tape.gradient(other_logits, other_samples)
            l2_norm = K.sqrt(K.sum(K.square(sample_gradients), axis=[1, 2, 3]))
            gp = K.mean(K.square(l2_norm - 1.0))

            # Critic loss = fake_loss - real_loss + lambda * gp
            total_loss = critic_loss + self.gp_coefficient * gp

        # Update Critic
        critic_gradients = critic_tape.gradient(
            total_loss, self.critic.trainable_variables)
        self.c_opt.apply_gradients(
            zip(critic_gradients, self.critic.trainable_variables))

        return total_loss
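
In WGAN training the critic is typically updated several times per generator step. A minimal outer loop under that convention; model, dataset, num_epochs, and the value of N_CRITIC are all placeholders for illustration:

N_CRITIC = 5  # critic updates per generator update (assumed value)

# model, dataset, and num_epochs are placeholders, not names from the project
for epoch in range(num_epochs):
    for real_batch in dataset:
        for _ in range(N_CRITIC):
            c_loss = model.train_D(real_batch)
        g_loss = model.train_G()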
Example #3
    def plot_images(self, fname):
        fig = plt.figure(figsize=(8, 8))
        samples = make_noise(100, self.latent_size)
        fake_images = self.gen(samples, training=False)

        for i in range(fake_images.shape[0]):
            plt.subplot(10, 10, i + 1)
            # rescale generator output from [-1, 1] to [0, 255] for display
            plt.imshow(fake_images[i, :, :, 0] * 127.5 + 127.5, cmap="gray")
            plt.axis('off')

        plt.savefig(fname)
        plt.close()
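
make_noise is used throughout these examples but never defined. A plausible one-liner, assuming a standard normal latent prior:

import tensorflow as tf

def make_noise(num_images, latent_size):
    # one latent vector per requested image
    return tf.random.normal((num_images, latent_size))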
Example #4
File: WGAN.py Project: leesc912/GAN
    def train_G(self):
        noise = make_noise(self.batch_size, self.latent_size)
        with tf.GradientTape() as gen_tape:
            fake_images = self.generator(noise, training=True)
            fake_logits = self.critic(fake_images, training=True)
            g_loss = -K.mean(fake_logits)  # maximize the critic's score on fakes

        gen_gradients = gen_tape.gradient(g_loss,
                                          self.generator.trainable_variables)
        self.g_opt.apply_gradients(
            zip(gen_gradients, self.generator.trainable_variables))

        return g_loss
Example #5
    def train_G(self, num_images):
        noise_sample = make_noise(num_images, self.latent_size)

        with tf.GradientTape() as gen_tape:
            fake_images = self.generator(noise_sample, training=True)
            fake_logits = self.discriminator(fake_images, training=True)

            g_loss = self.get_generator_loss(fake_logits)

        # compute gradients and update the generator
        gradients = gen_tape.gradient(g_loss,
                                      self.generator.trainable_variables)
        self.g_opt.apply_gradients(
            zip(gradients, self.generator.trainable_variables))

        return g_loss
Example #6
    def train_D(self, real_images):
        num_images = K.int_shape(real_images)[0]
        noise_sample = make_noise(num_images, self.latent_size)

        with tf.GradientTape() as disc_tape:
            fake_images = self.generator(noise_sample, training=True)

            real_logits = self.discriminator(real_images, training=True)
            fake_logits = self.discriminator(fake_images, training=True)

            d_loss = self.get_discriminator_loss(real_logits, fake_logits)

        disc_gradients = disc_tape.gradient(
            d_loss, self.discriminator.trainable_variables)
        self.d_opt.apply_gradients(
            zip(disc_gradients, self.discriminator.trainable_variables))

        return d_loss
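
get_generator_loss and get_discriminator_loss are not shown either. For a vanilla GAN whose discriminator outputs logits, they would commonly be (a sketch, not the project's code):

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)

def get_generator_loss(fake_logits):
    # the generator wants its samples scored as real
    return bce(tf.ones_like(fake_logits), fake_logits)

def get_discriminator_loss(real_logits, fake_logits):
    real_loss = bce(tf.ones_like(real_logits), real_logits)
    fake_loss = bce(tf.zeros_like(fake_logits), fake_logits)
    return real_loss + fake_loss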
Example #7
    def train_G(self, num_images):
        samples = make_noise(num_images, self.latent_size)

        with tf.GradientTape() as gen_tape:
            fake_images = self.gen(samples, training=True)
            reconstructed_images, hidden_space = self.disc(fake_images,
                                                           training=True)

            # reconstruction loss plus the pulling-away (PT) regularization
            # term computed on the discriminator's hidden codes
            loss = self.mse(fake_images, reconstructed_images)
            pt = self.repelling_regularizer(hidden_space, hidden_space)
            loss += self.pt_ratio * pt

            self.g_loss_metric.update_state(loss)

        gen_grads = gen_tape.gradient(loss, self.gen.trainable_variables)
        self.g_opt.apply_gradients(zip(gen_grads,
                                       self.gen.trainable_variables))
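
repelling_regularizer presumably implements the pulling-away term (PT) from the EBGAN paper, which penalizes pairwise cosine similarity between hidden codes so generated samples spread out. A sketch of that term, assuming hidden_space is a batch of feature vectors:

import tensorflow as tf

def repelling_regularizer(s1, s2):
    # flatten and L2-normalize each sample's hidden code
    s1 = tf.nn.l2_normalize(tf.reshape(s1, (tf.shape(s1)[0], -1)), axis=1)
    s2 = tf.nn.l2_normalize(tf.reshape(s2, (tf.shape(s2)[0], -1)), axis=1)
    similarity = tf.matmul(s1, s2, transpose_b=True)  # pairwise cosines
    n = tf.cast(tf.shape(s1)[0], tf.float32)
    # drop the diagonal self-similarities and average over ordered pairs
    return (tf.reduce_sum(tf.square(similarity)) - n) / (n * (n - 1.0))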
Example #8
    def plot_images(self, fname):
        fig = plt.figure(figsize=(8, 8))

        for number in range(10):  # generate ten images for each digit 0-9
            noise = make_noise(10, self.latent_size)
            labels = np.full((10, 1), number)
            fake_images = self.generator([noise, labels], training=False)

            for i in range(10):
                plt.subplot(10, 10, number * 10 + i + 1)
                plt.imshow(fake_images[i, :, :, 0] * 127.5 + 127.5,
                           cmap='gray')
                plt.axis('off')

        plt.savefig(fname)
        plt.close()
Example #9
    def train_G(self, num_images):
        noise_sample = make_noise(num_images, self.latent_size)
        fake_labels = tf.random.uniform((num_images, 1), 0, 10,
                                        tf.dtypes.int32)

        with tf.GradientTape() as gen_tape:
            fake_images = self.generator([noise_sample, fake_labels],
                                         training=True)
            fake_logits, fake_aux_logits = self.discriminator(fake_images,
                                                              training=True)

            # adversarial loss plus auxiliary classification loss on the
            # sampled fake labels
            g_loss = self.get_g_loss(fake_logits, fake_aux_logits, fake_labels)

        gen_gradients = gen_tape.gradient(g_loss,
                                          self.generator.trainable_variables)
        self.g_opt.apply_gradients(
            zip(gen_gradients, self.generator.trainable_variables))

        return g_loss
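
get_g_loss for the ACGAN generator would mirror the discriminator's two-part loss: fool the real/fake head while making the auxiliary head recover the labels the fakes were conditioned on. A sketch under the same from_logits assumption as above:

import tensorflow as tf

bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
scce = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)

def get_g_loss(fake_logits, fake_aux_logits, fake_labels):
    adv_loss = bce(tf.ones_like(fake_logits), fake_logits)  # look real
    aux_loss = scce(fake_labels, fake_aux_logits)  # keep the conditioned label
    return adv_loss + aux_loss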
Example #10
    def train_D(self, real_images):
        num_images = tf.shape(real_images)[0]
        samples = make_noise(num_images, self.latent_size)

        with tf.GradientTape() as disc_tape:
            fake_images = self.gen(samples, training=True)

            reconstructed_real_images, _ = self.disc(real_images,
                                                     training=True)
            reconstructed_fake_images, _ = self.disc(fake_images,
                                                     training=True)

            loss_real = self.mse(real_images, reconstructed_real_images)
            loss_fake = self.mse(fake_images, reconstructed_fake_images)
            # margin loss: penalize fake reconstructions only while their
            # error is still below the margin
            loss_fake = tf.math.maximum(
                tf.cast(self.margin, tf.float32) - loss_fake, 0.0)

            loss = loss_real + loss_fake
            self.d_loss_metric.update_state(loss)

        disc_grads = disc_tape.gradient(loss, self.disc.trainable_variables)
        self.d_opt.apply_gradients(
            zip(disc_grads, self.disc.trainable_variables))
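
For reference, the margin loss above restated as a standalone function; mse and the default margin value here are assumptions (the snippet reads both from self):

import tensorflow as tf

mse = tf.keras.losses.MeanSquaredError()

def ebgan_d_loss(real_images, rec_real, fake_images, rec_fake, margin=20.0):
    loss_real = mse(real_images, rec_real)
    # hinge the fake term: no gradient once fakes reconstruct worse
    # than the margin
    loss_fake = tf.maximum(margin - mse(fake_images, rec_fake), 0.0)
    return loss_real + loss_fake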