Example #1
    def build_mrgan(self):
        # Generator
        self.g = self.generator(self.z)
        self.g_reg = self.generator(self.encoder(self.x), reuse=True)

        # Discriminator
        d_real = self.discriminator(self.x)
        d_real_reg = self.discriminator(self.g_reg, reuse=True)
        d_fake = self.discriminator(self.g, reuse=True)

        # Losses
        # Manifold Step
        # d_loss_1 = tf.reduce_mean(t.safe_log(d_real) + t.safe_log(1. - d_real_reg))
        # g_loss_1 = tf.reduce_mean(self.lambda_1 * t.safe_log(d_real_reg)) - \
        #            t.mse_loss(self.x, self.g_reg, self.batch_size)
        # Diffusion Step
        # d_loss_2 = tf.reduce_mean(t.safe_log(d_real_reg) + t.safe_log(1. - d_fake))
        # g_loss_2 = tf.reduce_mean(t.safe_log(d_fake))

        d_real_loss = -tf.reduce_mean(t.safe_log(d_real))
        d_fake_loss = -tf.reduce_mean(t.safe_log(1. - d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        e_mse_loss = self.lambda_1 * t.mse_loss(
            self.x, self.g_reg, self.batch_size, is_mean=True)
        e_adv_loss = self.lambda_2 * tf.reduce_mean(t.safe_log(d_real_reg))
        self.e_loss = e_adv_loss + e_mse_loss
        self.g_loss = -tf.reduce_mean(t.safe_log(d_fake)) + self.e_loss

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/e_adv_loss", e_adv_loss)
        tf.summary.scalar("loss/e_mse_loss", e_mse_loss)
        tf.summary.scalar("loss/e_loss", self.e_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Collect trainable variables
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]
        e_params = [v for v in t_vars if v.name.startswith('e')]

        # Optimizer
        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.g_loss, var_list=g_params)
        self.e_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.e_loss, var_list=e_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
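
All five examples rely on a small utility module imported as t. Its implementation is not shown here; a minimal sketch of the two helpers used above, assuming TensorFlow 1.x and 4-D image tensors, could look like the following (the epsilon and the exact reductions are assumptions, not confirmed by the source):

    import tensorflow as tf

    def safe_log(x, eps=1e-12):
        # Shift the argument away from zero so the log never returns -inf
        # when the discriminator output saturates at 0.
        return tf.log(x + eps)

    def mse_loss(pred, data, batch_size, is_mean=False):
        # Squared-error loss between two batches. With is_mean=True the
        # error is averaged per element; otherwise it is summed and scaled
        # by the batch size.
        if is_mean:
            return tf.reduce_mean(tf.squared_difference(pred, data))
        return tf.reduce_sum(tf.squared_difference(pred, data)) / batch_size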
Example #2
    def build_ebgan(self):
        # Generator
        self.g = self.generator(self.z)
        self.g_test = self.generator(self.z, reuse=True, is_train=False)

        # Discriminator
        d_embed_real, d_decode_real = self.discriminator(self.x)
        d_embed_fake, d_decode_fake = self.discriminator(self.g, reuse=True)

        d_real_loss = t.mse_loss(d_decode_real, self.x, self.batch_size)
        d_fake_loss = t.mse_loss(d_decode_fake, self.g, self.batch_size)
        self.d_loss = d_real_loss + tf.maximum(0., self.margin - d_fake_loss)

        if self.EnablePullAway:
            self.pt_loss = self.pullaway_loss(d_embed_fake, self.batch_size)
        else:
            # Keep g_loss and the pt_loss summary well-defined when the
            # pull-away term is disabled.
            self.pt_loss = tf.constant(0.)

        self.g_loss = d_fake_loss + self.pt_lambda * self.pt_loss

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        tf.summary.scalar("loss/pt_loss", self.pt_loss)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
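
self.pullaway_loss is defined elsewhere in the model. For reference, a sketch of the pull-away term from the EBGAN paper, assuming TensorFlow 1.x and embeddings already flattened to shape [batch_size, dim], might be:

    def pullaway_loss(embeddings, batch_size):
        # Pull-away term (PT) from the EBGAN paper: the mean squared cosine
        # similarity over all off-diagonal pairs of embedded fake samples,
        # pushing the generator toward more diverse outputs.
        normalized = tf.nn.l2_normalize(embeddings, axis=1)
        similarity = tf.matmul(normalized, normalized, transpose_b=True)
        # The diagonal contributes batch_size ones; average over the
        # remaining batch_size * (batch_size - 1) off-diagonal pairs.
        return (tf.reduce_sum(tf.square(similarity)) - batch_size) / \
               (batch_size * (batch_size - 1.))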
Example #3
    def build_lsgan(self):
        # Generator
        self.g = self.generator(self.z)

        # Discriminator
        d_real = self.discriminator(self.x)
        d_fake = self.discriminator(self.g, reuse=True)

        # LSGAN Loss
        d_real_loss = t.mse_loss(d_real, tf.ones_like(d_real), self.batch_size)
        d_fake_loss = t.mse_loss(d_fake, tf.zeros_like(d_fake),
                                 self.batch_size)
        self.d_loss = (d_real_loss + d_fake_loss) / 2.
        self.g_loss = t.mse_loss(d_fake, tf.ones_like(d_fake), self.batch_size)

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
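
A builder like this only constructs the graph; the training loop lives elsewhere. A hypothetical alternating-update loop (num_steps, real_image_batches, z_dim, sess, and model are all assumed to be defined by the surrounding script) would drive any of these models the same way:

    import numpy as np

    for step in range(num_steps):
        batch_x = next(real_image_batches)   # real images scaled to [-1, 1]
        batch_z = np.random.uniform(-1., 1., size=(batch_size, z_dim))

        # One discriminator update, then one generator update, per step.
        _, d_loss = sess.run([model.d_op, model.d_loss],
                             feed_dict={model.x: batch_x, model.z: batch_z})
        _, g_loss = sess.run([model.g_op, model.g_loss],
                             feed_dict={model.x: batch_x, model.z: batch_z})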
Example #4
    def build_magan(self):
        # Generator
        self.g = self.generator(self.z)

        # Discriminator
        _, d_real = self.discriminator(self.x)
        _, d_fake = self.discriminator(self.g, reuse=True)

        self.d_real_loss = t.mse_loss(self.x, d_real, self.batch_size)
        self.d_fake_loss = t.mse_loss(self.g, d_fake, self.batch_size)
        self.d_loss = self.d_real_loss + tf.maximum(0.,
                                                    self.m - self.d_fake_loss)
        self.g_loss = self.d_fake_loss

        # Summary
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/d_real_loss", self.d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", self.d_fake_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = AdamaxOptimizer(learning_rate=self.lr,
                                    beta1=self.beta1).minimize(
                                        self.d_loss, var_list=d_params)
        self.g_op = AdamaxOptimizer(learning_rate=self.lr,
                                    beta1=self.beta1).minimize(
                                        self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
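
Unlike EBGAN's fixed margin, MAGAN's self.m is meant to be adapted between epochs; the update is not part of this builder. A sketch of the rule described in the MAGAN paper (the variable names here are assumptions):

    def update_margin(margin, e_real, e_fake, e_fake_prev):
        # Margin adaptation, paraphrased from the MAGAN paper: once the
        # discriminator reconstructs real data below the current margin and
        # the fake-sample energy is still rising, tighten the margin to the
        # real energy. e_real / e_fake are per-epoch means of the MSE losses.
        if e_real < margin and e_fake_prev < e_fake:
            margin = e_real
        return margin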
Example #5
    def build_srgan(self):
        # Generator
        self.g = self.generator(self.x_lr)

        # Discriminator
        d_real = self.discriminator(self.x_hr)
        d_fake = self.discriminator(self.g, reuse=True)

        # Losses
        # d_real_loss = -tf.reduce_mean(t.safe_log(d_real))
        # d_fake_loss = -tf.reduce_mean(t.safe_log(1. - d_fake))
        d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
        d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
        self.d_loss = d_real_loss + d_fake_loss

        if self.use_vgg19:
            x_vgg_real = tf.image.resize_images(self.x_hr,
                                                size=self.vgg_image_shape[:2],
                                                align_corners=False)
            x_vgg_fake = tf.image.resize_images(self.g,
                                                size=self.vgg_image_shape[:2],
                                                align_corners=False)

            vgg_bottle_real = self.build_vgg19(x_vgg_real)
            vgg_bottle_fake = self.build_vgg19(x_vgg_fake, reuse=True)

            self.g_cnt_loss = self.cnt_scaling * t.mse_loss(vgg_bottle_fake,
                                                            vgg_bottle_real,
                                                            self.batch_size,
                                                            is_mean=True)
        else:
            self.g_cnt_loss = t.mse_loss(self.g,
                                         self.x_hr,
                                         self.batch_size,
                                         is_mean=True)

        # self.g_adv_loss = self.adv_scaling * tf.reduce_mean(-1. * t.safe_log(d_fake))
        self.g_adv_loss = self.adv_scaling * t.sce_loss(
            d_fake, tf.ones_like(d_fake))
        self.g_loss = self.g_adv_loss + self.g_cnt_loss

        def inverse_transform(img):
            # Map generator output from [-1, 1] back to [0, 255] for PSNR.
            return (img + 1.) * 127.5

        # Calculate PSNR
        g, x_hr = inverse_transform(self.g), inverse_transform(self.x_hr)
        self.psnr = t.psnr_loss(g, x_hr, self.batch_size)

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_cnt_loss", self.g_cnt_loss)
        tf.summary.scalar("loss/g_adv_loss", self.g_adv_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        tf.summary.scalar("misc/psnr", self.psnr)
        tf.summary.scalar("misc/lr", self.lr)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               loss=self.d_loss,
                                               var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               loss=self.g_loss,
                                               var_list=g_params)

        # pre-train
        self.g_init_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                                beta1=self.beta1,
                                                beta2=self.beta2).minimize(
                                                    loss=self.g_cnt_loss,
                                                    var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=2)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
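
Two further helpers from the t module appear only in this example. Plausible sketches, again assuming TensorFlow 1.x, raw logits from the discriminator, and images in [0, 255] for the PSNR (the source signatures are not shown):

    def sce_loss(logits, labels):
        # Sigmoid cross-entropy against fixed 0/1 targets.
        return tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                    labels=labels))

    def psnr_loss(real, fake, batch_size):
        # Mean peak signal-to-noise ratio over the batch, via the built-in
        # tf.image.psnr (TF 1.8+). batch_size is kept only to match the
        # call site above.
        return tf.reduce_mean(tf.image.psnr(fake, real, max_val=255.))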