Example #1
    def build_sagan(self):
        # Generator
        self.g = self.generator(self.z)
        self.g_test = self.generator(self.z_test, reuse=True)

        # Discriminator
        d_real = self.discriminator(self.x)
        d_fake = self.discriminator(self.g, reuse=True)

        # Losses
        if self.use_hinge_loss:
            d_real_loss = tf.reduce_mean(tf.nn.relu(1.0 - d_real))
            d_fake_loss = tf.reduce_mean(tf.nn.relu(1.0 + d_fake))
            self.d_loss = d_real_loss + d_fake_loss
            self.g_loss = -tf.reduce_mean(d_fake)
        else:
            d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
            d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
            self.d_loss = d_real_loss + d_fake_loss
            self.g_loss = t.sce_loss(d_fake, tf.ones_like(d_fake))

        # gradient-penalty
        if self.use_gp:
            alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0.0, maxval=1.0, name='alpha')
            interp = alpha * self.x + (1.0 - alpha) * self.g
            d_interp = self.discriminator(interp, reuse=True)
            gradients = tf.gradients(d_interp, interp)[0]
            slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))  # L2 norm over all non-batch axes
            self.gp = tf.reduce_mean(tf.square(slopes - 1.0))

            # Update D loss
            self.d_loss += self.lambda_ * self.gp

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        if self.use_gp:
            tf.summary.scalar("misc/gp", self.gp)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        # TTUR: the discriminator gets a larger learning rate than the generator
        self.d_op = tf.train.AdamOptimizer(self.lr * 4, beta1=self.beta1, beta2=self.beta2).minimize(
            self.d_loss, var_list=d_params
        )
        self.g_op = tf.train.AdamOptimizer(self.lr * 1, beta1=self.beta1, beta2=self.beta2).minimize(
            self.g_loss, var_list=g_params
        )

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter(self.graph_path, self.s.graph)
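
These build methods all lean on a helper module imported as t, whose source is not shown on this page. A minimal sketch of what t.sce_loss plausibly computes, inferred from how it is called (an assumption, not the repo's actual code):

    import tensorflow as tf

    def sce_loss(logits, labels):
        # Batch-averaged sigmoid cross-entropy on raw logits; a plausible
        # stand-in for t.sce_loss (assumption).
        return tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,
                                                    labels=labels))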
Example #2
    def build_dragan(self):
        # Generator
        self.g = self.generator(self.z)

        # Discriminator
        d_real = self.discriminator(self.x)
        d_fake = self.discriminator(self.g, reuse=True)

        # sce losses
        d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
        d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        self.g_loss = t.sce_loss(d_fake, tf.ones_like(d_fake))

        # DRAGAN loss with GP (gradient penalty)
        alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1],
                                  minval=0.0,
                                  maxval=1.0,
                                  name='alpha')
        diff = self.x_p - self.x
        interpolates = self.x + alpha * diff
        d_inter = self.discriminator(interpolates, reuse=True)
        grads = tf.gradients(d_inter, [interpolates])[0]
        slopes = tf.sqrt(tf.reduce_sum(tf.square(grads), axis=[1, 2, 3]))  # L2 norm over all non-batch axes
        self.gp = tf.reduce_mean(tf.square(slopes - 1.0))

        # update d_loss with gp
        self.d_loss += self.lambda_ * self.gp

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        tf.summary.scalar("misc/gp", self.gp)

        # Collect trainer values
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        # Optimizer
        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1).minimize(
                                               self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model Saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
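
self.x_p is the noise-perturbed copy of the real batch that DRAGAN interpolates toward; its construction is not shown in this method. A minimal sketch following the DRAGAN paper's x + 0.5 * sigma_x * U(0, 1) scheme (an assumption about how this repo builds it):

    def perturb_batch(x):
        # Perturb real samples with uniform noise scaled by the batch
        # standard deviation, as in the DRAGAN paper (hypothetical helper).
        _, var = tf.nn.moments(x, axes=[0, 1, 2, 3])
        return x + 0.5 * tf.sqrt(var) * tf.random_uniform(tf.shape(x))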
Example #3
    def build_acgan(self):
        # Generator
        self.g = self.generator(self.z, self.y)

        # Discriminator
        c_real, d_real, _ = self.discriminator(self.x)
        c_fake, d_fake, _ = self.discriminator(self.g, reuse=True)

        # sigmoid ce loss
        d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
        d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        self.g_loss = t.sce_loss(d_fake, tf.ones_like(d_fake))

        # softmax ce loss
        c_real_loss = t.softce_loss(c_real, self.y)
        c_fake_loss = t.softce_loss(c_fake, self.y)
        self.c_loss = c_real_loss + c_fake_loss

        # self.d_loss = self.d_loss + self.c_loss
        # self.g_loss = self.g_loss - self.c_loss

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/c_real_loss", c_real_loss)
        tf.summary.scalar("loss/c_fake_loss", c_fake_loss)
        tf.summary.scalar("loss/c_loss", self.c_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]
        c_params = [v for v in t_vars if v.name.startswith('d') or v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(self.lr, beta1=self.beta1, beta2=self.beta2).minimize(
            self.d_loss, var_list=d_params
        )
        self.g_op = tf.train.AdamOptimizer(self.lr, beta1=self.beta1, beta2=self.beta2).minimize(
            self.g_loss, var_list=g_params
        )
        self.c_op = tf.train.AdamOptimizer(self.lr, beta1=self.beta1, beta2=self.beta2).minimize(
            self.c_loss, var_list=c_params
        )
        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
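
t.softce_loss is called with class logits and the one-hot labels self.y; a plausible definition (an assumption) is batch-averaged softmax cross-entropy:

    def softce_loss(logits, labels):
        # Batch-averaged softmax cross-entropy against one-hot labels;
        # a plausible stand-in for t.softce_loss (assumption).
        return tf.reduce_mean(
            tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits,
                                                       labels=labels))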
Example #4
    def build_dcgan(self):
        # Generator
        self.g = self.generator(self.z)
        self.g_test = self.generator(self.z, reuse=True, is_train=False)

        # Discriminator
        _, d_real = self.discriminator(self.x)
        _, d_fake = self.discriminator(self.g, reuse=True)

        # Losses
        """
        d_real_loss = -tf.reduce_mean(log(d_real))
        d_fake_loss = -tf.reduce_mean(log(1. - d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        self.g_loss = -tf.reduce_mean(log(d_fake))
        """
        d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
        d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        self.g_loss = t.sce_loss(d_fake, tf.ones_like(d_fake))

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Collect trainer values
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        # Optimizer
        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
            self.d_loss, var_list=d_params
        )
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1).minimize(
            self.g_loss, var_list=g_params
        )

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model Saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
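
These build methods only assemble the graph; training happens elsewhere. A hedged sketch of the usual alternating update loop (model is the built class instance; next_batch, num_steps, batch_size, and z_dim are hypothetical):

    import numpy as np
    import tensorflow as tf

    with tf.Session() as s:
        s.run(tf.global_variables_initializer())
        for step in range(num_steps):
            batch_x = next_batch()  # hypothetical data feed
            batch_z = np.random.uniform(-1., 1., size=[batch_size, z_dim])
            # one discriminator step, then one generator step
            _, d_loss = s.run([model.d_op, model.d_loss],
                              feed_dict={model.x: batch_x, model.z: batch_z})
            _, g_loss = s.run([model.g_op, model.g_loss],
                              feed_dict={model.x: batch_x, model.z: batch_z})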
Example #5
    def build_sagan(self):
        # Generator
        self.g = self.generator(self.z)

        # Discriminator
        d_real = self.discriminator(self.x)
        d_fake = self.discriminator(self.g, reuse=True)

        # Losses
        d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
        d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        self.g_loss = t.sce_loss(d_fake, tf.ones_like(d_fake))

        self.inception_score = t.inception_score(self.g)

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        tf.summary.scalar("metric/inception_score", self.inception_score)
        tf.summary.scalar("metric/fid_score", self.fid_score)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(self.lr * 4,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(self.lr * 1,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
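
The SAGAN-specific part, the self-attention layer inside the generator and discriminator, is not visible in this method. A minimal sketch of such a layer (an assumption, not the repo's exact implementation):

    def self_attention(x, name='attention'):
        # SAGAN-style self-attention over spatial positions (sketch).
        # x: [B, H, W, C] with statically known H, W, C.
        with tf.variable_scope(name):
            _, h, w, c = x.get_shape().as_list()
            f = tf.reshape(tf.layers.conv2d(x, c // 8, 1), [-1, h * w, c // 8])  # query
            g = tf.reshape(tf.layers.conv2d(x, c // 8, 1), [-1, h * w, c // 8])  # key
            v = tf.reshape(tf.layers.conv2d(x, c, 1), [-1, h * w, c])            # value
            attn = tf.nn.softmax(tf.matmul(f, g, transpose_b=True))  # [B, HW, HW]
            o = tf.reshape(tf.matmul(attn, v), [-1, h, w, c])
            gamma = tf.get_variable('gamma', [], initializer=tf.zeros_initializer())
            return x + gamma * o  # residual connection gated by a learned scalar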
Example #6
    def build_cogan(self):
        # Generator
        self.g_1 = self.generator(self.z, self.y, share_params=False, reuse=False, name='g1')
        self.g_2 = self.generator(self.z, self.y, share_params=True, reuse=False, name='g2')

        self.g_sample_1 = self.generator(self.z, self.y, share_params=True, reuse=True, name='g1')
        self.g_sample_2 = self.generator(self.z, self.y, share_params=True, reuse=True, name='g2')

        # Discriminator
        d_1_real = self.discriminator(self.x_1, self.y, share_params=False, reuse=False, name='d1')
        d_2_real = self.discriminator(self.x_2, self.y, share_params=True, reuse=False, name='d2')
        d_1_fake = self.discriminator(self.g_1, self.y, share_params=True, reuse=True, name='d1')
        d_2_fake = self.discriminator(self.g_2, self.y, share_params=True, reuse=True, name='d2')

        # Losses
        d_1_real_loss = t.sce_loss(d_1_real, tf.ones_like(d_1_real))
        d_1_fake_loss = t.sce_loss(d_1_fake, tf.zeros_like(d_1_fake))
        d_2_real_loss = t.sce_loss(d_2_real, tf.ones_like(d_2_real))
        d_2_fake_loss = t.sce_loss(d_2_fake, tf.zeros_like(d_2_fake))
        self.d_1_loss = d_1_real_loss + d_1_fake_loss
        self.d_2_loss = d_2_real_loss + d_2_fake_loss
        self.d_loss = self.d_1_loss + self.d_2_loss

        g_1_loss = t.sce_loss(d_1_fake, tf.ones_like(d_1_fake))
        g_2_loss = t.sce_loss(d_2_fake, tf.ones_like(d_2_fake))
        self.g_loss = g_1_loss + g_2_loss

        # Summary
        tf.summary.scalar("loss/d_1_real_loss", d_1_real_loss)
        tf.summary.scalar("loss/d_1_fake_loss", d_1_fake_loss)
        tf.summary.scalar("loss/d_2_real_loss", d_2_real_loss)
        tf.summary.scalar("loss/d_2_fake_loss", d_2_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_1_loss", g_1_loss)
        tf.summary.scalar("loss/g_2_loss", g_2_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1, beta2=self.beta2).minimize(
            self.d_loss, var_list=d_params
        )
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr, beta1=self.beta1, beta2=self.beta2).minimize(
            self.g_loss, var_list=g_params
        )

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
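
The share_params / reuse flags suggest the two generators (and discriminators) share their early layers across domains. A hedged sketch of how that wiring could look with variable scopes (shared_trunk and domain_head are hypothetical helpers; this is not the repo's code):

    def generator(z, y, share_params, reuse, name):
        # The shared trunk lives in one scope whose variables are reused
        # once created (share_params=True); only the head is per-domain.
        with tf.variable_scope('gen_shared', reuse=share_params):
            h = shared_trunk(z, y)   # hypothetical shared layers
        with tf.variable_scope(name, reuse=reuse):
            return domain_head(h)    # hypothetical per-domain head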
Example #7
    def build_srgan(self):
        # Generator
        self.g = self.generator(self.x_lr)

        # Discriminator
        d_real = self.discriminator(self.x_hr)
        d_fake = self.discriminator(self.g, reuse=True)

        # Losses
        # d_real_loss = -tf.reduce_mean(t.safe_log(d_real))
        # d_fake_loss = -tf.reduce_mean(t.safe_log(1. - d_fake))
        d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
        d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
        self.d_loss = d_real_loss + d_fake_loss

        if self.use_vgg19:
            x_vgg_real = tf.image.resize_images(self.x_hr,
                                                size=self.vgg_image_shape[:2],
                                                align_corners=False)
            x_vgg_fake = tf.image.resize_images(self.g,
                                                size=self.vgg_image_shape[:2],
                                                align_corners=False)

            vgg_bottle_real = self.build_vgg19(x_vgg_real)
            vgg_bottle_fake = self.build_vgg19(x_vgg_fake, reuse=True)

            self.g_cnt_loss = self.cnt_scaling * t.mse_loss(vgg_bottle_fake,
                                                            vgg_bottle_real,
                                                            self.batch_size,
                                                            is_mean=True)
        else:
            self.g_cnt_loss = t.mse_loss(self.g,
                                         self.x_hr,
                                         self.batch_size,
                                         is_mean=True)

        # self.g_adv_loss = self.adv_scaling * tf.reduce_mean(-1. * t.safe_log(d_fake))
        self.g_adv_loss = self.adv_scaling * t.sce_loss(
            d_fake, tf.ones_like(d_fake))
        self.g_loss = self.g_adv_loss + self.g_cnt_loss

        def inverse_transform(img):
            return (img + 1.0) * 127.5

        # calculate PSNR
        g, x_hr = inverse_transform(self.g), inverse_transform(self.x_hr)
        self.psnr = t.psnr_loss(g, x_hr, self.batch_size)

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_cnt_loss", self.g_cnt_loss)
        tf.summary.scalar("loss/g_adv_loss", self.g_adv_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        tf.summary.scalar("misc/psnr", self.psnr)
        tf.summary.scalar("misc/lr", self.lr)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               loss=self.d_loss,
                                               var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               loss=self.g_loss,
                                               var_list=g_params)

        # pre-train
        self.g_init_op = tf.train.AdamOptimizer(learning_rate=self.lr,
                                                beta1=self.beta1,
                                                beta2=self.beta2).minimize(
                                                    loss=self.g_cnt_loss,
                                                    var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=2)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
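
inverse_transform maps images from [-1, 1] back to [0, 255] before scoring. A plausible definition of t.psnr_loss under that convention (an assumption):

    def psnr(a, b, max_val=255.0):
        # Peak signal-to-noise ratio in dB between image batches in
        # [0, 255]; a plausible stand-in for t.psnr_loss (assumption).
        mse = tf.reduce_mean(tf.squared_difference(a, b))
        return 10.0 * tf.log(max_val ** 2 / mse) / tf.log(10.0)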
Example #8
    def build_stargan(self):
        def gp_loss(real, fake, eps=self.epsilon):
            # alpha = tf.random_uniform(shape=real.get_shape(), minval=0., maxval=1., name='alpha')
            # diff = fake - real  # fake data - real data
            # interpolates = real + alpha * diff
            interpolates = eps * real + (1.0 - eps) * fake
            d_interp = self.discriminator(interpolates, reuse=True)
            gradients = tf.gradients(d_interp, [interpolates])[0]
            slopes = tf.sqrt(
                tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))  # norm over all non-batch axes
            gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0))
            return gradient_penalty

        x_img_a = self.x_A[:, :, :, :self.channel]
        x_attr_a = self.x_A[:, :, :, self.channel:]
        x_img_b = self.x_B[:, :, :, :self.channel]
        # x_attr_b = self.x_B[:, :, :, self.channel:]

        # Generator
        self.fake_B = self.generator(self.x_A)
        gen_in = tf.concat([self.fake_B, x_attr_a], axis=3)
        self.fake_A = self.generator(gen_in, reuse=True)

        # Discriminator
        d_src_real_b, d_aux_real_b = self.discriminator(x_img_b)
        g_src_fake_b, g_aux_fake_b = self.discriminator(
            self.fake_B, reuse=True)  # used at updating G net
        # self.fake_x_B is assumed to be a placeholder fed with previously
        # generated samples (its construction is not in this method)
        d_src_fake_b, d_aux_fake_b = self.discriminator(
            self.fake_x_B, reuse=True)  # used at updating D net

        # using WGAN-GP losses
        gp = gp_loss(x_img_b, self.fake_x_B)
        d_src_loss = tf.reduce_mean(d_src_fake_b) - tf.reduce_mean(
            d_src_real_b) + gp
        d_aux_loss = t.sce_loss(d_aux_real_b, self.y_B)

        self.d_loss = d_src_loss + self.lambda_cls * d_aux_loss
        g_src_loss = -tf.reduce_mean(g_src_fake_b)
        g_aux_fake_loss = t.sce_loss(g_aux_fake_b, self.y_B)
        g_rec_loss = t.l1_loss(x_img_a, self.fake_A)
        self.g_loss = g_src_loss + self.lambda_cls * g_aux_fake_loss + self.lambda_rec * g_rec_loss

        # Summary
        tf.summary.scalar("loss/d_src_loss", d_src_loss)
        tf.summary.scalar("loss/d_aux_loss", d_aux_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_src_loss", g_src_loss)
        tf.summary.scalar("loss/g_aux_fake_loss", g_aux_fake_loss)
        tf.summary.scalar("loss/g_rec_loss", g_rec_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Optimizer
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(
            learning_rate=self.d_lr * self.lr_decay,
            beta1=self.beta1).minimize(self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(
            learning_rate=self.g_lr * self.lr_decay,
            beta1=self.beta1).minimize(self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
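
t.l1_loss supplies the cycle-reconstruction term; a plausible definition (an assumption) is the mean absolute error:

    def l1_loss(a, b):
        # Mean absolute error; a plausible stand-in for t.l1_loss (assumption).
        return tf.reduce_mean(tf.abs(a - b))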
Example #9
    def build_wgan(self):
        # Generator
        self.g = self.generator(self.z)

        # Discriminator
        d_real = self.discriminator(self.x)
        d_fake = self.discriminator(self.g, reuse=True)

        # Losses
        d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
        d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
        self.d_loss = d_real_loss + d_fake_loss
        self.g_loss = t.sce_loss(d_fake, tf.ones_like(d_fake))

        # The gradient penalty loss
        if self.EnableGP:
            alpha = tf.random_uniform(shape=[self.batch_size, 1, 1, 1], minval=0.0, maxval=1.0, name='alpha')
            diff = self.g - self.x  # fake data - real data
            interpolates = self.x + alpha * diff
            d_interp = self.discriminator(interpolates, reuse=True)
            gradients = tf.gradients(d_interp, [interpolates])[0]
            slopes = tf.sqrt(tf.reduce_sum(tf.square(gradients), axis=[1, 2, 3]))  # norm over all non-batch axes
            self.gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0))

            # Update D loss
            self.d_loss += self.d_lambda * self.gradient_penalty

        # Summary
        tf.summary.scalar("loss/d_real_loss", d_real_loss)
        tf.summary.scalar("loss/d_fake_loss", d_fake_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)
        if self.EnableGP:
            tf.summary.scalar("misc/gp", self.gradient_penalty)

        # Collect trainer values
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('discriminator')]
        g_params = [v for v in t_vars if v.name.startswith('generator')]

        if not self.EnableGP:
            self.d_clip = [v.assign(tf.clip_by_value(v, -self.clip, self.clip)) for v in d_params]

        # Optimizer
        if self.EnableGP:
            self.d_op = tf.train.AdamOptimizer(learning_rate=self.lr * 2, beta1=self.beta1, beta2=self.beta2).minimize(
                loss=self.d_loss, var_list=d_params
            )
            self.g_op = tf.train.AdamOptimizer(learning_rate=self.lr * 2, beta1=self.beta1, beta2=self.beta2).minimize(
                loss=self.g_loss, var_list=g_params
            )
        else:
            self.d_op = tf.train.RMSPropOptimizer(learning_rate=self.lr, decay=self.decay).minimize(
                self.d_loss, var_list=d_params
            )
            self.g_op = tf.train.RMSPropOptimizer(learning_rate=self.lr, decay=self.decay).minimize(
                self.g_loss, var_list=g_params
            )

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model Saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
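
In the vanilla WGAN branch (EnableGP off), the clip ops must run after every critic update to enforce the weight constraint. A hedged usage sketch (assuming a tf.Session s, a built model, and a hypothetical n_critic, as in the loop after Example #4):

    for _ in range(n_critic):  # critic steps per generator step (hypothetical)
        s.run(model.d_op, feed_dict={model.x: batch_x, model.z: batch_z})
        s.run(model.d_clip)    # clip critic weights to [-clip, clip]
    s.run(model.g_op, feed_dict={model.z: batch_z})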
Example #10
    def build_lapgan(self):
        # Generator & Discriminator
        g1 = self.generator(x=self.x1_coarse,
                            y=self.y,
                            z=self.z[0],
                            scale=32,
                            do_rate=self.do_rate)
        d1_fake = self.discriminator(x1=g1,
                                     x2=self.x1_coarse,
                                     y=self.y,
                                     scale=32)
        d1_real = self.discriminator(x1=self.x1_diff,
                                     x2=self.x1_coarse,
                                     y=self.y,
                                     scale=32,
                                     reuse=True)

        g2 = self.generator(x=self.x2_coarse,
                            y=self.y,
                            z=self.z[1],
                            scale=16,
                            do_rate=self.do_rate)
        d2_fake = self.discriminator(x1=g2,
                                     x2=self.x2_coarse,
                                     y=self.y,
                                     scale=16)
        d2_real = self.discriminator(x1=self.x2_diff,
                                     x2=self.x2_coarse,
                                     y=self.y,
                                     scale=16,
                                     reuse=True)

        g3 = self.generator(x=None,
                            y=self.y,
                            z=self.z[2],
                            scale=8,
                            do_rate=self.do_rate)
        d3_fake = self.discriminator(x1=g3, x2=None, y=self.y, scale=8)
        d3_real = self.discriminator(x1=self.x3_fine,
                                     x2=None,
                                     y=self.y,
                                     scale=8,
                                     reuse=True)

        self.g = [g1, g2, g3]
        self.d_reals = [d1_real, d2_real, d3_real]
        self.d_fakes = [d1_fake, d2_fake, d3_fake]

        # Losses
        with tf.variable_scope('loss'):
            for i in range(len(self.g)):
                self.d_loss.append(
                    t.sce_loss(self.d_reals[i], tf.ones_like(
                        self.d_reals[i])) +
                    t.sce_loss(self.d_fakes[i], tf.zeros_like(self.d_fakes[i]))
                )
                self.g_loss.append(
                    t.sce_loss(self.d_fakes[i], tf.ones_like(self.d_fakes[i])))

        # Summary
        for i in range(len(self.g)):
            tf.summary.scalar('loss/d_loss_{0}'.format(i), self.d_loss[i])
            tf.summary.scalar('loss/g_loss_{0}'.format(i), self.g_loss[i])

        # Optimizer
        t_vars = tf.trainable_variables()
        for idx, i in enumerate([32, 16, 8]):
            self.d_op.append(
                tf.train.AdamOptimizer(
                    learning_rate=self.lr, beta1=self.beta1,
                    beta2=self.beta2).minimize(
                        loss=self.d_loss[idx],
                        var_list=[
                            v for v in t_vars
                            if v.name.startswith('discriminator_{0}'.format(i))
                        ],
                    ))
            self.g_op.append(
                tf.train.AdamOptimizer(
                    learning_rate=self.lr, beta1=self.beta1,
                    beta2=self.beta2).minimize(
                        loss=self.g_loss[idx],
                        var_list=[
                            v for v in t_vars
                            if v.name.startswith('generator_{0}'.format(i))
                        ]))

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model Saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
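
The coarse/diff tensors encode a Laplacian pyramid; their construction is not shown in this method. A hedged sketch for a 32x32 input batch x (an assumption about the data pipeline):

    x32 = x                                            # full-resolution batch
    x16 = tf.image.resize_images(x32, [16, 16])
    x8 = tf.image.resize_images(x16, [8, 8])           # coarsest level (x3_fine)
    x1_coarse = tf.image.resize_images(x16, [32, 32])  # blurred 32x32 conditioning
    x1_diff = x32 - x1_coarse                          # residual target at scale 32
    x2_coarse = tf.image.resize_images(x8, [16, 16])   # blurred 16x16 conditioning
    x2_diff = x16 - x2_coarse                          # residual target at scale 16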
Example #11
    def build_anogan(self):
        # Generator
        self.g = self.generator(self.z, self.y)
        self.g_test = self.generator(self.z,
                                     self.y,
                                     reuse=True,
                                     is_train=False)

        # Discriminator
        d_real_fm, d_real = self.discriminator(self.x)
        d_fake_fm, d_fake = self.discriminator(self.g_test, reuse=True)

        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('disc')]
        g_params = [v for v in t_vars if v.name.startswith('gen')]

        if self.detect:
            self.d_loss = t.l1_loss(d_fake_fm, d_real_fm)  # feature-matching loss
            self.r_loss = t.l1_loss(self.x, self.g)        # residual loss
            self.ano_loss = ((1.0 - self.lambda_) * self.r_loss
                             + self.lambda_ * self.d_loss)

            tf.summary.scalar("loss/d_loss", self.d_loss)
            tf.summary.scalar("loss/r_loss", self.r_loss)
            tf.summary.scalar("loss/ano_loss", self.ano_loss)

            self.ano_op = tf.train.AdamOptimizer(learning_rate=self.g_lr,
                                                 beta1=self.beta1,
                                                 beta2=self.beta2).minimize(
                                                     loss=self.ano_loss,
                                                     var_list=g_params)
        else:
            self.d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
            self.d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
            self.d_loss = self.d_real_loss + self.d_fake_loss
            self.g_loss = t.sce_loss(d_fake, tf.ones_like(d_fake))

            # Summary
            tf.summary.scalar("loss/d_fake_loss", self.d_fake_loss)
            tf.summary.scalar("loss/d_real_loss", self.d_real_loss)
            tf.summary.scalar("loss/d_loss", self.d_loss)
            tf.summary.scalar("loss/g_loss", self.g_loss)

            # Build the D/G ops only after their losses are defined
            self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr,
                                               beta1=self.beta1,
                                               beta2=self.beta2).minimize(
                                                   loss=self.d_loss,
                                                   var_list=d_params)
            self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr,
                                               beta1=self.beta1,
                                               beta2=self.beta2).minimize(
                                                   loss=self.g_loss,
                                                   var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        if not self.detect:
            self.writer = tf.summary.FileWriter('./orig-model/', self.s.graph)
        else:
            self.writer = tf.summary.FileWriter('./ano-model/', self.s.graph)
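
At detection time the combined ano_loss serves as the anomaly score for a query image: the optimizer refines the generator-side variables until the reconstruction settles, then the remaining loss is read out. A hedged usage sketch (s, model, n_detect_steps, query_batch, z_val, and y_val are hypothetical):

    for _ in range(n_detect_steps):  # refine the reconstruction
        s.run(model.ano_op, feed_dict={model.x: query_batch,
                                       model.z: z_val, model.y: y_val})
    score = s.run(model.ano_loss, feed_dict={model.x: query_batch,
                                             model.z: z_val, model.y: y_val})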