Example #1
    def build_began(self):
        """Assemble the BEGAN training graph: networks, losses, the k_t
        control update, the convergence metric, summaries and optimizers."""
        # Generator network G(z).
        self.g = self.generator(self.z)

        # Auto-encoding discriminator applied to real and generated samples.
        d_real = self.discriminator(self.x)
        d_fake = self.discriminator(self.g, reuse=True)

        # L1 reconstruction losses of the discriminator.
        real_recon_loss = t.l1_loss(self.x, d_real)
        fake_recon_loss = t.l1_loss(self.g, d_fake)
        self.d_loss = real_recon_loss - self.k * fake_recon_loss
        self.g_loss = fake_recon_loss

        # Equilibrium term and the BEGAN global convergence measure.
        self.balance = self.gamma * real_recon_loss - self.g_loss  # g_loss == fake_recon_loss
        self.m_global = real_recon_loss + tf.abs(self.balance)

        # Proportional control of k_t, kept inside [0, 1].
        next_k = tf.clip_by_value(self.k + self.lambda_k * self.balance, 0, 1)
        self.k_update = tf.assign(self.k, next_k)

        # Scalar summaries for TensorBoard.
        for tag, value in [("loss/d_loss", self.d_loss),
                           ("loss/d_real_loss", real_recon_loss),
                           ("loss/d_fake_loss", fake_recon_loss),
                           ("loss/g_loss", self.g_loss),
                           ("misc/balance", self.balance),
                           ("misc/m_global", self.m_global),
                           ("misc/k_t", self.k),
                           ("misc/d_lr", self.d_lr),
                           ("misc/g_lr", self.g_lr)]:
            tf.summary.scalar(tag, value)

        # Split trainable variables by network name prefix.
        all_vars = tf.trainable_variables()
        d_vars = [v for v in all_vars if v.name.startswith('d')]
        g_vars = [v for v in all_vars if v.name.startswith('g')]

        # Adam optimizers driven by the (externally updated) learning rates.
        self.d_op = tf.train.AdamOptimizer(
            learning_rate=self.d_lr_update,
            beta1=self.beta1,
            beta2=self.beta2).minimize(self.d_loss, var_list=d_vars)
        self.g_op = tf.train.AdamOptimizer(
            learning_rate=self.g_lr_update,
            beta1=self.beta1,
            beta2=self.beta2).minimize(self.g_loss, var_list=g_vars)

        self.merged = tf.summary.merge_all()

        # Checkpointing and TensorBoard writer.
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
Example #2
    def build_dualgan(self):
        """Assemble the DualGAN training graph: networks, L1 losses,
        summaries and RMSProp optimizers."""
        # Generator network G(z).
        self.g = self.generator(self.z)

        # Discriminator outputs for real and generated samples.
        real_out = self.discriminator(self.x)
        fake_out = self.discriminator(self.g, reuse=True)

        # L1 losses; the generator minimizes the fake-side term.
        real_loss = t.l1_loss(self.x, real_out)
        fake_loss = t.l1_loss(self.g, fake_out)
        self.d_loss = real_loss + fake_loss
        self.g_loss = fake_loss

        # Scalar summaries for TensorBoard.
        for tag, value in [("loss/d_real_loss", real_loss),
                           ("loss/d_fake_loss", fake_loss),
                           ("loss/d_loss", self.d_loss),
                           ("loss/g_loss", self.g_loss)]:
            tf.summary.scalar(tag, value)

        # Split trainable variables by network name prefix.
        all_vars = tf.trainable_variables()
        d_vars = [v for v in all_vars if v.name.startswith('d')]
        g_vars = [v for v in all_vars if v.name.startswith('g')]

        # RMSProp optimizers, one per network.
        self.d_op = tf.train.RMSPropOptimizer(
            learning_rate=self.d_lr,
            decay=self.decay).minimize(self.d_loss, var_list=d_vars)
        self.g_op = tf.train.RMSPropOptimizer(
            learning_rate=self.g_lr,
            decay=self.decay).minimize(self.g_loss, var_list=g_vars)

        self.merged = tf.summary.merge_all()

        # Checkpointing and TensorBoard writer.
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
Example #3
    def build_stargan(self):
        """Assemble the StarGAN training graph.

        Uses WGAN-GP adversarial losses, an auxiliary domain-classification
        loss (weighted by ``lambda_cls``) and an L1 cycle-reconstruction loss
        (weighted by ``lambda_rec``).
        """
        def gp_loss(real, fake, eps=self.epsilon):
            """Gradient penalty on points interpolated between real and fake."""
            # NOTE(review): `eps` is a fixed scalar, not a per-example uniform
            # sample as in the WGAN-GP paper (the random-alpha version was
            # deliberately commented out by the author) — confirm intent.
            interpolates = eps * real + (1.0 - eps) * fake
            d_interp = self.discriminator(interpolates, reuse=True)
            gradients = tf.gradients(d_interp, [interpolates])[0]
            # `axis` replaces the deprecated `reduction_indices` argument.
            # NOTE(review): this reduces over axis 1 only; the usual WGAN-GP
            # gradient norm reduces over all non-batch axes — confirm intent.
            slopes = tf.sqrt(
                tf.reduce_sum(tf.square(gradients), axis=[1]))
            gradient_penalty = tf.reduce_mean(tf.square(slopes - 1.0))
            return gradient_penalty

        # Split image channels from the concatenated attribute channels.
        x_img_a = self.x_A[:, :, :, :self.channel]
        x_attr_a = self.x_A[:, :, :, self.channel:]
        x_img_b = self.x_B[:, :, :, :self.channel]
        # x_attr_b = self.x_B[:, :, :, self.channel:]

        # Generator: A -> B, then B (+ A's attributes) -> A for the cycle.
        self.fake_B = self.generator(self.x_A)
        gen_in = tf.concat([self.fake_B, x_attr_a], axis=3)
        self.fake_A = self.generator(gen_in, reuse=True)

        # Discriminator: source (real/fake) score and auxiliary class logits.
        d_src_real_b, d_aux_real_b = self.discriminator(x_img_b)
        g_src_fake_b, g_aux_fake_b = self.discriminator(
            self.fake_B, reuse=True)  # used at updating G net
        d_src_fake_b, d_aux_fake_b = self.discriminator(
            self.fake_x_B, reuse=True)  # used at updating D net

        # WGAN-GP critic loss + auxiliary classification loss on real images.
        gp = gp_loss(x_img_b, self.fake_x_B)
        d_src_loss = tf.reduce_mean(d_src_fake_b) - tf.reduce_mean(
            d_src_real_b) + gp
        d_aux_loss = t.sce_loss(d_aux_real_b, self.y_B)

        self.d_loss = d_src_loss + self.lambda_cls * d_aux_loss

        # Generator: fool the critic, match the target class, reconstruct A.
        g_src_loss = -tf.reduce_mean(g_src_fake_b)
        g_aux_fake_loss = t.sce_loss(g_aux_fake_b, self.y_B)
        g_rec_loss = t.l1_loss(x_img_a, self.fake_A)
        self.g_loss = g_src_loss + self.lambda_cls * g_aux_fake_loss + self.lambda_rec * g_rec_loss

        # Summary
        tf.summary.scalar("loss/d_src_loss", d_src_loss)
        tf.summary.scalar("loss/d_aux_loss", d_aux_loss)
        tf.summary.scalar("loss/d_loss", self.d_loss)
        tf.summary.scalar("loss/g_src_loss", g_src_loss)
        tf.summary.scalar("loss/g_aux_fake_loss", g_aux_fake_loss)
        tf.summary.scalar("loss/g_rec_loss", g_rec_loss)
        tf.summary.scalar("loss/g_loss", self.g_loss)

        # Optimizer: variables split by network name prefix.
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('d')]
        g_params = [v for v in t_vars if v.name.startswith('g')]

        self.d_op = tf.train.AdamOptimizer(
            learning_rate=self.d_lr * self.lr_decay,
            beta1=self.beta1).minimize(self.d_loss, var_list=d_params)
        self.g_op = tf.train.AdamOptimizer(
            learning_rate=self.g_lr * self.lr_decay,
            beta1=self.beta1).minimize(self.g_loss, var_list=g_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        self.writer = tf.summary.FileWriter('./model/', self.s.graph)
Example #4
    def build_anogan(self):
        """Assemble the AnoGAN graph in one of two modes.

        * ``self.detect`` is False: standard adversarial training — sigmoid
          cross-entropy losses for D and G.
        * ``self.detect`` is True: anomaly scoring — residual (L1) loss plus a
          feature-matching discrimination loss, combined via ``self.lambda_``
          and minimized over the generator variables.

        BUG FIXES vs. the original:
          1. ``self.d_op`` was built from ``self.d_loss`` *before* any loss was
             defined (AttributeError at graph-build time); it is now created
             after the losses.
          2. The "loss/g_loss" summary logged ``self.r_loss``, which does not
             exist in the non-detect branch; it now logs ``self.g_loss``.
          3. ``self.g_op`` minimized ``self.d_loss`` over ``d_params`` with
             ``self.d_lr``; it now minimizes ``self.g_loss`` over ``g_params``
             with ``self.g_lr``.
        """
        # Generator: training graph and a frozen (is_train=False) test graph.
        self.g = self.generator(self.z, self.y)
        self.g_test = self.generator(self.z,
                                     self.y,
                                     reuse=True,
                                     is_train=False)

        # Discriminator: intermediate feature maps and output logits.
        # NOTE(review): d_fake is computed on g_test (is_train=False) even in
        # training mode — confirm self.g was not intended here.
        d_real_fm, d_real = self.discriminator(self.x)
        d_fake_fm, d_fake = self.discriminator(self.g_test, reuse=True)

        # Variables split by network name prefix.
        t_vars = tf.trainable_variables()
        d_params = [v for v in t_vars if v.name.startswith('disc')]
        g_params = [v for v in t_vars if v.name.startswith('gen')]

        if self.detect:
            # Anomaly-detection losses.
            self.d_loss = t.l1_loss(d_fake_fm, d_real_fm)  # discrimination (feature-matching) loss
            self.r_loss = t.l1_loss(self.x, self.g)  # residual loss
            self.ano_loss = (
                1.0 - self.lambda_) * self.r_loss + self.lambda_ * self.d_loss

            tf.summary.scalar("loss/d_loss", self.d_loss)
            tf.summary.scalar("loss/r_loss", self.r_loss)
            tf.summary.scalar("loss/ano_loss", self.ano_loss)

            # The anomaly objective is minimized over the generator variables.
            self.ano_op = tf.train.AdamOptimizer(learning_rate=self.g_lr,
                                                 beta1=self.beta1,
                                                 beta2=self.beta2).minimize(
                                                     loss=self.ano_loss,
                                                     var_list=g_params)
        else:
            # Standard adversarial (sigmoid cross-entropy) losses.
            self.d_real_loss = t.sce_loss(d_real, tf.ones_like(d_real))
            self.d_fake_loss = t.sce_loss(d_fake, tf.zeros_like(d_fake))
            self.d_loss = self.d_real_loss + self.d_fake_loss
            self.g_loss = t.sce_loss(d_fake, tf.ones_like(d_fake))

            # Summary (fixed: was self.r_loss, undefined in this branch).
            tf.summary.scalar("loss/d_fake_loss", self.d_fake_loss)
            tf.summary.scalar("loss/d_real_loss", self.d_real_loss)
            tf.summary.scalar("loss/d_loss", self.d_loss)
            tf.summary.scalar("loss/g_loss", self.g_loss)

            # Fixed: generator op now updates the generator, not the
            # discriminator.
            self.g_op = tf.train.AdamOptimizer(learning_rate=self.g_lr,
                                               beta1=self.beta1,
                                               beta2=self.beta2).minimize(
                                                   loss=self.g_loss,
                                                   var_list=g_params)

        # Discriminator optimizer — created only after self.d_loss exists.
        self.d_op = tf.train.AdamOptimizer(learning_rate=self.d_lr,
                                           beta1=self.beta1,
                                           beta2=self.beta2).minimize(
                                               loss=self.d_loss,
                                               var_list=d_params)

        # Merge summary
        self.merged = tf.summary.merge_all()

        # Model saver
        self.saver = tf.train.Saver(max_to_keep=1)
        if not self.detect:
            self.writer = tf.summary.FileWriter('./orig-model/', self.s.graph)
        else:
            self.writer = tf.summary.FileWriter('./ano-model/', self.s.graph)