Example #1
# Assumed imports for these standalone tests; `losses` is the project's
# own loss module (not a PyPI package).
import numpy as np
import tensorflow as tf

import losses


def test_evaluate_g_losses(sess):

    _LAMBDA_A = 10
    _LAMBDA_B = 10

    input_a = tf.random_uniform((5, 7), maxval=1)
    cycle_images_a = input_a + 1
    input_b = tf.random_uniform((5, 7), maxval=1)
    cycle_images_b = input_b - 2

    cycle_consistency_loss_a = _LAMBDA_A * losses.cycle_consistency_loss(
        real_images=input_a,
        generated_images=cycle_images_a,
    )
    cycle_consistency_loss_b = _LAMBDA_B * losses.cycle_consistency_loss(
        real_images=input_b,
        generated_images=cycle_images_b,
    )

    prob_fake_a_is_real = tf.constant([0, 1.0, 0])
    prob_fake_b_is_real = tf.constant([1.0, 1.0, 0])

    lsgan_loss_a = losses.lsgan_loss_generator(prob_fake_a_is_real)
    lsgan_loss_b = losses.lsgan_loss_generator(prob_fake_b_is_real)

    assert np.isclose(sess.run(lsgan_loss_a), 0.66666669)
    assert np.isclose(sess.run(lsgan_loss_b), 0.3333333)
    assert np.isclose(sess.run(cycle_consistency_loss_a), 10)
    assert np.isclose(sess.run(cycle_consistency_loss_b), 20)
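
The constants asserted above pin down what the shared `losses` module must compute. A minimal sketch consistent with those values, assuming the usual CycleGAN definitions (L1 cycle-consistency loss, least-squares GAN generator loss); each project's actual implementation may differ:

def cycle_consistency_loss(real_images, generated_images):
    # Mean absolute difference; a uniform offset of 1 (or -2) yields a
    # loss of 1 (or 2), which _LAMBDA_A/_LAMBDA_B scale to 10 and 20.
    return tf.reduce_mean(tf.abs(real_images - generated_images))


def lsgan_loss_generator(prob_fake_is_real):
    # Least-squares GAN loss pushing D's score on fakes toward 1.
    # For [0, 1.0, 0] this is (1 + 0 + 1) / 3, i.e. 0.66666669 in float32.
    return tf.reduce_mean(tf.squared_difference(prob_fake_is_real, 1))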
Example #2
    def compute_losses(self):
        """
        In this function we are defining the variables for loss calculations
        and training model.

        d_loss_A/d_loss_B -> loss for discriminator A/B
        g_loss_A/g_loss_B -> loss for generator A/B
        *_trainer -> Various trainer for above loss functions
        *_summ -> Summary variables for above loss functions
        """
        cycle_consistency_loss_a = \
            self._lambda_a * losses.cycle_consistency_loss(
                real_images=self.input_a, generated_images=self.cycle_images_a,
            )
        cycle_consistency_loss_b = \
            self._lambda_b * losses.cycle_consistency_loss(
                real_images=self.input_b, generated_images=self.cycle_images_b,
            )

        lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)
        lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)

        g_loss_A = \
            cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b
        g_loss_B = \
            cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a

        d_loss_A = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_a_is_real,
            prob_fake_is_real=self.prob_fake_pool_a_is_real,
        )
        d_loss_B = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_b_is_real,
            prob_fake_is_real=self.prob_fake_pool_b_is_real,
        )

        print(g_loss_A, g_loss_B)
        optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)

        self.model_vars = tf.trainable_variables()

        d_A_vars = [var for var in self.model_vars if 'd_A' in var.name]
        g_A_vars = [var for var in self.model_vars if 'g_A' in var.name]
        d_B_vars = [var for var in self.model_vars if 'd_B' in var.name]
        g_B_vars = [var for var in self.model_vars if 'g_B' in var.name]

        self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)
        self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)
        self.g_A_trainer = optimizer.minimize(g_loss_A, var_list=g_A_vars)
        self.g_B_trainer = optimizer.minimize(g_loss_B, var_list=g_B_vars)

        for var in self.model_vars:
            print(var.name)

        # Summary variables for tensorboard
        self.g_A_loss_summ = tf.summary.scalar("g_A_loss", g_loss_A)
        self.g_B_loss_summ = tf.summary.scalar("g_B_loss", g_loss_B)
        self.d_A_loss_summ = tf.summary.scalar("d_A_loss", d_loss_A)
        self.d_B_loss_summ = tf.summary.scalar("d_B_loss", d_loss_B)
Example #3
    def compute_losses(self):
        """Compute losses."""
        self.reconstruction_loss_a = losses.reconstruction_loss(
            real_images=self.input_a, generated_images=self.ae_images_a)
        self.reconstruction_loss_b = losses.reconstruction_loss(
            real_images=self.input_b, generated_images=self.ae_images_b)

        self.lsgan_loss_fake_a = losses.lsgan_loss_generator(
            self.prob_fake_a_is_real)
        self.lsgan_loss_fake_b = losses.lsgan_loss_generator(
            self.prob_fake_b_is_real)

        self.cycle_consistency_loss_a = losses.cycle_consistency_loss(
            real_images=self.input_a, generated_images=self.cycle_images_a)
        self.cycle_consistency_loss_b = losses.cycle_consistency_loss(
            real_images=self.input_b, generated_images=self.cycle_images_b)

        self.g_loss = self._rec_lambda_a * self.reconstruction_loss_a + \
            self._rec_lambda_b * self.reconstruction_loss_b + \
            self._cycle_lambda_a * self.cycle_consistency_loss_a + \
            self._cycle_lambda_b * self.cycle_consistency_loss_b + \
            self._lsgan_lambda_a * self.lsgan_loss_fake_a + \
            self._lsgan_lambda_b * self.lsgan_loss_fake_b

        self.d_loss_A = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_a_is_real,
            prob_fake_is_real=self.prob_fake_pool_a_is_real)
        self.d_loss_B = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_b_is_real,
            prob_fake_is_real=self.prob_fake_pool_b_is_real)

        self.model_vars = tf.trainable_variables()

        d_a_vars = [
            var for var in self.model_vars
            if 'd1' in var.name or 'd_shared' in var.name
        ]
        d_b_vars = [
            var for var in self.model_vars
            if 'd2' in var.name or 'd_shared' in var.name
        ]
        g_vars = [
            var for var in self.model_vars if 'ae1' in var.name
            or 'ae2' in var.name or 'ae_shared' in var.name
        ]

        optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)

        self.d_A_trainer = optimizer.minimize(self.d_loss_A, var_list=d_a_vars)
        self.d_B_trainer = optimizer.minimize(self.d_loss_B, var_list=d_b_vars)
        self.g_trainer = optimizer.minimize(self.g_loss, var_list=g_vars)

        self.create_summaries()
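
Examples #2 and #3 (and most of the examples below) also call `losses.lsgan_loss_discriminator`. A plausible sketch, assuming the standard least-squares GAN discriminator objective (real scores pushed toward 1, history-pool fake scores toward 0); the per-project code may differ:

def lsgan_loss_discriminator(prob_real_is_real, prob_fake_is_real):
    # LSGAN discriminator loss; the 0.5 factor halves the objective
    # relative to the generator's, as in the CycleGAN paper.
    return 0.5 * (
        tf.reduce_mean(tf.squared_difference(prob_real_is_real, 1)) +
        tf.reduce_mean(tf.squared_difference(prob_fake_is_real, 0)))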
Example #4
def test_cycle_consistency_loss_is_none_with_perfect_fakes(sess):
    batch_size, height, width, channels = [16, 2, 3, 1]

    tf.set_random_seed(0)

    images = tf.random_uniform((batch_size, height, width, channels), maxval=1)

    loss = losses.cycle_consistency_loss(
        real_images=images,
        generated_images=images,
    )

    assert sess.run(loss) == 0
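
The `sess` parameter in these tests is a pytest fixture. A minimal hypothetical `conftest.py` that would let them run under TF1 (the original projects' fixtures may add seeds or other setup):

# conftest.py (hypothetical)
import pytest
import tensorflow as tf


@pytest.fixture
def sess():
    # Fresh graph and session per test, so ops created in one test
    # do not leak into the next.
    with tf.Graph().as_default():
        with tf.Session() as session:
            yield session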
Example #5
def test_cycle_consistency_loss_is_positive_with_imperfect_fake_x(sess):
    batch_size, height, width, channels = [16, 2, 3, 1]

    tf.set_random_seed(0)

    real_images = tf.random_uniform(
        (batch_size, height, width, channels),
        maxval=1,
    )
    generated_images = real_images + 1

    loss = losses.cycle_consistency_loss(
        real_images=real_images,
        generated_images=generated_images,
    )

    assert np.isclose(sess.run(loss), 1)
Example #6
File: main.py Project: ziyangwang007/SIFA
    def compute_losses(self):

        cycle_consistency_loss_a = \
            self._lambda_a * losses.cycle_consistency_loss(
                real_images=tf.expand_dims(self.input_a[:, :, :, 1], axis=3),
                generated_images=self.cycle_images_a,
            )
        cycle_consistency_loss_b = \
            self._lambda_b * losses.cycle_consistency_loss(
                real_images=tf.expand_dims(self.input_b[:, :, :, 1], axis=3),
                generated_images=self.cycle_images_b,
            )

        lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)
        lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)
        lsgan_loss_f = losses.lsgan_loss_generator(self.prob_fea_b_is_real)
        lsgan_loss_a_aux = losses.lsgan_loss_generator(
            self.prob_fake_a_aux_is_real)

        ce_loss_b, dice_loss_b = losses.task_loss(self.pred_mask_fake_b,
                                                  self.gt_a)

        l2_loss_b = tf.add_n([
            0.0001 * tf.nn.l2_loss(v) for v in tf.trainable_variables()
            if '/s_B/' in v.name or '/e_B/' in v.name
        ])

        g_loss_A = \
            cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b
        g_loss_B = \
            cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a

        self.loss_f_weight = tf.placeholder(tf.float32,
                                            shape=[],
                                            name="loss_f_weight")
        self.loss_f_weight_summ = tf.summary.scalar("loss_f_weight",
                                                    self.loss_f_weight)
        seg_loss_B = ce_loss_b + dice_loss_b + l2_loss_b + 0.1 * g_loss_B + \
            self.loss_f_weight * lsgan_loss_f + 0.1 * lsgan_loss_a_aux

        d_loss_A = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_a_is_real,
            prob_fake_is_real=self.prob_fake_pool_a_is_real,
        )
        d_loss_A_aux = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_cycle_a_aux_is_real,
            prob_fake_is_real=self.prob_fake_pool_a_aux_is_real,
        )
        d_loss_A = d_loss_A + d_loss_A_aux
        d_loss_B = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_b_is_real,
            prob_fake_is_real=self.prob_fake_pool_b_is_real,
        )
        d_loss_F = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_fea_fake_b_is_real,
            prob_fake_is_real=self.prob_fea_b_is_real,
        )

        optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)
        optimizer_seg = tf.train.AdamOptimizer(self.learning_rate_seg)

        self.model_vars = tf.trainable_variables()

        d_A_vars = [var for var in self.model_vars if '/d_A/' in var.name]
        d_B_vars = [var for var in self.model_vars if '/d_B/' in var.name]
        g_A_vars = [var for var in self.model_vars if '/g_A/' in var.name]
        e_B_vars = [var for var in self.model_vars if '/e_B/' in var.name]
        de_B_vars = [var for var in self.model_vars if '/de_B/' in var.name]
        s_B_vars = [var for var in self.model_vars if '/s_B/' in var.name]
        d_F_vars = [var for var in self.model_vars if '/d_F/' in var.name]

        self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)
        self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)
        self.g_A_trainer = optimizer.minimize(g_loss_A, var_list=g_A_vars)
        self.g_B_trainer = optimizer.minimize(g_loss_B, var_list=de_B_vars)
        self.s_B_trainer = optimizer_seg.minimize(seg_loss_B,
                                                  var_list=e_B_vars + s_B_vars)
        self.d_F_trainer = optimizer.minimize(d_loss_F, var_list=d_F_vars)

        for var in self.model_vars:
            print(var.name)

        # Summary variables for tensorboard
        self.g_A_loss_summ = tf.summary.scalar("g_A_loss", g_loss_A)
        self.g_B_loss_summ = tf.summary.scalar("g_B_loss", g_loss_B)
        self.d_A_loss_summ = tf.summary.scalar("d_A_loss", d_loss_A)
        self.d_B_loss_summ = tf.summary.scalar("d_B_loss", d_loss_B)
        self.ce_B_loss_summ = tf.summary.scalar("ce_B_loss", ce_loss_b)
        self.dice_B_loss_summ = tf.summary.scalar("dice_B_loss", dice_loss_b)
        self.l2_B_loss_summ = tf.summary.scalar("l2_B_loss", l2_loss_b)
        self.s_B_loss_summ = tf.summary.scalar("s_B_loss", seg_loss_B)
        self.s_B_loss_merge_summ = tf.summary.merge([
            self.ce_B_loss_summ, self.dice_B_loss_summ, self.l2_B_loss_summ,
            self.s_B_loss_summ
        ])
        self.d_F_loss_summ = tf.summary.scalar("d_F_loss", d_loss_F)
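
Example #6 (SIFA) couples the CycleGAN losses with a supervised segmentation term from `losses.task_loss`, which returns a cross-entropy loss and a Dice loss for the predicted mask. A hedged sketch of the assumed form (the SIFA repository's definition may differ, e.g. in class weighting or smoothing):

def task_loss(prediction, gt):
    # Pixel-wise softmax cross-entropy against one-hot ground truth.
    ce_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits_v2(
            labels=gt, logits=prediction))
    # Soft Dice loss averaged over classes; eps guards empty masks.
    probs = tf.nn.softmax(prediction)
    eps = 1e-7
    intersection = tf.reduce_sum(probs * gt, axis=[1, 2])
    union = tf.reduce_sum(probs, axis=[1, 2]) + \
        tf.reduce_sum(gt, axis=[1, 2])
    dice_loss = 1.0 - tf.reduce_mean(
        (2.0 * intersection + eps) / (union + eps))
    return ce_loss, dice_loss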
Example #7
    def compute_losses(self):
        """
        In this function we are defining the variables for loss calculations
        and training model.

        d_loss_A/d_loss_B -> loss for discriminator A/B
        g_loss_A/g_loss_B -> loss for generator A/B
        *_trainer -> Various trainer for above loss functions
        *_summ -> Summary variables for above loss functions
        """


        cycle_consistency_loss_a = \
            self._lambda_a * losses.cycle_consistency_loss(
                real_images=self.input_a, generated_images=self.cycle_images_a,
            )
        cycle_consistency_loss_b = \
            self._lambda_b * losses.cycle_consistency_loss(
                real_images=self.input_b, generated_images=self.cycle_images_b,
            )

        lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)
        lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)

        g_loss_A = \
            cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b
        g_loss_B = \
            cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a

        d_loss_A = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_a_is_real,
            prob_fake_is_real=self.prob_fake_pool_a_is_real,
        )
        d_loss_B = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_b_is_real,
            prob_fake_is_real=self.prob_fake_pool_b_is_real,
        )

        optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)
        optimizer2 = tf.train.AdamOptimizer(self.learning_rate / self.ratio,
                                            beta1=0.5)
        self.model_vars = tf.trainable_variables()

        d_A_vars = [var for var in self.model_vars if 'd_A' in var.name]
        g_A_vars = [var for var in self.model_vars if 'g_A/' in var.name]
        d_B_vars = [var for var in self.model_vars if 'd_B' in var.name]
        g_B_vars = [var for var in self.model_vars if 'g_B/' in var.name]
        g_Ae_vars = [var for var in self.model_vars if 'g_A_ae' in var.name]
        g_Be_vars = [var for var in self.model_vars if 'g_B_ae' in var.name]

        grads = tf.gradients(g_loss_A, g_A_vars + g_Ae_vars)
        grads1 = grads[:len(g_A_vars)]
        grads2 = grads[len(g_A_vars):]
        train_op1 = optimizer.apply_gradients(zip(grads1, g_A_vars))
        train_op2 = optimizer2.apply_gradients(zip(grads2, g_Ae_vars))
        self.g_A_trainer = tf.group(train_op1, train_op2)

        grads = tf.gradients(g_loss_B, g_B_vars + g_Be_vars)
        grads1 = grads[:len(g_B_vars)]
        grads2 = grads[len(g_B_vars):]
        train_op1_ = optimizer.apply_gradients(zip(grads1, g_B_vars))
        train_op2_ = optimizer2.apply_gradients(zip(grads2, g_Be_vars))
        self.g_B_trainer = tf.group(train_op1_, train_op2_)

        self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)
        self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)

        self.params_ae_c1 = g_A_vars[0]
        self.params_ae_c1_B = g_B_vars[0]
        for var in self.model_vars:
            print(var.name)

        # Summary variables for tensorboard
        self.g_A_loss_summ = tf.summary.scalar("g_A_loss", g_loss_A)
        self.g_B_loss_summ = tf.summary.scalar("g_B_loss", g_loss_B)
        self.d_A_loss_summ = tf.summary.scalar("d_A_loss", d_loss_A)
        self.d_B_loss_summ = tf.summary.scalar("d_B_loss", d_loss_B)
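
Note the two-optimizer pattern in Example #7: the generator gradients are computed once over the concatenated variable list, split by position, and applied with `optimizer` (at `learning_rate`) for the core generator variables and with `optimizer2` (at `learning_rate / self.ratio`) for the autoencoder variables, `tf.group` fusing both `apply_gradients` ops into a single trainer handle.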
Example #8
    def compute_losses(self):

        cycle_consistency_loss_a = \
            self._lambda_a * losses.cycle_consistency_loss(
                real_images=self.input_a, generated_images=self.cycle_images_a,
            )
        cycle_consistency_loss_b = \
            self._lambda_b * losses.cycle_consistency_loss(
                real_images=self.input_b, generated_images=self.cycle_images_b,
            )

        lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)
        lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)
        lsgan_loss_p = losses.lsgan_loss_generator(
            self.prob_pred_mask_b_is_real)
        lsgan_loss_p_ll = losses.lsgan_loss_generator(
            self.prob_pred_mask_b_ll_is_real)
        lsgan_loss_a_aux = losses.lsgan_loss_generator(
            self.prob_fake_a_aux_is_real)

        ce_loss_b, dice_loss_b = losses.task_loss(self.pred_mask_fake_b,
                                                  self.gt_a)
        ce_loss_b_ll, dice_loss_b_ll = losses.task_loss(
            self.pred_mask_fake_b_ll, self.gt_a)
        l2_loss_b = tf.add_n([
            0.0001 * tf.nn.l2_loss(v) for v in tf.trainable_variables()
            if '/s_B/' in v.name or '/s_B_ll/' in v.name or '/e_B/' in v.name
        ])

        g_loss_A = \
            cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b
        g_loss_B = \
            cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a

        seg_loss_B = ce_loss_b + dice_loss_b + l2_loss_b + \
            0.1 * (ce_loss_b_ll + dice_loss_b_ll) + 0.1 * g_loss_B + \
            0.1 * lsgan_loss_p + 0.01 * lsgan_loss_p_ll + \
            0.1 * lsgan_loss_a_aux

        d_loss_A = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_a_is_real,
            prob_fake_is_real=self.prob_fake_pool_a_is_real,
        )
        d_loss_A_aux = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_cycle_a_aux_is_real,
            prob_fake_is_real=self.prob_fake_pool_a_aux_is_real,
        )
        d_loss_A = d_loss_A + d_loss_A_aux
        d_loss_B = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_b_is_real,
            prob_fake_is_real=self.prob_fake_pool_b_is_real,
        )
        d_loss_P = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_pred_mask_fake_b_is_real,
            prob_fake_is_real=self.prob_pred_mask_b_is_real,
        )
        d_loss_P_ll = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_pred_mask_fake_b_ll_is_real,
            prob_fake_is_real=self.prob_pred_mask_b_ll_is_real,
        )

        optimizer_gan = tf.train.AdamOptimizer(self.learning_rate_gan,
                                               beta1=0.5)
        optimizer_seg = tf.train.AdamOptimizer(self.learning_rate_seg)

        self.model_vars = tf.trainable_variables()

        d_A_vars = [var for var in self.model_vars if '/d_A/' in var.name]
        d_B_vars = [var for var in self.model_vars if '/d_B/' in var.name]
        g_A_vars = [var for var in self.model_vars if '/g_A/' in var.name]
        e_B_vars = [var for var in self.model_vars if '/e_B/' in var.name]
        de_B_vars = [var for var in self.model_vars if '/de_B/' in var.name]
        s_B_vars = [var for var in self.model_vars if '/s_B/' in var.name]
        s_B_ll_vars = [
            var for var in self.model_vars if '/s_B_ll/' in var.name
        ]
        d_P_vars = [var for var in self.model_vars if '/d_P/' in var.name]
        d_P_ll_vars = [
            var for var in self.model_vars if '/d_P_ll/' in var.name
        ]

        self.d_A_trainer = optimizer_gan.minimize(d_loss_A, var_list=d_A_vars)
        self.d_B_trainer = optimizer_gan.minimize(d_loss_B, var_list=d_B_vars)
        self.g_A_trainer = optimizer_gan.minimize(g_loss_A, var_list=g_A_vars)
        self.g_B_trainer = optimizer_gan.minimize(g_loss_B, var_list=de_B_vars)
        self.d_P_trainer = optimizer_gan.minimize(d_loss_P, var_list=d_P_vars)
        self.d_P_ll_trainer = optimizer_gan.minimize(d_loss_P_ll,
                                                     var_list=d_P_ll_vars)
        self.s_B_trainer = optimizer_seg.minimize(seg_loss_B,
                                                  var_list=e_B_vars +
                                                  s_B_vars + s_B_ll_vars)

        for var in self.model_vars:
            print(var.name)

        # Summary variables for tensorboard
        self.g_A_loss_summ = tf.summary.scalar("g_A_loss", g_loss_A)
        self.g_B_loss_summ = tf.summary.scalar("g_B_loss", g_loss_B)
        self.d_A_loss_summ = tf.summary.scalar("d_A_loss", d_loss_A)
        self.d_B_loss_summ = tf.summary.scalar("d_B_loss", d_loss_B)
        self.ce_B_loss_summ = tf.summary.scalar("ce_B_loss", ce_loss_b)
        self.dice_B_loss_summ = tf.summary.scalar("dice_B_loss", dice_loss_b)
        self.l2_B_loss_summ = tf.summary.scalar("l2_B_loss", l2_loss_b)
        self.s_B_loss_summ = tf.summary.scalar("s_B_loss", seg_loss_B)
        self.s_B_loss_merge_summ = tf.summary.merge([
            self.ce_B_loss_summ, self.dice_B_loss_summ, self.l2_B_loss_summ,
            self.s_B_loss_summ
        ])
        self.d_P_loss_summ = tf.summary.scalar("d_P_loss", d_loss_P)
        self.d_P_ll_loss_summ = tf.summary.scalar("d_P_loss_ll", d_loss_P_ll)
        self.d_P_loss_merge_summ = tf.summary.merge(
            [self.d_P_loss_summ, self.d_P_ll_loss_summ])
Example #9
    def compute_losses(self):
        """
        In this function we are defining the variables for loss calculations
        and training model.

        d_loss_A/d_loss_B -> loss for discriminator A/B
        g_loss_A/g_loss_B -> loss for generator A/B
        seg_cost          -> Segmentation loss in target domain 
        *_trainer -> Various trainer for above loss functions
        *_summ -> Summary variables for above loss functions
        """

        self.seg_cost = tf.reduce_mean(
            tf.square(self.manual_seg - self.prediction))

        cycle_consistency_loss_a = \
            self._lambda_a * losses.cycle_consistency_loss(
                real_images=self.input_a, generated_images=self.cycle_images_a,
            )
        cycle_consistency_loss_b = \
            self._lambda_b * losses.cycle_consistency_loss(
                real_images=self.input_b, generated_images=self.cycle_images_b,
            )

        lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)
        lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)

        g_loss_A = cycle_consistency_loss_a + cycle_consistency_loss_b \
            + lsgan_loss_b + 10 * self.seg_cost
        g_loss_B = cycle_consistency_loss_b + cycle_consistency_loss_a \
            + lsgan_loss_a

        d_loss_A = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_a_is_real,
            prob_fake_is_real=self.prob_fake_pool_a_is_real,
        )
        d_loss_B = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_b_is_real,
            prob_fake_is_real=self.prob_fake_pool_b_is_real,
        )

        optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.5)
        self.model_vars = tf.trainable_variables()

        d_A_vars = [var for var in self.model_vars if 'd_A' in var.name]
        g_A_vars = [var for var in self.model_vars if 'g_A/' in var.name]
        d_B_vars = [var for var in self.model_vars if 'd_B' in var.name]
        g_B_vars = [var for var in self.model_vars if 'g_B/' in var.name]
        g_Ae_vars = [var for var in self.model_vars if 'g_A_ae' in var.name]
        g_Be_vars = [var for var in self.model_vars if 'g_B_ae' in var.name]

        seg_vars = [var for var in self.model_vars if 'seg_net' in var.name]

        # self.g_A_trainer = optimizer.minimize(
        #     g_loss_A, var_list=g_A_vars + g_Ae_vars)

        self.g_A_trainer = optimizer.minimize(
            g_loss_A, var_list=g_A_vars + g_Ae_vars + seg_vars)
        self.g_B_trainer = optimizer.minimize(
            g_loss_B, var_list=g_B_vars + g_Be_vars)
        self.g_A_trainer_bis = optimizer.minimize(
            g_loss_A, var_list=g_A_vars + seg_vars)
        self.g_B_trainer_bis = optimizer.minimize(g_loss_B, var_list=g_B_vars)
        self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)
        self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)

        self.seg_trainer = optimizer.minimize(self.seg_cost, var_list=seg_vars)

        self.params_ae_c1 = g_A_vars[0]
        self.params_ae_c1_B = g_B_vars[0]
        for var in self.model_vars:
            print(var.name)

        # Summary variables for tensorboard

        self.g_A_loss_summ = tf.summary.scalar('g_A_loss', g_loss_A)
        self.g_B_loss_summ = tf.summary.scalar('g_B_loss', g_loss_B)
        self.d_A_loss_summ = tf.summary.scalar('d_A_loss', d_loss_A)
        self.d_B_loss_summ = tf.summary.scalar('d_B_loss', d_loss_B)

        self.segmentation_loss_sum = \
            tf.summary.scalar('segmentation_loss', self.seg_cost)
Example #10
    def compute_losses(self):
        """
        In this function we are defining the variables for loss calculations
        and training model.

        d_loss_A/d_loss_B -> loss for discriminator A/B
        g_loss_A/g_loss_B -> loss for generator A/B
        *_trainer -> Various trainer for above loss functions
        *_summ -> Summary variables for above loss functions
        """

        cycle_consistency_loss_a = \
            self._lambda_a * losses.cycle_consistency_loss(
                real_images=self.input_a, generated_images=self.cycle_images_a,
            )
        cycle_consistency_loss_b = \
            self._lambda_b * losses.cycle_consistency_loss(
                real_images=self.input_b, generated_images=self.cycle_images_b,
            )

        ssim_loss_A = 0.25 * (2 - tf.reduce_mean(
            tf.image.ssim_multiscale(
                self.input_a[:, :, :, :-1], self.fake_images_b, 2,
                power_factors=[0.0448, 0.2856, 0.3001]) +
            tf.image.ssim_multiscale(
                self.fake_images_b, self.cycle_images_a[:, :, :, :-1], 2,
                power_factors=[0.0448, 0.2856, 0.3001])))

        ssim_loss_B = 0.25 * (2 - tf.reduce_mean(
            tf.image.ssim_multiscale(
                self.input_b, self.fake_images_a[:, :, :, :-1], 2,
                power_factors=[0.0448, 0.2856, 0.3001]) +
            tf.image.ssim_multiscale(
                self.fake_images_a[:, :, :, :-1], self.cycle_images_b, 2,
                power_factors=[0.0448, 0.2856, 0.3001])))

        lsgan_loss_a = losses.lsgan_loss_generator(self.prob_fake_a_is_real)
        lsgan_loss_b = losses.lsgan_loss_generator(self.prob_fake_b_is_real)

        grad_loss_B = tf.reduce_mean(tf.image.image_gradients(
            tf.expand_dims(self.fake_images_a[:, :, :, -1], axis=3)))

        g_loss_A = (cycle_consistency_loss_a + cycle_consistency_loss_b +
                    lsgan_loss_b + ssim_loss_A + ssim_loss_B)
        g_loss_B = (cycle_consistency_loss_b + cycle_consistency_loss_a +
                    lsgan_loss_a + ssim_loss_B + ssim_loss_A + grad_loss_B)

        # g_loss_A = cycle_consistency_loss_a + cycle_consistency_loss_b + lsgan_loss_b
        # g_loss_B = cycle_consistency_loss_b + cycle_consistency_loss_a + lsgan_loss_a

        d_loss_A = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_a_is_real,
            prob_fake_is_real=self.prob_fake_pool_a_is_real,
        )
        d_loss_B = losses.lsgan_loss_discriminator(
            prob_real_is_real=self.prob_real_b_is_real,
            prob_fake_is_real=self.prob_fake_pool_b_is_real,
        )

        optimizer = tf.train.AdamOptimizer(self.learning_rate, beta1=0.9)

        self.model_vars = tf.trainable_variables()

        d_A_vars = [var for var in self.model_vars if 'd_A' in var.name]
        g_A_vars = [var for var in self.model_vars if 'g_A' in var.name]
        d_B_vars = [var for var in self.model_vars if 'd_B' in var.name]
        g_B_vars = [var for var in self.model_vars if 'g_B' in var.name]

        self.d_A_trainer = optimizer.minimize(d_loss_A, var_list=d_A_vars)
        self.d_B_trainer = optimizer.minimize(d_loss_B, var_list=d_B_vars)
        self.g_A_trainer = optimizer.minimize(g_loss_A, var_list=g_A_vars)
        self.g_B_trainer = optimizer.minimize(g_loss_B, var_list=g_B_vars)

        for var in self.model_vars:
            print(var.name)

        # Summary variables for tensorboard
        self.g_A_loss_summ = tf.summary.scalar("g_A_loss", g_loss_A)
        self.g_B_loss_summ = tf.summary.scalar("g_B_loss", g_loss_B)
        self.d_A_loss_summ = tf.summary.scalar("d_A_loss", d_loss_A)
        self.d_B_loss_summ = tf.summary.scalar("d_B_loss", d_loss_B)
        self.ssim_A_loss_summ = tf.summary.scalar("ssim_A_loss", ssim_loss_A)
        self.ssim_B_loss_summ = tf.summary.scalar("ssim_B_loss", ssim_loss_B)
        self.grad_B_loss_summ = tf.summary.scalar("gradient_loss", grad_loss_B)
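
A note on Example #10's extra terms: `tf.image.ssim_multiscale` approaches 1 for structurally identical images (here with `max_val` passed positionally as 2, presumably because the inputs span [-1, 1]), so each `0.25 * (2 - ...)` term vanishes for perfect translations and grows as similarity drops; `grad_loss_B` penalizes the mean image gradient of the extra channel `fake_images_a[:, :, :, -1]`, apparently to encourage smoothness.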
Example #11
    def _buildGraph(self):

        # placeholder variables for feeding

        real_a = tf.placeholder(
            tf.float32,
            shape=[
                None,  # enables variable batch size
                self.feature_size
            ],
            name="A")

        real_b = tf.placeholder(
            tf.float32,
            shape=[
                None,  # same feature size for both domains
                self.feature_size
            ],
            name="B")

        dropout = tf.placeholder_with_default(1., shape=[], name="dropout")

        train_status = tf.placeholder_with_default(True,
                                                   shape=[],
                                                   name="train_status")

        # random initial values for the fake pools

        fake_pool_a_sample = tf.placeholder(
            tf.float32,
            shape=[
                None,  # enables variable batch size
                self.feature_size
            ],
            name="fake_pool_a_sample")
        fake_pool_b_sample = tf.placeholder(
            tf.float32,
            shape=[
                None,  # same feature size for both domains
                self.feature_size
            ],
            name="fake_pool_b_sample")

        # construct generator and discriminator functions as class instances

        G_a2b = layers.generator(name='G_a2b',
                                 architecture=self.architecture['G_a2b'],
                                 activation=tf.nn.leaky_relu,
                                 output_activation=tf.nn.leaky_relu,
                                 dropout=dropout)
        # linear discriminator
        D_a = layers.discriminator(name='D_a',
                                   architecture=self.architecture['D_a'],
                                   activation=None,
                                   dropout=dropout)

        G_b2a = layers.generator(name='G_b2a',
                                 architecture=self.architecture['G_b2a'],
                                 activation=tf.nn.leaky_relu,
                                 output_activation=tf.nn.leaky_relu,
                                 dropout=dropout)
        # linear discriminator
        D_b = layers.discriminator(name='D_b',
                                   architecture=self.architecture['D_b'],
                                   activation=None,
                                   dropout=dropout)

        # print generator and discriminator architecture
        for f in [G_a2b, D_a, G_b2a, D_b]:
            f.print_architecture()

        # generate fake and cycle profiles
        fake_a = G_b2a(real_b, is_train=train_status)
        fake_b = G_a2b(real_a, is_train=train_status)

        cycle_a = G_b2a(fake_b, is_train=train_status)
        cycle_b = G_a2b(fake_a, is_train=train_status)

        # construct recent fake profile pool
        """
        See Zhu et al., "Training Details": to reduce model oscillation
        [15], we follow Shrivastava et al.'s strategy [46] and update the
        discriminators using a history of generated images rather than the
        ones produced by the latest generators. We keep an image buffer
        that stores the 50 previously created images.
        """

        # get discriminator predictions

        prob_real_a_is_a = D_a(real_a, is_train=train_status)
        prob_fake_a_is_a = D_a(fake_a, is_train=train_status)
        prob_real_b_is_b = D_b(real_b, is_train=train_status)
        prob_fake_b_is_b = D_b(fake_b, is_train=train_status)

        prob_fake_pool_a_is_a = D_a(fake_pool_a_sample, is_train=train_status)
        prob_fake_pool_b_is_b = D_b(fake_pool_b_sample, is_train=train_status)

        # calculate losses

        ## Generator loss
        lsgan_loss_a = losses.lsgan_loss_generator(prob_fake_a_is_a)
        lsgan_loss_b = losses.lsgan_loss_generator(prob_fake_b_is_b)

        ## Cycle-consistency loss
        ## Weighted by quality of the generated image
        cycle_consistency_loss_a = \
            self.lambda_a * losses.cycle_consistency_loss(
                real_images=real_a, generated_images=cycle_a) * \
            tf.maximum(0., tf.reduce_mean(prob_fake_b_is_b))

        cycle_consistency_loss_b = \
            self.lambda_b * losses.cycle_consistency_loss(
                real_images=real_b, generated_images=cycle_b) * \
            tf.maximum(0., tf.reduce_mean(prob_fake_a_is_a))

        g_loss_a2b = \
            cycle_consistency_loss_a + cycle_consistency_loss_b + 2 * lsgan_loss_b
        g_loss_b2a = \
            cycle_consistency_loss_b + cycle_consistency_loss_a + 2 * lsgan_loss_a

        ## Discriminator loss
        d_loss_a = losses.lsgan_loss_discriminator(
            prob_real_is_real=prob_real_a_is_a,
            prob_fake_is_real=prob_fake_pool_a_is_a,
        )
        d_loss_b = losses.lsgan_loss_discriminator(
            prob_real_is_real=prob_real_b_is_b,
            prob_fake_is_real=prob_fake_pool_b_is_b,
        )

        # construct optimizers

        global_step = tf.Variable(0, trainable=False)

        # make learning rate adjustable
        curr_lr_ = tf.Variable(self.learning_rate, trainable=False)

        optimizer = tf.train.AdamOptimizer(curr_lr_, beta1=0.5)

        model_vars = tf.trainable_variables()

        g_a2b_vars = [var for var in model_vars if 'G_a2b' in var.name]
        d_b_vars = [var for var in model_vars if 'D_b' in var.name]
        g_b2a_vars = [var for var in model_vars if 'G_b2a' in var.name]
        d_a_vars = [var for var in model_vars if 'D_a' in var.name]

        g_a2b_trainer = optimizer.minimize(g_loss_a2b, var_list=g_a2b_vars)
        d_b_trainer = optimizer.minimize(d_loss_b, var_list=d_b_vars)
        g_b2a_trainer = optimizer.minimize(g_loss_b2a, var_list=g_b2a_vars)
        d_a_trainer = optimizer.minimize(d_loss_a,
                                         var_list=d_a_vars,
                                         global_step=global_step)

        for var in model_vars:
            print(var.name)

        # Summary variables for tensorboard
        tf.summary.scalar("D_A(x)", tf.reduce_mean(prob_fake_a_is_a))
        tf.summary.scalar("D_B(x)", tf.reduce_mean(prob_fake_b_is_b))
        tf.summary.scalar("G_A2B_loss", g_loss_a2b)
        tf.summary.scalar("G_B2A_loss", g_loss_b2a)
        tf.summary.scalar("D_A_loss", d_loss_a)
        tf.summary.scalar("D_B_loss", d_loss_b)

        # Merge summary

        merged_summary = tf.summary.merge_all()

        # return handles
        return (real_a, real_b, dropout, train_status, fake_a, fake_b, cycle_a,
                cycle_b, fake_pool_a_sample, fake_pool_b_sample, g_loss_a2b,
                d_loss_b, g_loss_b2a, d_loss_a, g_a2b_trainer, d_b_trainer,
                g_b2a_trainer, d_a_trainer, curr_lr_, global_step,
                merged_summary)
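
A hypothetical sketch of how the handles returned by `_buildGraph` might be consumed in one training step (`model`, `batch_a`, `batch_b`, and the `pool_*.query` history buffers are placeholders, not part of the source):

(real_a, real_b, dropout, train_status, fake_a, fake_b, cycle_a, cycle_b,
 fake_pool_a_sample, fake_pool_b_sample, g_loss_a2b, d_loss_b, g_loss_b2a,
 d_loss_a, g_a2b_trainer, d_b_trainer, g_b2a_trainer, d_a_trainer,
 curr_lr_, global_step, merged_summary) = model._buildGraph()

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    feed = {real_a: batch_a, real_b: batch_b}
    # Update the generators first, keeping the fakes they produced.
    fake_b_val, _ = sess.run([fake_b, g_a2b_trainer], feed_dict=feed)
    fake_a_val, _ = sess.run([fake_a, g_b2a_trainer], feed_dict=feed)
    # Then update the discriminators on samples from the fake-history pools.
    feed.update({fake_pool_a_sample: pool_a.query(fake_a_val),
                 fake_pool_b_sample: pool_b.query(fake_b_val)})
    sess.run([d_a_trainer, d_b_trainer], feed_dict=feed)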