        def build_loss(d_real, d_real_logits, d_fake, d_fake_logits, label,
                       real_image, fake_image):
            alpha = 0.9  # label-smoothing weight for the extra "fake" class
            # Real samples keep their class label and get a 0 in the appended
            # (n+1)-th "fake" slot.
            real_label = tf.concat(
                [label, tf.zeros([self.batch_size, 1])], axis=1)
            # Fake samples get a smoothed target: alpha on the "fake" slot and
            # the remaining (1 - alpha) spread uniformly over the n real classes.
            fake_label = tf.concat(
                [(1 - alpha) * tf.ones([self.batch_size, n]) / n,
                 alpha * tf.ones([self.batch_size, 1])],
                axis=1)

            # Discriminator/classifier loss
            s_loss = tf.reduce_mean(huber_loss(label, d_real[:, :-1]))
            d_loss_real = tf.nn.softmax_cross_entropy_with_logits(
                logits=d_real_logits, labels=real_label)
            d_loss_fake = tf.nn.softmax_cross_entropy_with_logits(
                logits=d_fake_logits, labels=fake_label)
            d_loss = tf.reduce_mean(d_loss_real + d_loss_fake)

            # Generator loss: push the discriminator's "fake"-class probability
            # for generated samples toward zero (i.e., fool the discriminator).
            g_loss = tf.reduce_mean(tf.log(d_fake[:, -1]))

            # Reconstruction term; its weight (recon_weight) is annealed
            # during training.
            g_loss += tf.reduce_mean(huber_loss(
                real_image, fake_image)) * self.recon_weight

            GAN_loss = tf.reduce_mean(d_loss + g_loss)

            # Classification accuracy
            correct_prediction = tf.equal(tf.argmax(d_real[:, :-1], 1),
                                          tf.argmax(self.label, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return s_loss, d_loss_real, d_loss_fake, d_loss, g_loss, GAN_loss, accuracy
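The helper huber_loss is not defined in this snippet (it comes from the repo's ops module). A minimal sketch, assuming it follows the standard Huber definition; the delta keyword and its default are assumptions:

def huber_loss(labels, predictions, delta=1.0):
    # Quadratic penalty for small residuals, linear penalty for large ones.
    residual = tf.abs(predictions - labels)
    condition = tf.less(residual, delta)
    small_res = 0.5 * tf.square(residual)
    large_res = delta * residual - 0.5 * tf.square(delta)
    return tf.where(condition, small_res, large_res)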
Example #2
        def build_loss(d_real, d_real_logits, d_fake, d_fake_logits, label, real_image, fake_image):
            """[using model output to build loss]
            
            Arguments:
                d_real {[Tensor]} -- [discrimintor loss for real image]
                d_real_logits {[type]} -- [description]
                d_fake {[type]} -- [discrimintor loss for fake image]
                d_fake_logits {[type]} -- [description]
                label {[type]} -- [description]
                real_image {[type]} -- [description]
                fake_image {[type]} -- [description]
            
            Returns:
                [s_loss, d_loss_real, d_loss_fake, d_loss, g_loss, GAN_loss, accuracy] -- [returned parameter group]
            """
            alpha = 0.9
            real_label = tf.concat([label, tf.zeros([self.batch_size, 1])], axis=1)
            # Assign fake samples to the extra (n+1)-th class, smoothed by alpha.
            fake_label = tf.concat(
                [(1 - alpha) * tf.ones([self.batch_size, n]) / n,
                 alpha * tf.ones([self.batch_size, 1])], axis=1)

            # Discriminator/classifier loss
            s_loss = tf.reduce_mean(huber_loss(label, d_real[:, :-1]))
            d_loss_real = tf.nn.softmax_cross_entropy_with_logits(logits=d_real_logits, labels=real_label)
            d_loss_fake = tf.nn.softmax_cross_entropy_with_logits(logits=d_fake_logits, labels=fake_label)
            d_loss = tf.reduce_mean(d_loss_real + d_loss_fake)

            # Generator loss
            g_loss = tf.reduce_mean(tf.log(d_fake[:, -1]))

            # Reconstruction term; recon_weight is annealed during training
            g_loss += tf.reduce_mean(huber_loss(real_image, fake_image)) * self.recon_weight

            GAN_loss = tf.reduce_mean(d_loss + g_loss)

            # Classification accuracy
            correct_prediction = tf.equal(tf.argmax(d_real[:, :-1], 1), tf.argmax(self.label, 1))
            accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
            return s_loss, d_loss_real, d_loss_fake, d_loss, g_loss, GAN_loss, accuracy
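For context, a minimal sketch of how the returned losses are typically attached to separate optimizers in a TF1-style GAN; the variable scope names, learning rate, and beta1 value here are assumptions, not taken from the snippet:

# Hypothetical training setup; scope names and hyperparameters are assumptions.
d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Discriminator')
g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='Generator')
d_train_op = tf.train.AdamOptimizer(1e-4, beta1=0.5).minimize(d_loss, var_list=d_vars)
g_train_op = tf.train.AdamOptimizer(1e-4, beta1=0.5).minimize(g_loss, var_list=g_vars)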
        def build_loss(d_real, d_real_logits, d_fake, d_fake_logits, label,
                       real_image, fake_image):
            #alpha = 0.9
            real_label = tf.concat(
                [label, tf.zeros([self.batch_size, 1])], axis=1)
            #fake_label = tf.concat([(1-alpha)*tf.ones([self.batch_size, n])/n, alpha*tf.ones([self.batch_size, 1])], axis=1)
            fake_label = tf.concat(
                [tf.zeros([self.batch_size, n]),
                 tf.ones([self.batch_size, 1])], axis=1)

            # Supervised loss
            s_loss = tf.reduce_mean(huber_loss(label[:, :n], d_real[:, :n]))

            # Discriminator/classifier loss {{

            d_loss_real_rf = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_real_logits[:, -1], labels=real_label[:, -1])
            d_loss_fake_rf = tf.nn.sigmoid_cross_entropy_with_logits(
                logits=d_fake_logits[:, -1], labels=fake_label[:, -1])

            if self.config.model in ('VA', 'BOTH'):
                d_loss_real_va = tf.losses.mean_squared_error(
                    real_label[:, :n_VA], d_real_logits[:, :n_VA])
                d_loss_fake_va = tf.losses.mean_squared_error(
                    fake_label[:, :n_VA], d_fake_logits[:, :n_VA])
                #d_loss_real_va = concordance_cc2(d_real_logits[:,:n_VA], real_label[:,:n_VA])
                #d_loss_fake_va = concordance_cc2(d_fake_logits[:,:n_VA], fake_label[:,:n_VA])

            if self.config.model in ('AU', 'BOTH'):
                d_loss_real_au = tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=d_real_logits[:, n - n_AU:n],
                    labels=real_label[:, n - n_AU:n])
                d_loss_fake_au = tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=d_fake_logits[:, n - n_AU:n],
                    labels=fake_label[:, n - n_AU:n])

            if self.config.model == 'BOTH':
                d_loss_real = tf.reduce_mean(
                    tf.reduce_mean(d_loss_real_va) + tf.reduce_mean(d_loss_real_au) +
                    tf.reshape(d_loss_real_rf, [self.batch_size, 1]))
                d_loss_fake = tf.reduce_mean(
                    tf.reduce_mean(d_loss_fake_va) + tf.reduce_mean(d_loss_fake_au) +
                    tf.reshape(d_loss_fake_rf, [self.batch_size, 1]))

            elif self.config.model == 'AU':
                d_loss_real = tf.reduce_mean(
                    d_loss_real_au +
                    tf.reshape(d_loss_real_rf, [self.batch_size, 1]))
                d_loss_fake = tf.reduce_mean(
                    d_loss_fake_au +
                    tf.reshape(d_loss_fake_rf, [self.batch_size, 1]))

            elif self.config.model == 'VA':
                d_loss_real = tf.reduce_mean(
                    tf.reduce_mean(d_loss_real_va) +
                    tf.reshape(d_loss_real_rf, [self.batch_size, 1]))
                d_loss_fake = tf.reduce_mean(
                    tf.reduce_mean(d_loss_fake_va) +
                    tf.reshape(d_loss_fake_rf, [self.batch_size, 1]))

            d_loss = tf.reduce_mean(d_loss_real + d_loss_fake)
            # }}

            # Generator loss {{
            g_loss = tf.reduce_mean(tf.log(d_fake[:, -1]))
            # Reconstruction term; recon_weight is annealed during training
            g_loss += tf.reduce_mean(huber_loss(
                real_image, fake_image)) * self.recon_weight
            # }}

            GAN_loss = tf.reduce_mean(d_loss + g_loss)

            return s_loss, d_loss_real, d_loss_fake, d_loss, g_loss, GAN_loss
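To make the slicing in this last variant concrete, here is an illustration of the assumed label layout for the 'BOTH' setting: the first n_VA slots hold valence/arousal regression targets, the next n_AU slots hold binary action-unit labels, and the final slot is the real/fake flag. The sizes and values below are placeholders, assuming n = n_VA + n_AU; they are not taken from the snippet:

import numpy as np

# Hypothetical sizes: 2 valence/arousal targets + 12 binary action units (assumed).
n_VA, n_AU = 2, 12
n = n_VA + n_AU
# One row of real_label: [VA regression targets | AU labels | real/fake flag = 0]
real_row = np.concatenate([[0.3, -0.1],                    # valence, arousal
                           np.random.randint(0, 2, n_AU),  # binary AU labels
                           [0.0]])                         # 0 = real
# One row of fake_label: all supervised slots zero, real/fake flag = 1
fake_row = np.concatenate([np.zeros(n), [1.0]])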