def gradient_penalty_loss(self, y_true, y_pred, phi):
    """
    Binary cross-entropy term plus a gradient penalty on phi to ensure smoothness.
    Requires numpy as np, the Keras backend as K, and the project helper
    numerical_gradient_3D (not shown in this snippet).
    """
    # Adversarial term. A commented-out earlier variant used, for y_true == 0,
    #   lr = -K.log(K.maximum(y_pred, 1e-15))   # clamp avoids log(0)
    # so the generator gets a small loss when the discriminator outputs y_pred
    # close to 1, and no loss otherwise; the active version uses binary
    # cross-entropy instead.
    lr = K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)

    # Compute the numerical gradient of phi.
    gradients = numerical_gradient_3D(phi)
    # if self.DEBUG: gradients = K.print_tensor(gradients, message='gradients are:')

    # Squared Euclidean norm of the gradient: square each component ...
    gradients_sqr = K.square(gradients)
    # ... and sum over every non-batch axis.
    gradients_sqr_sum = K.sum(gradients_sqr,
                              axis=np.arange(1, len(gradients_sqr.shape)))
    # An alternative penalty would take the square root first,
    #   gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    #   gradient_penalty = K.square(1 - gradient_l2_norm)   # lambda * (1 - ||grad||)^2
    # and return K.mean(gradient_l2_norm) + lr; here the squared norm is used directly.
    return gradients_sqr_sum + lr
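The helper numerical_gradient_3D is not defined in these snippets. A minimal forward finite-difference sketch, assuming phi is a 5-D tensor of shape (batch, x, y, z, channels) and that K is the Keras backend (the implementation below is an assumption, not the project's actual helper):

from tensorflow.keras import backend as K

def numerical_gradient_3D(phi):
    # Forward finite differences of phi along its three spatial axes.
    # Assumes phi has shape (batch, x, y, z, channels).
    dx = phi[:, 1:, :, :, :] - phi[:, :-1, :, :, :]
    dy = phi[:, :, 1:, :, :] - phi[:, :, :-1, :, :]
    dz = phi[:, :, :, 1:, :] - phi[:, :, :, :-1, :]

    # Zero-pad the trailing slice of each difference so all three keep phi's shape.
    dx = K.concatenate([dx, K.zeros_like(phi[:, :1, :, :, :])], axis=1)
    dy = K.concatenate([dy, K.zeros_like(phi[:, :, :1, :, :])], axis=2)
    dz = K.concatenate([dz, K.zeros_like(phi[:, :, :, :1, :])], axis=3)

    # Stack the directional derivatives along the channel axis.
    return K.concatenate([dx, dy, dz], axis=-1)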
Example #2
def gradient_penalty_loss(self, y_true, y_pred, phi):
    """
    Binary cross-entropy term plus a gradient penalty on phi to ensure smoothness.
    """
    lr = K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)

    # Compute the numerical gradient of phi.
    gradients = numerical_gradient_3D(phi)
    # if self.DEBUG: gradients = K.print_tensor(gradients, message='gradients are:')

    # Euclidean norm of the gradient: square each component ...
    gradients_sqr = K.square(gradients)
    # ... sum over every non-batch axis ...
    gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))
    # ... and take the square root.
    gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    # A lambda * (1 - ||grad||)^2 penalty per sample would be
    #   gradient_penalty = K.square(1 - gradient_l2_norm)
    # Return the mean over the batch plus the cross-entropy term.
    return K.mean(gradient_l2_norm) + lr
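Keras expects loss functions with a (y_true, y_pred) signature, so the extra phi argument has to be bound before compilation. A minimal sketch, assuming graph-mode Keras, an instance model_wrapper of the class defining these losses, a symbolic deformation-field tensor phi_tensor, and a Keras model named model (all three names are hypothetical):

from functools import partial

# Bind the deformation field so the loss matches Keras' (y_true, y_pred) signature.
gp_loss = partial(model_wrapper.gradient_penalty_loss, phi=phi_tensor)
gp_loss.__name__ = 'gradient_penalty_loss'  # Keras uses the name in training logs

model.compile(optimizer='adam', loss=gp_loss)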
Example #3
def smoothness_loss(self, y_true, y_pred, phi):
    """
    Mean squared error term plus a gradient penalty on phi to ensure smoothness.
    """
    # Mean squared error data term.
    mse_loss = K.mean(K.square(y_pred - y_true), axis=-1)

    # Compute the numerical gradient of phi.
    gradients = numerical_gradient_3D(phi)
    # if self.DEBUG: gradients = K.print_tensor(gradients, message='gradients are:')

    # Squared Euclidean norm of the gradient: square each component ...
    gradients_sqr = K.square(gradients)
    # ... and sum over every non-batch axis.
    gradients_sqr_sum = K.sum(gradients_sqr, axis=np.arange(1, len(gradients_sqr.shape)))
    # An alternative penalty would take the square root first,
    #   gradient_l2_norm = K.sqrt(gradients_sqr_sum)
    #   gradient_penalty = K.square(1 - gradient_l2_norm)   # lambda * (1 - ||grad||)^2
    # and return K.mean(gradient_l2_norm) + mse_loss; the squared norm is used here.
    return K.mean(gradients_sqr_sum) + mse_loss
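The comments in all three losses refer to a weighting factor lambda that never appears in the returned expression. A hedged variant that exposes it explicitly, assuming numpy as np and the Keras backend as K as above (the smoothness_weight parameter and the function name are additions for illustration, not part of the original code):

def smoothness_loss_weighted(self, y_true, y_pred, phi, smoothness_weight=1.0):
    # MSE data term plus a weighted squared-gradient penalty on phi.
    # smoothness_weight plays the role of the lambda mentioned in the comments.
    mse_loss = K.mean(K.square(y_pred - y_true), axis=-1)
    gradients = numerical_gradient_3D(phi)
    gradients_sqr_sum = K.sum(K.square(gradients),
                              axis=np.arange(1, len(gradients.shape)))
    return smoothness_weight * K.mean(gradients_sqr_sum) + mse_loss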