Example #1
def get_f1(y_true, y_pred):  # taken from old Keras source code
    # K is the Keras backend, e.g. from keras import backend as K
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2 * (precision * recall) / (precision + recall + K.epsilon())
    return f1_val
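For reference, a minimal usage sketch (assuming `from keras import backend as K` and an existing Keras `model`), passing the function as a batch-wise metric:

# Hypothetical usage sketch: get_f1 has the (y_true, y_pred) signature Keras expects for metrics.
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=[get_f1])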
Example #2
    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision
Example #3
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall
Example #4
 def focal_loss_fixed(y_true, y_pred):
     # gamma is expected to be bound by an enclosing scope (see the wrapper sketch below)
     eps = 1e-6
     alpha = 0.5
     # clip predictions to improve the numerical stability of the focal loss (see issue 1 for more information)
     y_pred = K.clip(y_pred, eps, 1. - eps)
     pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
     pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
     return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
            - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0), axis=-1)
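The gamma used above is not defined inside the function; in this pattern it is bound by an enclosing closure. A minimal wrapper sketch (the name, defaults, and imports are assumptions, not from the source):

# Hypothetical wrapper sketch: bind gamma (and the other constants) in a closure
# and hand the inner function to Keras as the loss.
import tensorflow as tf
from keras import backend as K

def make_focal_loss(gamma=2., alpha=0.5, eps=1e-6):
    def focal_loss_fixed(y_true, y_pred):
        y_pred = K.clip(y_pred, eps, 1. - eps)
        pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
        pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
        return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
               - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0), axis=-1)
    return focal_loss_fixed

# model.compile(optimizer='adam', loss=make_focal_loss(gamma=2.))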
Example #5
def FocalLoss(target, output):
    # tfb is presumably the Keras backend (e.g. tensorflow.keras.backend);
    # tf.log is the TF1-style name for tf.math.log
    gamma = 5
    alpha = 0.3
    output = tfb.clip(output, tfb.epsilon(), 1 - tfb.epsilon())
    value = -alpha * target * tf.log(output + tfb.epsilon()) * tf.pow(
        1 - output, gamma) - (1 - alpha) * (1 - target) * tf.log(
            1 - output + tfb.epsilon()) * tf.pow(output, gamma)
    return tf.reduce_mean(value)
Example #6
 def loss(y_true, y_pred):
     # `weights` is expected to be bound by an enclosing scope (see the wrapper sketch below)
     # scale predictions so that the class probabilities of each sample sum to 1
     y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
     # clip to prevent NaNs and Infs
     y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
     # weighted cross-entropy, summed over the class axis
     loss = y_true * K.log(y_pred) * weights
     loss = -K.sum(loss, -1)
     return loss
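The weights variable above likewise comes from an enclosing closure. A minimal wrapper sketch (the wrapper name and the example weight vector are assumptions):

# Hypothetical wrapper sketch: bind a per-class weight vector and return the loss closure.
import numpy as np
from keras import backend as K

def weighted_categorical_crossentropy(weights):
    weights = K.variable(weights)
    def loss(y_true, y_pred):
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        return -K.sum(y_true * K.log(y_pred) * weights, axis=-1)
    return loss

# model.compile(optimizer='adam', loss=weighted_categorical_crossentropy(np.array([1., 2., 0.5])))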
Example #7
def weighted_BCE(y_true, y_pred):
    # Despite the name, this is a weighted categorical cross-entropy: predictions are
    # renormalized and only the true-class log-probabilities are penalized.
    # tfb is presumably the Keras backend (e.g. tensorflow.keras.backend).
    #     weights = tfb.variable(1/np.array([0.07050923, 0.24034695, 0.19802742, 0.09862899, 0.16046447, 0.08317012, 0.10002798, 0.04882485]))
    weights = tfb.variable(np.array([1, 1, 1, 1, 1, 1, 1, 1]))
    # scale predictions so that the class probabilities of each sample sum to 1
    y_pred /= tfb.sum(y_pred, axis=-1, keepdims=True)
    # clip to prevent NaNs and Infs
    y_pred = tfb.clip(y_pred, tfb.epsilon(), 1 - tfb.epsilon())
    loss = y_true * tfb.log(y_pred) * weights
    loss = -tfb.sum(loss, -1)
    return loss
Example #8
class focal_loss:

    """ A loss function similar to cross_entropy

        # Usage
            model.compile('sgd',loss=focal_loss.loss,.......)

        # Arguments
            class_weights : weights for each class to solve the class imbalance problem.
                            dtype --> array   default --> None
            pixel_weights : weights for each pixels in order to segment certain part of the image clearly.
                            dtype --> array   default --> None
    """

    def c_weights(self, x):
        # return the class weights if given; list(None) raises TypeError,
        # so a missing weight array falls back to the scalar 1
        try:
            list(x)
            return x
        except TypeError:
            return 1

    def p_weights(self, x):
        # same fallback behaviour for the pixel weights
        try:
            list(x)
            return x
        except TypeError:
            return 1

    def clipping(self, x):
        # clip predictions away from 0 and 1 so K.log stays finite
        return K.clip(x, K.epsilon(), 1. - K.epsilon())


    def __init__(self,class_weights=None, pixel_weights=None, gamma=2):
        self.class_weights = class_weights
        self.gamma = gamma
        self.pixel_weights = pixel_weights

    def loss(self, y_true, y_pred):

        """ Computes the focal loss.

            # Arguments
                y_true : true class values
                y_pred : predicted class values from the model
            # Returns
                fl : mean focal loss for the given batch
        """
        y_pred = self.clipping(y_pred)
        # per-pixel focal term, summed over the class axis
        fl = -(K.sum(self.c_weights(self.class_weights) * K.pow(1. - y_pred, self.gamma) * (y_true * K.log(y_pred)), axis=-1))
        # weight and sum over the spatial axes (assumes 4D segmentation output), then average over the batch
        fl = K.sum(self.p_weights(self.pixel_weights) * fl, axis=(1, 2))
        fl = K.mean(fl, axis=0)
        return fl / 1000    # scale down the loss to prevent gradient explosion
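A minimal instantiation sketch (the weight values are assumptions; assumes an existing Keras model):

# Hypothetical usage sketch: instantiate the class and pass the bound method as the loss.
import numpy as np

fl = focal_loss(class_weights=np.array([1., 2., 0.5]), gamma=2)
model.compile('sgd', loss=fl.loss, metrics=['accuracy'])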
Example #9
    def mean_log_Gaussian_like(self, y_true, parameters):
        """Mean log Gaussian likelihood.
        Note: the 'c' variable is obtained as a global variable.
        """
        # ktf is presumably the Keras backend; the network outputs, per class,
        # 9 means, 9 sigmas and 1 mixture weight (a mixture-density head)
        components = ktf.reshape(parameters, [-1, 2 * 9 + 1, self.n_classes])

        mu = components[:, 0:9, :]
        sigma = components[:, 9:18, :]
        alpha = components[:, 18, :]

        alpha = ktf.softmax(ktf.clip(alpha, 1e-8, 1.))

        # per-component log-likelihood of the Gaussian mixture
        exponent = ktf.log(alpha) - .5 * float(self.c) * ktf.log(2 * np.pi) \
            - ktf.sum(ktf.log(sigma), axis=1) \
            - ktf.sum((ktf.expand_dims(y_true, 2) - mu)**2 / (2 * (sigma)**2), axis=1)

        # log_sum_exp is an external helper (see the sketch below)
        log_gauss = log_sum_exp(exponent, axis=1)
        res = -ktf.mean(log_gauss)
        return res
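The log_sum_exp helper is not shown in the source; a numerically stable sketch using the same backend alias might look like this (an assumption, not the original implementation):

# Hypothetical helper sketch: log-sum-exp trick over the given axis.
def log_sum_exp(x, axis=None):
    x_max = ktf.max(x, axis=axis, keepdims=True)
    return ktf.log(ktf.sum(ktf.exp(x - x_max), axis=axis, keepdims=True)) + x_max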
Example #10
 def kld(self, product_embeds1, product_embeds2):
     product_embeds2 = K.clip(product_embeds2, K.epsilon(), 1)
     product_embeds1 = K.clip(product_embeds1, K.epsilon(), 1)
     return K.sum(product_embeds1 * K.log(product_embeds1 / product_embeds2), axis=-1)
Example #11
 def kld(self, E, P):
     # KL divergence KL(P || E); both distributions are clipped away from zero
     E = K.clip(E, K.epsilon(), 1)
     P = K.clip(P, K.epsilon(), 1)
     return K.sum(P * K.log(P / E), axis=-1)
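A quick sanity check of the formula (a sketch; assumes K is the Keras backend):

# Hypothetical check: KL(P || E) is 0 for identical distributions and positive otherwise.
p = K.constant([[0.25, 0.25, 0.25, 0.25]])
q = K.constant([[0.70, 0.10, 0.10, 0.10]])
print(K.eval(K.sum(p * K.log(p / p), axis=-1)))   # -> [0.]
print(K.eval(K.sum(p * K.log(p / q), axis=-1)))   # -> positive value (~0.43)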
Example #12
def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
Example #13
def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
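These two batch-wise metrics are commonly combined into an F1 metric; a companion sketch (not shown in the source):

# Hypothetical companion sketch: F1 from the batch-wise precision and recall above.
def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2 * (precision * recall) / (precision + recall + K.epsilon())

# model.compile(optimizer='adam', loss='binary_crossentropy',
#               metrics=['accuracy', f1_m, precision_m, recall_m])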