def Active_Contour_Loss(y_true, y_pred):
    """Active-contour segmentation loss: a length (contour-smoothness) term
    plus a region (inside/outside fit) term.

    Parameters
    ----------
    y_true : keras tensor
        target mask; indexing `[:, 0, :, :]` implies a channels-first
        (batch, channel, H, W) layout — TODO confirm with callers.
    y_pred : keras tensor
        predicted mask, same layout as `y_true`.

    Returns
    -------
    keras tensor
        scalar loss value.
    """
    # --- length term: finite differences of the predicted mask ---
    x = y_pred[:, :, 1:, :] - y_pred[:, :, :-1, :]  # horizontal and vertical directions
    y = y_pred[:, :, :, 1:] - y_pred[:, :, :, :-1]

    # crop so both squared-gradient maps share the same spatial extent
    delta_x = x[:, :, 1:, :-2] ** 2
    delta_y = y[:, :, :-2, 1:] ** 2
    delta_u = K.abs(delta_x + delta_y)

    epsilon = 0.00000001  # parameter to avoid square root of zero in practice
    w = 1  # weight of the length term
    lenth = w * K.sum(K.sqrt(delta_u + epsilon))  # equ.(11) in the paper

    # --- region term ---
    # The original implementation built C_1 = np.ones((480, 320)) and
    # C_2 = np.zeros((480, 320)), hard-coding the image size. Since
    # (y_true - 1)**2 and (y_true - 0)**2 are elementwise identical to the
    # subtractions against those constant maps, the constants are folded in,
    # which also makes the loss work for any input resolution.
    region_in = K.abs(K.sum(y_pred[:, 0, :, :] * ((y_true[:, 0, :, :] - 1) ** 2)))  # equ.(12) in the paper
    region_out = K.abs(K.sum((1 - y_pred[:, 0, :, :]) * (y_true[:, 0, :, :] ** 2)))  # equ.(12) in the paper

    lambdaP = 1  # lambda parameter could be various.
    loss = lenth + lambdaP * (region_in + region_out)
    return loss
def tversky_loss(y_true, y_pred, alpha=0.3, beta=0.7, smooth=1e-10):
    """Tversky loss function.

    Parameters
    ----------
    y_true : keras tensor
        tensor containing target mask.
    y_pred : keras tensor
        tensor containing predicted mask.
    alpha : float
        real value, weight of '0' class (false positives).
    beta : float
        real value, weight of '1' class (false negatives).
    smooth : float
        small real value used for avoiding division by zero error.

    Returns
    -------
    keras tensor
        tensor containing tversky loss (negated Tversky index).
    """
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)

    tp = K.sum(flat_true * flat_pred)
    # alpha penalizes false positives, beta penalizes false negatives
    fp_and_fn = alpha * K.sum(flat_pred * (1 - flat_true)) + beta * K.sum((1 - flat_pred) * flat_true)

    tversky_index = (tp + smooth) / ((tp + smooth) + fp_and_fn)
    return -tversky_index  # might be 1 -
def weighted_dice_loss(y_true, y_pred, weight=None):
    """Dice loss weighted by a per-pixel weight map.

    Parameters
    ----------
    y_true : keras tensor
        target mask.
    y_pred : keras tensor
        predicted mask.
    weight : keras tensor, optional
        per-pixel weight map. When None (the original 2-argument behavior),
        it is derived from the target via get_weight_matrix(y_true).
        Accepting an explicit weight makes the signature compatible with the
        3-argument call site elsewhere in this file, which previously would
        have raised a TypeError.

    Returns
    -------
    keras tensor
        scalar weighted dice loss in [0, 1].
    """
    if weight is None:
        weight = get_weight_matrix(y_true)
    smooth = 1.
    w, m1, m2 = weight * weight, y_true, y_pred
    intersection = (m1 * m2)
    score = (2. * K.sum(w * intersection) + smooth) / (K.sum(w * m1) + K.sum(w * m2) + smooth)
    # score is already scalar; K.sum is a no-op kept from the original code
    loss = 1. - K.sum(score)
    return loss
def f1_score(y_true, y_pred):
    """Return the F1 score (harmonic mean of precision and recall).

    Predictions and targets are clipped to [0, 1] and rounded, so values
    >= 0.5 count as positive.
    """
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    actual_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))

    # K.epsilon() guards against division by zero when a count is empty
    precision = tp / (predicted_positives + K.epsilon())
    recall = tp / (actual_positives + K.epsilon())

    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
def R(y_true, y_pred):
    """Recall at a fixed 0.20 decision threshold."""
    threshold = 0.20
    # pixels counted as hits / positives once clipped values exceed the threshold
    hits = K.cast(K.greater(K.clip(y_true * y_pred, 0, 1), threshold), 'float32')
    positives = K.cast(K.greater(K.clip(y_true, 0, 1), threshold), 'float32')
    # epsilon avoids division by zero when there are no positives
    return K.sum(hits) / (K.sum(positives) + K.epsilon())
def P(y_true, y_pred):
    """Precision at a fixed 0.20 decision threshold."""
    threshold = 0.20
    # pixels counted as hits / predicted-positives once clipped values exceed the threshold
    hits = K.cast(K.greater(K.clip(y_true * y_pred, 0, 1), threshold), 'float32')
    predicted = K.cast(K.greater(K.clip(y_pred, 0, 1), threshold), 'float32')
    # epsilon avoids division by zero when nothing is predicted positive
    return K.sum(hits) / (K.sum(predicted) + K.epsilon())
def f1_loss(y_true, y_pred):
    """Soft (differentiable) F1 loss: 1 minus the mean per-column F1.

    Counts are accumulated along axis 0 (per class/column) without rounding,
    so gradients flow through the predictions.

    Parameters
    ----------
    y_true : keras tensor
        target labels.
    y_pred : keras tensor
        predicted probabilities.

    Returns
    -------
    keras tensor
        scalar loss value in [0, 1].
    """
    tp = K.sum(K.cast(y_true * y_pred, 'float'), axis=0)
    fp = K.sum(K.cast((1 - y_true) * y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true * (1 - y_pred), 'float'), axis=0)
    # NOTE: the original also computed true negatives, which F1 never uses;
    # that dead computation has been removed.

    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())

    f1 = 2 * p * r / (p + r + K.epsilon())
    # replace NaNs (columns with no positives at all) with 0 so they
    # contribute a worst-case F1 instead of poisoning the mean
    f1 = tf.where(tf.math.is_nan(f1), tf.zeros_like(f1), f1)
    return 1 - K.mean(f1)
def weighted_bce_dice_loss(y_true, y_pred):
    """Combine border-weighted BCE and dice losses for mask segmentation.

    A per-pixel weight map is built that up-weights pixels near mask
    borders (found via an 11x11 average pool), then passed to the two
    component losses defined elsewhere in this file.
    """
    y_true = K.cast(y_true, 'float32')
    y_pred = K.cast(y_pred, 'float32')
    # if we want to get same size of output, kernel size must be odd number
    averaged_mask = K.pool2d(
        y_true, pool_size=(11, 11), strides=(1, 1), padding='same', pool_mode='avg')
    # border pixels: neighbourhood average strictly between 0.005 and 0.995,
    # i.e. the 11x11 window straddles a mask edge
    border = K.cast(K.greater(averaged_mask, 0.005), 'float32') * K.cast(K.less(averaged_mask, 0.995), 'float32')
    weight = K.ones_like(averaged_mask)
    w0 = K.sum(weight)
    weight += border * 2  # border pixels get 3x the base weight
    w1 = K.sum(weight)
    weight *= (w0 / w1)  # renormalize so the total weight mass is unchanged
    # NOTE(review): the BCE term is multiplied by 0.0, so only the dice term
    # contributes to the loss -- presumably a tuning artifact; confirm it is
    # intentional.
    # NOTE(review): weighted_dice_loss is called here with a third `weight`
    # argument -- verify its signature actually accepts one.
    loss = 0.0 * weighted_bce_loss(y_true, y_pred, weight) + \
        weighted_dice_loss(y_true, y_pred, weight)
    return loss
def jaccard_coef_logloss(y_true, y_pred, smooth=1e-10):
    """Loss function based on jaccard coefficient.

    Parameters
    ----------
    y_true : keras tensor
        tensor containing target mask.
    y_pred : keras tensor
        tensor containing predicted mask.
    smooth : float
        small real value used for avoiding division by zero error.

    Returns
    -------
    keras tensor
        tensor containing negative logarithm of jaccard coefficient.
    """
    flat_true = K.flatten(y_true)
    flat_pred = K.flatten(y_pred)

    intersection = K.sum(flat_true * flat_pred)
    false_pos = K.sum(flat_pred) - intersection
    false_neg = K.sum(flat_true) - intersection

    # smoothed IoU; denominator equals |union| + smooth
    jaccard = (intersection + smooth) / (smooth + intersection + false_neg + false_pos)
    return -K.log(jaccard + smooth)  # might be 1 -
def euclidean_distance(x, y):
    """Return the row-wise Euclidean distance between x and y.

    The sum of squared differences is taken along axis 1 with keepdims,
    and clamped to K.epsilon() before the square root so the gradient is
    finite at zero distance.
    """
    squared_diff = K.square(x - y)
    sum_of_squares = K.sum(squared_diff, axis=1, keepdims=True)
    return K.sqrt(K.maximum(sum_of_squares, K.epsilon()))