from keras import backend as K
# the element-wise backend BCE keeps the per-sample shape, so the
# weight tensor below can be applied sample by sample
from keras.backend import binary_crossentropy


def custom_cross_entropy_with_weight_tensor(self, y_true, y_pred):
    # y_true carries the payoffs in its last dimension; splitter()
    # (an external helper) separates labels from payoffs
    y_true, payoffs = splitter(y_true)

    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos

    # confusion-matrix indicator tensors for every sample in the batch
    tp = y_pos * y_pred_pos
    tn = y_neg * y_pred_neg
    fn = y_pos * y_pred_neg
    fp = y_neg * y_pred_pos

    if self.method == 'lay':
        tp_weight = K.abs(payoffs)
        fp_weight = K.abs(payoffs)
        tn_weight = 1
        fn_weight = 0.95
    elif self.method == 'back':
        tp_weight = K.abs(payoffs)  # tp (correctly backing)
        fp_weight = 1               # fp cost (backing the wrong one)
        tn_weight = 0               # tn (correctly not backing)
        fn_weight = K.abs(payoffs)  # fn cost (not backing when it should)
    else:
        raise ValueError("method must be 'lay' or 'back'")

    # per-sample weight from confusion-matrix membership
    weight_tensor = tp_weight * tp + fp_weight * fp + tn_weight * tn + fn_weight * fn
    loss = binary_crossentropy(y_true, y_pred)
    weighted_loss = loss * weight_tensor
    return weighted_loss
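# Usage sketch (assumptions, not part of the original): splitter() is a
# hypothetical helper packing the label in column 0 and the payoff in
# column 1, and PayoffLoss is a made-up wrapper class that binds the
# loss above so that self.method is available.
import numpy as np
from keras.models import Sequential
from keras.layers import Dense


def splitter(y_true):
    return y_true[..., :1], y_true[..., 1:]


class PayoffLoss:
    def __init__(self, method):
        self.method = method  # 'lay' or 'back'

    loss = custom_cross_entropy_with_weight_tensor  # bind the function above


model = Sequential([Dense(1, activation='sigmoid', input_dim=4)])
model.compile(optimizer='adam', loss=PayoffLoss('back').loss)

X = np.random.rand(32, 4).astype('float32')
labels = np.random.randint(0, 2, (32, 1)).astype('float32')
payoffs = np.random.uniform(1.0, 5.0, (32, 1)).astype('float32')
# y passed to fit() stacks [label, payoff] along the last axis
model.fit(X, np.concatenate([labels, payoffs], axis=-1), epochs=1, verbose=0)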
def recall(y_true, y_pred):
    """Recall metric.

    Only computes a batch-wise average of recall: for multi-label
    classification, the fraction of relevant items that are selected.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
def precision(y_true, y_pred):
    """Precision metric.

    Only computes a batch-wise average of precision: for multi-label
    classification, the fraction of selected items that are relevant.
    """
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
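# The two batch-wise metrics above are commonly combined into an F1
# score; this helper (the name f1 is my addition) simply reuses them:
def f1(y_true, y_pred):
    p = precision(y_true, y_pred)
    r = recall(y_true, y_pred)
    # harmonic mean; K.epsilon() guards against division by zero
    return 2 * (p * r) / (p + r + K.epsilon())
# e.g. model.compile(..., metrics=[precision, recall, f1])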
_EPSILON = K.epsilon()  # numerical guard against division by zero


def confusion(y_true, y_pred):
    y_true, payoffs = splitter(y_true)
    y_pred_pos = K.round(K.clip(y_pred, 0, 1))
    y_pred_neg = 1 - y_pred_pos
    y_pos = K.round(K.clip(y_true, 0, 1))
    y_neg = 1 - y_pos
    # each rate is normalised by the relevant class count: tp and fn by
    # the number of actual positives, tn and fp by the number of actual
    # negatives
    tp = K.sum(y_pos * y_pred_pos) / (_EPSILON + K.sum(y_pos))
    tn = K.sum(y_neg * y_pred_neg) / (_EPSILON + K.sum(y_neg))
    fn = K.sum(y_pos * y_pred_neg) / (_EPSILON + K.sum(y_pos))
    fp = K.sum(y_neg * y_pred_pos) / (_EPSILON + K.sum(y_neg))
    return tp, tn, fn, fp
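# Keras expects each metric function to return a single tensor, so the
# four rates can be exposed through thin wrappers (the wrapper names
# below are my own):
def tp_rate(y_true, y_pred): return confusion(y_true, y_pred)[0]
def tn_rate(y_true, y_pred): return confusion(y_true, y_pred)[1]
def fn_rate(y_true, y_pred): return confusion(y_true, y_pred)[2]
def fp_rate(y_true, y_pred): return confusion(y_true, y_pred)[3]
# e.g. model.compile(..., metrics=[tp_rate, tn_rate, fn_rate, fp_rate])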
smooth = 1e-12  # smoothing constant; exact value is an assumption


def jaccard_coef_int(y_true_values, y_predictions):
    # __author__ = Vladimir Iglovikov
    y_pred_pos = K.round(K.clip(y_predictions, 0, 1))
    # sum over batch and the two spatial axes, leaving one Jaccard
    # score per channel, then average across channels
    intersection = K.sum(y_true_values * y_pred_pos, axis=[0, -1, -2])
    sum_ = K.sum(y_true_values + y_pred_pos, axis=[0, -1, -2])
    jac = (intersection + smooth) / (sum_ - intersection + smooth)
    return K.mean(jac)
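# Quick numeric check (shapes are an assumption: the axis choice
# [0, -1, -2] sums over batch and the two trailing spatial axes, which
# matches a channels-first mask layout such as (batch, channels, H, W)):
import numpy as np

masks = K.constant(np.random.randint(0, 2, (4, 1, 8, 8)).astype('float32'))
preds = K.constant(np.random.rand(4, 1, 8, 8).astype('float32'))
print(K.eval(jaccard_coef_int(masks, preds)))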
def call(self, inputs):
    if K.dtype(inputs) != 'int32':
        inputs = K.cast(inputs, 'int32')
    # look up the embedding vector for every index
    out = K.gather(self.embeddings, inputs)
    # index 0 is treated as padding: its mask is 0, every other index
    # gets 1; expand_dims broadcasts the mask over the embedding axis
    mask = K.expand_dims(K.clip(K.cast(inputs, 'float32'), 0, 1), axis=-1)
    return out * mask
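# A minimal layer this call() could belong to (the class name
# MaskedEmbedding and the build() details are assumptions; the call
# body repeats the one above):
from keras.layers import Layer


class MaskedEmbedding(Layer):
    """Embedding lookup that zeroes the vectors of padding index 0."""

    def __init__(self, input_dim, output_dim, **kwargs):
        super().__init__(**kwargs)
        self.input_dim = input_dim
        self.output_dim = output_dim

    def build(self, input_shape):
        self.embeddings = self.add_weight(
            name='embeddings',
            shape=(self.input_dim, self.output_dim),
            initializer='uniform')
        super().build(input_shape)

    def call(self, inputs):
        if K.dtype(inputs) != 'int32':
            inputs = K.cast(inputs, 'int32')
        out = K.gather(self.embeddings, inputs)
        mask = K.expand_dims(K.clip(K.cast(inputs, 'float32'), 0, 1), axis=-1)
        return out * mask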
def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision


def recall_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    recall = true_positives / (possible_positives + K.epsilon())
    return recall
def __call__(self, w):
    # weight constraint: clip every weight into the interval [0, 1]
    return K.clip(w, 0., 1.)
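# Sketch of the constraint class this __call__ belongs to (the class
# name ClipToUnitInterval is my own):
from keras.constraints import Constraint
from keras.layers import Dense


class ClipToUnitInterval(Constraint):
    def __call__(self, w):
        return K.clip(w, 0., 1.)


# keeps every kernel weight of the layer inside [0, 1] after each update
layer = Dense(8, kernel_constraint=ClipToUnitInterval())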