def custom_cross_entropy(self, y_true, y_pred):
        # y_true carries the payoffs in its last dimension; splitter (defined
        # elsewhere) separates the class labels from the payoffs
        y_true, payoffs = splitter(y_true)

        if self.method == 'lay':
            tp_weight = K.abs(payoffs)
            fp_weight = K.abs(payoffs)
            tn_weight = 1
            fn_weight = 0.95

        elif self.method == 'back':
            tp_weight = K.abs(payoffs)  # opportunity cost
            tn_weight = 0  # opportunity cost
            fp_weight = 1  # cost
            fn_weight = K.abs(payoffs)  # cost

        loss = -K.mean(
            # fn cost (not backing when it should have)
            fn_weight * y_true * K.log(y_pred + _EPSILON)
            # fp cost (backing the wrong one)
            + fp_weight * (1 - y_true) * K.log(1 - y_pred + _EPSILON)
            # + tp_weight * y_true * K.log(1 - y_pred + _EPSILON)      # tp (correctly backing)
            # + tn_weight * (1 - y_true) * K.log(y_pred + _EPSILON)    # tn (correctly not backing)
        )

        return loss
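This is a method, so splitter and _EPSILON are assumed to be defined elsewhere in the same class or module. A minimal sketch of those missing pieces and of how the loss might be wired into a model; the splitter column layout, the BettingLoss class name, and the compile call are illustrative assumptions, not part of the original snippet:

from keras import backend as K

_EPSILON = K.epsilon()

def splitter(y_true):
    # assumed layout: column 0 holds the 0/1 label, column 1 holds the payoff
    return y_true[:, 0:1], y_true[:, 1:2]

# usage sketch: pack labels and payoffs together into y_true, then pass the bound method as the loss
# loss_obj = BettingLoss(method='back')
# model.compile(optimizer='adam', loss=loss_obj.custom_cross_entropy)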
Example #2
    def triplet_loss_v2(y_true, y_pred, N=512, beta=512, epsilon=1e-8):
        """
        参考:https://towardsdatascience.com/lossless-triplet-loss-7e932f990b24
        !!!需要输出层使用sigmoid激活函数!

        Implementation of the triplet loss function

        Arguments:
        y_true -- true labels, required when you define a loss in Keras, you don't need it in this function.
        y_pred -- python list containing three objects:
                anchor -- the encodings for the anchor data
                positive -- the encodings for the positive data (similar to anchor)
                negative -- the encodings for the negative data (different from anchor)
        N  --  The number of dimension
        beta -- The scaling factor, N is recommended
        epsilon -- The Epsilon value to prevent ln(0)


        Returns:
        loss -- real number, value of the loss
        """
        anc, pos, neg = y_pred[:, 0:N], y_pred[:, N:N * 2], y_pred[:, N * 2:]

        # squared Euclidean distances
        pos_dist = K.sum(K.square(anc - pos), axis=-1, keepdims=True)
        neg_dist = K.sum(K.square(anc - neg), axis=-1, keepdims=True)

        # nonlinear log transform from the "lossless triplet loss" article
        pos_dist = -K.log(-(pos_dist / beta) + 1 + epsilon)
        neg_dist = -K.log(-((N - neg_dist) / beta) + 1 + epsilon)

        # compute loss
        loss = neg_dist + pos_dist

        return loss
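This loss assumes the model's output is the concatenation of three sigmoid-activated encodings of length N. A minimal sketch of a matching model head, assuming triplet_loss_v2 is reachable at module level; the input size, the shared encoder, and the optimizer are illustrative assumptions:

from keras.layers import Input, Dense, Concatenate
from keras.models import Model

N = 512
anchor_in, pos_in, neg_in = Input((128,)), Input((128,)), Input((128,))
encoder = Dense(N, activation='sigmoid')  # shared encoder; sigmoid keeps each encoding in [0, 1]
merged = Concatenate(axis=-1)([encoder(anchor_in), encoder(pos_in), encoder(neg_in)])
model = Model([anchor_in, pos_in, neg_in], merged)
# y_true is ignored by the loss, so dummy targets (e.g. zeros of shape (samples, 3 * N)) suffice
model.compile(optimizer='adam', loss=triplet_loss_v2)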
Example #3
def reed_hard_loss(y_true, y_pred):
    '''Expects a binary class matrix instead of a vector of scalar classes.
    '''
    # "hard" bootstrapping term: negative log-probability of the most confident class
    return -K.log(K.max(y_pred, axis=1, keepdims=True) + 1e-8)
Example #4
def reed_soft_loss(y_true, y_pred):
    '''Expects a binary class matrix instead of a vector of scalar classes.
    '''
    # "soft" bootstrapping term: entropy of the predictions, -sum_k q_k * log(q_k)
    return -K.batch_dot(y_pred, K.log(y_pred + 1e-8), axes=(1, 1))
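Both Reed terms come from the bootstrapping approach to noisy labels (Reed et al.); on their own they ignore y_true and act only as regularizers. A hedged sketch of how they are usually combined with crossentropy, reusing reed_hard_loss and reed_soft_loss from the snippets above; the beta defaults are the values commonly quoted for the method, not taken from this page:

from keras import backend as K
from keras.losses import categorical_crossentropy

def bootstrap_soft(y_true, y_pred, beta=0.95):
    # soft bootstrapping: -sum_k (beta * t_k + (1 - beta) * q_k) * log(q_k)
    return (beta * categorical_crossentropy(y_true, y_pred)
            + (1. - beta) * K.squeeze(reed_soft_loss(y_true, y_pred), axis=-1))

def bootstrap_hard(y_true, y_pred, beta=0.8):
    # hard bootstrapping: the most confident predicted class replaces part of the (possibly noisy) label
    return (beta * categorical_crossentropy(y_true, y_pred)
            + (1. - beta) * K.squeeze(reed_hard_loss(y_true, y_pred), axis=-1))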
Example #5
def jaccard_coef_loss(y_true, y_pred):
    # __author__ = Vladimir Iglovikov
    # log-Jaccard term plus binary crossentropy; Keras losses take (y_true, y_pred)
    return -K.log(jaccard_coef(y_true, y_pred)) + binary_crossentropy(y_true, y_pred)
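jaccard_coef is not shown on this page. A minimal sketch of a typical smooth (soft) Jaccard coefficient that would plug into the loss above; the smoothing constant and the global reduction over all axes are assumptions:

from keras import backend as K
from keras.losses import binary_crossentropy

def jaccard_coef(y_true, y_pred, smooth=1e-12):
    # soft intersection-over-union, computed over the whole batch
    intersection = K.sum(y_true * y_pred)
    union = K.sum(y_true) + K.sum(y_pred) - intersection
    return (intersection + smooth) / (union + smooth)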