import tensorflow as tf
from keras import backend as K

def focal_loss_fixed(y_true, y_pred):
    # cfg, alpha, and gamma are free variables captured from the
    # enclosing scope (see the factory sketch below).
    if cfg.num_classes > 1:
        # Multi-class: y_true is one-hot, so these pick out the predicted
        # probability of the true class and its matching alpha weight.
        pt = K.sum(y_true * y_pred, axis=-1)
        alpha_ = K.sum(y_true * alpha, axis=-1)
    else:
        # Binary: p for positives, 1 - p for negatives.
        pt = tf.where(K.equal(y_true, 1), y_pred, 1 - y_pred)
        alpha_ = tf.where(K.equal(y_true, 1),
                          alpha * K.ones_like(y_pred),
                          (1 - alpha) * K.ones_like(y_pred))
    # Focal loss: -alpha_t * (1 - p_t)^gamma * log(p_t).
    return -K.mean(alpha_ * K.pow(1. + K.epsilon() - pt, gamma) *
                   K.log(pt + K.epsilon()))
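The free variables above come from an enclosing factory. A minimal sketch of that wrapper, assuming scalar hyperparameters and the binary case (the name focal_loss and its default values are assumptions, not taken from the original repo):

import tensorflow as tf
from keras import backend as K

def focal_loss(alpha=0.25, gamma=2.0):
    """Bind the hyperparameters and return a Keras-compatible loss."""
    def loss(y_true, y_pred):
        # p_t: predicted probability assigned to the true class.
        pt = tf.where(K.equal(y_true, 1), y_pred, 1 - y_pred)
        alpha_ = tf.where(K.equal(y_true, 1),
                          alpha * K.ones_like(y_pred),
                          (1 - alpha) * K.ones_like(y_pred))
        return -K.mean(alpha_ * K.pow(1. - pt + K.epsilon(), gamma) *
                       K.log(pt + K.epsilon()))
    return loss

# Hypothetical usage:
# model.compile(optimizer='adam', loss=focal_loss(alpha=0.25, gamma=2.0))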
Example #2
    def _focal(y_true, y_pred):
        """ Compute the focal loss given the target tensor and the predicted tensor.

        As defined in https://arxiv.org/abs/1708.02002

        Args
            y_true: Tensor of target data from the generator with shape (B, N, num_classes + 1); the last value along the final axis is the anchor state.
            y_pred: Tensor of predicted data from the network with shape (B, N, num_classes).

        Returns
            The focal loss of y_pred w.r.t. y_true.
        """
        labels = y_true[:, :, :-1]
        anchor_state = y_true[:, :, -1]  # -1 for ignore, 0 for background, 1 for object
        classification = y_pred

        # filter out "ignore" anchors
        indices = backend.where(keras.backend.not_equal(anchor_state, -1))
        labels = backend.gather_nd(labels, indices)
        classification = backend.gather_nd(classification, indices)

        # compute the focal loss
        alpha_factor = keras.backend.ones_like(labels) * alpha
        alpha_factor = backend.where(keras.backend.equal(labels, 1),
                                     alpha_factor, 1 - alpha_factor)
        # focal weight: (1 - p)^gamma for positives, p^gamma for negatives
        focal_weight = backend.where(keras.backend.equal(labels, 1),
                                     1 - classification, classification)
        focal_weight = alpha_factor * focal_weight**gamma

        cls_loss = focal_weight * keras.backend.binary_crossentropy(
            labels, classification)

        # compute the normalizer: the number of positive anchors
        normalizer = backend.where(keras.backend.equal(anchor_state, 1))
        normalizer = keras.backend.cast(
            keras.backend.shape(normalizer)[0], keras.backend.floatx())
        normalizer = keras.backend.maximum(keras.backend.cast_to_floatx(1.0),
                                           normalizer)

        return keras.backend.sum(cls_loss) / normalizer
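Note the target layout this loss expects: the final channel of y_true carries the per-anchor state. A minimal sketch of assembling such a target tensor (shapes and values are purely illustrative):

import numpy as np

B, N, num_classes = 1, 4, 2
labels = np.zeros((B, N, num_classes), dtype=np.float32)
labels[0, 0, 1] = 1.0  # anchor 0 is a positive of class 1
# per-anchor state: 1 = object, 0 = background, -1 = ignore
state = np.array([[1, 0, 0, -1]], dtype=np.float32)[..., np.newaxis]
y_true = np.concatenate([labels, state], axis=-1)  # (B, N, num_classes + 1)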
Example #3
import tensorflow as tf
from keras import backend as K

# Despite the name, both variants compute a NaN-masked L1 (absolute-error) cost.
def mse(y_true, y_pred):
    # Variant 1: compute the cost, then zero out NaN positions.
    cost = K.abs(y_pred - y_true)
    cost = tf.where(tf.math.is_nan(cost), K.zeros_like(cost), cost)
    return K.sum(cost, axis=-1)

def mse(y_true, y_pred):
    # Variant 2: replace NaN targets with the predictions (zero error there).
    y_true = tf.where(tf.math.is_nan(y_true), y_pred, y_true)
    cost = K.abs(y_pred - y_true)
    return K.sum(cost, axis=-1)
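A quick eager-mode sanity check (TensorFlow 2.x assumed; the second definition above shadows the first, and both zero out the NaN column here):

import numpy as np

y_true = np.array([[1.0, np.nan, 3.0]], dtype=np.float32)
y_pred = np.array([[1.5, 2.0, 3.0]], dtype=np.float32)
print(mse(y_true, y_pred).numpy())  # [0.5]: only the non-NaN columns contribute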