Example #1
def FocalLoss(target, output):
    gamma = 5
    alpha = 0.3
    output = tfb.clip(output, tfb.epsilon(), 1 - tfb.epsilon())
    value = -alpha * target * tf.log(output + tfb.epsilon()) * tf.pow(
        1 - output, gamma) - (1 - alpha) * (1 - target) * tf.log(
            1 - output + tfb.epsilon()) * tf.pow(output, gamma)
    return tf.reduce_mean(value)
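The same formula written against plain NumPy can help when sanity-checking the effect of gamma and alpha; this is only an illustrative sketch (the function name and the sample values are placeholders, not from the original snippet):

import numpy as np

def focal_loss_numpy(y_true, y_prob, gamma=5, alpha=0.3, eps=1e-7):
    # Same binary focal loss as above, written on NumPy arrays.
    y_prob = np.clip(y_prob, eps, 1 - eps)
    pos = -alpha * y_true * np.log(y_prob + eps) * (1 - y_prob) ** gamma
    neg = -(1 - alpha) * (1 - y_true) * np.log(1 - y_prob + eps) * y_prob ** gamma
    return np.mean(pos + neg)

# A confidently correct positive (p = 0.95) contributes almost nothing,
# while a badly missed positive (p = 0.05) dominates: that is the point
# of the (1 - p)**gamma modulating factor.
print(focal_loss_numpy(np.array([1.0, 1.0]), np.array([0.95, 0.05])))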
Example #2
def get_f1(y_true, y_pred): #taken from old keras source code
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    recall = true_positives / (possible_positives + K.epsilon())
    f1_val = 2*(precision*recall)/(precision+recall+K.epsilon())
    return f1_val
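A NumPy sketch of the same batch-wise F1 (round the predictions, then count), useful for checking the Keras version against a small array; the values below are placeholders:

import numpy as np

def get_f1_numpy(y_true, y_pred, eps=1e-7):
    # Batch-wise F1 computed the same way: round predictions, then count.
    y_pred = np.round(np.clip(y_pred, 0, 1))
    tp = np.sum(y_true * y_pred)
    precision = tp / (np.sum(y_pred) + eps)
    recall = tp / (np.sum(y_true) + eps)
    return 2 * precision * recall / (precision + recall + eps)

print(get_f1_numpy(np.array([1, 0, 1, 1]), np.array([0.9, 0.2, 0.4, 0.8])))  # ~0.8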
Example #3
 def loss(y_true, y_pred):
     # scale predictions so that the class probas of each sample sum to 1
     y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
     # clip to prevent NaN's and Inf's
     y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
     # calc
     loss = y_true * K.log(y_pred) * weights
     loss = -K.sum(loss, -1)
     return loss
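This snippet uses a weights tensor that is not defined inside the function, which suggests it was meant to be created by a wrapping factory and captured by closure. A minimal sketch of that pattern (the factory name and the example weights are assumptions, not from the original):

import numpy as np
from keras import backend as K

def weighted_categorical_crossentropy(weights):
    # One scalar weight per class; the inner `loss` captures it by closure.
    weights = K.variable(np.asarray(weights, dtype='float32'))

    def loss(y_true, y_pred):
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        return -K.sum(y_true * K.log(y_pred) * weights, axis=-1)

    return loss

# model.compile(optimizer='adam',
#               loss=weighted_categorical_crossentropy([1.0, 2.0, 0.5]))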
Example #4
def weighted_BCE(y_true, y_pred):
    # scale predictions so that the class probas of each sample sum to 1
    #     weights = tfb.variable(1/np.array([0.07050923, 0.24034695, 0.19802742, 0.09862899, 0.16046447, 0.08317012, 0.10002798, 0.04882485]))
    weights = tfb.variable(np.array([1, 1, 1, 1, 1, 1, 1, 1]))
    y_pred /= tfb.sum(y_pred, axis=-1, keepdims=True)
    # clip to prevent NaN's and Inf's
    y_pred = tfb.clip(y_pred, tfb.epsilon(), 1 - tfb.epsilon())
    # calc
    loss = y_true * tfb.log(y_pred) * weights
    loss = -tfb.sum(loss, -1)
    return loss
Example #5
class focal_loss:

    """ A loss function similar to cross_entropy

        # Usage
            model.compile('sgd',loss=focal_loss.loss,.......)

        # Arguments
            class_weights : weights for each class, to address the class imbalance problem.
                            dtype --> array   default --> None
            pixel_weights : weights for each pixel, to segment certain parts of the image more clearly.
                            dtype --> array   default --> None
    """

    def c_weights(self, x):
        # Return the class-weight array when one is given; list(None) raises
        # TypeError, which is how "no weights" falls back to a neutral 1.
        try:
            if list(x) != None: return x
        except TypeError:
            return 1


    def p_weights(self, x):
        # Same fallback for the pixel weights: the given array, or 1 when None.
        try:
            if list(x) != None: return x
        except TypeError:
            return 1


    clipping = lambda self,x: K.clip(x, K.epsilon(), 1.-K.epsilon())


    def __init__(self,class_weights=None, pixel_weights=None, gamma=2):
        self.class_weights = class_weights
        self.gamma = gamma
        self.pixel_weights = pixel_weights

    def loss(self,y_true,y_pred):

        """ executes the focal loss

            # Arguments
                y_true : true class values
                y_pred : predicted class values from the model
            # Returns
                fl : mean focal loss for the given batch
         """
        y_pred = self.clipping(y_pred)
        fl = -(K.sum((self.c_weights(self.class_weights) * K.pow(1.-y_pred,self.gamma) * (y_true * K.log(y_pred))),axis=-1))
        fl = K.sum((self.p_weights(self.pixel_weights) * fl),axis=(1,2))
        fl = K.mean(fl, axis=0)
        return fl/1000                                   ## scaling down the loss to prevent gradient explosion
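A hedged instantiation sketch following the usage shown in the class docstring; the weight values and shapes below are placeholders (the loss sums class-weighted terms over the channel axis and pixel-weighted terms over axes 1 and 2, so class_weights is per channel and pixel_weights is per spatial position):

import numpy as np

# Placeholder weights: 3 classes, 128x128 images.
class_w = np.ones(3, dtype='float32')
pixel_w = np.ones((128, 128), dtype='float32')

fl = focal_loss(class_weights=class_w, pixel_weights=pixel_w, gamma=2)
# model.compile('sgd', loss=fl.loss)   # as shown in the class docstring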
Example #6
def f1_loss(y_true, y_pred):
    
    tp = K.sum(K.cast(y_true*y_pred, 'float'), axis=0)
    tn = K.sum(K.cast((1-y_true)*(1-y_pred), 'float'), axis=0)
    fp = K.sum(K.cast((1-y_true)*y_pred, 'float'), axis=0)
    fn = K.sum(K.cast(y_true*(1-y_pred), 'float'), axis=0)

    p = tp / (tp + fp + K.epsilon())
    r = tp / (tp + fn + K.epsilon())

    f1 = 2*p*r / (p+r+K.epsilon())
    f1 = tf.where(tf.is_nan(f1), tf.zeros_like(f1), f1)
    return 1 - K.mean(f1)
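Unlike the metric in Example #2, this version never rounds the predictions, so the counts stay fractional and the loss remains differentiable; a small NumPy illustration of the "soft" true-positive count (the values are placeholders):

import numpy as np

y_true = np.array([1.0, 0.0, 1.0])
y_pred = np.array([0.9, 0.2, 0.6])            # raw probabilities, never rounded

soft_tp = np.sum(y_true * y_pred)             # 1.5: fractional credit, differentiable
hard_tp = np.sum(y_true * np.round(y_pred))   # 2.0: what the rounded metric would count
print(soft_tp, hard_tp)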
Example #7
def weighted_binary_crossentropy2(target, output):
    """
    Weighted binary crossentropy between an output tensor
    and a target tensor. POS_WEIGHT is used as a multiplier
    for the positive targets.

    Combination of the following functions:
    * keras.losses.binary_crossentropy
    * keras.backend.tensorflow_backend.binary_crossentropy
    * tf.nn.weighted_cross_entropy_with_logits

    reference: https://stackoverflow.com/a/47313183/979377
    """
    # transform back to logits

    POS_WEIGHT = 10  # multiplier for positive targets, needs to be tuned

    _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)
    output = tf.log(output / (1 - output))
    # compute weighted loss
    loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,
                                                    logits=output,
                                                    pos_weight=POS_WEIGHT)
    return tf.reduce_mean(loss, axis=-1)
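The tf.log(output / (1 - output)) line is just the logit (the inverse of the sigmoid), which is why the clipped probabilities can then be handed to tf.nn.weighted_cross_entropy_with_logits; a quick NumPy check of that identity (the values are placeholders):

import numpy as np

p = np.array([0.1, 0.5, 0.9])
logits = np.log(p / (1 - p))           # logit: inverse of the sigmoid
back = 1.0 / (1.0 + np.exp(-logits))   # sigmoid recovers the original probabilities
print(np.allclose(back, p))            # True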
Example #8
    def call(self, x, mask=None):
        uit = dot_product(x, self.W)

        if self.bias:
            uit += self.b

        uit = K.tanh(uit)
        #ait = K.dot(uit, self.u)
        ait = dot_product(uit, self.u)
        a = K.exp(ait)

        # apply mask after the exp. will be re-normalized next
        if mask is not None:
            # Cast the mask to floatX to avoid float64 upcasting in theano
            a *= K.cast(mask, K.floatx())

        # in some cases especially in the early stages of training the sum may be almost zero
        # and this results in NaN's. A workaround is to add a very small positive number \epsilon to the sum.
        # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())
        a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())

        a = K.expand_dims(a)
        weighted_input = x * a
        #return K.sum(weighted_input, axis=1)
        print "here", weighted_input.shape
        return weighted_input
Example #9
def K_sparse_categorical_crossentropy(target,
                                      output,
                                      from_logits=False,
                                      axis=-1):
    output_dimensions = list(range(len(output.get_shape())))
    if axis != -1 and axis not in output_dimensions:
        raise ValueError('{}{}{}'.format(
            'Unexpected channels axis {}. '.format(axis),
            'Expected to be -1 or one of the axes of `output`, ',
            'which has {} dimensions.'.format(len(output.get_shape()))))
    # If the channels are not in the last axis, move them to be there:
    if axis != -1 and axis != output_dimensions[-1]:
        permutation = output_dimensions[:axis] + output_dimensions[axis + 1:]
        permutation += [axis]
        output = tf.transpose(output, perm=permutation)

    # Note: tf.nn.sparse_softmax_cross_entropy_with_logits
    # expects logits, Keras expects probabilities.
    if not from_logits:
        _epsilon = _to_tensor(epsilon(), output.dtype.base_dtype)
        output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)
        output = tf.log(output)

    output_shape = output.get_shape()
    targets = cast(flatten(target), 'int64')
    logits = tf.reshape(output, [-1, tf.shape(output)[-1]])
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                         logits=logits)
    if len(output_shape) >= 3:
        # if our output includes timestep dimension
        # or spatial dimensions we need to reshape
        return tf.reshape(res, tf.shape(output)[:-1])
    else:
        return res
Example #10
def custom_weighted_binary_crossentropy(targets,
                                        logits,
                                        pos_weight=weight_array,
                                        name=None):

    # transform back to logits
    _epsilon = tfb._to_tensor(tfb.epsilon(), logits.dtype.base_dtype)
    logits = tf.clip_by_value(logits, _epsilon, 1 - _epsilon)
    logits = tf.log(logits / (1 - logits))
    # compute weighted loss

    with ops.name_scope(name, "logistic_loss", [logits, targets]) as name:
        logits = ops.convert_to_tensor(logits, name="logits")
        targets = ops.convert_to_tensor(targets, name="targets")
        try:
            targets.get_shape().merge_with(logits.get_shape())
        except ValueError:
            raise ValueError(
                "logits and targets must have the same shape (%s vs %s)" %
                (logits.get_shape(), targets.get_shape()))

        loss = []
        for i in range(0, label_num - 1):
            log_weight = 1 + (pos_weight[i] - 1) * targets[i]
            loss_i = math_ops.add(
                (1 - targets[i]) * logits[i],
                log_weight *
                (math_ops.log1p(math_ops.exp(-math_ops.abs(logits[i]))) +
                 nn_ops.relu(-logits[i])),
                name=name)
            loss.append(loss_i)
        # Return once, outside the loop; otherwise only the first label's loss is used.
        return tf.reduce_mean(loss)
Example #11
def f1(y_true, y_pred):
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall

    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision

    precision = precision(y_true, y_pred)
    recall = recall(y_true, y_pred)
    return 2 * ((precision * recall) / (precision + recall + K.epsilon()))
Example #12
 def weighted_binary_crossentropy(target, output):
     POS_WEIGHT = 10
     _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)
     output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)
     output = tf.log(output / (1 - output))
     loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,
                                                     logits=output,
                                                     pos_weight=POS_WEIGHT)
     return tf.reduce_mean(loss, axis=-1)
Example #13
    def precision(y_true, y_pred):
        """Precision metric.

        Only computes a batch-wise average of precision.

        Computes the precision, a metric for multi-label classification of
        how many selected items are relevant.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
        precision = true_positives / (predicted_positives + K.epsilon())
        return precision
Example #14
    def recall(y_true, y_pred):
        """Recall metric.

        Only computes a batch-wise average of recall.

        Computes the recall, a metric for multi-label classification of
        how many relevant items are selected.
        """
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall
Example #15
def softmax_cross_entropy(target,
                          output,
                          from_logits=True,
                          axis=-1,
                          normalize=False):
    """ Compute Softmax cross entropy loss for sparse target.

    Args:
        target (tensor): Target label. If 2D, shape is (w, h).
        output (tensor): Logits or Probabilities. If 2D, shape is (w, h, ch).
        from_logits (bool, optional): logits or softmax outputs? Defaults to True.
        axis (int, optional): Specifying the channels axis. Defaults to -1.
        normalize (bool, optional): Normalize loss across all instances. Defaults to False.
    """
    _check_dtype(target, 'int32')
    _check_dtype(output, 'float32')

    output_dimensions = list(range(len(output.get_shape())))
    if axis != -1 and axis not in output_dimensions:
        raise ValueError('{}{}{}'.format(
            'Unexpected channels axis {}. '.format(axis),
            'Expected to be -1 or one of the axes of `output`, ',
            'which has {} dimensions.'.format(len(output.get_shape()))))

    # move the channels to be in the last axis:
    if axis != -1 and axis != output_dimensions[-1]:
        permutation = output_dimensions[:axis] + output_dimensions[axis + 1:]
        permutation += [axis]
        output = tf.transpose(output, perm=permutation)

    # convert to the logits
    if not from_logits:
        _epsilon = _to_tensor(epsilon(), output.dtype.base_dtype)
        output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)
        output = tf.log(output)  # NOTE: log(exp(x)) = x
    logits = output

    # softmax_cross_entropy
    output_shape = output.get_shape()
    targets = cast(tf.reshape(target,
                              tf.shape(output)[:-1]), 'int32')  # NOTE: cast...

    res = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                         logits=logits)

    # reduce
    if normalize:
        return tf.reduce_mean(res)
    else:
        return tf.reduce_sum(tf.reduce_mean(res, axis=0))  # only batch-axis
Example #16
def weighted_binary_crossentropy(target, output):
    """
    Weighted binary crossentropy between an output tensor 
    and a target tensor. POS_WEIGHT is used as a multiplier 
    for the positive targets.

    Combination of the following functions:
    * keras.losses.binary_crossentropy
    * keras.backend.tensorflow_backend.binary_crossentropy
    * tf.nn.weighted_cross_entropy_with_logits
    """
    # transform back to logits
    _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)
    output = tf.log(output / (1 - output))
    # compute weighted loss
    loss = tf.nn.weighted_cross_entropy_with_logits(targets=target,
                                                    logits=output,
                                                    pos_weight=true_weight)
    return tf.reduce_mean(loss, axis=-1)
Example #17
def binary_crossentropy_custom_tf(target, output, from_logits=True):
    """Binary crossentropy between an output tensor and a target tensor.

    # Arguments
        target: A tensor with the same shape as `output`.
        output: A tensor.
        from_logits: Whether `output` is expected to be a logits tensor.
            By default, we consider that `output`
            encodes a probability distribution.

    # Returns
        A tensor.
    """
    # Note: tf.nn.sigmoid_cross_entropy_with_logits
    # expects logits, Keras expects probabilities.
    if not from_logits:
        # transform back to logits
        _epsilon = tfb._to_tensor(tfb.epsilon(), output.dtype.base_dtype)
        output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)
        output = tf.log(output / (1 - output))

    return tf.nn.sigmoid_cross_entropy_with_logits(labels=target,
                                                   logits=output)
Example #18
 def kld(self, product_embeds1, product_embeds2):
     product_embeds2 = K.clip(product_embeds2, K.epsilon(), 1)
     product_embeds1 = K.clip(product_embeds1, K.epsilon(), 1)
     return K.sum(product_embeds1 * K.log(product_embeds1 / product_embeds2), axis=-1)
Example #19
 def kld(self, E, P):
     E = K.clip(E, K.epsilon(), 1)
     P = K.clip(P, K.epsilon(), 1)
     return K.sum(P * K.log(P / E), axis=-1)
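The same KL divergence written with NumPy, which can be cross-checked against scipy.stats.entropy(P, E) if SciPy is available (the two distributions below are placeholders):

import numpy as np

P = np.array([0.4, 0.6])
E = np.array([0.5, 0.5])
kl = np.sum(P * np.log(P / E))   # KL(P || E), same expression as the Keras snippet
print(kl)                        # ~0.02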
Example #20
def recall_m(y_true, y_pred):
        true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
        possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
        recall = true_positives / (possible_positives + K.epsilon())
        return recall
Example #21
def f1_m(y_true, y_pred):
    precision = precision_m(y_true, y_pred)
    recall = recall_m(y_true, y_pred)
    return 2*((precision*recall)/(precision+recall+K.epsilon()))
Example #22
def precision_m(y_true, y_pred):
    true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
    precision = true_positives / (predicted_positives + K.epsilon())
    return precision
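A hedged sketch of plugging precision_m, recall_m and f1_m (Examples #20 to #22) into model.compile as batch-wise metrics; the tiny model below is a placeholder, only there to show where the functions go:

from keras import models, layers

# Placeholder binary classifier over 4 input features.
model = models.Sequential([
    layers.Dense(1, activation='sigmoid', input_shape=(4,))
])
model.compile(optimizer='adam',
              loss='binary_crossentropy',
              metrics=['accuracy', precision_m, recall_m, f1_m])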