import tensorflow as tf
from keras import backend as K


def focal_loss_fixed(y_true, y_pred):
    eps = 1e-6
    alpha = 0.5
    gamma = 2.0  # assumed value; in the original, gamma is captured from the enclosing scope
    # clip to improve the numerical stability of the focal loss
    # (see issue #1 for more information)
    y_pred = K.clip(y_pred, eps, 1. - eps)
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    # reduce both terms with K.mean so the result is a single scalar
    # (the original mixed K.mean with K.sum(axis=-1), which broadcasts
    # the scalar against a per-sample tensor)
    return -K.mean(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
        - K.mean((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
Example #2
from keras import backend as K


def loss(y_true, y_pred):
    # `weights` (per-class weights) is a free variable, expected to be
    # captured from the enclosing scope (see the factory sketch below)
    # scale predictions so that the class probabilities of each sample sum to 1
    y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
    # clip to prevent NaNs and Infs in the log
    y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
    # weighted categorical cross-entropy
    loss = y_true * K.log(y_pred) * weights
    loss = -K.sum(loss, -1)
    return loss
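Because `weights` is a free variable, this function is typically returned by a factory that captures the per-class weights in a closure; a minimal sketch (the factory name is an assumption):

import numpy as np


def make_weighted_categorical_crossentropy(class_weights):
    # Hypothetical factory: `weights` is captured in a closure so the inner
    # function matches Keras' (y_true, y_pred) loss signature.
    weights = K.variable(np.asarray(class_weights, dtype='float32'))

    def loss(y_true, y_pred):
        y_pred /= K.sum(y_pred, axis=-1, keepdims=True)
        y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())
        return -K.sum(y_true * K.log(y_pred) * weights, -1)

    return loss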
Example #3
    def mean_log_Gaussian_like(self, y_true, parameters):
        """Mean log-likelihood of y_true under a Gaussian mixture.

        `parameters` packs, per mixture component: `self.c` means, `self.c`
        sigmas, and one mixing coefficient. `ktf` is the Keras backend.
        """
        # reshape the flat parameter vector to (batch, 2*c + 1, n components);
        # the hard-coded 9 in the original is replaced by self.c for clarity
        components = ktf.reshape(parameters, [-1, 2 * self.c + 1, self.n_classes])

        mu = components[:, :self.c, :]
        sigma = components[:, self.c:2 * self.c, :]
        alpha = components[:, 2 * self.c, :]

        # renormalize the mixing coefficients; the clip keeps log(alpha) finite
        alpha = ktf.softmax(ktf.clip(alpha, 1e-8, 1.))

        exponent = ktf.log(alpha) - .5 * float(self.c) * ktf.log(2 * np.pi) \
            - ktf.sum(ktf.log(sigma), axis=1) \
            - ktf.sum((ktf.expand_dims(y_true, 2) - mu) ** 2 / (2 * sigma ** 2), axis=1)

        # log-sum-exp over the mixture components (see Example #10)
        log_gauss = log_sum_exp(exponent, axis=1)
        res = -ktf.mean(log_gauss)
        return res
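For reference, `parameters` is expected to be flat per sample: `self.c` means, `self.c` sigmas, and one mixing coefficient for each of the `self.n_classes` mixture components. A hedged NumPy shape check (the dimension values are assumptions):

import numpy as np

c, n_components = 9, 5                   # assumed dimensions
flat_dim = (2 * c + 1) * n_components    # mu, sigma, and alpha per component
params = np.zeros((32, flat_dim), dtype='float32')  # dummy batch of 32
# mirrors the reshape in mean_log_Gaussian_like:
components = params.reshape(-1, 2 * c + 1, n_components)
assert components.shape == (32, 2 * c + 1, n_components)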
Example #4
def weighted_BCE(y_true, y_pred):
    # despite the name, this is a weighted categorical cross-entropy;
    # `tfb` is assumed to be the Keras backend
    # weights = tfb.variable(1 / np.array([0.07050923, 0.24034695, 0.19802742,
    #     0.09862899, 0.16046447, 0.08317012, 0.10002798, 0.04882485]))
    weights = tfb.variable(np.array([1, 1, 1, 1, 1, 1, 1, 1]))
    # scale predictions so that the class probabilities of each sample sum to 1
    y_pred /= tfb.sum(y_pred, axis=-1, keepdims=True)
    # clip to prevent NaNs and Infs in the log
    y_pred = tfb.clip(y_pred, tfb.epsilon(), 1 - tfb.epsilon())
    loss = y_true * tfb.log(y_pred) * weights
    loss = -tfb.sum(loss, -1)
    return loss
Example #5
    def loss(self, y_true, y_pred):
        """Computes the weighted categorical cross-entropy.

            # Arguments
                y_true : true class values
                y_pred : predicted class values from the model
            # Returns
                ce : mean cross-entropy for the given batch
        """
        y_pred = super().clipping(y_pred)
        ce = -(K.sum((super().c_weights(self.class_weights) *
                      (y_true * K.log(y_pred))), axis=-1))
        ce = K.sum((super().p_weights(self.pixel_weights) * ce), axis=(1, 2))
        ce = K.mean(ce, axis=0)
        # scale down the loss to prevent gradient explosion
        return ce / 1000

Example #6
    def loss(self, y_true, y_pred):
        """Computes the focal loss.

            # Arguments
                y_true : true class values
                y_pred : predicted class values from the model
            # Returns
                fl : mean focal loss for the given batch
        """
        y_pred = self.clipping(y_pred)
        fl = -(K.sum(
            (self.c_weights(self.class_weights) *
             K.pow(1. - y_pred, self.gamma) * (y_true * K.log(y_pred))),
            axis=-1))
        fl = K.sum((self.p_weights(self.pixel_weights) * fl), axis=(1, 2))
        fl = K.mean(fl, axis=0)
        return fl / 100
Example #7
    def kld(self, product_embeds1, product_embeds2):
        # Kullback-Leibler divergence KL(p1 || p2); both inputs are clipped
        # away from zero so the log and the ratio stay finite
        product_embeds2 = K.clip(product_embeds2, K.epsilon(), 1)
        product_embeds1 = K.clip(product_embeds1, K.epsilon(), 1)
        return K.sum(product_embeds1 * K.log(product_embeds1 / product_embeds2),
                     axis=-1)
Example #8
    def kld(self, E, P):
        # KL(P || E) between two distributions, with both clipped for stability
        E = K.clip(E, K.epsilon(), 1)
        P = K.clip(P, K.epsilon(), 1)
        return K.sum(P * K.log(P / E), axis=-1)
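A quick sanity check of the formula on fixed distributions, assuming the Keras backend is imported as `K`; KL(P || E) is non-negative and zero when the distributions match:

import numpy as np

P = K.constant(np.array([[0.7, 0.2, 0.1]]))
E = K.constant(np.array([[0.5, 0.3, 0.2]]))
# inline the body of kld: KL(P || E) = sum(P * log(P / E))
print(K.eval(K.sum(P * K.log(P / E), axis=-1)))  # small positive value
print(K.eval(K.sum(P * K.log(P / P), axis=-1)))  # 0.0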
Example #9
    def yolo_loss(self,
                  args,
                  anchors,
                  num_classes,
                  ignore_thresh=.5,
                  print_loss=False):
        '''Return yolo_loss tensor

        Parameters
        ----------
        yolo_outputs: list of tensors, the output of yolo_body or tiny_yolo_body
        y_true: list of arrays, the output of preprocess_true_boxes
        anchors: array, shape=(N, 2), wh
        num_classes: integer
        ignore_thresh: float, IoU threshold below which a predicted box is
            ignored in the object confidence loss

        Returns
        -------
        loss: tensor, shape=(1,)

        '''

        num_layers = len(anchors) // 3  # default setting

        yolo_outputs = args[:num_layers]
        y_true = args[num_layers:]

        anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 \
            else [[3, 4, 5], [1, 2, 3]]
        input_shape = K.cast(
            K.shape(yolo_outputs[0])[1:3] * 32, K.dtype(y_true[0]))
        grid_shapes = [
            K.cast(K.shape(yolo_outputs[l])[1:3], K.dtype(y_true[0]))
            for l in range(num_layers)
        ]
        loss = 0
        m = K.shape(yolo_outputs[0])[0]  # batch size, tensor
        mf = K.cast(m, K.dtype(yolo_outputs[0]))

        for l in range(num_layers):
            object_mask = y_true[l][..., 4:5]
            true_class_probs = y_true[l][..., 5:]

            grid, raw_pred, pred_xy, pred_wh = self.yolo_head(
                yolo_outputs[l],
                anchors[anchor_mask[l]],
                num_classes,
                input_shape,
                calc_loss=True)
            pred_box = K.concatenate([pred_xy, pred_wh])

            # Darknet raw box to calculate loss.
            raw_true_xy = y_true[l][..., :2] * grid_shapes[l][::-1] - grid
            raw_true_wh = K.log(y_true[l][..., 2:4] / anchors[anchor_mask[l]] *
                                input_shape[::-1])
            raw_true_wh = K.switch(
                object_mask, raw_true_wh,
                K.zeros_like(raw_true_wh))  # avoid log(0)=-inf
            box_loss_scale = 2 - y_true[l][..., 2:3] * y_true[l][..., 3:4]

            # Find ignore mask, iterate over each of batch.
            ignore_mask = tf.TensorArray(K.dtype(y_true[0]),
                                         size=1,
                                         dynamic_size=True)
            object_mask_bool = K.cast(object_mask, 'bool')

            def loop_body(b, ignore_mask):
                true_box = tf.boolean_mask(y_true[l][b, ..., 0:4],
                                           object_mask_bool[b, ..., 0])
                iou = box_iou(pred_box[b], true_box)
                best_iou = K.max(iou, axis=-1)
                ignore_mask = ignore_mask.write(
                    b, K.cast(best_iou < ignore_thresh, K.dtype(true_box)))
                return b + 1, ignore_mask

            # tf.while_loop is the public equivalent of the internal
            # K.control_flow_ops alias used in the original snippet
            _, ignore_mask = tf.while_loop(lambda b, *args: b < m, loop_body,
                                           [0, ignore_mask])
            ignore_mask = ignore_mask.stack()
            ignore_mask = K.expand_dims(ignore_mask, -1)

            # K.binary_crossentropy is helpful to avoid exp overflow.
            xy_loss = object_mask * box_loss_scale * K.binary_crossentropy(
                raw_true_xy, raw_pred[..., 0:2], from_logits=True)
            wh_loss = object_mask * box_loss_scale * 0.5 * K.square(
                raw_true_wh - raw_pred[..., 2:4])
            confidence_loss = object_mask * K.binary_crossentropy(
                object_mask, raw_pred[..., 4:5], from_logits=True) \
                + (1 - object_mask) * K.binary_crossentropy(
                    object_mask, raw_pred[..., 4:5], from_logits=True) * ignore_mask
            class_loss = object_mask * K.binary_crossentropy(
                true_class_probs, raw_pred[..., 5:], from_logits=True)

            xy_loss = K.sum(xy_loss) / mf
            wh_loss = K.sum(wh_loss) / mf
            confidence_loss = K.sum(confidence_loss) / mf
            class_loss = K.sum(class_loss) / mf
            loss += xy_loss + wh_loss + confidence_loss + class_loss
            if print_loss:
                loss = tf.Print(loss, [
                    loss, xy_loss, wh_loss, confidence_loss, class_loss,
                    K.sum(ignore_mask)
                ],
                                message='loss: ')
        return loss
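In the keras-yolo3 training script this loss is wired into the graph as a Lambda layer over the model outputs followed by the y_true placeholders, matching the `args` slicing above; a sketch of that pattern, where `yolo` (an instance of the class defining yolo_loss), `model_body`, `y_true`, `anchors`, and `num_classes` are assumed to exist:

from keras.layers import Lambda

# Hypothetical wiring following the keras-yolo3 pattern: the `args`
# received by yolo_loss is [*model_body.output, *y_true].
model_loss = Lambda(
    yolo.yolo_loss,
    output_shape=(1,),
    name='yolo_loss',
    arguments={'anchors': anchors, 'num_classes': num_classes,
               'ignore_thresh': 0.5},
)([*model_body.output, *y_true])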
Example #10
def log_sum_exp(x, axis=None):
    """Numerically stable log-sum-exp: shift by the max before
    exponentiating so that exp never overflows."""
    x_max = ktf.max(x, axis=axis, keepdims=True)
    return ktf.log(ktf.sum(ktf.exp(x - x_max), axis=axis, keepdims=True)) + x_max
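A hedged numerical check, assuming `ktf` is the Keras backend: on large inputs the naive formulation overflows to inf while the shifted version stays finite:

import numpy as np

x = ktf.constant(np.array([[1000.0, 1000.0]]))
print(ktf.eval(log_sum_exp(x, axis=-1)))                # ~1000.6931, finite
print(ktf.eval(ktf.log(ktf.sum(ktf.exp(x), axis=-1))))  # inf: exp overflows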
Example #11
def log_eps(i):
    # log with a small additive epsilon to avoid log(0)
    return K.log(i + 1e-11)
Example #12
def focal_loss(y_true, y_pred):
    gamma = 2.0
    alpha = 0.25
    # clip for numerical stability, as in Example #1; otherwise K.log
    # yields -inf when y_pred reaches exactly 0 or 1
    y_pred = K.clip(y_pred, K.epsilon(), 1. - K.epsilon())
    pt_1 = tf.where(tf.equal(y_true, 1), y_pred, tf.ones_like(y_pred))
    pt_0 = tf.where(tf.equal(y_true, 0), y_pred, tf.zeros_like(y_pred))
    return -K.sum(alpha * K.pow(1. - pt_1, gamma) * K.log(pt_1)) \
        - K.sum((1 - alpha) * K.pow(pt_0, gamma) * K.log(1. - pt_0))
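A quick sanity check, assuming the same `tf`/`K` imports as Example #1: a confident correct prediction should incur a much smaller focal loss than a confident wrong one:

import numpy as np

y_true = K.constant(np.array([[1.0, 0.0]]))
good = K.constant(np.array([[0.9, 0.1]]))  # confident, correct
bad = K.constant(np.array([[0.1, 0.9]]))   # confident, wrong
print(K.eval(focal_loss(y_true, good)))    # near zero
print(K.eval(focal_loss(y_true, bad)))     # much larger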