Example no. 1
from keras import backend as K


def IOU_calc(y_true, y_pred):
    iou_smooth = 1.  # smoothing constant (value assumed); keeps the ratio defined for empty masks
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)

    return 2 * (intersection + iou_smooth) / (K.sum(y_true_f) +
                                              K.sum(y_pred_f) + iou_smooth)
Example no. 2
from keras import backend as K


def dice_coef(y_true, y_pred):
    smooth = 1e-7
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    # A smaller smoothing term (smooth * 0.01) is used in the numerator.
    return (2. * intersection + smooth * 0.01) / (K.sum(y_true_f) +
                                                  K.sum(y_pred_f) + smooth)
Example no. 3
from keras import backend as K


def dice_coeff(y_true, y_pred):
    smooth = 0.001
    y_true = K.flatten(y_true)
    y_pred = K.flatten(y_pred)
    intersection = K.sum(y_true * y_pred)
    return (2. * intersection + smooth) / (K.sum(y_true) + K.sum(y_pred) +
                                           smooth)
Example no. 4
from keras import backend as K


def dice_coef(y_true, y_pred, smooth=1e-3):
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return K.mean(
        (2.0 * intersection + smooth)
        / (K.sum(y_true_f) + K.sum(y_pred_f) + smooth)
    )
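A minimal usage sketch (not part of the example): `model` below is a hypothetical Keras segmentation model; `dice_coef` can be passed to `compile` as a metric, and a Dice loss can be derived from it.

def dice_loss(y_true, y_pred):
    # Minimizing 1 - Dice maximizes the overlap measured by dice_coef above.
    return 1. - dice_coef(y_true, y_pred)


# `model` is an assumed, pre-built Keras model with a sigmoid mask output.
model.compile(optimizer='adam', loss=dice_loss, metrics=[dice_coef])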
Example no. 5
 def __metrics_base(self, y_true, y_pred):
     """ Base for all the metrics defined below """
     y_true, y_pred = K.flatten(tf.math.argmax(y_true, axis=-1)), K.flatten(
         tf.math.argmax(y_pred, axis=-1))
     con_mat = K.cast(tf.math.confusion_matrix(y_true, y_pred), K.floatx())
     correct = tf.linalg.diag_part(con_mat)
     total = K.sum(con_mat, axis=-1)
     return correct, total, con_mat
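A hedged sketch of a metric that could sit on top of `__metrics_base`; the `pixel_accuracy` method below is an assumption for illustration, not part of the example.

 def pixel_accuracy(self, y_true, y_pred):
     # Hypothetical derived metric: correctly classified pixels over all pixels.
     correct, total, _ = self.__metrics_base(y_true, y_pred)
     return K.sum(correct) / (K.sum(total) + K.epsilon())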
Example no. 6
from keras import backend as K


def dice_coef(y_true, y_pred, smooth=1e-7):  # smoothing constant; default value assumed
    y_true_f = K.flatten(y_true)
    y_pred_f = K.flatten(y_pred)
    intersection = K.sum(y_true_f * y_pred_f)
    return ((2. * intersection + smooth) /
            (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))
Example no. 7
from keras import backend as K


def dice_axon(y_true, y_pred, smooth=1e-3):
    """
    Computes the pixel-wise Dice coefficient for the axon class from the prediction tensor output by the network.
    :param y_true: Tensor, the gold standard we work with. Shape (N, H, W, C).
    :param y_pred: Tensor, the prediction output by the network. Shape (N, H, W, C).
    :return: Dice axon coefficient for the current batch.
    """

    y_true_f = K.flatten(y_true[..., 2])
    y_pred_f = K.flatten(y_pred[..., 2])
    intersection = K.sum(y_true_f * y_pred_f)
    return K.mean((2. * intersection + smooth) /
                  (K.sum(y_true_f) + K.sum(y_pred_f) + smooth))
Example no. 8
from keras import backend as K


def yolo_head(graph, feats, anchors, num_classes):
    """Convert final-layer features to bounding-box parameters."""
    with graph.as_default():
        num_anchors = len(anchors)
        anchors_tensor = K.reshape(K.variable(anchors),
                                   [1, 1, 1, num_anchors, 2])

        conv_dims = K.shape(feats)[1:3]
        conv_height_index = K.arange(0, stop=conv_dims[0])
        conv_width_index = K.arange(0, stop=conv_dims[1])
        conv_height_index = K.tile(conv_height_index, [conv_dims[1]])

        conv_width_index = K.tile(K.expand_dims(conv_width_index, 0),
                                  [conv_dims[0], 1])
        conv_width_index = K.flatten(K.transpose(conv_width_index))
        conv_index = K.transpose(K.stack([conv_height_index,
                                          conv_width_index]))
        conv_index = K.reshape(conv_index,
                               [1, conv_dims[0], conv_dims[1], 1, 2])
        conv_index = K.cast(conv_index, K.dtype(feats))

        feats = K.reshape(
            feats,
            [-1, conv_dims[0], conv_dims[1], num_anchors, num_classes + 5])
        conv_dims = K.cast(K.reshape(conv_dims, [1, 1, 1, 1, 2]),
                           K.dtype(feats))

        box_xy = K.sigmoid(feats[..., :2])
        box_wh = K.exp(feats[..., 2:4])
        box_confidence = K.sigmoid(feats[..., 4:5])
        box_class_probs = K.softmax(feats[..., 5:])

        box_xy = (box_xy + conv_index) / conv_dims
        box_wh = box_wh * anchors_tensor / conv_dims

        return box_xy, box_wh, box_confidence, box_class_probs
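A hedged usage sketch for `yolo_head`; the anchor values, `graph`, `model`, and the class count below are hypothetical stand-ins.

# Hypothetical anchors and model; `model.output` is the raw detection feature map.
anchors = [(1.08, 1.19), (3.42, 4.41), (6.63, 11.38),
           (9.42, 5.11), (16.62, 10.52)]
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(
    graph, model.output, anchors, num_classes=20)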
Example no. 9
import tensorflow as tf
from keras.backend import epsilon, cast, flatten
# _to_tensor is a private helper of the Keras 2.x TensorFlow backend (assumed here).
from keras.backend.tensorflow_backend import _to_tensor


def K_sparse_categorical_crossentropy(target,
                                      output,
                                      from_logits=False,
                                      axis=-1):
    output_dimensions = list(range(len(output.get_shape())))
    if axis != -1 and axis not in output_dimensions:
        raise ValueError('{}{}{}'.format(
            'Unexpected channels axis {}. '.format(axis),
            'Expected to be -1 or one of the axes of `output`, ',
            'which has {} dimensions.'.format(len(output.get_shape()))))
    # If the channels are not in the last axis, move them to be there:
    if axis != -1 and axis != output_dimensions[-1]:
        permutation = output_dimensions[:axis] + output_dimensions[axis + 1:]
        permutation += [axis]
        output = tf.transpose(output, perm=permutation)

    # Note: tf.nn.sparse_softmax_cross_entropy_with_logits
    # expects logits, Keras expects probabilities.
    if not from_logits:
        _epsilon = _to_tensor(epsilon(), output.dtype.base_dtype)
        output = tf.clip_by_value(output, _epsilon, 1 - _epsilon)
        output = tf.log(output)

    output_shape = output.get_shape()
    targets = cast(flatten(target), 'int64')
    logits = tf.reshape(output, [-1, tf.shape(output)[-1]])
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=targets,
                                                         logits=logits)
    if len(output_shape) >= 3:
        # if our output includes timestep dimension
        # or spatial dimensions we need to reshape
        return tf.reshape(res, tf.shape(output)[:-1])
    else:
        return res
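A minimal call sketch, reusing the imports and function above (the shapes are assumptions): `output` holds per-pixel class probabilities, `target` holds integer class ids with one fewer axis, so the returned loss keeps the spatial shape.

# Hypothetical tensors: 2 images, 4x4 pixels, 3 classes.
output = tf.fill([2, 4, 4, 3], 1. / 3.)       # uniform class probabilities
target = tf.zeros([2, 4, 4], dtype=tf.int64)  # every pixel labelled class 0
loss = K_sparse_categorical_crossentropy(target, output)  # shape (2, 4, 4)
mean_loss = tf.reduce_mean(loss)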
Example no. 10
 def dice_coef(self, y_true, y_pred):
     y_true_f = ktf.flatten(y_true)
     y_pred_f = ktf.flatten(y_pred)
     intersection = ktf.sum(y_true_f * y_pred_f)
     return ((2. * intersection + self.smooth) /
             (ktf.sum(y_true_f) + ktf.sum(y_pred_f) + self.smooth))
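A hedged sketch of the class context this method excerpt implies; the class name, the default `smooth` value, and the `ktf` import alias are assumptions.

from keras import backend as ktf


class SegmentationMetrics:
    def __init__(self, smooth=1.):
        # `self.smooth` is the smoothing constant read by dice_coef above.
        self.smooth = smooth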