Code example #1
def prepare_simple_model(input_tensor, loss_name, target):
  axis = 1 if K.image_data_format() == 'channels_first' else -1
  loss = None
  num_channels = None
  activation = None
  if loss_name == 'sparse_categorical_crossentropy':
    loss = lambda y_true, y_pred: K.sparse_categorical_crossentropy(  # pylint: disable=g-long-lambda
        y_true, y_pred, axis=axis)
    num_channels = np.amax(target) + 1
    activation = 'softmax'
  elif loss_name == 'categorical_crossentropy':
    loss = lambda y_true, y_pred: K.categorical_crossentropy(  # pylint: disable=g-long-lambda
        y_true, y_pred, axis=axis)
    num_channels = target.shape[axis]
    activation = 'softmax'
  elif loss_name == 'binary_crossentropy':
    loss = lambda y_true, y_pred: K.binary_crossentropy(y_true, y_pred)  # pylint: disable=unnecessary-lambda
    num_channels = target.shape[axis]
    activation = 'sigmoid'
  predictions = Conv2D(num_channels,
                       1,
                       activation=activation,
                       kernel_initializer='ones',
                       bias_initializer='ones')(input_tensor)
  simple_model = keras.models.Model(inputs=input_tensor,
                                    outputs=predictions)
  simple_model.compile(optimizer='rmsprop', loss=loss)
  return simple_model
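A minimal usage sketch for the factory above, assuming TensorFlow 2.x and the imports the snippet relies on (keras, K = keras.backend, Conv2D, np = numpy); the shapes and the target array are illustrative assumptions:

import numpy as np
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Conv2D, Input

# Sparse integer labels per pixel; num_channels becomes np.amax(target) + 1.
target = np.random.randint(0, 3, size=(4, 8, 8, 1))
model = prepare_simple_model(Input(shape=(8, 8, 5)),
                             'sparse_categorical_crossentropy', target)
model.fit(np.random.rand(4, 8, 8, 5), target, epochs=1, verbose=0)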
Code example #2
def sparse_categorical_crossentropy(y_true,
                                    y_pred,
                                    from_logits=False,
                                    axis=-1):
    return K.sparse_categorical_crossentropy(y_true,
                                             y_pred,
                                             from_logits=from_logits,
                                             axis=axis)
Code example #3
    def loss(y_true, y_pred):
        y_true = K.cast(y_true, K.floatx())
        # mask: 1 where the label differs from mask_value, 0 at padded positions
        mask = K.equal(y_true, mask_value)
        mask = 1 - K.cast(mask, K.floatx())
        y_true = y_true * mask

        # zero the loss at padded positions, then average over the rest
        loss = K.sparse_categorical_crossentropy(y_true, y_pred) * mask
        return K.sum(loss) / K.sum(mask)
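Because the loss above closes over mask_value, it is usually produced by a factory and handed to compile; a minimal sketch of that wrapper (the name masked_sparse_crossentropy is an illustrative assumption, not from the original source):

from tensorflow.keras import backend as K

def masked_sparse_crossentropy(mask_value=0):
    # Hypothetical factory; returns the closure defined above.
    def loss(y_true, y_pred):
        y_true = K.cast(y_true, K.floatx())
        mask = 1 - K.cast(K.equal(y_true, mask_value), K.floatx())
        loss = K.sparse_categorical_crossentropy(y_true * mask, y_pred) * mask
        return K.sum(loss) / K.sum(mask)
    return loss

# model.compile(optimizer='adam', loss=masked_sparse_crossentropy(0))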
Code example #4
def loss_function(real, pred):
    mask = tf.math.logical_not(tf.math.equal(real, 0))
    loss_ = sparse_categorical_crossentropy(real, pred, from_logits=True)
    # loss_ = tf.keras.losses.sparse_categorical_crossentropy(real, pred, from_logits=True)

    mask = tf.cast(mask, dtype=loss_.dtype)
    loss_ *= mask

    # average over the real (non-padding) positions only
    return tf.reduce_sum(loss_) / tf.reduce_sum(mask)
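A quick eager-mode check of the masked loss on toy tensors, assuming the sparse_categorical_crossentropy wrapper from Code example #2 is in scope; the padding id 0 and the shapes are illustrative:

import tensorflow as tf

real = tf.constant([[3, 1, 0, 0]])        # trailing zeros are padding
pred = tf.random.normal((1, 4, 5))        # logits over 5 classes per step
print(loss_function(real, pred).numpy())  # padded steps carry no gradient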
Code example #5
    def yolo_loss(y_true, y_pred):
        # 1. transform all pred outputs
        # y_pred: (batch_size, grid, grid, anchors, (x, y, w, h, obj, ...cls))
        pred_box, pred_obj, pred_class, pred_xywh = yolo_boxes(
            y_pred, anchors, classes)
        pred_xy = pred_xywh[..., 0:2]
        pred_wh = pred_xywh[..., 2:4]

        # 2. transform all true outputs
        # y_true: (batch_size, grid, grid, anchors, (x1, y1, x2, y2, obj, cls))
        true_box, true_obj, true_class_idx = tf.split(y_true, (4, 1, 1),
                                                      axis=-1)
        true_xy = (true_box[..., 0:2] + true_box[..., 2:4]) / 2
        true_wh = true_box[..., 2:4] - true_box[..., 0:2]

        # give higher weights to small boxes
        box_loss_scale = 2 - true_wh[..., 0] * true_wh[..., 1]

        # 3. inverting the pred box equations
        grid_size = tf.shape(y_true)[1]
        grid = tf.meshgrid(tf.range(grid_size), tf.range(grid_size))
        grid = tf.expand_dims(tf.stack(grid, axis=-1), axis=2)
        true_xy = true_xy * tf.cast(grid_size, tf.float32) - \
            tf.cast(grid, tf.float32)
        true_wh = tf.math.log(true_wh / anchors)
        true_wh = tf.where(tf.math.is_inf(true_wh), tf.zeros_like(true_wh),
                           true_wh)

        # 4. calculate all masks
        obj_mask = tf.squeeze(true_obj, -1)
        # ignore false positive when iou is over threshold
        true_box_flat = tf.boolean_mask(true_box, tf.cast(obj_mask, tf.bool))
        best_iou = tf.reduce_max(broadcast_iou(pred_box, true_box_flat),
                                 axis=-1)
        ignore_mask = tf.cast(best_iou < ignore_thresh, tf.float32)

        # 5. calculate all losses
        xy_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_xy - pred_xy), axis=-1)
        wh_loss = obj_mask * box_loss_scale * \
            tf.reduce_sum(tf.square(true_wh - pred_wh), axis=-1)
        obj_loss = binary_crossentropy(true_obj, pred_obj)
        obj_loss = obj_mask * obj_loss + \
            (1 - obj_mask) * ignore_mask * obj_loss
        # TODO: use binary_crossentropy instead
        class_loss = obj_mask * sparse_categorical_crossentropy(
            true_class_idx, pred_class)

        # 6. sum over (batch, gridx, gridy, anchors) => (batch, 1)
        xy_loss = tf.reduce_sum(xy_loss, axis=(1, 2, 3))
        wh_loss = tf.reduce_sum(wh_loss, axis=(1, 2, 3))
        obj_loss = tf.reduce_sum(obj_loss, axis=(1, 2, 3))
        class_loss = tf.reduce_sum(class_loss, axis=(1, 2, 3))

        return xy_loss + wh_loss + obj_loss + class_loss
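Since yolo_loss closes over anchors, classes, and ignore_thresh (and calls the helpers yolo_boxes, broadcast_iou, binary_crossentropy, and sparse_categorical_crossentropy), it is typically wrapped in a per-scale factory; a sketch of that pattern, assuming a yolov3-tf2-style layout where anchor_masks selects each scale's anchors:

def YoloLoss(anchors, classes=80, ignore_thresh=0.5):
    def yolo_loss(y_true, y_pred):
        ...  # body as in Code example #5
    return yolo_loss

# One loss instance per detection scale, matched to that scale's anchors:
# model.compile(optimizer='adam',
#               loss=[YoloLoss(anchors[m], classes=classes)
#                     for m in anchor_masks])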
Code example #6
    def __call__(self, y_true, y_pred):
        y_true_val = y_true[:, :, 0]
        mask = y_true[:, :, 1]

        # masked per-sample means of each loss
        num_items_masked = K.sum(mask, axis=-1) + 1e-6
        masked_cross_entropy = (
            K.sum(mask * K.sparse_categorical_crossentropy(y_true_val, y_pred),
                  axis=-1) / num_items_masked)
        masked_entropy = (
            K.sum(mask * -K.sum(y_pred * K.log(y_pred), axis=-1), axis=-1) /
            num_items_masked)
        return masked_cross_entropy - self.penalty_weight * masked_entropy
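This __call__ belongs to a loss object that packs sparse labels and a 0/1 mask into the last axis of y_true and keeps the entropy-penalty coefficient on self; a minimal skeleton of such a class (the class name is an illustrative assumption):

class MaskedPenalizedSparseCrossentropy:
    """y_true[:, :, 0] holds sparse labels, y_true[:, :, 1] holds the mask."""

    def __init__(self, penalty_weight=0.1):
        self.penalty_weight = penalty_weight

    # __call__ as defined in Code example #6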
Code example #7
    def call(self, inputs, **kwargs):
        y_true = inputs[0]
        y_pred = inputs[1]

        y_true = K.cast(y_true, tf.int32)
        blank_mask = tf.not_equal(y_true, tf.cast(self.blank_value, tf.int32))

        y_true_초성, y_true_중성, y_true_종성 = JamoDeCompose()(y_true)
        y_pred_초성, y_pred_중성, y_pred_종성 = tf.split(
            y_pred,
            [len(초성) + 1, len(중성) + 1, len(종성) + 1], axis=-1)

        mask = tf.cast(blank_mask, dtype=K.floatx())
        loss_초성 = K.sparse_categorical_crossentropy(y_true_초성,
                                                    y_pred_초성) * mask
        loss_중성 = K.sparse_categorical_crossentropy(y_true_중성,
                                                    y_pred_중성) * mask
        loss_종성 = K.sparse_categorical_crossentropy(y_true_종성,
                                                    y_pred_종성) * mask

        mask = K.sum(mask, axis=1)
        loss_jamo = K.sum(loss_초성 + loss_중성 + loss_종성, axis=1)
        return loss_jamo / mask
Code example #8
File: losses.py Project: zhanggc14/tensorflow
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
  """Computes the sparse categorical crossentropy loss.

  Args:
    y_true: Ground truth values.
    y_pred: The predicted values.
    from_logits: Whether `y_pred` is expected to be a logits tensor. By default,
      we assume that `y_pred` encodes a probability distribution.
    axis: (Optional) Defaults to -1. The dimension along which the entropy is
      computed.

  Returns:
    Sparse categorical crossentropy loss value.
  """
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.sparse_categorical_crossentropy(
      y_true, y_pred, from_logits=from_logits, axis=axis)
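The from_logits flag is the main pitfall here: raw logits with the default from_logits=False silently yield the wrong loss. A small eager-mode check that the two call modes agree when used consistently (values illustrative):

import tensorflow as tf

y_true = tf.constant([1, 2])
logits = tf.constant([[0.5, 1.5, 0.1], [2.0, 0.3, 0.8]])

a = tf.keras.losses.sparse_categorical_crossentropy(
    y_true, logits, from_logits=True)
b = tf.keras.losses.sparse_categorical_crossentropy(
    y_true, tf.nn.softmax(logits))
print(a.numpy(), b.numpy())  # approximately equal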
Code example #9
File: Losses.py Project: princefr/Multi3DPoseRCNN
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and Negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
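A toy eager-mode run, with shapes as documented above; the K.switch guard returns 0.0 when every anchor is neutral and the gathered loss is empty:

import tensorflow as tf

rpn_match = tf.constant([[[1], [-1], [0]]])  # one positive, one negative, one neutral
rpn_logits = tf.random.normal((1, 3, 2))     # BG/FG logits per anchor
print(rpn_class_loss_graph(rpn_match, rpn_logits).numpy())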
Code example #10
def sparse_categorical_crossentropy(y_true, y_pred):
  return K.sparse_categorical_crossentropy(y_true, y_pred)
Code example #11
File: losses.py Project: aeverall/tensorflow
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False):
  return K.sparse_categorical_crossentropy(
      y_true, y_pred, from_logits=from_logits)
Code example #12
def perplexity(y_true, y_pred):
    cross_entropy = K.mean(K.sparse_categorical_crossentropy(y_true, y_pred))
    perplexity = K.exp(cross_entropy)
    return perplexity
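Because perplexity is just exp of the mean cross entropy, the function drops straight into compile as a metric alongside the matching loss; a minimal sketch (model assumed):

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=[perplexity])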
Code example #13
def sparse_categorical_crossentropy(y_true, y_pred, from_logits=False, axis=-1):
  y_pred = ops.convert_to_tensor(y_pred)
  y_true = math_ops.cast(y_true, y_pred.dtype)
  return K.sparse_categorical_crossentropy(
      y_true, y_pred, from_logits=from_logits, axis=axis)
Code example #14
def myCrossEntropy(y_true, y_pred, e=0.3):
    # Per-class weights (percentages); the remainder weights the true-label
    # loss, so the coefficients sum to 100.  Note: `e` is unused.
    class_weights = [5.765, 1.359, 1.000, 1.348, 1.554, 1.995,
                     3.042, 6.347, 10.431, 17.632]
    loss = (100.0 - sum(class_weights)) * K.sparse_categorical_crossentropy(
        y_true, y_pred)
    for label, weight in enumerate(class_weights):
        loss += weight * K.sparse_categorical_crossentropy(
            K.ones_like(y_true) * label, y_pred)
    return loss
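Usage is the same as for any custom Keras loss; note that Keras calls the function with only (y_true, y_pred), so e keeps its default:

model.compile(optimizer='adam', loss=myCrossEntropy)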