Example #1
  def __call__(self, y_true, y_pred, sample_weight=None):
    """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values.
      y_pred: The predicted values.
      sample_weight: Optional `Tensor` whose rank is either 0, or the same rank
        as `y_true`, or is broadcastable to `y_true`. `sample_weight` acts as a
        coefficient for the loss. If a scalar is provided, then the loss is
        simply scaled by the given value. If `sample_weight` is a tensor of size
        `[batch_size]`, then the total loss for each sample of the batch is
        rescaled by the corresponding element in the `sample_weight` vector. If
        the shape of `sample_weight` matches the shape of `y_pred`, then the
        loss of each measurable element of `y_pred` is scaled by the
        corresponding value of `sample_weight`.

    Returns:
      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has the same
        shape as `y_true`; otherwise, it is scalar.

    Raises:
      ValueError: If the shape of `sample_weight` is invalid.
    """
    # If we are wrapping a lambda function, strip '<>' from the name as it is
    # not accepted in scope names.
    scope_name = 'lambda' if self.name == '<lambda>' else self.name
    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
        y_true, y_pred, sample_weight)
    with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
      losses = self.call(y_true, y_pred)
      return losses_utils.compute_weighted_loss(
          losses, sample_weight, reduction=self._get_reduction())
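For context, a minimal usage sketch of this `__call__` path through the public `tf.keras` API (the loss class and tensors below are illustrative, not from the source):

import tensorflow as tf

# A scalar sample_weight scales the reduced loss; a [batch_size] vector
# rescales each sample's loss before reduction.
mse = tf.keras.losses.MeanSquaredError()
y_true = tf.constant([[0.0, 1.0], [1.0, 0.0]])
y_pred = tf.constant([[0.1, 0.9], [0.4, 0.6]])

print(mse(y_true, y_pred))                     # scalar loss, no weighting
print(mse(y_true, y_pred, sample_weight=2.0))  # scaled by 2
print(mse(y_true, y_pred, sample_weight=tf.constant([1.0, 0.0])))  # per sample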
Example #2
    def __call__(self, y_true, y_pred, sample_weight=None):
        """Invokes the `Loss` instance.

    Args:
      y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`
      y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`
      sample_weight: Optional `sample_weight` acts as a
        coefficient for the loss. If a scalar is provided, then the loss is
        simply scaled by the given value. If `sample_weight` is a tensor of size
        `[batch_size]`, then the total loss for each sample of the batch is
        rescaled by the corresponding element in the `sample_weight` vector. If
        the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
        broadcasted to this shape), then each loss element of `y_pred` is scaled
        by the corresponding value of `sample_weight`. (Note on`dN-1`: all loss
        functions reduce by 1 dimension, usually axis=-1.)

    Returns:
      Weighted loss float `Tensor`. If `reduction` is `NONE`, this has
        shape `[batch_size, d0, .. dN-1]`; otherwise, it is scalar. (Note `dN-1`
        because all loss functions reduce by 1 dimension, usually axis=-1.)

    Raises:
      ValueError: If the shape of `sample_weight` is invalid.
    """
        # If we are wrapping a lambda function, strip '<>' from the name as it
        # is not accepted in scope names.
        scope_name = 'lambda' if self.name == '<lambda>' else self.name
        graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
            y_true, y_pred, sample_weight)
        with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
            losses = self.call(y_true, y_pred)
            return losses_utils.compute_weighted_loss(
                losses, sample_weight, reduction=self._get_reduction())
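A short sketch of the `dN-1` shape behaviour described in the docstring (hedged: it uses the public `tf.keras` API rather than this internal code, and the shapes are illustrative):

import tensorflow as tf

# With Reduction.NONE the per-element loss keeps shape [batch_size, d0, .. dN-1]:
# the last axis is consumed by the loss function itself.
cce = tf.keras.losses.CategoricalCrossentropy(
    reduction=tf.keras.losses.Reduction.NONE)
y_true = tf.one_hot([0, 2, 1], depth=3)          # shape [3, 3]
y_pred = tf.nn.softmax(tf.random.normal([3, 3]))
print(cce(y_true, y_pred).shape)                 # (3,) -- one loss per sample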
Example #3
    def decorated(metric_obj, *args, **kwargs):
        """Decorated function with `add_update()`."""

        with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
            update_op = update_state_fn(*args, **kwargs)
        if update_op is not None:  # update_op will be None in eager execution.
            metric_obj.add_update(update_op, inputs=True)
        return update_op
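The snippet above is the inner function of a decorator; a hypothetical self-contained reconstruction of the overall pattern might look like the following (it omits the internal `tf_utils.graph_context_for_symbolic_tensors` context manager and assumes the wrapped function is an unbound `update_state` method):

import functools

def update_state_wrapper(update_state_fn):
  """Hypothetical reconstruction of the decorator around the code above.

  In graph mode the wrapped `update_state` returns an op that must be
  tracked via `add_update()`; in eager mode it returns None.
  """
  @functools.wraps(update_state_fn)
  def decorated(metric_obj, *args, **kwargs):
    # The real code additionally enters
    # tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs) here.
    update_op = update_state_fn(metric_obj, *args, **kwargs)
    if update_op is not None:  # None under eager execution.
      metric_obj.add_update(update_op)
    return update_op
  return decorated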
Example #4
  def __call__(self, y_true, y_pred, w, sample_weight=None):
    scope_name = 'lambda' if self.name == '<lambda>' else self.name
    graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
        y_true, y_pred, w, sample_weight)
    with K.name_scope(scope_name or self.__class__.__name__), graph_ctx:
      losses = self.call(y_true, y_pred, w)
      return losses_utils.compute_weighted_loss(
          losses, sample_weight, reduction=self._get_reduction())
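A hedged sketch of how a three-argument loss like this could be written against public TensorFlow ops; the class name and `call` body are illustrative, not from the source project:

import tensorflow as tf

class ElementWeightedMSE:
  """Hypothetical loss whose call() takes an extra tensor `w`, mirroring the
  three-argument `__call__` above."""

  def call(self, y_true, y_pred, w):
    # Per-element squared error, scaled by the extra weight tensor `w`.
    return tf.reduce_mean(w * tf.math.squared_difference(y_true, y_pred))

loss = ElementWeightedMSE()
y_true = tf.zeros([2, 3])
y_pred = tf.ones([2, 3])
w = tf.constant([[1.0, 0.0, 1.0], [0.5, 0.5, 0.5]])
print(loss.call(y_true, y_pred, w))  # scalar weighted loss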
Example #5
    def __call__(self, y_true, y_pred, sample_weight=None):
        """
        https://github.com/richardaecn/class-balanced-loss
        :param y_true: batch_size x 512 x 512 x 8
        :param y_pred: batch_size x 512 x 512 x 7
        :param sample_weight:
        :return:
        """
        scope_name = 'lambda' if self.name == '<lambda>' else self.name
        graph_ctx = tf_utils.graph_context_for_symbolic_tensors(
            y_true, y_pred, sample_weight)
        with tf.name_scope(scope_name or self.__class__.__name__), graph_ctx:
            invalid_pixels = tf.expand_dims(tf.cast(y_true[:, :, :, -1], tf.bool), -1)  # Boolean mask of invalid pixels
            n_valid_pixels = tf.size(invalid_pixels) - tf.reduce_sum(tf.cast(invalid_pixels, tf.int32))
            mean_factor = tf.cast(tf.size(invalid_pixels), tf.float32) / tf.cast(n_valid_pixels, tf.float32)

            # Calculate loss value
            y_true_mask = tf.cast(y_true[:, :, :, :-1], tf.float32)

            ref_vol = tf.reduce_sum(tf.cast(y_true[:, :, :, :-1], tf.float32), axis=[0, 1, 2])  # Out: n_classes
            n_valid_class = tf.math.count_nonzero(ref_vol, dtype=tf.float32)
            
            # tf.print("DEBUG: ", n_valid_class == 7)

            effective_num = 1.0 - tf.math.pow(self.beta, ref_vol)       # Out: n_classes
            weight = (1.0 - self.beta) * tf.math.reciprocal(effective_num)   # Out: n_classes

            weight = tf.where(tf.math.is_inf(weight), tf.zeros_like(weight), weight)    # Out: n_classes
            alpha = weight / tf.reduce_sum(weight) * n_valid_class    # Out: n_classes
            alpha = tf.expand_dims(tf.expand_dims(tf.expand_dims(alpha, axis=0), axis=0), axis=0)  # Out: 1 x 1 x 1 x n_classes
            alpha = tf.multiply(alpha, y_true_mask)
            
            y_true_mask = y_true_mask / tf.cast(tf.reduce_sum(y_true, axis=-1, keepdims=True), tf.float32)
            y_pred_mask = tf.nn.softmax(y_pred, axis=-1)
            cross_entropy = -tf.multiply(y_true_mask, tf.math.log(y_pred_mask))

            # A numerically stable implementation of modulator.
            if self.gamma == 0.0:
                modulator = 1.0
            else:
                modulator = tf.exp(
                    -self.gamma * y_true_mask * y_pred_mask
                    - self.gamma * tf.math.log1p(tf.math.exp(-1.0 * y_pred_mask)))

            weighted_loss = tf.reduce_sum(tf.multiply(alpha, modulator * cross_entropy), axis=-1)  # Out: batch_size x 512 x 512
            focal_loss = mean_factor * tf.reduce_mean(weighted_loss)

            return focal_loss
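For reference, a standalone sketch of the class-balanced weighting used above (after https://github.com/richardaecn/class-balanced-loss); the sample counts are made up for illustration:

import tensorflow as tf

# Each class weight is (1 - beta) / (1 - beta ** n_c), then normalized so the
# weights sum to the number of classes, exactly as `alpha` is built above.
beta = 0.999
samples_per_class = tf.constant([5000.0, 500.0, 50.0])
effective_num = 1.0 - tf.math.pow(beta, samples_per_class)
weights = (1.0 - beta) * tf.math.reciprocal(effective_num)
weights = weights / tf.reduce_sum(weights) * tf.cast(tf.size(weights), tf.float32)
print(weights)  # rarer classes get larger weights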
Example #6
  def decorated(metric_obj, *args, **kwargs):
    """Decorated function with `add_update()`."""
    strategy = distribution_strategy_context.get_strategy()

    for weight in metric_obj.weights:
      if (backend.is_tpu_strategy(strategy) and
          not strategy.extended.variable_created_in_scope(weight)
          and not distribution_strategy_context.in_cross_replica_context()):
        raise ValueError(
            'Trying to run metric.update_state in replica context when '
            'the metric was not created in TPUStrategy scope. '
            'Make sure the Keras Metric is created in TPUStrategy scope.')

    with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
      update_op = update_state_fn(*args, **kwargs)
    if update_op is not None:  # update_op will be None in eager execution.
      metric_obj.add_update(update_op)
    return update_op
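The check above fires when a metric's variables were created outside the `TPUStrategy` scope; a hedged sketch of the usual fix, shown here with whatever default strategy is active, is to construct the metric inside `strategy.scope()`:

import tensorflow as tf

# Variables created inside strategy.scope() are created by the strategy, so
# the variable_created_in_scope() check above passes. get_strategy() returns
# the current strategy (a stand-in for a real TPUStrategy in this sketch).
strategy = tf.distribute.get_strategy()
with strategy.scope():
  accuracy = tf.keras.metrics.BinaryAccuracy()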
Example #7
  def decorated(metric_obj, *args, **kwargs):
    """Decorated function with `add_update()`."""
    strategy = distribution_strategy_context.get_strategy()
    # TODO(b/142574744): Remove this check if a better solution is found for
    # declaring keras Metric outside of TPUStrategy and then updating it per
    # replica.

    for weight in metric_obj.weights:
      if (tpu.is_tpu_strategy(strategy) and
          not strategy.extended.variable_created_in_scope(weight)
          and not distribution_strategy_context.in_cross_replica_context()):
        raise ValueError(
            'Trying to run metric.update_state in replica context when '
            'the metric was not created in TPUStrategy scope. '
            'Make sure the Keras Metric is created in TPUStrategy scope.')

    with tf_utils.graph_context_for_symbolic_tensors(*args, **kwargs):
      update_op = update_state_fn(*args, **kwargs)
    if update_op is not None:  # update_op will be None in eager execution.
      metric_obj.add_update(update_op)
    return update_op