Example #1
    def build_losses(self,
                     labels,
                     model_outputs,
                     aux_losses=None) -> tf.Tensor:
        """Interface to compute losses. Refer to base_task.Task.build_losses."""
        del labels  # Unused: ranking labels are derived from in-batch positions.

        left_logits = model_outputs['left_logits']
        right_logits = model_outputs['right_logits']

        batch_size = tf_utils.get_shape_list(left_logits, name='batch_size')[0]

        # The i-th left example's positive is the i-th right example, so the
        # target class for row i is simply i.
        ranking_labels = tf.range(batch_size)

        loss = tf_utils.safe_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=ranking_labels, logits=left_logits))

        if self.task_config.model.bidirectional:
            right_rank_loss = tf_utils.safe_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    labels=ranking_labels, logits=right_logits))

            loss += right_rank_loss
        return tf.reduce_mean(loss)
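These snippets appear to come from the TensorFlow Model Garden, where `tf_utils.safe_mean` averages a loss tensor without producing NaN on empty input. A minimal sketch of that helper, assuming the Model Garden convention of returning zero when the tensor has no elements:

import tensorflow as tf

def safe_mean(losses):
    """Mean of `losses`; returns 0 instead of NaN when `losses` is empty."""
    total = tf.reduce_sum(losses)
    num_elements = tf.cast(tf.size(losses), dtype=losses.dtype)
    return tf.math.divide_no_nan(total, num_elements)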
Example #2
    def build_losses(self, labels, model_outputs, aux_losses=None):
        """Sparse categorical cross entropy loss.

    Args:
      labels: labels.
      model_outputs: Output logits of the classifier.
      aux_losses: auxiliarly loss tensors, i.e. `losses` in keras.Model.

    Returns:
      The total loss tensor.
    """
        losses_config = self.task_config.losses
        if losses_config.one_hot:
            total_loss = tf.keras.losses.categorical_crossentropy(
                labels,
                model_outputs,
                from_logits=True,
                label_smoothing=losses_config.label_smoothing)
        else:
            total_loss = tf.keras.losses.sparse_categorical_crossentropy(
                labels, model_outputs, from_logits=True)

        total_loss = tf_utils.safe_mean(total_loss)
        if aux_losses:
            total_loss += tf.add_n(aux_losses)

        return total_loss
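A quick way to sanity-check the one-hot branch above, using dummy labels and logits (shapes and values here are illustrative only):

import tensorflow as tf

batch_size, num_classes = 4, 10
labels = tf.one_hot([1, 3, 5, 7], depth=num_classes)  # one-hot ground truth
logits = tf.random.normal([batch_size, num_classes])  # raw classifier outputs

per_example = tf.keras.losses.categorical_crossentropy(
    labels, logits, from_logits=True, label_smoothing=0.1)
print(per_example.shape)  # (4,): one loss per example, averaged by safe_mean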
Example #3
    def build_losses(self, labels, model_outputs, aux_losses=None):
        """Sigmoid Cross Entropy.

    Args:
      labels: tensor containing truth labels.
      model_outputs: output logits of the classifier.
      aux_losses: tensor containing auxiliarly loss tensors, i.e. `losses` in
        keras.Model.

    Returns:
      Tensors: The total loss, model loss tensors.
    """
        losses_config = self.task_config.losses
        model_loss = tf.keras.losses.binary_crossentropy(
            labels,
            model_outputs,
            from_logits=losses_config.from_logits,
            label_smoothing=losses_config.label_smoothing)

        model_loss = tf_utils.safe_mean(model_loss)
        total_loss = model_loss
        if aux_losses:
            total_loss += tf.add_n(aux_losses)

        return total_loss, model_loss
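Example #3 returns both the total loss and the model-only loss, which makes it easy to log regularization separately. A toy check of the binary cross entropy call, assuming `from_logits=True` in the config (values are made up):

import tensorflow as tf

labels = tf.constant([[1.], [0.]])
logits = tf.constant([[2.0], [-1.0]])
model_loss = tf.keras.losses.binary_crossentropy(
    labels, logits, from_logits=True, label_smoothing=0.0)
print(model_loss.shape)  # (2,): per-example loss, averaged over the last axis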
Example #4
    def _compute_top_k_loss(self, loss):
        """Computs top k loss."""
        batch_size = tf.shape(loss)[0]
        loss = tf.reshape(loss, shape=[batch_size, -1])

        top_k_pixels = tf.cast(self._top_k_percent_pixels *
                               tf.cast(tf.shape(loss)[-1], dtype=tf.float32),
                               dtype=tf.int32)

        # shape: [batch_size, top_k_pixels]
        per_sample_top_k_loss = tf.map_fn(
            fn=lambda x: tf.nn.top_k(x, k=top_k_pixels, sorted=False)[0],
            elems=loss,
            parallel_iterations=32,
            fn_output_signature=tf.float32)

        # shape: [batch_size]
        per_sample_normalizer = tf.reduce_sum(tf.cast(
            tf.not_equal(per_sample_top_k_loss, 0.0), dtype=tf.float32),
                                              axis=-1) + EPSILON
        per_sample_normalized_loss = tf.reduce_sum(
            per_sample_top_k_loss, axis=-1) / per_sample_normalizer

        normalized_loss = tf_utils.safe_mean(per_sample_normalized_loss)
        return normalized_loss
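The helper above implements hard-pixel mining: each sample keeps only its largest `top_k_percent_pixels` fraction of per-pixel losses and averages over the non-zero survivors. A toy illustration of the `tf.nn.top_k` step it relies on (values are made up):

import tensorflow as tf

per_pixel_loss = tf.constant([[0.1, 0.9, 0.3, 0.7],
                              [0.2, 0.8, 0.4, 0.6]])  # [batch, num_pixels]
k = 2  # e.g. top_k_percent_pixels = 0.5 with 4 pixels per sample
top_k = tf.map_fn(
    lambda x: tf.nn.top_k(x, k=k, sorted=False)[0],
    per_pixel_loss)
print(top_k.numpy())  # rows hold {0.9, 0.7} and {0.8, 0.6}, order unspecified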
Example #5
    def build_losses(self,
                     labels,
                     model_outputs,
                     aux_losses=None) -> tf.Tensor:
        loss = tf.keras.losses.sparse_categorical_crossentropy(
            labels, tf.cast(model_outputs, tf.float32), from_logits=True)
        return tf_utils.safe_mean(loss)
Example #6
  def build_losses(self, labels, model_outputs, aux_losses=None) -> tf.Tensor:
    if self.task_config.model.num_classes == 1:
      loss = tf.keras.losses.mean_squared_error(labels, model_outputs)
    else:
      loss = tf.keras.losses.sparse_categorical_crossentropy(
          labels, tf.cast(model_outputs, tf.float32), from_logits=True)

    if aux_losses:
      loss += tf.add_n(aux_losses)
    return tf_utils.safe_mean(loss)
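Example #6 treats `num_classes == 1` as regression and everything else as classification. A toy check of the regression branch (dummy values):

import tensorflow as tf

labels = tf.constant([[0.5], [1.2]])         # continuous targets
model_outputs = tf.constant([[0.4], [1.0]])  # single regression output
loss = tf.keras.losses.mean_squared_error(labels, model_outputs)
print(loss.shape)  # (2,): one squared error per example, then safe_mean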
Example #7
    def __call__(self, logits, labels, sample_weight=None):
        _, height, width, _ = labels.get_shape().as_list()
        logits = mask_ops.resize_and_rescale_offsets(
            logits, target_size=[height, width])

        loss = self._loss_fn(y_true=labels, y_pred=logits)

        if sample_weight is not None:
            loss *= sample_weight

        return tf_utils.safe_mean(loss)
Example #8
    def __call__(self, logits, labels, sample_weight=None):
        _, height, width, _ = labels.get_shape().as_list()
        logits = tf.image.resize(logits,
                                 size=[height, width],
                                 method=tf.image.ResizeMethod.BILINEAR)

        loss = self._loss_fn(y_true=labels, y_pred=logits)

        if sample_weight is not None:
            loss *= sample_weight

        return tf_utils.safe_mean(loss)
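Examples #7 and #8 upsample coarse logits to the label resolution before computing the loss, so the loss is evaluated per full-resolution pixel. A standalone sketch of the bilinear resize step with made-up shapes:

import tensorflow as tf

logits = tf.random.normal([2, 16, 16, 21])  # coarse segmentation logits
labels = tf.zeros([2, 64, 64, 1])           # full-resolution ground truth
_, height, width, _ = labels.get_shape().as_list()
resized = tf.image.resize(logits, size=[height, width],
                          method=tf.image.ResizeMethod.BILINEAR)
print(resized.shape)  # (2, 64, 64, 21)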
Example #9
    def build_losses(self,
                     labels,
                     model_outputs,
                     aux_losses=None) -> tf.Tensor:
        ranking_loss = tfr_losses.get(
            loss=self.task_config.loss,
            reduction=self.task_config.loss_reduction)
        loss = ranking_loss(tf.cast(labels, tf.float32),
                            tf.cast(model_outputs, tf.float32))
        if aux_losses:
            loss += tf.add_n(aux_losses)
        return tf_utils.safe_mean(loss)
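`tfr_losses` in Example #9 matches the `get(loss=..., reduction=...)` factory in TensorFlow Ranking's Keras losses; treating it as `tfr.keras.losses` is an assumption, and the loss key below is illustrative:

import tensorflow as tf
import tensorflow_ranking as tfr  # assumption: tfr_losses is tfr.keras.losses

ranking_loss = tfr.keras.losses.get(
    loss='softmax_loss',  # illustrative ranking loss key
    reduction=tf.keras.losses.Reduction.NONE)

labels = tf.constant([[1., 0., 2.]])     # per-item relevance within a list
scores = tf.constant([[0.3, 0.1, 0.8]])  # predicted per-item scores
per_list_loss = ranking_loss(labels, scores)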
Example #10
    def build_losses(self,
                     labels: Any,
                     model_outputs: Any,
                     aux_losses: Optional[Any] = None):
        """Sparse categorical cross entropy loss.

    Args:
      labels: labels.
      model_outputs: Output logits of the classifier.
      aux_losses: auxiliarly loss tensors, i.e. `losses` in keras.Model.

    Returns:
      The total loss tensor.
    """
        all_losses = {}
        losses_config = self.task_config.losses
        total_loss = None
        if self._is_multilabel():
            entropy = -tf.reduce_mean(
                tf.reduce_sum(
                    model_outputs * tf.math.log(model_outputs + 1e-8), -1))
            total_loss = tf.keras.losses.binary_crossentropy(labels,
                                                             model_outputs,
                                                             from_logits=False)
            all_losses.update({
                'class_loss': total_loss,
                'entropy': entropy,
            })
        else:
            if losses_config.one_hot:
                total_loss = tf.keras.losses.categorical_crossentropy(
                    labels,
                    model_outputs,
                    from_logits=False,
                    label_smoothing=losses_config.label_smoothing)
            else:
                total_loss = tf.keras.losses.sparse_categorical_crossentropy(
                    labels, model_outputs, from_logits=False)

            total_loss = tf_utils.safe_mean(total_loss)
            all_losses.update({
                'class_loss': total_loss,
            })
        if aux_losses:
            all_losses.update({
                'reg_loss': aux_losses,
            })
            total_loss += tf.add_n(aux_losses)
        all_losses[self.loss] = total_loss

        return all_losses
Example #11
    def build_losses(self,
                     labels: tf.Tensor,
                     model_outputs: tf.Tensor,
                     aux_losses: Optional[Any] = None) -> tf.Tensor:
        """Builds sparse categorical cross entropy loss.

    Args:
      labels: Input groundtruth labels.
      model_outputs: Output logits of the classifier.
      aux_losses: The auxiliarly loss tensors, i.e. `losses` in tf.keras.Model.

    Returns:
      The total loss tensor.
    """
        losses_config = self.task_config.losses
        is_multilabel = self.task_config.train_data.is_multilabel

        if not is_multilabel:
            if losses_config.one_hot:
                total_loss = tf.keras.losses.categorical_crossentropy(
                    labels,
                    model_outputs,
                    from_logits=True,
                    label_smoothing=losses_config.label_smoothing)
            elif losses_config.soft_labels:
                total_loss = tf.nn.softmax_cross_entropy_with_logits(
                    labels, model_outputs)
            else:
                total_loss = tf.keras.losses.sparse_categorical_crossentropy(
                    labels, model_outputs, from_logits=True)
        else:
            # Multi-label weighted binary cross entropy loss.
            total_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                labels=labels, logits=model_outputs)
            total_loss = tf.reduce_sum(total_loss, axis=-1)

        total_loss = tf_utils.safe_mean(total_loss)
        if aux_losses:
            total_loss += tf.add_n(aux_losses)

        total_loss = losses_config.loss_weight * total_loss
        return total_loss
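In the multi-label branch of Example #11, each class is an independent binary decision, so per-class sigmoid cross entropies are summed over classes before averaging over the batch. A toy check with dummy values:

import tensorflow as tf

labels = tf.constant([[1., 0., 1.],
                      [0., 1., 0.]])  # multi-hot targets
logits = tf.constant([[2.0, -1.0, 0.5],
                      [-2.0, 3.0, -1.0]])
per_class = tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits)
per_example = tf.reduce_sum(per_class, axis=-1)  # shape [2], then safe_mean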
Example #12
    def build_losses(self,
                     labels: tf.Tensor,
                     model_outputs: tf.Tensor,
                     aux_losses: Optional[Any] = None) -> tf.Tensor:
        """Builds losses for training and validation.

    Args:
      labels: Input groundtruth labels.
      model_outputs: Output of the model.
      aux_losses: The auxiliarly loss tensors, i.e. `losses` in tf.keras.Model.

    Returns:
      The total loss tensor.
    """
        total_loss = tf.keras.losses.sparse_categorical_crossentropy(
            labels, model_outputs, from_logits=True)
        total_loss = tf_utils.safe_mean(total_loss)

        if aux_losses:
            total_loss += tf.add_n(aux_losses)

        return total_loss
Example #13
    def __call__(self, predicted_scores, logits, labels):
        actual_scores = get_actual_mask_scores(logits, labels,
                                               self._ignore_label)
        loss = tf_utils.safe_mean(
            self._mse_loss(actual_scores, predicted_scores))
        return loss