Example #1
    def __init__(self,
                 reduction=tf.losses.Reduction.AUTO,
                 name=None,
                 lambda_weight=None):
        super(SoftmaxLoss, self).__init__(reduction, name)
        self._loss = losses_impl.SoftmaxLoss(name='{}_impl'.format(name),
                                             lambda_weight=lambda_weight)
Example #2
    def __init__(self,
                 reduction=tf.losses.Reduction.AUTO,
                 name=None,
                 lambda_weight=None,
                 temperature=1.0,
                 ragged=False):
        """Softmax cross-entropy loss.

    Args:
      reduction: (Optional) The `tf.keras.losses.Reduction` to use (see
        `tf.keras.losses.Loss`).
      name: (Optional) The name for the op.
      lambda_weight: (Optional) A lambdaweight to apply to the loss. Can be one
        of `tfr.keras.losses.DCGLambdaWeight`,
        `tfr.keras.losses.NDCGLambdaWeight`, or,
        `tfr.keras.losses.PrecisionLambdaWeight`.
      temperature: (Optional) The temperature to use for scaling the logits.
      ragged: (Optional) If True, this loss will accept ragged tensors. If
        False, this loss will accept dense tensors.
    """
        super().__init__(reduction, name, lambda_weight, temperature, ragged)
        self._loss = losses_impl.SoftmaxLoss(
            name='{}_impl'.format(name) if name else None,
            lambda_weight=lambda_weight,
            temperature=temperature,
            ragged=ragged)
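
A minimal usage sketch for the Keras wrapper above, assuming the standard `import tensorflow_ranking as tfr` alias; labels and logits follow the `[batch_size, list_size]` convention used in the other examples:

import tensorflow as tf
import tensorflow_ranking as tfr

# Two lists of three items each; labels are graded relevance, logits are scores.
y_true = tf.constant([[0., 0., 1.], [0., 0., 2.]])
y_pred = tf.constant([[1., 3., 2.], [1., 2., 3.]])

loss = tfr.keras.losses.SoftmaxLoss()
print(loss(y_true, y_pred).numpy())  # scalar batch loss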
Example #3
    def test_softmax_loss_with_invalid_labels(self):
        with tf.Graph().as_default():
            scores = [[1., 3., 2.]]
            labels = [[0., -1., 1.]]
            reduction = tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS

            with self.cached_session():
                loss_fn = losses_impl.SoftmaxLoss(name=None)
                self.assertAlmostEqual(loss_fn.compute(labels, scores, None,
                                                       reduction).eval(),
                                       -(math.log(_softmax([1, 2])[1])),
                                       places=5)
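
The tests in these examples rely on a `_softmax` helper that is not shown in the snippets; a minimal sketch of what it presumably computes, namely a plain-Python softmax used to build the expected values:

import math

def _softmax(values):
  """Returns the softmax of a list of floats, computed in plain Python."""
  exps = [math.exp(v) for v in values]
  total = sum(exps)
  return [e / total for e in exps]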
Example #4
    def test_softmax_compute_per_list(self):
        with tf.Graph().as_default():
            scores = [[1., 3., 2.], [1., 2., 3.]]
            labels = [[0., 0., 1.], [0., 0., 2.]]
            per_item_weights = [[2., 3., 4.], [1., 1., 1.]]

            with self.cached_session():
                loss_fn = losses_impl.SoftmaxLoss(name=None)
                losses, weights = loss_fn.compute_per_list(
                    labels, scores, per_item_weights)
                losses, weights = losses.eval(), weights.eval()

            self.assertAllClose(losses, [1.407606, 0.407606])
            self.assertAllClose(weights, [4., 2.])
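
A quick numeric check of the per-list losses asserted above, assuming each list's loss is the negative log-likelihood of its relevant item under a softmax over that list's scores:

import math

# First list: the relevant item has score 2. among [1., 3., 2.].
print(-math.log(math.exp(2.) / (math.exp(1.) + math.exp(3.) + math.exp(2.))))
# ~1.407606

# Second list: the relevant item has score 3. among [1., 2., 3.].
print(-math.log(math.exp(3.) / (math.exp(1.) + math.exp(2.) + math.exp(3.))))
# ~0.407606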
Example #5
    def test_softmax_loss(self):
        with tf.Graph().as_default():
            with self.cached_session():
                scores = [[1., 3., 2.], [1., 2., 3.], [1., 2., 3.]]
                labels = [[0., 0., 1.], [0., 0., 2.], [0., 0., 0.]]
                reduction = tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS

                loss_fn = losses_impl.SoftmaxLoss(name=None)
                result = loss_fn.compute(labels, scores, None,
                                         reduction).eval()

                self.assertAlmostEqual(
                    result,
                    -(math.log(_softmax(scores[0])[2]) +
                      math.log(_softmax(scores[1])[2]) * 2.) / 2.,
                    places=5)
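
The expected value in this assertion can be re-derived by hand: the third list has no relevant items and contributes nothing, the second list's term is scaled by its label gain of 2., and `SUM_BY_NONZERO_WEIGHTS` divides by the two contributing lists:

import math

first = -math.log(math.exp(2.) / (math.exp(1.) + math.exp(3.) + math.exp(2.)))
second = -math.log(math.exp(3.) / (math.exp(1.) + math.exp(2.) + math.exp(3.)))
print((first + 2. * second) / 2.)  # ~1.111409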
Example #6
def _softmax_loss(
    labels,
    logits,
    weights=None,
    lambda_weight=None,
    reduction=tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS,
    name=None):
  """Computes the softmax cross entropy for a list.

  This is the ListNet loss originally proposed by Cao et al.
  ["Learning to Rank: From Pairwise Approach to Listwise Approach"] and is
  appropriate for datasets with binary relevance labels [see "An Analysis of
  the Softmax Cross Entropy Loss for Learning-to-Rank with Binary Relevance" by
  Bruch et al.].

  Given the labels l_i and the logits s_i, we sort the examples and obtain ranks
  r_i. The standard softmax loss doesn't need r_i and is defined as
      -sum_i l_i * log(exp(s_i) / (exp(s_1) + ... + exp(s_n))).
  The `lambda_weight` re-weights examples based on l_i and r_i:
      -sum_i w(l_i, r_i) * log(exp(s_i) / (exp(s_1) + ... + exp(s_n))).
  See 'individual_weights' in 'DCGLambdaWeight' for how w(l_i, r_i) is computed.

  Args:
    labels: A `Tensor` of the same shape as `logits` representing graded
      relevance.
    logits: A `Tensor` with shape [batch_size, list_size]. Each value is the
      ranking score of the corresponding item.
    weights: A scalar, a `Tensor` with shape [batch_size, 1] for list-wise
      weights, or a `Tensor` with shape [batch_size, list_size] for item-wise
      weights.
    lambda_weight: A `DCGLambdaWeight` instance.
    reduction: One of `tf.losses.Reduction` except `NONE`. Describes how to
      reduce training loss over batch.
    name: A string used as the name for this loss.

  Returns:
    An op for the softmax cross entropy as a loss.
  """
  loss = losses_impl.SoftmaxLoss(name, lambda_weight)
  with tf.compat.v1.name_scope(loss.name, 'softmax_loss',
                               (labels, logits, weights)):
    return loss.compute(labels, logits, weights, reduction)
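
A minimal graph-mode sketch of calling `_softmax_loss` as defined above (assuming it is importable from the surrounding module); the input shapes follow the docstring:

import tensorflow as tf

with tf.Graph().as_default(), tf.compat.v1.Session() as sess:
  labels = tf.constant([[0., 0., 1.], [0., 0., 2.]])
  logits = tf.constant([[1., 3., 2.], [1., 2., 3.]])
  loss_op = _softmax_loss(labels, logits)
  print(sess.run(loss_op))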
Example #7
    def test_softmax_loss_should_support_lambda_weights(self):
        with tf.Graph().as_default():
            with self.cached_session():
                scores = [[1., 3., 2.], [1., 2., 3.], [1., 2., 3.]]
                labels = [[0., 0., 1.], [0., 0., 2.], [0., 0., 0.]]
                reduction = tf.compat.v1.losses.Reduction.SUM_BY_NONZERO_WEIGHTS
                lambda_weight = losses_impl.DCGLambdaWeight(
                    rank_discount_fn=lambda r: 1. / tf.math.log1p(r))

                loss_fn = losses_impl.SoftmaxLoss(name=None,
                                                  lambda_weight=lambda_weight)
                result = loss_fn.compute(labels, scores, None,
                                         reduction).eval()

                self.assertAlmostEqual(
                    result,
                    -(math.log(_softmax(scores[0])[2]) / math.log(1. + 2.) +
                      math.log(_softmax(scores[1])[2]) * 2. /
                      math.log(1. + 1.)) / 2.,
                    places=5)
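
Re-tracing the lambda-weighted expectation by hand (the gain/discount reading below is an assumption based on how `DCGLambdaWeight` is used above): the first list's relevant item (label 1.) is ranked 2nd, so its term is discounted by 1/log(1 + 2), while the second list's relevant item (label 2.) is ranked 1st and carries gain 2 with discount 1/log(1 + 1):

import math

first = -math.log(math.exp(2.) / (math.exp(1.) + math.exp(3.) + math.exp(2.)))
second = -math.log(math.exp(3.) / (math.exp(1.) + math.exp(2.) + math.exp(3.)))
print((first / math.log(1. + 2.) + second * 2. / math.log(1. + 1.)) / 2.)
# ~1.22868, matching the assertion above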
Example #8
def make_loss_metric_fn(loss_key,
                        weights_feature_name=None,
                        lambda_weight=None,
                        name=None):
  """Factory method to create a metric based on a loss.

  Args:
    loss_key: A key in `RankingLossKey`.
    weights_feature_name: A `string` specifying the name of the weights feature
      in `features` dict.
    lambda_weight: A `_LambdaWeight` object.
    name: A `string` used as the name for this metric.

  Returns:
    A metric fn with the following Args:
    * `labels`: A `Tensor` of the same shape as `predictions` representing
    graded relevance.
    * `predictions`: A `Tensor` with shape [batch_size, list_size]. Each value
    is the ranking score of the corresponding example.
    * `features`: A dict of `Tensor`s that contains all features.
  """

  metric_dict = {
      RankingLossKey.PAIRWISE_HINGE_LOSS:
          losses_impl.PairwiseHingeLoss(name, lambda_weight=lambda_weight),
      RankingLossKey.PAIRWISE_LOGISTIC_LOSS:
          losses_impl.PairwiseLogisticLoss(name, lambda_weight=lambda_weight),
      RankingLossKey.PAIRWISE_SOFT_ZERO_ONE_LOSS:
          losses_impl.PairwiseSoftZeroOneLoss(
              name, lambda_weight=lambda_weight),
      RankingLossKey.SOFTMAX_LOSS:
          losses_impl.SoftmaxLoss(name, lambda_weight=lambda_weight),
      RankingLossKey.SIGMOID_CROSS_ENTROPY_LOSS:
          losses_impl.SigmoidCrossEntropyLoss(name),
      RankingLossKey.MEAN_SQUARED_LOSS:
          losses_impl.MeanSquaredLoss(name),
      RankingLossKey.LIST_MLE_LOSS:
          losses_impl.ListMLELoss(name, lambda_weight=lambda_weight),
      RankingLossKey.APPROX_NDCG_LOSS:
          losses_impl.ApproxNDCGLoss(name),
      RankingLossKey.APPROX_MRR_LOSS:
          losses_impl.ApproxMRRLoss(name),
      RankingLossKey.GUMBEL_APPROX_NDCG_LOSS: losses_impl.ApproxNDCGLoss(name),
  }

  def _get_weights(features):
    """Get weights tensor from features and reshape it to 2-D if necessary."""
    weights = None
    if weights_feature_name:
      weights = tf.convert_to_tensor(value=features[weights_feature_name])
      # Convert weights to a 2-D Tensor.
      weights = utils.reshape_to_2d(weights)
    return weights

  def metric_fn(labels, predictions, features):
    """Defines the metric fn."""
    weights = _get_weights(features)
    loss = metric_dict.get(loss_key, None)
    if loss is None:
      raise ValueError('loss_key {} not supported.'.format(loss_key))
    return loss.eval_metric(labels, predictions, weights)

  return metric_fn
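
A sketch of using this factory, assuming the public `tfr.metrics.make_loss_metric_fn` and `tfr.losses.RankingLossKey` aliases from `tensorflow_ranking`; the metric key string is illustrative:

import tensorflow_ranking as tfr

# Report the softmax loss value as an eval metric alongside ranking metrics.
eval_metric_fns = {
    'metric/softmax_loss':
        tfr.metrics.make_loss_metric_fn(tfr.losses.RankingLossKey.SOFTMAX_LOSS),
}
# Each resulting fn has the (labels, predictions, features) signature described
# in the docstring above, e.g. for tfr.head.create_ranking_head(eval_metric_fns=...).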