Example 1
    def testIgnorePositiveExampleLossViaAlphaMultiplier(self):
        prediction_tensor = tf.constant(
            [[[_logit(0.55)], [_logit(0.52)], [_logit(0.50)], [_logit(0.48)],
              [_logit(0.45)]]], tf.float32)
        target_tensor = tf.constant([[[1], [1], [1], [0], [0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
                                                              alpha=0.0)
        sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
        focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor,
                                                 target_tensor,
                                                 weights=weights),
                                   axis=2)
        sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
                                                     target_tensor,
                                                     weights=weights),
                                     axis=2)

        with self.test_session() as sess:
            sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
            self.assertAllClose(focal_loss[0][:3], [0., 0., 0.])
            order_of_ratio = np.power(
                10,
                np.floor(np.log10(sigmoid_loss[0][3:] / focal_loss[0][3:])))
            self.assertAllClose(order_of_ratio, [1., 1.])
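These tests are written against the TensorFlow 1.x test API and rely on imports and a module-level _logit helper that are not shown in the snippets. A minimal sketch of the assumed scaffolding (the import path for losses follows the TensorFlow Object Detection API):

import math

import numpy as np
import tensorflow as tf

from object_detection.core import losses


def _logit(probability):
    # Inverse sigmoid: the logit whose sigmoid is the given probability.
    return math.log(probability / (1. - probability))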
Example 2
    def testExpectedLossWithAlpha75AndZeroGamma(self):
        # All zeros correspond to 0.5 probability.
        prediction_tensor = tf.constant(
            [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
             [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32)
        target_tensor = tf.constant(
            [[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]],
             [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1], [1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.75,
                                                              gamma=0.0)

        focal_loss = tf.reduce_sum(
            focal_loss_op(prediction_tensor, target_tensor, weights=weights))
        with self.test_session() as sess:
            focal_loss = sess.run(focal_loss)
            self.assertAllClose(
                (
                    -math.log(.5) *  # x-entropy per class per anchor.
                    ((
                        0.75 *  # alpha for positives.
                        8) +  # positives from 8 anchors.
                     (
                         0.25 *  # alpha for negatives.
                         8 * 2))),  # negatives from 8 anchors for two classes.
                focal_loss)
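As a quick sanity check of the expected value (a recomputation of the test's own expression, not output captured from a run): with zero logits every sigmoid probability is 0.5, so each of the 8 positive entries contributes -log(0.5) weighted by alpha = 0.75, and each of the 8 * 2 negative entries contributes -log(0.5) weighted by 1 - alpha = 0.25.

import math

# 0.6931 * (0.75 * 8 + 0.25 * 16) = 0.6931 * 10
expected = -math.log(0.5) * (0.75 * 8 + 0.25 * 8 * 2)
print(expected)  # ~6.9315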
Example 3
    def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self):
        prediction_tensor = tf.constant(
            [[[_logit(0.97)], [_logit(0.90)], [_logit(0.73)], [_logit(0.27)],
              [_logit(0.09)], [_logit(0.03)]]], tf.float32)
        target_tensor = tf.constant([[[1], [1], [1], [0], [0], [0]]],
                                    tf.float32)
        weights = tf.constant([[1, 1, 1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0,
                                                              alpha=None)
        sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
        focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor,
                                                 target_tensor,
                                                 weights=weights),
                                   axis=2)
        sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
                                                     target_tensor,
                                                     weights=weights),
                                     axis=2)

        with self.test_session() as sess:
            sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
            order_of_ratio = np.power(
                10, np.floor(np.log10(sigmoid_loss / focal_loss)))
            self.assertAllClose(order_of_ratio,
                                [[1000, 100, 10, 10, 100, 1000]])
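For reference, the sigmoid focal loss of Lin et al. is FL(p_t) = -alpha_t * (1 - p_t)**gamma * log(p_t), where p_t is the predicted probability of the true class; the modulating factor (1 - p_t)**gamma is what shrinks the loss on easy examples, and with gamma = 0 and no alpha it reduces to plain sigmoid cross entropy (which is what the next example asserts). A back-of-the-envelope check (not part of the test file) of the orders of magnitude asserted above, using gamma = 2:

import numpy as np

p_t = np.array([0.97, 0.90, 0.73])
# With alpha=None, cross entropy / focal loss is exactly 1 / (1 - p_t)**gamma.
print(1.0 / (1.0 - p_t) ** 2.0)  # ~[1111, 100, 13.7] -> orders 1000, 100, 10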
Example 4
  def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self):
    prediction_tensor = tf.constant([[[-100, 100, -100],
                                      [100, -100, -100],
                                      [100, 0, -100],
                                      [-100, -100, 100]],
                                     [[-100, 0, 100],
                                      [-100, 100, -100],
                                      [100, 100, 100],
                                      [0, 0, -1]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [1, 1, 1],
                                  [1, 0, 0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1],
                           [1, 1, 1, 0]], tf.float32)
    focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=None, gamma=0.0)
    sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
    focal_loss = focal_loss_op(prediction_tensor, target_tensor,
                               weights=weights)
    sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
                                   weights=weights)

    with self.test_session() as sess:
      sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
      self.assertAllClose(sigmoid_loss, focal_loss)
Example 5
  def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self):
    prediction_tensor = tf.constant([[[_logit(0.55)],
                                      [_logit(0.52)],
                                      [_logit(0.50)],
                                      [_logit(0.48)],
                                      [_logit(0.45)]]], tf.float32)
    target_tensor = tf.constant([[[1],
                                  [1],
                                  [1],
                                  [0],
                                  [0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
    focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
    sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
    focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
                                             weights=weights))
    sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
                                                 target_tensor,
                                                 weights=weights))

    with self.test_session() as sess:
      sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
      order_of_ratio = np.power(10,
                                np.floor(np.log10(sigmoid_loss / focal_loss)))
      self.assertAlmostEqual(order_of_ratio, 1.)
Example 6
def _build_classification_loss(loss_config):
    """Builds a classification loss based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
    if not isinstance(loss_config, losses_pb2.ClassificationLoss):
        raise ValueError(
            'loss_config not of type losses_pb2.ClassificationLoss.')

    loss_type = loss_config.WhichOneof('classification_loss')

    if loss_type == 'weighted_sigmoid':
        return losses.WeightedSigmoidClassificationLoss()

    if loss_type == 'weighted_sigmoid_focal':
        config = loss_config.weighted_sigmoid_focal
        # The upstream builder uses config.HasField('alpha'); here a
        # non-positive alpha is treated as unset.
        if config.alpha > 0:
            alpha = config.alpha
        else:
            alpha = None
        return losses.SigmoidFocalClassificationLoss(gamma=config.gamma,
                                                     alpha=alpha)
    if loss_type == 'weighted_softmax_focal':
        config = loss_config.weighted_softmax_focal
        # Same convention as above: alpha <= 0 means no alpha weighting.
        if config.alpha > 0:
            alpha = config.alpha
        else:
            alpha = None
        return losses.SoftmaxFocalClassificationLoss(gamma=config.gamma,
                                                     alpha=alpha)

    if loss_type == 'weighted_softmax':
        config = loss_config.weighted_softmax
        return losses.WeightedSoftmaxClassificationLoss(
            logit_scale=config.logit_scale)

    if loss_type == 'bootstrapped_sigmoid':
        config = loss_config.bootstrapped_sigmoid
        return losses.BootstrappedSigmoidClassificationLoss(
            alpha=config.alpha,
            bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))

    raise ValueError('Empty loss config.')
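A minimal usage sketch for this builder, assuming the Object Detection API's proto definitions are available (object_detection.protos.losses_pb2; the gamma and alpha field names follow its losses.proto):

from google.protobuf import text_format
from object_detection.protos import losses_pb2

loss_config = losses_pb2.ClassificationLoss()
text_format.Merge('weighted_sigmoid_focal { gamma: 2.0 alpha: 0.25 }',
                  loss_config)
focal_loss_op = _build_classification_loss(loss_config)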
Example 7
def _build_classification_loss(loss_config):
    """Builds a classification loss based on the loss config.

    Args:
    loss_config: A yaml.ClassificationLoss object.

    Returns:
    Loss based on the config.

    Raises:
    ValueError: On invalid loss_config.
    """
    if 'weighted_sigmoid' in loss_config:
        return losses.WeightedSigmoidClassificationLoss()

    if 'weighted_sigmoid_focal' in loss_config:
        config = loss_config.weighted_sigmoid_focal
        # The proto-based builder uses config.HasField('alpha'); here a
        # non-positive alpha is treated as unset.
        if config.alpha > 0:
            alpha = config.alpha
        else:
            alpha = None
        return losses.SigmoidFocalClassificationLoss(gamma=config.gamma,
                                                     alpha=alpha)
    if 'weighted_softmax_focal' in loss_config:
        config = loss_config.weighted_softmax_focal
        # Same convention as above: alpha <= 0 means no alpha weighting.
        if config.alpha > 0:
            alpha = config.alpha
        else:
            alpha = None
        return losses.SoftmaxFocalClassificationLoss(gamma=config.gamma,
                                                     alpha=alpha)

    if 'weighted_softmax' in loss_config:
        config = loss_config.weighted_softmax
        return losses.WeightedSoftmaxClassificationLoss(
            logit_scale=config.logit_scale)

    if 'bootstrapped_sigmoid' in loss_config:
        config = loss_config.bootstrapped_sigmoid
        return losses.BootstrappedSigmoidClassificationLoss(
            alpha=config.alpha,
            bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))
    raise ValueError('Empty loss config.')


def build_faster_rcnn_classification_loss(loss_config):
  """Builds a classification loss for Faster RCNN based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')

  loss_type = loss_config.WhichOneof('classification_loss')

  if loss_type == 'weighted_sigmoid':
    return losses.WeightedSigmoidClassificationLoss()
  if loss_type == 'weighted_softmax':
    config = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        logit_scale=config.logit_scale)
  if loss_type == 'weighted_logits_softmax':
    config = loss_config.weighted_logits_softmax
    return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
        logit_scale=config.logit_scale)
  if loss_type == 'weighted_sigmoid_focal':
    config = loss_config.weighted_sigmoid_focal
    alpha = None
    if config.HasField('alpha'):
      alpha = config.alpha
    return losses.SigmoidFocalClassificationLoss(
        gamma=config.gamma,
        alpha=alpha)

  # By default, Faster RCNN second stage classifier uses Softmax loss
  # with anchor-wise outputs.
  config = loss_config.weighted_softmax
  return losses.WeightedSoftmaxClassificationLoss(
      logit_scale=config.logit_scale)
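A sketch of that fall-through behavior, again assuming the OD API's losses_pb2 protos (the bootstrapped_sigmoid field name follows its losses.proto):

from google.protobuf import text_format
from object_detection.protos import losses_pb2

loss_config = losses_pb2.ClassificationLoss()
text_format.Merge('bootstrapped_sigmoid { alpha: 0.5 }', loss_config)

# None of the explicit branches match this loss type, so the builder falls
# through to WeightedSoftmaxClassificationLoss; reading the unset
# weighted_softmax submessage yields its default logit_scale.
loss_op = build_faster_rcnn_classification_loss(loss_config)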