Example #1
def build_faster_rcnn_classification_loss(loss_config):
  """Builds a classification loss for Faster RCNN based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')

  loss_type = loss_config.WhichOneof('classification_loss')

  if loss_type == 'weighted_sigmoid':
    return losses.WeightedSigmoidClassificationLoss()
  if loss_type == 'weighted_softmax':
    config = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        logit_scale=config.logit_scale)
  if loss_type == 'weighted_logits_softmax':
    config = loss_config.weighted_logits_softmax
    return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
        logit_scale=config.logit_scale)
  if loss_type == 'weighted_sigmoid_focal':
    config = loss_config.weighted_sigmoid_focal
    alpha = None
    if config.HasField('alpha'):
      alpha = config.alpha
    return losses.SigmoidFocalClassificationLoss(
        gamma=config.gamma,
        alpha=alpha)

  # By default, Faster RCNN second stage classifier uses Softmax loss
  # with anchor-wise outputs.
  config = loss_config.weighted_softmax
  return losses.WeightedSoftmaxClassificationLoss(
      logit_scale=config.logit_scale)
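For reference, a minimal usage sketch of this builder (not part of the example above): it assumes the TensorFlow Object Detection API protos are importable, and the text-format config is illustrative only.

from google.protobuf import text_format
from object_detection.protos import losses_pb2

# Build an illustrative config; setting any field of the classification_loss
# oneof (e.g. weighted_softmax, weighted_sigmoid_focal) selects the loss type.
loss_config = losses_pb2.ClassificationLoss()
text_format.Merge('weighted_sigmoid_focal { gamma: 2.0 alpha: 0.25 }',
                  loss_config)
loss_op = build_faster_rcnn_classification_loss(loss_config)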
Example #2
    def testExpectedLossWithAlpha75AndZeroGamma(self):
        # All zeros correspond to 0.5 probability.
        prediction_tensor = tf.constant(
            [[[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]],
             [[0, 0, 0], [0, 0, 0], [0, 0, 0], [0, 0, 0]]], tf.float32)
        target_tensor = tf.constant(
            [[[0, 1, 0], [1, 0, 0], [1, 0, 0], [0, 0, 1]],
             [[0, 0, 1], [0, 1, 0], [1, 0, 0], [1, 0, 0]]], tf.float32)
        weights = tf.constant([[1, 1, 1, 1], [1, 1, 1, 1]], tf.float32)
        focal_loss_op = losses.SigmoidFocalClassificationLoss(
            anchorwise_output=False, alpha=0.75, gamma=0.0)

        focal_loss = focal_loss_op(prediction_tensor,
                                   target_tensor,
                                   weights=weights)
        with self.test_session() as sess:
            focal_loss = sess.run(focal_loss)
            self.assertAllClose(
                (
                    -math.log(.5) *  # x-entropy per class per anchor.
                    ((
                        0.75 *  # alpha for positives.
                        8) +  # positives from 8 anchors.
                     (
                         0.25 *  # alpha for negatives.
                         8 * 2))),  # negatives from 8 anchors for two classes.
                focal_loss)
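A quick standalone check of the asserted value (a sketch, not part of the test): with gamma = 0 the modulating factor (1 - p_t)**gamma is 1, so the focal loss reduces to alpha-balanced sigmoid cross entropy, and zero logits give probability 0.5 for every class.

import math

per_entry = -math.log(0.5)             # cross entropy per class per anchor
expected = per_entry * (0.75 * 8 +     # 8 positive entries weighted by alpha
                        0.25 * 8 * 2)  # 16 negative entries weighted by 1 - alpha
print(expected)                        # ~6.93, the value asserted above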
Example #3
  def testSameAsSigmoidXEntropyWithNoAlphaAndZeroGamma(self):
    prediction_tensor = tf.constant([[[-100, 100, -100],
                                      [100, -100, -100],
                                      [100, 0, -100],
                                      [-100, -100, 100]],
                                     [[-100, 0, 100],
                                      [-100, 100, -100],
                                      [100, 100, 100],
                                      [0, 0, -1]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [1, 1, 1],
                                  [1, 0, 0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1],
                           [1, 1, 1, 0]], tf.float32)
    focal_loss_op = losses.SigmoidFocalClassificationLoss(
        anchorwise_output=True, alpha=None, gamma=0.0)
    sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=True)
    focal_loss = focal_loss_op(prediction_tensor, target_tensor,
                               weights=weights)
    sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
                                   weights=weights)

    with self.test_session() as sess:
      sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
      self.assertAllClose(sigmoid_loss, focal_loss)
Example #4
  def testNonAnchorWiseOutputComparableToSigmoidXEntropy(self):
    prediction_tensor = tf.constant([[[_logit(0.55)],
                                      [_logit(0.52)],
                                      [_logit(0.50)],
                                      [_logit(0.48)],
                                      [_logit(0.45)]]], tf.float32)
    target_tensor = tf.constant([[[1],
                                  [1],
                                  [1],
                                  [0],
                                  [0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
    focal_loss_op = losses.SigmoidFocalClassificationLoss(
        anchorwise_output=False, gamma=2.0, alpha=None)
    sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=False)
    focal_loss = focal_loss_op(prediction_tensor, target_tensor,
                               weights=weights)
    sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
                                   weights=weights)

    with self.test_session() as sess:
      sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
      order_of_ratio = np.power(10,
                                np.floor(np.log10(sigmoid_loss / focal_loss)))
      self.assertAlmostEqual(order_of_ratio, 1.)
Example #5
  def testIgnorePositiveExampleLossViaAlphaMultiplier(self):
    prediction_tensor = tf.constant([[[_logit(0.55)],
                                      [_logit(0.52)],
                                      [_logit(0.50)],
                                      [_logit(0.48)],
                                      [_logit(0.45)]]], tf.float32)
    target_tensor = tf.constant([[[1],
                                  [1],
                                  [1],
                                  [0],
                                  [0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
    focal_loss_op = losses.SigmoidFocalClassificationLoss(
        anchorwise_output=True, gamma=2.0, alpha=0.0)
    sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=True)
    focal_loss = focal_loss_op(prediction_tensor, target_tensor,
                               weights=weights)
    sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
                                   weights=weights)

    with self.test_session() as sess:
      sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
      self.assertAllClose(focal_loss[0][:3], [0., 0., 0.])
      order_of_ratio = np.power(10,
                                np.floor(np.log10(sigmoid_loss[0][3:] /
                                                  focal_loss[0][3:])))
      self.assertAllClose(order_of_ratio, [1., 1.])
Example #6
  def testExpectedLossWithAlphaOneAndZeroGamma(self):
    # All zeros correspond to 0.5 probability.
    prediction_tensor = tf.constant([[[0, 0, 0],
                                      [0, 0, 0],
                                      [0, 0, 0],
                                      [0, 0, 0]],
                                     [[0, 0, 0],
                                      [0, 0, 0],
                                      [0, 0, 0],
                                      [0, 0, 0]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1],
                           [1, 1, 1, 1]], tf.float32)
    focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=1.0, gamma=0.0)

    focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
                                             weights=weights))
    with self.test_session() as sess:
      focal_loss = sess.run(focal_loss)
      self.assertAllClose(
          (-math.log(.5) *  # x-entropy per class per anchor
           1.0 *            # alpha
           8),              # positives from 8 anchors
          focal_loss)
Example #7
  def testEasyExamplesProduceSmallLossComparedToSigmoidXEntropy(self):
    prediction_tensor = tf.constant([[[_logit(0.97)],
                                      [_logit(0.90)],
                                      [_logit(0.73)],
                                      [_logit(0.27)],
                                      [_logit(0.09)],
                                      [_logit(0.03)]]], tf.float32)
    target_tensor = tf.constant([[[1],
                                  [1],
                                  [1],
                                  [0],
                                  [0],
                                  [0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1, 1, 1]], tf.float32)
    focal_loss_op = losses.SigmoidFocalClassificationLoss(
        anchorwise_output=True, gamma=2.0, alpha=None)
    sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=True)
    focal_loss = focal_loss_op(prediction_tensor, target_tensor,
                               weights=weights)
    sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
                                   weights=weights)

    with self.test_session() as sess:
      sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
      order_of_ratio = np.power(10,
                                np.floor(np.log10(sigmoid_loss / focal_loss)))
      self.assertAllClose(order_of_ratio, [[1000, 100, 10, 10, 100, 1000]])
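The asserted orders of magnitude follow directly from the focal modulating factor: with alpha = None, the ratio of sigmoid cross entropy to focal loss is 1 / (1 - p_t)**gamma, where p_t is the probability assigned to the true class. A standalone sketch of that arithmetic:

import numpy as np

p_t = np.array([0.97, 0.90, 0.73, 0.73, 0.91, 0.97])  # prob of the true class
ce = -np.log(p_t)                  # sigmoid cross entropy per anchor
fl = (1 - p_t) ** 2 * ce           # focal scaling with gamma = 2
print(np.power(10, np.floor(np.log10(ce / fl))))
# -> [1000.  100.   10.   10.  100. 1000.], matching the assertion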
Example #8
  def testHardExamplesProduceLossComparableToSigmoidXEntropy(self):
    prediction_tensor = tf.constant([[[_logit(0.55)],
                                      [_logit(0.52)],
                                      [_logit(0.50)],
                                      [_logit(0.48)],
                                      [_logit(0.45)]]], tf.float32)
    target_tensor = tf.constant([[[1],
                                  [1],
                                  [1],
                                  [0],
                                  [0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1, 1]], tf.float32)
    focal_loss_op = losses.SigmoidFocalClassificationLoss(gamma=2.0, alpha=None)
    sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
    focal_loss = tf.reduce_sum(focal_loss_op(prediction_tensor, target_tensor,
                                             weights=weights), axis=2)
    sigmoid_loss = tf.reduce_sum(sigmoid_loss_op(prediction_tensor,
                                                 target_tensor,
                                                 weights=weights), axis=2)

    with self.test_session() as sess:
      sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
      order_of_ratio = np.power(10,
                                np.floor(np.log10(sigmoid_loss / focal_loss)))
      self.assertAllClose(order_of_ratio, [[1., 1., 1., 1., 1.]])
Example #9
  def testSimilarToSigmoidXEntropyWithHalfAlphaAndZeroGammaUpToAScale(self):
    prediction_tensor = tf.constant([[[-100, 100, -100],
                                      [100, -100, -100],
                                      [100, 0, -100],
                                      [-100, -100, 100]],
                                     [[-100, 0, 100],
                                      [-100, 100, -100],
                                      [100, 100, 100],
                                      [0, 0, -1]]], tf.float32)
    target_tensor = tf.constant([[[0, 1, 0],
                                  [1, 0, 0],
                                  [1, 0, 0],
                                  [0, 0, 1]],
                                 [[0, 0, 1],
                                  [0, 1, 0],
                                  [1, 1, 1],
                                  [1, 0, 0]]], tf.float32)
    weights = tf.constant([[1, 1, 1, 1],
                           [1, 1, 1, 0]], tf.float32)
    focal_loss_op = losses.SigmoidFocalClassificationLoss(alpha=0.5, gamma=0.0)
    sigmoid_loss_op = losses.WeightedSigmoidClassificationLoss()
    focal_loss = focal_loss_op(prediction_tensor, target_tensor,
                               weights=weights)
    sigmoid_loss = sigmoid_loss_op(prediction_tensor, target_tensor,
                                   weights=weights)

    with self.test_session() as sess:
      sigmoid_loss, focal_loss = sess.run([sigmoid_loss, focal_loss])
      self.assertAllClose(sigmoid_loss, focal_loss * 2)
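The factor of two is just the alpha weighting: with gamma = 0 and alpha = 0.5, positive entries (weight alpha) and negative entries (weight 1 - alpha) are both scaled by 0.5, so the focal loss is exactly half the plain sigmoid cross entropy.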
Example #10
def _build_classification_loss(loss_config):
    """Builds a classification loss based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
    if not isinstance(loss_config, losses_pb2.ClassificationLoss):
        raise ValueError(
            'loss_config not of type losses_pb2.ClassificationLoss.')

    loss_type = loss_config.WhichOneof('classification_loss')

    if loss_type == 'weighted_sigmoid':
        return losses.WeightedSigmoidClassificationLoss()

    if loss_type == 'weighted_sigmoid_focal':
        config = loss_config.weighted_sigmoid_focal
        alpha = None
        if config.HasField('alpha'):
            alpha = config.alpha
        return losses.SigmoidFocalClassificationLoss(gamma=config.gamma,
                                                     alpha=alpha)

    if loss_type == 'weighted_softmax':
        config = loss_config.weighted_softmax
        return losses.WeightedSoftmaxClassificationLoss(
            logit_scale=config.logit_scale)

    if loss_type == 'weighted_logits_softmax':
        config = loss_config.weighted_logits_softmax
        return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
            logit_scale=config.logit_scale)

    if loss_type == 'bootstrapped_sigmoid':
        config = loss_config.bootstrapped_sigmoid
        return losses.BootstrappedSigmoidClassificationLoss(
            alpha=config.alpha,
            bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))

    if loss_type == 'bounded_sigmoid':
        return losses.BoundedSigmoidCrossEntropyLoss()

    if loss_type == 'quadratic_bounded_sigmoid':
        return losses.QuadraticBoundedSigmoidCrossEntropyLoss()

    if loss_type == 'soft_target_sigmoid':
        return losses.SoftTargetSigmoidCrossEntropyLoss()

    raise ValueError('Empty loss config.')
Example #11
def _build_classification_loss(loss_config):
    """Builds a classification loss based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
    if not isinstance(loss_config, losses_pb2.ClassificationLoss):
        raise ValueError(
            'loss_config not of type losses_pb2.ClassificationLoss.')

    loss_type = loss_config.WhichOneof('classification_loss')

    if loss_type == 'weighted_sigmoid':
        return losses.WeightedSigmoidClassificationLoss()

    elif loss_type == 'weighted_sigmoid_focal':
        config = loss_config.weighted_sigmoid_focal
        alpha = None
        if config.HasField('alpha'):
            alpha = config.alpha
        return losses.SigmoidFocalClassificationLoss(gamma=config.gamma,
                                                     alpha=alpha)

    elif loss_type == 'weighted_softmax':
        config = loss_config.weighted_softmax
        return losses.WeightedSoftmaxClassificationLoss(
            logit_scale=config.logit_scale)

    elif loss_type == 'weighted_logits_softmax':
        config = loss_config.weighted_logits_softmax
        return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
            logit_scale=config.logit_scale)

    elif loss_type == 'bootstrapped_sigmoid':
        config = loss_config.bootstrapped_sigmoid
        return losses.BootstrappedSigmoidClassificationLoss(
            alpha=config.alpha,
            bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))

    elif loss_type == 'penalty_reduced_logistic_focal_loss':
        config = loss_config.penalty_reduced_logistic_focal_loss
        return losses.PenaltyReducedLogisticFocalLoss(alpha=config.alpha,
                                                      beta=config.beta)

    elif loss_type == 'weighted_dice_classification_loss':
        config = loss_config.weighted_dice_classification_loss
        return losses.WeightedDiceClassificationLoss(
            squared_normalization=config.squared_normalization,
            is_prediction_probability=config.is_prediction_probability)

    else:
        raise ValueError('Empty loss config.')
Example #12
def _build_classification_loss(loss_config):
    """Builds a classification loss based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
    if not isinstance(loss_config, losses_pb2.ClassificationLoss):
        raise ValueError(
            'loss_config not of type losses_pb2.ClassificationLoss.')

    loss_type = loss_config.WhichOneof('classification_loss')

    if loss_type == 'weighted_sigmoid':
        config = loss_config.weighted_sigmoid
        return losses.WeightedSigmoidClassificationLoss(
            anchorwise_output=config.anchorwise_output)

    if loss_type == 'confidence_weighted_sigmoid':
        config = loss_config.confidence_weighted_sigmoid
        return losses.ConfidenceWeightedSigmoidClassificationLoss(
            anchorwise_output=config.anchorwise_output)

    if loss_type == 'weighted_sigmoid_focal':
        config = loss_config.weighted_sigmoid_focal
        alpha = None
        if config.HasField('alpha'):
            alpha = config.alpha
        return losses.SigmoidFocalClassificationLoss(
            anchorwise_output=config.anchorwise_output,
            gamma=config.gamma,
            alpha=alpha)

    if loss_type == 'weighted_softmax':
        config = loss_config.weighted_softmax
        return losses.WeightedSoftmaxClassificationLoss(
            anchorwise_output=config.anchorwise_output,
            logit_scale=config.logit_scale)

    if loss_type == 'bootstrapped_sigmoid':
        config = loss_config.bootstrapped_sigmoid
        return losses.BootstrappedSigmoidClassificationLoss(
            alpha=config.alpha,
            bootstrap_type=('hard' if config.hard_bootstrap else 'soft'),
            anchorwise_output=config.anchorwise_output)

    raise ValueError('Empty loss config.')
Example #13
def build_faster_rcnn_classification_loss(loss_config):
    """Builds a classification loss for Faster RCNN based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
    if not isinstance(loss_config, losses_pb2.ClassificationLoss):
        raise ValueError(
            'loss_config not of type losses_pb2.ClassificationLoss.')

    loss_type = loss_config.WhichOneof('classification_loss')

    if loss_type == 'weighted_sigmoid':
        config = loss_config.weighted_sigmoid
        return losses.WeightedSigmoidClassificationLoss(
            anchorwise_output=config.anchorwise_output)
    if loss_type == 'weighted_softmax':
        config = loss_config.weighted_softmax
        return losses.WeightedSoftmaxClassificationLoss(
            anchorwise_output=config.anchorwise_output)
    if loss_type == 'weighted_sigmoid_focal':
        config = loss_config.weighted_sigmoid_focal
        alpha = None
        if config.HasField('alpha'):
            alpha = config.alpha
        return losses.SigmoidFocalClassificationLoss(
            anchorwise_output=config.anchorwise_output,
            gamma=config.gamma,
            alpha=alpha)

    # By default, Faster RCNN second stage classifier uses Softmax loss
    # with anchor-wise outputs.
    return losses.WeightedSoftmaxClassificationLoss(anchorwise_output=True)
Example #14
def build_loss(loss_type):
    """Builds the desired type of loss

    Args:
        loss_type: loss type (e.g. 'berHu', 'smooth_l1')

    Returns:
        Class of the specified loss_type
    """

    if loss_type == 'berHu':
        return losses_custom.WeightedBerHu()

    elif loss_type == 'chamfer_dist':
        return losses_custom.ChamferDistance()

    elif loss_type == 'emd':
        return losses_custom.EarthMoversDistance()

    elif loss_type == 'smooth_l1':
        return losses.WeightedSmoothL1LocalizationLoss()

    elif loss_type == 'smooth_l1_nonzero':
        return losses_custom.WeightedNonZeroSmoothL1LocalizationLoss()

    elif loss_type == 'softmax':
        return losses.WeightedSoftmaxClassificationLoss()

    elif loss_type == 'focal':
        return losses.SigmoidFocalClassificationLoss()

    elif loss_type == 'softmax_temp':
        return losses.WeightedSoftmaxClassificationLoss(0.5)

    elif loss_type == 'sigmoid_ce':
        return losses_custom.SigmoidClassificationLoss()

    else:
        raise ValueError('Invalid loss type', loss_type)
Example #15
def _build_classification_loss(loss_config):
  """Builds a classification loss based on the loss config.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError('loss_config not of type losses_pb2.ClassificationLoss.')

  loss_type = loss_config.WhichOneof('classification_loss')

  if loss_type == 'weighted_sigmoid':
    return losses.WeightedSigmoidClassificationLoss()

  if loss_type == 'weighted_sigmoid_focal':
    config = loss_config.weighted_sigmoid_focal
    alpha = None
    if config.HasField('alpha'):
      alpha = config.alpha
    return losses.SigmoidFocalClassificationLoss(
        gamma=config.gamma,
        alpha=alpha)

  if loss_type == 'weighted_softmax':
    config = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        logit_scale=config.logit_scale)

  if loss_type == 'weighted_logits_softmax':
    config = loss_config.weighted_logits_softmax
    return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
        logit_scale=config.logit_scale)

  if loss_type == 'bootstrapped_sigmoid':
    config = loss_config.bootstrapped_sigmoid
    return losses.BootstrappedSigmoidClassificationLoss(
        alpha=config.alpha,
        bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))

  raise ValueError('Empty loss config.')