Example #1
    def testReturnsCorrectAnchorWiseLossWithLogitScaleSetting(self):
        logit_scale = 100.
        prediction_tensor = tf.constant(
            [[[-100, 100, -100], [100, -100, -100], [0, 0, -100],
              [-100, -100, 100]],
             [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100],
              [100, -100, -100]]], tf.float32)
        target_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100],
                                      [0, 0, -100], [-100, -100, 100]],
                                     [[-100, 0, 0], [-100, 100, -100],
                                      [-100, 100, -100], [100, -100, -100]]],
                                    tf.float32)
        weights = tf.constant([[1, 1, .5, 1], [1, 1, 1, 0]], tf.float32)
        loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
            logit_scale=logit_scale)
        loss = loss_op(prediction_tensor, target_tensor, weights=weights)

        # Compute the softmax of the two distinct prediction rows above. After
        # dividing by logit_scale, logits of +/-100 become +/-1; entropy is
        # permutation invariant, so element order within a row is irrelevant.
        softmax_pred1 = [np.exp(-1), np.exp(-1), np.exp(1)]
        softmax_pred1 /= sum(softmax_pred1)
        softmax_pred2 = [np.exp(0), np.exp(0), np.exp(-1)]
        softmax_pred2 /= sum(softmax_pred2)

        # compute the expected cross entropy for perfect matches
        exp_entropy1 = sum([-x * np.log(x) for x in softmax_pred1])
        exp_entropy2 = sum([-x * np.log(x) for x in softmax_pred2])

        # weighted expected losses
        exp_loss = np.matrix(
            [[exp_entropy1, exp_entropy1, exp_entropy2 * .5, exp_entropy1],
             [exp_entropy2, exp_entropy1, exp_entropy1, 0.]])

        with self.test_session() as sess:
            loss_output = sess.run(loss)
            self.assertAllClose(loss_output, exp_loss)
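As the expected values above imply, the loss divides both the prediction and the target logits by logit_scale before taking the softmax, so logits of +/-100 become +/-1. A minimal NumPy sketch of that arithmetic (values copied from the test above; this mirrors, rather than calls, the library code):

import numpy as np

scaled = np.array([-100., 100., -100.]) / 100.  # one prediction row after scaling
probs = np.exp(scaled) / np.exp(scaled).sum()   # softmax of the scaled row
entropy = -(probs * np.log(probs)).sum()        # row matches its target exactly,
print(entropy)                                  # so the loss is the entropy (exp_entropy1)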
Example #2
def build_faster_rcnn_classification_loss(loss_config):
    """Builds a classification loss for Faster RCNN based on the loss config.

    Args:
      loss_config: A losses_pb2.ClassificationLoss object.

    Returns:
      Loss based on the config.

    Raises:
      ValueError: On invalid loss_config.
    """
    if not isinstance(loss_config, losses_pb2.ClassificationLoss):
        raise ValueError(
            'loss_config not of type losses_pb2.ClassificationLoss.')

    loss_type = loss_config.WhichOneof('classification_loss')

    if loss_type == 'weighted_sigmoid':
        return losses.WeightedSigmoidClassificationLoss()
    if loss_type == 'weighted_softmax':
        config = loss_config.weighted_softmax
        return losses.WeightedSoftmaxClassificationLoss(
            logit_scale=config.logit_scale)
    if loss_type == 'weighted_logits_softmax':
        config = loss_config.weighted_logits_softmax
        return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
            logit_scale=config.logit_scale)

    # By default, the Faster R-CNN second-stage classifier uses a softmax loss
    # with anchor-wise outputs.
    config = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        logit_scale=config.logit_scale)
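A hedged usage sketch of the builder above, assuming the usual protobuf text format for losses_pb2.ClassificationLoss (the oneof and field names come straight from the branches in the function):

from google.protobuf import text_format
from object_detection.protos import losses_pb2

config = losses_pb2.ClassificationLoss()
text_format.Merge('weighted_logits_softmax { logit_scale: 100.0 }', config)
loss_op = build_faster_rcnn_classification_loss(config)
# loss_op is now a WeightedSoftmaxClassificationAgainstLogitsLoss with
# logit_scale=100.0, matching the third branch above.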
Example #3
def _build_classification_loss(loss_config):
    """Builds a classification loss based on the loss config.

    Args:
      loss_config: A losses_pb2.ClassificationLoss object.

    Returns:
      Loss based on the config.

    Raises:
      ValueError: On invalid loss_config.
    """
    if not isinstance(loss_config, losses_pb2.ClassificationLoss):
        raise ValueError(
            'loss_config not of type losses_pb2.ClassificationLoss.')

    loss_type = loss_config.WhichOneof('classification_loss')

    if loss_type == 'weighted_sigmoid':
        return losses.WeightedSigmoidClassificationLoss()

    if loss_type == 'weighted_sigmoid_focal':
        config = loss_config.weighted_sigmoid_focal
        alpha = None
        if config.HasField('alpha'):
            alpha = config.alpha
        return losses.SigmoidFocalClassificationLoss(gamma=config.gamma,
                                                     alpha=alpha)

    if loss_type == 'weighted_softmax':
        config = loss_config.weighted_softmax
        return losses.WeightedSoftmaxClassificationLoss(
            logit_scale=config.logit_scale)

    if loss_type == 'weighted_logits_softmax':
        config = loss_config.weighted_logits_softmax
        return losses.WeightedSoftmaxClassificationAgainstLogitsLoss(
            logit_scale=config.logit_scale)

    if loss_type == 'bootstrapped_sigmoid':
        config = loss_config.bootstrapped_sigmoid
        return losses.BootstrappedSigmoidClassificationLoss(
            alpha=config.alpha,
            bootstrap_type=('hard' if config.hard_bootstrap else 'soft'))

    raise ValueError('Empty loss config.')
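For completeness, a sketch of driving the focal-loss branch, again assuming the text-format proto (gamma and alpha are the fields the function reads above; omitting alpha from the config would leave it None via the HasField check):

from google.protobuf import text_format
from object_detection.protos import losses_pb2

config = losses_pb2.ClassificationLoss()
text_format.Merge('weighted_sigmoid_focal { gamma: 2.0 alpha: 0.25 }', config)
loss_op = _build_classification_loss(config)
# loss_op is a SigmoidFocalClassificationLoss with gamma=2.0 and alpha=0.25.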
Example #4
    def testReturnsCorrectAnchorWiseLoss(self):
        prediction_tensor = tf.constant(
            [[[-100, 100, -100], [100, -100, -100], [0, 0, -100],
              [-100, -100, 100]],
             [[-100, 0, 0], [-100, 100, -100], [-100, 100, -100],
              [100, -100, -100]]], tf.float32)
        target_tensor = tf.constant([[[-100, 100, -100], [100, -100, -100],
                                      [100, -100, -100], [-100, -100, 100]],
                                     [[-100, -100, 100], [-100, 100, -100],
                                      [-100, 100, -100], [100, -100, -100]]],
                                    tf.float32)
        weights = tf.constant([[1, 1, .5, 1], [1, 1, 1, 0]], tf.float32)
        loss_op = losses.WeightedSoftmaxClassificationAgainstLogitsLoss()
        loss = loss_op(prediction_tensor, target_tensor, weights=weights)

        # Anchors whose prediction and target logits match have saturated
        # softmaxes and contribute ~0 loss. For the two mismatched anchors the
        # prediction softmax spreads ~[.5, .5] over two classes while the
        # target puts all mass on one, giving a cross entropy of -log(.5),
        # scaled by the anchor weights (.5 and 1).
        exp_loss = np.matrix([[0, 0, -0.5 * math.log(.5), 0],
                              [-math.log(.5), 0, 0, 0]])
        with self.test_session() as sess:
            loss_output = sess.run(loss)
            self.assertAllClose(loss_output, exp_loss)
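The -log(.5) entries can be reproduced outside TensorFlow. A small NumPy check for the one mismatched anchor in the first image (logits copied from the test; this mirrors, rather than calls, the library's loss):

import numpy as np

pred = np.array([0., 0., -100.])         # prediction softmax ~ [.5, .5, 0]
target = np.array([100., -100., -100.])  # target softmax ~ [1, 0, 0]
p = np.exp(pred) / np.exp(pred).sum()
t = np.exp(target) / np.exp(target).sum()
print(-(t * np.log(p)).sum())            # ~ -log(.5); the test weights it by .5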