def _build_localization_loss(loss_config):
  """Constructs the localization loss selected by the config's oneof field.

  Args:
    loss_config: A losses_pb2.LocalizationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.LocalizationLoss):
    raise ValueError('loss_config not of type losses_pb2.LocalizationLoss.')
  selected = loss_config.WhichOneof('localization_loss')
  if selected == 'weighted_l2':
    return losses.WeightedL2LocalizationLoss()
  elif selected == 'weighted_smooth_l1':
    return losses.WeightedSmoothL1LocalizationLoss(
        loss_config.weighted_smooth_l1.delta)
  elif selected == 'weighted_iou':
    return losses.WeightedIOULocalizationLoss()
  # WhichOneof returned None (or an unrecognized field name).
  raise ValueError('Empty loss config.')
def testReturnsCorrectAnchorwiseLoss(self):
  """Anchorwise weighted L2 loss of ones vs. zeros is 2 per anchor."""
  batch_size, num_anchors, code_size = 3, 16, 4
  predictions = tf.ones([batch_size, num_anchors, code_size])
  targets = tf.zeros([batch_size, num_anchors, code_size])
  weights = tf.ones([batch_size, num_anchors])
  loss_op = losses.WeightedL2LocalizationLoss(anchorwise_output=True)
  loss = loss_op(predictions, targets, weights=weights)
  # Each anchor contributes 0.5 * sum over 4 code dims of (1 - 0)^2 = 2.
  expected_loss = np.full((batch_size, num_anchors), 2.0)
  with self.test_session() as sess:
    self.assertAllClose(sess.run(loss), expected_loss)
def testReturnsCorrectLoss(self):
  """Scalar weighted L2 loss counts only the anchors with weight 1."""
  batch_size, num_anchors, code_size = 3, 10, 4
  predictions = tf.ones([batch_size, num_anchors, code_size])
  targets = tf.zeros([batch_size, num_anchors, code_size])
  # Only the first 5 anchors in each batch element carry weight.
  per_row = [1] * 5 + [0] * 5
  weights = tf.constant([per_row] * batch_size, tf.float32)
  loss_op = losses.WeightedL2LocalizationLoss()
  loss = loss_op(predictions, targets, weights=weights)
  # 3 batches * 5 weighted anchors * 4 dims * 0.5 * (1 - 0)^2 = 30.
  expected_loss = (3 * 5 * 4) / 2.0
  with self.test_session() as sess:
    self.assertAllClose(sess.run(loss), expected_loss)
def _build_classification_loss(loss_config):
  """Constructs the classification loss selected by the config's oneof field.

  Args:
    loss_config: A losses_pb2.ClassificationLoss object.

  Returns:
    Loss based on the config.

  Raises:
    ValueError: On invalid loss_config.
  """
  if not isinstance(loss_config, losses_pb2.ClassificationLoss):
    raise ValueError(
        'loss_config not of type losses_pb2.ClassificationLoss.')
  selected = loss_config.WhichOneof('classification_loss')
  if selected == 'weighted_sigmoid':
    cfg = loss_config.weighted_sigmoid
    return losses.WeightedSigmoidClassificationLoss(
        anchorwise_output=cfg.anchorwise_output)
  elif selected == 'weighted_softmax':
    cfg = loss_config.weighted_softmax
    return losses.WeightedSoftmaxClassificationLoss(
        anchorwise_output=cfg.anchorwise_output)
  elif selected == 'bootstrapped_sigmoid':
    cfg = loss_config.bootstrapped_sigmoid
    bootstrap_type = 'hard' if cfg.hard_bootstrap else 'soft'
    return losses.BootstrappedSigmoidClassificationLoss(
        alpha=cfg.alpha,
        bootstrap_type=bootstrap_type,
        anchorwise_output=cfg.anchorwise_output)
  # NOTE(review): the two branches below build *localization* losses from a
  # classification config; presumably the proto oneof allows them — confirm
  # against losses_pb2.ClassificationLoss before relying on this.
  elif selected == 'weighted_l2':
    cfg = loss_config.weighted_l2
    return losses.WeightedL2LocalizationLoss(
        anchorwise_output=cfg.anchorwise_output)
  elif selected == 'weighted_smooth_l1':
    cfg = loss_config.weighted_smooth_l1
    return losses.WeightedSmoothL1LocalizationLoss(
        anchorwise_output=cfg.anchorwise_output)
  # WhichOneof returned None (or an unrecognized field name).
  raise ValueError('Empty loss config.')
def testReturnsCorrectNanLoss(self):
  """NaN target entries contribute zero loss when ignore_nan_targets=True.

  Half of each code vector's targets are NaN; with ignore_nan_targets the
  loss op skips those dimensions, leaving 0.5 * (1 - 0)^2 per valid dim.
  """
  batch_size = 3
  num_anchors = 10
  code_size = 4
  prediction_tensor = tf.ones([batch_size, num_anchors, code_size])
  # Use floor division: shape dimensions must be ints. Under Python 3,
  # code_size / 2 yields a float (2.0), which breaks tf.zeros/tf.ones.
  target_tensor = tf.concat([
      tf.zeros([batch_size, num_anchors, code_size // 2]),
      tf.ones([batch_size, num_anchors, code_size // 2]) * np.nan
  ], axis=2)
  weights = tf.ones([batch_size, num_anchors])
  loss_op = losses.WeightedL2LocalizationLoss()
  loss = loss_op(prediction_tensor, target_tensor, weights=weights,
                 ignore_nan_targets=True)
  # 3 batches * 10 anchors * 2 valid dims * 0.5 = 30 = (3 * 5 * 4) / 2.
  expected_loss = (3 * 5 * 4) / 2.0
  with self.test_session() as sess:
    loss_output = sess.run(loss)
    self.assertAllClose(loss_output, expected_loss)