def _get_cls_loss(model, cls_logits, cls_gt):
    """Calculates cross entropy loss for classification

    Args:
        model: network model
        cls_logits: predicted classification logits
        cls_gt: ground truth one-hot classification vector

    Returns:
        cls_loss: cross-entropy classification loss
    """
    # Cross-entropy loss for classification
    weighted_softmax_classification_loss = \
        losses.WeightedSoftmaxLoss()
    cls_loss_weight = model._config.loss_config.cls_loss_weight
    cls_loss = weighted_softmax_classification_loss(
        cls_logits, cls_gt, weight=cls_loss_weight)

    # Normalize by the size of the minibatch
    with tf.variable_scope('cls_norm'):
        cls_loss = cls_loss / tf.cast(
            tf.shape(cls_gt)[0], dtype=tf.float32)

    # Add summary scalar during training
    if model._train_val_test == 'train':
        tf.summary.scalar('classification', cls_loss)

    return cls_loss
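
# --- Illustrative sketch (assumption, not part of the original code) ---
# The helper below shows roughly what the classification loss above
# computes, assuming losses.WeightedSoftmaxLoss reduces to a weighted sum
# of per-sample softmax cross-entropies. The name `_cls_loss_sketch` and
# the raw TF ops are hypothetical; the real implementation lives in the
# `losses` module.
def _cls_loss_sketch(cls_logits, cls_gt, cls_loss_weight):
    """Weighted softmax cross-entropy, normalized by mini-batch size."""
    # Per-sample cross-entropy between one-hot labels and logits
    per_sample_xent = tf.nn.softmax_cross_entropy_with_logits_v2(
        labels=cls_gt, logits=cls_logits)
    # Weighted sum over the mini-batch
    weighted_sum = cls_loss_weight * tf.reduce_sum(per_sample_xent)
    # Normalize by the number of samples in the mini-batch
    return weighted_sum / tf.cast(tf.shape(cls_gt)[0], dtype=tf.float32)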
def loss(self, prediction_dict):
    # These should include mini-batch values only
    objectness_gt = prediction_dict[self.PRED_MB_OBJECTNESS_GT]
    offsets_gt = prediction_dict[self.PRED_MB_OFFSETS_GT]

    # Predictions
    with tf.variable_scope('rpn_prediction_mini_batch'):
        objectness = prediction_dict[self.PRED_MB_OBJECTNESS]
        offsets = prediction_dict[self.PRED_MB_OFFSETS]

    with tf.variable_scope('rpn_losses'):
        with tf.variable_scope('objectness'):
            cls_loss = losses.WeightedSoftmaxLoss()
            cls_loss_weight = self._config.loss_config.cls_loss_weight
            objectness_loss = cls_loss(objectness,
                                       objectness_gt,
                                       weight=cls_loss_weight)

            with tf.variable_scope('obj_norm'):
                # Normalize by the number of anchors in the mini-batch
                objectness_loss = objectness_loss / tf.cast(
                    tf.shape(objectness_gt)[0], dtype=tf.float32)

            tf.summary.scalar('objectness', objectness_loss)

        with tf.variable_scope('regression'):
            reg_loss = losses.WeightedSmoothL1Loss()
            reg_loss_weight = self._config.loss_config.reg_loss_weight
            anchorwise_localization_loss = reg_loss(offsets,
                                                    offsets_gt,
                                                    weight=reg_loss_weight)
            masked_localization_loss = \
                anchorwise_localization_loss * objectness_gt[:, 1]
            localization_loss = tf.reduce_sum(masked_localization_loss)

            with tf.variable_scope('reg_norm'):
                # Normalize by the number of positive objects
                num_positives = tf.reduce_sum(objectness_gt[:, 1])
                # Assert the condition `num_positives > 0`
                with tf.control_dependencies(
                        [tf.assert_positive(num_positives)]):
                    localization_loss = localization_loss / num_positives
                    tf.summary.scalar('regression', localization_loss)

        with tf.variable_scope('total_loss'):
            total_loss = objectness_loss + localization_loss

    loss_dict = {
        self.LOSS_RPN_OBJECTNESS: objectness_loss,
        self.LOSS_RPN_REGRESSION: localization_loss,
    }

    return loss_dict, total_loss
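
# --- Illustrative sketch (assumption, not part of the original code) ---
# The regression branch above masks the anchor-wise smooth L1 loss with
# objectness_gt[:, 1] (1.0 for positive anchors, 0.0 for background) so
# that only positive anchors contribute, then divides by the number of
# positives. The helper below, `_masked_smooth_l1_sketch`, is a
# hypothetical stand-in that spells this out with raw TF ops, assuming
# losses.WeightedSmoothL1Loss returns one smooth L1 value per anchor.
def _masked_smooth_l1_sketch(offsets, offsets_gt, objectness_gt,
                             reg_loss_weight):
    """Smooth L1 over positive anchors only, averaged per positive."""
    diff = offsets - offsets_gt
    abs_diff = tf.abs(diff)
    # Standard smooth L1: quadratic below 1.0, linear above
    smooth_l1 = tf.where(abs_diff < 1.0,
                         0.5 * tf.square(abs_diff),
                         abs_diff - 0.5)
    # Sum over the offset dimensions to get one value per anchor
    anchorwise_loss = reg_loss_weight * tf.reduce_sum(smooth_l1, axis=1)
    # Keep positive anchors only, then normalize by their count
    positives = objectness_gt[:, 1]
    num_positives = tf.reduce_sum(positives)
    masked_loss = tf.reduce_sum(anchorwise_loss * positives)
    with tf.control_dependencies([tf.assert_positive(num_positives)]):
        return masked_loss / num_positives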