    def loss(self, inputs, truth_boxes, truth_labels, truth_instances):
        cfg = self.cfg

        # RPN losses: anchor objectness classification and box-delta regression.
        self.rpn_cls_loss, self.rpn_reg_loss = rpn_loss(
            self.rpn_logits_flat, self.rpn_deltas_flat, self.rpn_labels,
            self.rpn_label_weights, self.rpn_targets, self.rpn_target_weights)

        # RCNN head losses: per-RoI classification and box refinement.
        self.rcnn_cls_loss, self.rcnn_reg_loss = rcnn_loss(
            self.rcnn_logits, self.rcnn_deltas, self.rcnn_labels, self.rcnn_targets)

        # Mask head loss: per-instance segmentation.
        ## self.mask_cls_loss = Variable(torch.cuda.FloatTensor(1).zero_()).sum()
        self.mask_cls_loss = mask_loss(self.mask_logits, self.mask_labels, self.mask_instances)

        # Total multi-task loss is the unweighted sum of all components.
        self.total_loss = self.rpn_cls_loss + self.rpn_reg_loss \
                          + self.rcnn_cls_loss + self.rcnn_reg_loss \
                          + self.mask_cls_loss

        return self.total_loss
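The helpers rpn_loss, rcnn_loss and mask_loss are defined elsewhere and not shown here. As a rough reference for the signature above, a typical rpn_loss pairs a weighted cross-entropy over anchor labels with a smooth-L1 term over box deltas for positive anchors; the sketch below is only an assumption about such a helper (tensor shapes and normalization are guesses), not the implementation used in these examples.

import torch
import torch.nn.functional as F

def rpn_loss(logits_flat, deltas_flat, labels, label_weights, targets, target_weights):
    # Assumed shapes: logits_flat (N, 2), labels (N,) long, label_weights (N,) float,
    # deltas_flat/targets (N, 4), target_weights (N,) float (non-zero for positive anchors).
    cls_loss = F.cross_entropy(logits_flat, labels, reduction='none')
    cls_loss = (cls_loss * label_weights).sum() / label_weights.sum().clamp(min=1)

    reg_loss = F.smooth_l1_loss(deltas_flat, targets, reduction='none').sum(dim=1)
    reg_loss = (reg_loss * target_weights).sum() / target_weights.sum().clamp(min=1)
    return cls_loss, reg_loss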
Example #2
    def loss(self, inputs, truth_boxes, truth_labels, truth_instances):
        cfg = self.cfg

        self.rpn_cls_loss, self.rpn_reg_loss = rpn_loss(
            self._rpn_logits_flat, self._rpn_deltas_flat, self._rpn_labels, self._rpn_label_weights,
            self._rpn_targets, self._rpn_target_weights)

        self.rcnn_cls_loss, self.rcnn_reg_loss = rcnn_loss(self._rcnn_logits, self._rcnn_deltas,
                                                           self._rcnn_labels, self._rcnn_targets)

        ## self.mask_cls_loss = Variable(torch.cuda.FloatTensor(1).zero_()).sum()
        # TODO(alexander): self._mask_logits may not be updated during the `forward` step.
        self.mask_cls_loss = mask_loss(self._mask_logits, self._mask_labels, self._mask_instances)

        self.total_loss = self.rpn_cls_loss + self.rpn_reg_loss \
                          + self.rcnn_cls_loss + self.rcnn_reg_loss \
                          + self.mask_cls_loss

        return self.total_loss
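Neither example includes mask_loss itself. In a standard Mask R-CNN head it is a per-RoI binary cross-entropy computed on the logit map of each RoI's ground-truth class; the sketch below follows that convention and is an assumption (names and shapes are hypothetical), not the helper used above.

import torch
import torch.nn.functional as F

def mask_loss(mask_logits, mask_labels, mask_instances):
    # Assumed shapes: mask_logits (num_rois, num_classes, H, W),
    # mask_labels (num_rois,) long, mask_instances (num_rois, H, W) binary float.
    num_rois = mask_logits.size(0)
    idx = torch.arange(num_rois, device=mask_logits.device)
    # Select the logit map of the ground-truth class for every RoI,
    # then average per-pixel binary cross-entropy against the instance mask.
    selected = mask_logits[idx, mask_labels]
    return F.binary_cross_entropy_with_logits(selected, mask_instances)

During training, the total_loss returned by either variant would typically be followed by optimizer.zero_grad(), total_loss.backward() and optimizer.step().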