# Assumed imports for all snippets below (maskrcnn-benchmark conventions; not
# shown in the original listing):
#   import torch
#   from torch.nn import functional as F
#   from maskrcnn_benchmark.modeling.utils import cat
#   from maskrcnn_benchmark.layers import smooth_l1_loss
    def __call__(self, class_logits):
        """
        Computes the class-balanced classification loss for relationship
        proposal pairs.
        This requires that the subsample method has been called beforehand.

        Arguments:
            class_logits (list[Tensor])

        Returns:
            classification_loss (Tensor)
        """

        class_logits = cat(class_logits, dim=0)
        device = class_logits.device

        if not hasattr(self, "_proposal_pairs"):
            raise RuntimeError("subsample needs to be called before computing the loss")

        proposals = self._proposal_pairs

        labels = cat([proposal.get_field("labels") for proposal in proposals],
                     dim=0)

        # Re-weight the background class (index 0) by the foreground/background
        # ratio so that abundant background pairs do not dominate the loss.
        rel_fg_cnt = len(labels.nonzero())
        rel_bg_cnt = labels.shape[0] - rel_fg_cnt
        ce_weights = torch.ones(class_logits.size(1), device=device)
        ce_weights[0] = float(rel_fg_cnt) / (rel_bg_cnt + 1e-5)
        classification_loss = F.cross_entropy(class_logits,
                                              labels,
                                              weight=ce_weights)

        return classification_loss
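For context, here is a minimal, self-contained sketch (all shapes and names hypothetical) of the background re-weighting used above: the weight of the background class (index 0) is set to the foreground/background ratio, so a batch dominated by background pairs contributes proportionally less to the cross-entropy.

import torch
import torch.nn.functional as F

# Hypothetical batch: 100 relation pairs, 51 classes, only 10 foreground pairs.
class_logits = torch.randn(100, 51)
labels = torch.zeros(100, dtype=torch.long)
labels[:10] = torch.randint(1, 51, (10,))

fg = int((labels > 0).sum())
bg = labels.numel() - fg
ce_weights = torch.ones(class_logits.size(1))
ce_weights[0] = fg / (bg + 1e-5)  # down-weight the dominant background class

plain = F.cross_entropy(class_logits, labels)
balanced = F.cross_entropy(class_logits, labels, weight=ce_weights)
print(plain.item(), balanced.item())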
Example #2
    def __call__(self, class_logits):
        """
        Computes the classification loss for relationship proposal pairs.
        This requires that the subsample method has been called beforehand.

        Arguments:
            class_logits (list[Tensor])

        Returns:
            classification_loss (Tensor)
        """

        class_logits = cat(class_logits, dim=0)

        if not hasattr(self, "_proposal_pairs"):
            raise RuntimeError("subsample needs to be called before computing the loss")

        proposals = self._proposal_pairs

        labels = cat([proposal.get_field("labels") for proposal in proposals],
                     dim=0)

        classification_loss = F.cross_entropy(class_logits, labels)

        return classification_loss
    def obj_classification_loss(self, proposals, class_logits):
        """Plain cross-entropy loss over the object class logits of the given proposals."""
        class_logits = cat(class_logits, dim=0)

        labels = cat([proposal.get_field("labels") for proposal in proposals],
                     dim=0)

        classification_loss = F.cross_entropy(class_logits, labels)

        return classification_loss
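A minimal sketch of how such a loss might be driven, assuming a BoxList-like proposal object that exposes get_field. The StubProposals class and the simplified cat helper below are hypothetical stand-ins, not the maskrcnn-benchmark implementations:

import torch
import torch.nn.functional as F

def cat(tensors, dim=0):
    # Simplified stand-in for maskrcnn-benchmark's cat: concatenation that
    # short-circuits single-element lists.
    return tensors[0] if len(tensors) == 1 else torch.cat(tensors, dim=dim)

class StubProposals:
    """Hypothetical stand-in for a BoxList carrying a 'labels' field."""
    def __init__(self, labels):
        self._fields = {"labels": labels}
    def get_field(self, name):
        return self._fields[name]

# Two images, 4 proposals each, 81 object classes (COCO-style).
proposals = [StubProposals(torch.randint(0, 81, (4,))) for _ in range(2)]
class_logits = [torch.randn(4, 81) for _ in range(2)]

logits = cat(class_logits, dim=0)
labels = cat([p.get_field("labels") for p in proposals], dim=0)
loss = F.cross_entropy(logits, labels)
print(loss.item())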
Example #4
    def __call__(self, class_logits, box_regression):
        """
        Computes the loss for Faster R-CNN.
        This requires that the subsample method has been called beforehand.

        Arguments:
            class_logits (list[Tensor])
            box_regression (list[Tensor])

        Returns:
            classification_loss (Tensor)
            box_loss (Tensor)
        """

        class_logits = cat(class_logits, dim=0)
        box_regression = cat(box_regression, dim=0)
        device = class_logits.device

        if not hasattr(self, "_proposals"):
            raise RuntimeError("subsample needs to be called before computing the loss")

        proposals = self._proposals

        labels = cat([proposal.get_field("labels") for proposal in proposals],
                     dim=0)
        regression_targets = cat([
            proposal.get_field("regression_targets") for proposal in proposals
        ],
                                 dim=0)

        classification_loss = F.cross_entropy(class_logits, labels)

        # get indices that correspond to the regression targets for
        # the corresponding ground truth labels, to be used with
        # advanced indexing
        sampled_pos_inds_subset = torch.nonzero(labels > 0).squeeze(1)
        labels_pos = labels[sampled_pos_inds_subset]
        if self.cls_agnostic_bbox_reg:
            map_inds = torch.tensor([4, 5, 6, 7], device=device)
        else:
            map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3],
                                                              device=device)

        box_loss = smooth_l1_loss(
            box_regression[sampled_pos_inds_subset[:, None], map_inds],
            regression_targets[sampled_pos_inds_subset],
            size_average=False,
            beta=1,
        )
        box_loss = box_loss / labels.numel()

        return classification_loss, box_loss
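To make the advanced indexing above concrete: each class owns four consecutive columns of box_regression, so the deltas for class c live in columns 4*c through 4*c+3. A small hypothetical example:

import torch

# Hypothetical: 3 positive proposals, 5 classes, 4 box deltas per class.
box_regression = torch.arange(3 * 5 * 4, dtype=torch.float).view(3, 20)
labels_pos = torch.tensor([2, 1, 4])              # ground-truth class per positive
sampled_pos_inds_subset = torch.tensor([0, 1, 2])

# Columns 4*c .. 4*c+3 hold the deltas predicted for class c.
map_inds = 4 * labels_pos[:, None] + torch.tensor([0, 1, 2, 3])
deltas = box_regression[sampled_pos_inds_subset[:, None], map_inds]
print(map_inds)      # rows: [8, 9, 10, 11], [4, 5, 6, 7], [16, 17, 18, 19]
print(deltas.shape)  # torch.Size([3, 4])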
    def pred_classification_loss(self, class_logits, freq_prior=None):
        """
        Computes the predicate classification loss for relationship proposal pairs.
        This requires that the subsample method has been called beforehand.

        Arguments:
            class_logits (list[Tensor])

        Returns:
            classification_loss (Tensor)
        """
        class_logits = cat(class_logits, dim=0)
        device = class_logits.device

        if not hasattr(self, "_proposal_pairs"):
            raise RuntimeError("subsample needs to be called before computing the loss")

        proposals = self._proposal_pairs
        labels = cat([proposal.get_field("labels") for proposal in proposals],
                     dim=0)

        # Re-weight the background class (index 0) by the foreground/background
        # ratio so that abundant background pairs do not dominate the loss.
        rel_fg_cnt = len(labels.nonzero())
        rel_bg_cnt = labels.shape[0] - rel_fg_cnt
        ce_weights = torch.ones(class_logits.size(1), device=device)
        ce_weights[0] = float(rel_fg_cnt) / (rel_bg_cnt + 1e-5)
        classification_loss = F.cross_entropy(class_logits,
                                              labels,
                                              weight=ce_weights)

        # add an auxiliary loss to mine some positive relationship pairs (disabled)
        # class_probs = torch.log_softmax(class_logits[:, 1:], dim=-1)
        # freq_probs = torch.softmax(freq_prior[:, 1:], dim=-1)
        # klloss = F.kl_div(class_probs, freq_probs, reduction='batchmean')
        #
        # classification_loss += klloss

        # class_probs = torch.softmax(class_logits, dim=-1).detach()
        # freq_labels = freq_prior.argmax(1)
        # pred_labels = class_probs[:, 1:].argmax(1) + 1
        # match_idx = (freq_labels == pred_labels).nonzero().view(-1)
        # keep_idx = (labels[match_idx] == 0).nonzero().view(-1)
        # match_idx = match_idx[keep_idx]
        # if match_idx.numel() > 0:
        #     labels_mined = freq_labels[match_idx]
        #     class_logits_mined = class_logits[match_idx]
        #     # weights = labels.new(class_logits.size(0)).fill_(1).float()
        #     weights = class_probs.max(1)[0][match_idx].detach()
        #     classification_loss += (weights * F.cross_entropy(class_logits_mined, labels_mined, weight=ce_weights, reduction='none')).mean()

        return classification_loss
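The disabled block above sketches two ideas: a KL term pulling the predicted predicate distribution toward a frequency prior, and self-training on pairs whose prediction agrees with that prior. A minimal runnable sketch of the KL variant, with all shapes hypothetical:

import torch
import torch.nn.functional as F

# Hypothetical: 6 relation pairs, 51 predicate classes (index 0 = background).
class_logits = torch.randn(6, 51)
freq_prior = torch.randn(6, 51)  # e.g. prior scores of (subj, obj) -> predicate

# KL(freq || pred) over foreground predicates only, as in the commented-out code.
class_probs = torch.log_softmax(class_logits[:, 1:], dim=-1)
freq_probs = torch.softmax(freq_prior[:, 1:], dim=-1)
klloss = F.kl_div(class_probs, freq_probs, reduction='batchmean')
print(klloss.item())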