Example #1
    def test_nms_match(self):
        import numpy as np
        import pytest
        import torch
        from mmcv.ops import nms, nms_match

        if not torch.cuda.is_available():
            return
        iou_thr = 0.6
        # empty input should return an empty list of groups
        empty_dets = np.array([])
        assert len(nms_match(empty_dets, iou_thr)) == 0

        # non-empty ndarray input
        np_dets = np.array(
            [[49.1, 32.4, 51.0, 35.9, 0.9], [49.3, 32.9, 51.0, 35.3, 0.9],
             [35.3, 11.5, 39.9, 14.5, 0.4], [35.2, 11.7, 39.7, 15.7, 0.3]],
            dtype=np.float32)
        np_groups = nms_match(np_dets, iou_thr)
        assert isinstance(np_groups[0], np.ndarray)
        assert len(np_groups) == 2
        tensor_dets = torch.from_numpy(np_dets)
        boxes = tensor_dets[:, :4]
        scores = tensor_dets[:, 4]
        # the first index of each group should match the boxes kept by nms
        nms_keep_inds = nms(boxes.contiguous(), scores.contiguous(),
                            iou_thr)[1]
        assert {g[0].item()
                for g in np_groups} == set(nms_keep_inds.tolist())

        # non-empty tensor input
        tensor_dets = torch.from_numpy(np_dets)
        tensor_groups = nms_match(tensor_dets, iou_thr)
        assert isinstance(tensor_groups[0], torch.Tensor)
        for t_group, np_group in zip(tensor_groups, np_groups):
            assert np.equal(t_group.numpy(), np_group).all()

        # input of the wrong shape should raise an AssertionError
        wrong_dets = np.zeros((2, 3))
        with pytest.raises(AssertionError):
            nms_match(wrong_dets, iou_thr)
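The test above runs inside mmcv's test suite. As a quick standalone reference, a minimal sketch of the same grouping behaviour (assuming mmcv's ops are installed; the detections reuse the rows from the test) could look like this:

import numpy as np
from mmcv.ops import nms_match

# rows are [x1, y1, x2, y2, score]
dets = np.array(
    [[49.1, 32.4, 51.0, 35.9, 0.9], [49.3, 32.9, 51.0, 35.3, 0.9],
     [35.3, 11.5, 39.9, 14.5, 0.4], [35.2, 11.7, 39.7, 15.7, 0.3]],
    dtype=np.float32)

# nms_match returns one index array per surviving box: the first index of
# each group is the box NMS keeps, the rest are the boxes it suppresses
for group in nms_match(dets, 0.6):
    print('kept box', int(group[0]), 'suppresses', group[1:].tolist())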
Example #2
    def _sample_neg(self,
                    assign_result,
                    num_expected,
                    bboxes,
                    feats=None,
                    img_meta=None,
                    **kwargs):
        """Sample negative samples.
        Score-HLR sampler is done in the following steps:
        1. Take the maximum positive score prediction of each negative samples
            as s_i.
        2. Filter out negative samples whose s_i <= score_thr, the left samples
            are called valid samples.
        3. Use NMS-Match to divide valid samples into different groups,
            samples in the same group will greatly overlap with each other
        4. Rank the matched samples in two-steps to get Score-HLR.
            (1) In the same group, rank samples with their scores.
            (2) In the same score rank across different groups,
                rank samples with their scores again.
        5. Linearly map Score-HLR to the final label weights.
        Args:
            assign_result (:obj:`AssignResult`): result of assigner.
            num_expected (int): Expected number of samples.
            bboxes (Tensor): bbox to be sampled.
            feats (Tensor): Features come from FPN.
            img_meta (dict): Meta information dictionary.
        """
        # indices of proposals assigned to background (gt_ind == 0)
        neg_inds = torch.nonzero(assign_result.gt_inds == 0).flatten()
        num_neg = neg_inds.size(0)
        if num_neg == 0:
            return neg_inds, None
        with torch.no_grad():
            neg_bboxes = bboxes[neg_inds]
            neg_rois = bbox2roi([neg_bboxes])
            bbox_result = self.context._bbox_forward(feats, neg_rois)
            cls_score, bbox_pred = bbox_result['cls_score'], bbox_result[
                'bbox_pred']

            # per-sample classification loss with every negative labelled as
            # background (used later to renormalise the label weights)
            ori_loss = self.bbox_head.loss(
                cls_score=cls_score,
                bbox_pred=None,
                rois=None,
                labels=neg_inds.new_full((num_neg, ),
                                         self.bbox_head.num_classes),
                label_weights=cls_score.new_ones(num_neg),
                bbox_targets=None,
                bbox_weights=None,
                reduction_override='none')['loss_cls']

            # split negatives into valid samples (max foreground score above
            # score_thr) and invalid samples (at or below score_thr)
            max_score, argmax_score = cls_score.softmax(-1)[:, :-1].max(-1)
            valid_inds = (max_score > self.score_thr).nonzero().view(-1)
            invalid_inds = (max_score <= self.score_thr).nonzero().view(-1)
            num_valid = valid_inds.size(0)
            num_invalid = invalid_inds.size(0)

            # take up to num_expected HLR samples; any shortfall is padded
            # with randomly chosen invalid samples
            num_expected = min(num_neg, num_expected)
            num_hlr = min(num_valid, num_expected)
            num_rand = num_expected - num_hlr
            if num_valid > 0:
                valid_rois = neg_rois[valid_inds]
                valid_max_score = max_score[valid_inds]
                valid_argmax_score = argmax_score[valid_inds]
                valid_bbox_pred = bbox_pred[valid_inds]

                # valid_bbox_pred shape: [num_valid, num_classes, 4]
                valid_bbox_pred = valid_bbox_pred.view(valid_bbox_pred.size(0),
                                                       -1, 4)
                selected_bbox_pred = valid_bbox_pred[range(num_valid),
                                                     valid_argmax_score]
                pred_bboxes = self.bbox_head.bbox_coder.decode(
                    valid_rois[:, 1:], selected_bbox_pred)
                pred_bboxes_with_score = torch.cat(
                    [pred_bboxes, valid_max_score[:, None]], -1)
                # step 3: group heavily-overlapping predictions via NMS-Match
                group = nms_match(pred_bboxes_with_score, self.iou_thr)

                # imp: importance score used for the hierarchical ranking
                imp = cls_score.new_zeros(num_valid)
                for g in group:
                    g_score = valid_max_score[g]
                    # g_score has already been sorted in descending order
                    rank = g_score.new_tensor(range(g_score.size(0)))
                    imp[g] = num_valid - rank + g_score
                # imp_rank[i] is the importance rank of valid sample i
                _, imp_rank_inds = imp.sort(descending=True)
                _, imp_rank = imp_rank_inds.sort()
                hlr_inds = imp_rank_inds[:num_expected]

                if num_rand > 0:
                    rand_inds = torch.randperm(num_invalid)[:num_rand]
                    select_inds = torch.cat(
                        [valid_inds[hlr_inds], invalid_inds[rand_inds]])
                else:
                    select_inds = valid_inds[hlr_inds]

                neg_label_weights = cls_score.new_ones(num_expected)

                # step 5: linearly map the HLR rank to label weights in
                # (0, 1]; randomly padded samples take the smallest weight
                up_bound = max(num_expected, num_valid)
                imp_weights = (up_bound -
                               imp_rank[hlr_inds].float()) / up_bound
                neg_label_weights[:num_hlr] = imp_weights
                neg_label_weights[num_hlr:] = imp_weights.min()
                neg_label_weights = (self.bias +
                                     (1 - self.bias) * neg_label_weights).pow(
                                         self.k)
                # rescale so that the total weighted loss over the selected
                # negatives equals the original (unweighted) loss
                ori_selected_loss = ori_loss[select_inds]
                new_loss = ori_selected_loss * neg_label_weights
                norm_ratio = ori_selected_loss.sum() / new_loss.sum()
                neg_label_weights *= norm_ratio
            else:
                # no valid samples: fall back to uniform random sampling
                # with unit label weights
                neg_label_weights = cls_score.new_ones(num_expected)
                select_inds = torch.randperm(num_neg)[:num_expected]

            return neg_inds[select_inds], neg_label_weights
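For intuition about step 5 of the docstring, here is a small numerical sketch in plain PyTorch of how an importance rank is turned into a label weight before the loss-preserving renormalisation. The values 0.2 and 0.5 are hypothetical stand-ins for self.bias and self.k, and the sample counts are made up.

import torch

# hypothetical hyper-parameters standing in for self.bias and self.k
bias, k = 0.2, 0.5
num_expected, num_valid = 8, 5

# made-up importance ranks of the selected HLR samples (0 = most important)
imp_rank = torch.arange(num_valid).float()

# same linear mapping as in _sample_neg: rank 0 -> weight 1.0,
# larger ranks -> smaller weights, never reaching 0
up_bound = max(num_expected, num_valid)
weights = (up_bound - imp_rank) / up_bound

# bias/power transform applied before the renormalisation by norm_ratio
weights = (bias + (1 - bias) * weights).pow(k)
print(weights)  # monotonically decreasing weights in (0, 1]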