Code Example #1
File: inference.py  Project: zhubinQAQ/CPM-R-CNN
    def add_gt_proposals(self, proposal, target):
        device = proposal.bbox.device
        gt_box = target.copy_with_fields(['labels'])
        gt_box.add_field("objectness", torch.ones(len(gt_box), device=device))
        proposal = cat_boxlist((proposal, gt_box))

        return proposal
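A minimal usage sketch of the snippet above, assuming maskrcnn-benchmark-style BoxList / cat_boxlist utilities (the exact import paths inside CPM-R-CNN may differ). The proposal is given a dummy "labels" field here only so that cat_boxlist's same-fields check passes; that detail is an assumption, not part of the original method.

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist

# Two RPN proposals for a (width=100, height=100) image.
proposal = BoxList(torch.tensor([[0., 0., 10., 10.],
                                 [5., 5., 20., 20.]]), (100, 100), mode="xyxy")
proposal.add_field("objectness", torch.tensor([0.9, 0.7]))
proposal.add_field("labels", torch.tensor([0, 0]))

# One ground-truth box carrying its class label.
target = BoxList(torch.tensor([[30., 30., 60., 60.]]), (100, 100), mode="xyxy")
target.add_field("labels", torch.tensor([1]))

# Mirror the method body: keep only "labels", fake objectness = 1.0,
# then append the ground-truth box to the proposals.
gt_box = target.copy_with_fields(["labels"])
gt_box.add_field("objectness", torch.ones(len(gt_box)))
merged = cat_boxlist((proposal, gt_box))
print(len(merged))  # 3 boxes, each carrying "objectness" and "labels"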
Code Example #2
File: inference.py  Project: zhubinQAQ/CPM-R-CNN
    def forward(self, anchors, objectness, box_regression, targets=None):
        """
        Arguments:
            anchors: list[list[BoxList]]
            objectness: list[tensor]
            box_regression: list[tensor]
            targets: list[BoxList] (optional; at training time the
                ground-truth boxes are appended to the proposals)

        Returns:
            boxlists (list[BoxList]): the post-processed anchors, after
                applying box decoding and NMS
        """
        sampled_boxes = []
        num_levels = len(objectness)
        anchors = list(zip(*anchors))
        for a, o, b in zip(anchors, objectness, box_regression):
            sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))

        boxlists = list(zip(*sampled_boxes))
        boxlists = [cat_boxlist(boxlist) for boxlist in boxlists]

        if num_levels > 1:
            boxlists = self.select_over_all_levels(boxlists)

        # append ground-truth bboxes to proposals
        if self.training and targets is not None:
            boxlists = self.add_gt_proposals(boxlists, targets)

        return boxlists
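A small, self-contained sketch of the two zip(*...) transposes in the method above, with plain strings standing in for BoxLists:

# Input layout: one entry per image, each holding one entry per FPN level.
anchors = [["img0/lvl0", "img0/lvl1"],   # image 0
           ["img1/lvl0", "img1/lvl1"]]   # image 1

# list(zip(*anchors)) regroups to one entry per level, each holding all images,
# which is what forward_for_single_feature_map consumes.
per_level = list(zip(*anchors))
# [('img0/lvl0', 'img1/lvl0'), ('img0/lvl1', 'img1/lvl1')]

# After per-level processing, zip(*...) transposes back to per-image groups,
# ready for cat_boxlist over levels.
per_image = list(zip(*per_level))
# [('img0/lvl0', 'img0/lvl1'), ('img1/lvl0', 'img1/lvl1')]
print(per_level, per_image, sep="\n")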
Code Example #3
File: inference.py  Project: zhubinQAQ/CPM-R-CNN
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class_old = boxlist_for_class
            if cfg.TEST.SOFT_NMS.ENABLED:
                boxlist_for_class = boxlist_soft_nms(
                    boxlist_for_class,
                    sigma=cfg.TEST.SOFT_NMS.SIGMA,
                    overlap_thresh=self.nms,
                    score_thresh=0.0001,
                    method=cfg.TEST.SOFT_NMS.METHOD
                )
            else:
                boxlist_for_class = boxlist_nms(
                    boxlist_for_class, self.nms
                )
            # Refine the post-NMS boxes using bounding-box voting
            if cfg.TEST.BBOX_VOTE.ENABLED and boxes_j.shape[0] > 0:
                boxlist_for_class = boxlist_box_voting(
                    boxlist_for_class,
                    boxlist_for_class_old,
                    cfg.TEST.BBOX_VOTE.VOTE_TH,
                    scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD
                )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
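A toy illustration of the per-class thresholding and column slicing performed in filter_results. The numbers below are made up (num_classes = 3 with class 0 as background, two detections), and only plain tensors are used in place of the BoxList:

import torch

num_classes = 3
# One row per detection; each row holds one 4-vector of box coordinates per class.
boxes = torch.arange(2 * num_classes * 4, dtype=torch.float32).reshape(-1, num_classes * 4)
scores = torch.tensor([[0.01, 0.80, 0.10],
                       [0.02, 0.05, 0.90]])

score_thresh = 0.5
inds_all = scores > score_thresh
for j in range(1, num_classes):              # skip the background column
    inds = inds_all[:, j].nonzero().squeeze(1)
    scores_j = scores[inds, j]
    boxes_j = boxes[inds, j * 4:(j + 1) * 4]  # the 4 columns belonging to class j
    print(j, inds.tolist(), scores_j.tolist(), boxes_j.shape)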
Code Example #4
def get_full_sample_boxes(cls_proposals, grid_proposals):
    full_boxes = []
    for cls_proposal, grid_proposal in zip(cls_proposals, grid_proposals):
        labels = cls_proposal.get_field("labels")
        neg_inds = labels <= 0
        inds = neg_inds.nonzero().squeeze(1)
        if cfg.GRID_RCNN.RESCORE_OPTION.KEEP_RATIO:
            pos_num = grid_proposal.bbox.shape[0]
            neg_num = pos_num * 3
            if neg_num <= inds.shape[0]:
                _ind = torch.randperm(inds.shape[0])[:neg_num]
                inds = inds[_ind]
        box = cat_boxlist((cls_proposal[inds], grid_proposal))
        full_boxes.append(box)
    return full_boxes
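The KEEP_RATIO branch above caps the negatives at three times the number of positive (grid) proposals. A tiny sketch of that subsampling with dummy index tensors:

import torch

inds = torch.arange(20)        # indices of negative proposals
pos_num = 4                    # number of grid (positive) proposals
neg_num = pos_num * 3
if neg_num <= inds.shape[0]:
    _ind = torch.randperm(inds.shape[0])[:neg_num]
    inds = inds[_ind]
print(inds.shape)              # torch.Size([12]); a random 3:1 subset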
Code Example #5
File: grid_rcnn.py  Project: zhubinQAQ/CPM-R-CNN
def select_boxes(proposals, type=None, ind=None):
    if type == 'split':
        assert isinstance(proposals, list)
        thresh = 0**2
        bbox = proposals[0].bbox
        s = (bbox[:, 2] - bbox[:, 0]) * (bbox[:, 3] - bbox[:, 1])
        l_ind = s > thresh
        s_ind = s <= thresh
        return [s_ind, l_ind]
    elif type == 'cat':
        assert isinstance(proposals, tuple)
        assert ind is not None
        proposals, old_proposals = proposals
        proposals[0] = proposals[0][ind[0]]
        old_proposals[0] = old_proposals[0][ind[1]]
        return [cat_boxlist((proposals[0], old_proposals[0]))]
    else:
        raise Exception('error')
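A hedged usage sketch of select_boxes, assuming the function above and a maskrcnn-benchmark-style BoxList are in scope (import paths may differ in CPM-R-CNN). Note that thresh = 0**2 = 0, so 'split' only separates zero-area boxes from the rest; all inputs below are made up:

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList

boxes = BoxList(torch.tensor([[0., 0., 10., 10.],    # area 100
                              [5., 5., 5., 5.]]),     # degenerate, area 0
                (100, 100), mode="xyxy")

s_ind, l_ind = select_boxes([boxes], type='split')
# s_ind = [False, True]  (zero-area), l_ind = [True, False]

old_boxes = BoxList(torch.tensor([[20., 20., 40., 40.]]), (100, 100), mode="xyxy")
merged, = select_boxes(([boxes], [old_boxes]), type='cat',
                       ind=[l_ind, torch.tensor([True])])
print(len(merged))  # 2: the non-degenerate box from `boxes` plus the old proposal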
Code Example #6
    def __call__(self, anchors, objectness, box_regression, targets):
        """
        Arguments:
            anchors (list[BoxList])
            objectness (list[Tensor])
            box_regression (list[Tensor])
            targets (list[BoxList])

        Returns:
            objectness_loss (Tensor)
            box_loss (Tensor)
        """
        anchors = [cat_boxlist(anchors_per_image) for anchors_per_image in anchors]
        labels, regression_targets = self.prepare_targets(anchors, targets)
        sampled_pos_inds, sampled_neg_inds = self.fg_bg_sampler(labels)
        sampled_pos_inds = torch.nonzero(torch.cat(sampled_pos_inds, dim=0)).squeeze(1)
        sampled_neg_inds = torch.nonzero(torch.cat(sampled_neg_inds, dim=0)).squeeze(1)

        sampled_inds = torch.cat([sampled_pos_inds, sampled_neg_inds], dim=0)

        objectness, box_regression = concat_box_prediction_layers(objectness, box_regression)

        objectness = objectness.squeeze()

        labels = torch.cat(labels, dim=0)
        regression_targets = torch.cat(regression_targets, dim=0)

        box_loss = smooth_l1_loss(
            box_regression[sampled_pos_inds],
            regression_targets[sampled_pos_inds],
            beta=cfg.RPN.SMOOTH_L1_BETA,
            reduction="sum"
        ) / (sampled_inds.numel())

        objectness_loss = F.binary_cross_entropy_with_logits(
            objectness[sampled_inds], labels[sampled_inds]
        )

        return objectness_loss, box_loss
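For reference, a minimal sketch of the beta-smoothed L1 used for box_loss above; the project's own smooth_l1_loss may differ in signature. Note that the summed loss is divided by the total number of sampled anchors (positives plus negatives), not by the number of positives alone:

import torch

def smooth_l1(input, target, beta):
    # Quadratic near zero, linear beyond |x| = beta.
    n = torch.abs(input - target)
    return torch.where(n < beta, 0.5 * n ** 2 / beta, n - 0.5 * beta)

pred = torch.tensor([0.2, 1.5])
gt = torch.tensor([0.0, 0.0])
print(smooth_l1(pred, gt, beta=1.0 / 9))  # quadratic for the small error, linear for the large one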
Code Example #7
File: inference.py  Project: zhubinQAQ/CPM-R-CNN
    def add_gt_proposals(self, proposals, targets):
        """
        Arguments:
            proposals: list[BoxList]
            targets: list[BoxList]
        """
        # Get the device we're operating on
        device = proposals[0].bbox.device

        gt_boxes = [target.copy_with_fields([]) for target in targets]

        # later cat of bbox requires all fields to be present for all bbox
        # so we need to add a dummy for objectness that's missing
        for gt_box in gt_boxes:
            gt_box.add_field("objectness",
                             torch.ones(len(gt_box), device=device))

        proposals = [
            cat_boxlist((proposal, gt_box))
            for proposal, gt_box in zip(proposals, gt_boxes)
        ]

        return proposals
Code Example #8
    def forward(self, box_cls_all, box_reg_all, centerness_all, boxes_all):
        device = box_cls_all.device
        boxes_per_image = [len(box) for box in boxes_all]
        cls = box_cls_all.split(boxes_per_image, dim=0)
        reg = box_reg_all.split(boxes_per_image, dim=0)
        center = centerness_all.split(boxes_per_image, dim=0)

        results = []
        for box_cls, box_regression, centerness, boxes in zip(cls, reg, center, boxes_all):
            N, C, H, W = box_cls.shape
            # put in the same format as locations
            box_cls = box_cls.permute(0, 2, 3, 1).reshape(N, -1, self.num_classes).sigmoid()
            box_regression = box_regression.permute(0, 2, 3, 1).reshape(N, -1, 4)
            centerness = centerness.permute(0, 2, 3, 1).reshape(N, -1).sigmoid()

            # multiply the classification scores with centerness scores
            box_cls = box_cls * centerness[:, :, None]
            _boxes = boxes.bbox
            size = boxes.size
            boxes_scores = boxes.get_field("scores")
            results_per_image = [boxes]
            for i in range(N):
                box = _boxes[i]
                boxes_score = boxes_scores[i]
                per_box_cls = box_cls[i]
                per_box_cls_max, per_box_cls_inds = per_box_cls.max(dim=0)

                per_class = torch.range(2, 1 + self.num_classes, dtype=torch.long, device=device)

                per_box_regression = box_regression[i]
                per_box_regression = per_box_regression[per_box_cls_inds]

                x_step = 1.0
                y_step = 1.0
                shifts_x = torch.arange(
                    0, self.m, step=x_step,
                    dtype=torch.float32, device=device
                ) + x_step / 2
                shifts_y = torch.arange(
                    0, self.m, step=y_step,
                    dtype=torch.float32, device=device
                ) + y_step / 2
                shift_y, shift_x = torch.meshgrid(shifts_y, shifts_x)
                shift_x = shift_x.reshape(-1)
                shift_y = shift_y.reshape(-1)
                locations = torch.stack((shift_x, shift_y), dim=1)
                per_locations = locations[per_box_cls_inds]

                _x1 = per_locations[:, 0] - per_box_regression[:, 0]
                _y1 = per_locations[:, 1] - per_box_regression[:, 1]
                _x2 = per_locations[:, 0] + per_box_regression[:, 2]
                _y2 = per_locations[:, 1] + per_box_regression[:, 3]

                _x1 = _x1 / self.m * (box[2] - box[0]) + box[0]
                _y1 = _y1 / self.m * (box[3] - box[1]) + box[1]
                _x2 = _x2 / self.m * (box[2] - box[0]) + box[0]
                _y2 = _y2 / self.m * (box[3] - box[1]) + box[1]

                detections = torch.stack([_x1, _y1, _x2, _y2], dim=-1)

                boxlist = BoxList(detections, size, mode="xyxy")
                boxlist.add_field("labels", per_class)
                boxlist.add_field("scores", torch.sqrt(torch.sqrt(per_box_cls_max) * boxes_score))
                boxlist = boxlist.clip_to_image(remove_empty=False)
                boxlist = remove_small_boxes(boxlist, 0)
                results_per_image.append(boxlist)

            results_per_image = cat_boxlist(results_per_image)
            results.append(results_per_image)

        return results
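A stripped-down sketch of the grid decoding inside the loop above, for a single RoI: build the m x m grid of cell-center locations, apply per-cell (left, top, right, bottom) offsets, then rescale from grid units into the RoI's image coordinates. The values below (m = 4, dummy offsets) are illustrative only:

import torch

m = 4                                        # grid resolution (self.m)
box = torch.tensor([10., 20., 50., 100.])    # RoI in xyxy image coordinates

shifts = torch.arange(0, m, dtype=torch.float32) + 0.5    # cell centers in grid units
shift_y, shift_x = torch.meshgrid(shifts, shifts)
locations = torch.stack((shift_x.reshape(-1), shift_y.reshape(-1)), dim=1)  # (m*m, 2)

reg = torch.ones(m * m, 4)                   # dummy per-cell (l, t, r, b) offsets

x1 = (locations[:, 0] - reg[:, 0]) / m * (box[2] - box[0]) + box[0]
y1 = (locations[:, 1] - reg[:, 1]) / m * (box[3] - box[1]) + box[1]
x2 = (locations[:, 0] + reg[:, 2]) / m * (box[2] - box[0]) + box[0]
y2 = (locations[:, 1] + reg[:, 3]) / m * (box[3] - box[1]) + box[1]
detections = torch.stack([x1, y1, x2, y2], dim=-1)        # (m*m, 4) in image space
print(detections.shape)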