Code example #1
def boxlist_box_voting(top_boxlist,
                       all_boxlist,
                       thresh,
                       scoring_method='ID',
                       beta=1.0,
                       score_field="scores"):
    if thresh <= 0:
        return top_boxlist
    mode = top_boxlist.mode
    top_boxes = top_boxlist.convert("xyxy").bbox.cpu()
    all_boxes = all_boxlist.convert("xyxy").bbox.cpu()
    top_score = top_boxlist.get_field(score_field).cpu()
    all_score = all_boxlist.get_field(score_field).cpu()
    top_dets = np.hstack((top_boxes, top_score[:,
                                               np.newaxis])).astype(np.float32,
                                                                    copy=False)
    all_dets = np.hstack((all_boxes, all_score[:,
                                               np.newaxis])).astype(np.float32,
                                                                    copy=False)
    dets = box_utils.box_voting(top_dets, all_dets, thresh, scoring_method,
                                beta)
    boxlist = BoxList(torch.from_numpy(dets[:, :4]).cuda(),
                      all_boxlist.size,
                      mode="xyxy")
    boxlist.add_field("scores", torch.from_numpy(dets[:, -1]).cuda())
    return boxlist.convert(mode)
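A minimal usage sketch for the voting helper above. It assumes the maskrcnn-benchmark-style BoxList and the boxlist_nms helper that appear elsewhere in these examples; the variable names and threshold values are illustrative only. Box voting runs after NMS, with the NMS survivors as top_boxlist and the full pre-NMS candidate set as all_boxlist, the same pairing used in the filter_results examples further down.

# Hypothetical usage: refine post-NMS boxes by voting with all candidates.
# `detections` is assumed to be a BoxList carrying a "scores" field.
kept = boxlist_nms(detections, 0.5, score_field="scores")
voted = boxlist_box_voting(
    kept,                  # boxes that survived NMS
    detections,            # all candidates that vote on the kept boxes
    thresh=0.8,            # IoU above which a candidate contributes to a kept box
    scoring_method='ID',   # 'ID' keeps the scores of the kept boxes unchanged
)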
Code example #2
def boxlist_soft_nms(boxlist,
                     sigma=0.5,
                     overlap_thresh=0.3,
                     score_thresh=0.001,
                     method='linear',
                     score_field="scores"):
    """
    Performs non-maximum suppression on a boxlist, with scores specified
    in a boxlist field via score_field.

    Arguments:
        boxlist(BoxList)
        nms_thresh (float)
        max_proposals (int): if > 0, then only the top max_proposals are kept
            after non-maximum suppression
        score_field (str)
    """
    if overlap_thresh <= 0:
        return boxlist
    mode = boxlist.mode
    boxlist = boxlist.convert("xyxy")
    boxes = boxlist.bbox.cpu()
    score = boxlist.get_field(score_field).cpu()
    dets = np.hstack((boxes, score[:, np.newaxis])).astype(np.float32,
                                                           copy=False)
    dets, _ = box_utils.soft_nms(dets, sigma, overlap_thresh, score_thresh,
                                 method)
    boxlist = BoxList(torch.from_numpy(dets[:, :4]).cuda(),
                      boxlist.size,
                      mode="xyxy")
    boxlist.add_field("scores", torch.from_numpy(dets[:, -1]).cuda())
    return boxlist.convert(mode)
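A hedged call sketch for the soft-NMS wrapper above; `detections` is a hypothetical BoxList with a "scores" field, and the parameter values are illustrative rather than recommended defaults.

# Decay the scores of overlapping boxes instead of discarding them outright.
softened = boxlist_soft_nms(
    detections,
    sigma=0.5,            # Gaussian decay width (used when method='gaussian')
    overlap_thresh=0.3,   # IoU at which the 'linear' method starts decaying
    score_thresh=0.001,   # drop boxes whose decayed score falls below this
    method='gaussian',
)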
Code example #3
def cat_boxlist(bboxes):
    """
    Concatenates a list of BoxList (having the same image size) into a
    single BoxList

    Arguments:
        bboxes (list[BoxList])
    """
    assert isinstance(bboxes, (list, tuple))
    assert all(isinstance(bbox, BoxList) for bbox in bboxes)

    size = bboxes[0].size
    assert all(bbox.size == size for bbox in bboxes)

    mode = bboxes[0].mode
    assert all(bbox.mode == mode for bbox in bboxes)

    fields = set(bboxes[0].fields())
    assert all(set(bbox.fields()) == fields for bbox in bboxes)

    cat_boxes = BoxList(_cat([bbox.bbox for bbox in bboxes], dim=0), size,
                        mode)

    for field in fields:
        data = _cat([bbox.get_field(field) for bbox in bboxes], dim=0)
        cat_boxes.add_field(field, data)

    return cat_boxes
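A small sketch of how cat_boxlist is typically used, again assuming the BoxList constructor and add_field shown in these examples; the tensors are made-up placeholders. All inputs must share the same image size, mode, and field names, or the assertions above fire.

import torch

# Two hypothetical BoxLists for the same 800x600 image (size is (width, height)).
a = BoxList(torch.tensor([[0., 0., 10., 10.]]), (800, 600), mode="xyxy")
a.add_field("scores", torch.tensor([0.9]))
b = BoxList(torch.tensor([[5., 5., 20., 20.]]), (800, 600), mode="xyxy")
b.add_field("scores", torch.tensor([0.4]))

merged = cat_boxlist([a, b])   # one BoxList with both boxes and both scores
print(len(merged), merged.get_field("scores"))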
Code example #4
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        if len(anno) > 0:
            if 'iscrowd' in anno[0]:
                anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        if 'segm' in self.ann_types:
            masks = [obj["segmentation"] for obj in anno]
            masks = SegmentationMask(masks, img.size, mode='poly')
            target.add_field("masks", masks)

        if 'hier' in self.ann_types:
            if anno and "hier" in anno[0]:
                hier = [obj["hier"] for obj in anno]
                hier = Hier(hier, img.size)
                target.add_field("hier", hier)

        target = target.clip_to_image(remove_empty=True)

        if self._transforms is not None:
            img, target = self._transforms(img, target)

        return img, target, idx
Code example #5
    def forward(self, x, boxes):
        """
        Arguments:
            x (Tensor): the mask logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field mask
        """
        mask_prob = x.sigmoid()

        # select masks corresponding to the predicted classes
        num_masks = x.shape[0]
        labels = [bbox.get_field("labels") for bbox in boxes]
        labels = torch.cat(labels)
        index = torch.arange(num_masks, device=labels.device)
        mask_prob = mask_prob[index, labels][:, None]

        boxes_per_image = [len(box) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)

        results = []
        for prob, box in zip(mask_prob, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox_scores = bbox.get_field("scores")
            bbox.add_field("mask", prob.cpu().numpy())
            bbox.add_field("mask_scores", bbox_scores.cpu().numpy())
            results.append(bbox)

        return results
Code example #6
    def forward(self, x, boxes):
        """
        Arguments:
            x (Tensor): the mask logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field parsing
        """
        parsing_prob = x
        parsing_prob = F.softmax(parsing_prob, dim=1)

        boxes_per_image = [len(box) for box in boxes]
        parsing_prob = parsing_prob.split(boxes_per_image, dim=0)

        results = []
        for prob, box in zip(parsing_prob, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")

            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox_scores = bbox.get_field("scores")
            bbox.add_field("parsing", prob.cpu().numpy())
            bbox.add_field("parsing_scores", bbox_scores.cpu().numpy())
            results.append(bbox)

        return results
Code example #7
    def copy_with_fields(self, fields, skip_missing=False):  # returns a new BoxList directly
        boxlist = BoxList(self.bbox, self.size, self.mode)
        if not isinstance(fields, (list, tuple)):
            fields = [fields]
        for field in fields:
            if self.has_field(field):
                boxlist.add_field(field, self.get_field(field))
            elif not skip_missing:
                raise KeyError("Field '{}' not found in {}".format(
                    field, self))
        return boxlist
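Two brief illustrative calls, under the same BoxList assumptions; `detections` and the field names are hypothetical. With skip_missing=True the copy silently drops fields the source BoxList does not carry, otherwise a KeyError is raised.

# Copy the boxes together with only the fields we care about.
slim = detections.copy_with_fields(["scores", "labels"])
# Tolerate a field that may not exist on every BoxList.
slim = detections.copy_with_fields(["scores", "masks"], skip_missing=True)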
Code example #8
File: inference.py Project: soeaver/Hier-R-CNN
    def forward_for_single_feature_map(self, anchors, objectness,
                                       box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = objectness.device
        N, A, H, W = objectness.shape

        # put in the same format as anchors
        objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
        objectness = objectness.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n,
                                               dim=1,
                                               sorted=True)

        batch_idx = torch.arange(N, device=device)[:, None]
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        proposals = self.box_coder.decode(box_regression.view(-1, 4),
                                          concat_anchors.view(-1, 4))

        proposals = proposals.view(N, -1, 4)

        result = []
        for proposal, score, im_shape in zip(proposals, objectness,
                                             image_shapes):
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result
Code example #9
File: coco.py Project: yf19970118/OPLD-Pytorch
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        if len(anno) > 0:
            if 'iscrowd' in anno[0]:
                anno = [obj for obj in anno if obj["iscrowd"] == 0]

        if 'quad' in self.ann_types:
            quad = [obj["segmentation"][0] for obj in anno]
            quad = torch.as_tensor(quad)
            target = QuadBoxes(quad, img.size, mode="xyxy")
        else:
            boxes = [obj["bbox"] for obj in anno]
            boxes = torch.as_tensor(boxes).reshape(-1,
                                                   4)  # guard against no boxes
            target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        # target = target.clip_to_image(remove_empty=True)

        if self._transforms is not None:
            img, target = self._transforms(img, target)

        return img, target, idx
Code example #10
File: inference.py Project: soeaver/RP-R-CNN
    def forward(self, boxes, pred_parsingiou):
        num_parsings = pred_parsingiou.shape[0]
        index = torch.arange(num_parsings, device=pred_parsingiou.device)
        parsingious = pred_parsingiou[index, 0]
        parsingious = [parsingious]
        results = []
        for parsingiou, box in zip(parsingious, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox_scores = bbox.get_field("scores")
            parsing_scores = torch.sqrt(bbox_scores * parsingiou)
            bbox.add_field("parsing_scores", parsing_scores.cpu().numpy())
            prob = bbox.get_field("parsing")
            bbox.add_field("parsing", prob.cpu().numpy())
            results.append(bbox)

        return results
Code example #11
    def forward(self, boxes, pred_maskiou, labels):
        num_masks = pred_maskiou.shape[0]
        index = torch.arange(num_masks, device=labels.device)
        maskious = pred_maskiou[index, labels]
        maskious = [maskious]
        results = []
        for maskiou, box in zip(maskious, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox_scores = bbox.get_field("scores")
            mask_scores = bbox_scores * maskiou
            bbox.add_field("mask_scores", mask_scores.cpu().numpy())
            prob = bbox.get_field("mask")
            bbox.add_field("mask", prob.cpu().numpy())
            results.append(bbox)

        return results
Code example #12
File: inference.py Project: soeaver/Hier-R-CNN
    def prepare_boxlist(self, boxes, scores, image_shape):
        """
        Returns a BoxList from `boxes` and adds probability scores as an
        extra field.

        `boxes` has shape (#detections, 4 * #classes), where each row represents
        a list of predicted bounding boxes for each of the object classes in the
        dataset (including the background class). The detections in each row
        originate from the same object proposal.

        `scores` has shape (#detections, #classes), where each row represents a
        list of object detection confidence scores for each of the object classes
        in the dataset (including the background class). `scores[i, j]` corresponds
        to the box at `boxes[i, j * 4:(j + 1) * 4]`.
        """
        boxes = boxes.reshape(-1, 4)
        scores = scores.reshape(-1)
        boxlist = BoxList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        return boxlist
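The flattened layout produced here is undone again in the filter_results example below, which reshapes back to one column block per class. A minimal round-trip sketch with dummy tensors, assuming a post-processor instance named post_processor (hypothetical) and 3 classes including background:

import torch

num_classes = 3                            # background + 2 object classes (assumed)
boxes = torch.rand(5, 4 * num_classes)     # (#detections, 4 * #classes)
scores = torch.rand(5, num_classes)        # (#detections, #classes)

boxlist = post_processor.prepare_boxlist(boxes, scores, (800, 600))
assert len(boxlist) == 5 * num_classes     # one row per (detection, class) pair

# filter_results-style unpacking back to per-class columns:
per_class_boxes = boxlist.bbox.reshape(-1, num_classes * 4)
per_class_scores = boxlist.get_field("scores").reshape(-1, num_classes)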
Code example #13
    def get_det_result(self, locations, box_cls, box_regression, boxes):
        N = len(box_cls)
        h, w = self.resolution

        candidate_inds = box_cls > self.pre_nms_thresh
        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)

        _boxes = boxes.bbox
        size = boxes.size
        boxes_scores = boxes.get_field("scores")

        results = []
        for i in range(N):
            box = _boxes[i]
            boxes_score = boxes_scores[i]
            per_box_cls = box_cls[i]
            per_candidate_inds = candidate_inds[i]
            per_box_cls = per_box_cls[per_candidate_inds]

            per_candidate_nonzeros = per_candidate_inds.nonzero()
            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1] + 2

            per_box_regression = box_regression[i]
            per_box_regression = per_box_regression[per_box_loc]
            per_locations = locations[per_box_loc]

            per_pre_nms_top_n = pre_nms_top_n[i]

            if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
                per_box_cls, top_k_indices = per_box_cls.topk(
                    per_pre_nms_top_n, sorted=False)
                per_class = per_class[top_k_indices]
                per_box_regression = per_box_regression[top_k_indices]
                per_locations = per_locations[top_k_indices]

            _x1 = per_locations[:, 0] - per_box_regression[:, 0]
            _y1 = per_locations[:, 1] - per_box_regression[:, 1]
            _x2 = per_locations[:, 0] + per_box_regression[:, 2]
            _y2 = per_locations[:, 1] + per_box_regression[:, 3]

            _x1 = _x1 / w * (box[2] - box[0]) + box[0]
            _y1 = _y1 / h * (box[3] - box[1]) + box[1]
            _x2 = _x2 / w * (box[2] - box[0]) + box[0]
            _y2 = _y2 / h * (box[3] - box[1]) + box[1]

            detections = torch.stack([_x1, _y1, _x2, _y2], dim=-1)

            boxlist = BoxList(detections, size, mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field(
                "scores", torch.sqrt(torch.sqrt(per_box_cls) * boxes_score))
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)
        results = cat_boxlist(results)

        return results
Code example #14
    def forward(self, uv_logits, boxes):
        """
        Arguments:
            uv_logits (List): the uv logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field uv
        """
        UV_pred_Ann, UV_pred_Index, UV_pred_U, UV_pred_V = uv_logits

        boxes_per_image = [len(box) for box in boxes]
        UV_pred_Ann = UV_pred_Ann.split(boxes_per_image, dim=0)
        UV_pred_Index = UV_pred_Index.split(boxes_per_image, dim=0)
        UV_pred_U = UV_pred_U.split(boxes_per_image, dim=0)
        UV_pred_V = UV_pred_V.split(boxes_per_image, dim=0)

        results = []
        for Ann, Index, U, V, box in zip(UV_pred_Ann, UV_pred_Index, UV_pred_U,
                                         UV_pred_V, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox.add_field("uv", [
                Ann.cpu().numpy(),
                Index.cpu().numpy(),
                U.cpu().numpy(),
                V.cpu().numpy()
            ])
            results.append(bbox)

        return results
Code example #15
File: inference.py Project: soeaver/Hier-R-CNN
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4:(j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class_old = boxlist_for_class
            if cfg.TEST.SOFT_NMS.ENABLED:
                boxlist_for_class = boxlist_soft_nms(
                    boxlist_for_class,
                    sigma=cfg.TEST.SOFT_NMS.SIGMA,
                    overlap_thresh=self.nms,
                    score_thresh=0.0001,
                    method=cfg.TEST.SOFT_NMS.METHOD)
            else:
                boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms)
            # Refine the post-NMS boxes using bounding-box voting
            if cfg.TEST.BBOX_VOTE.ENABLED and boxes_j.shape[0] > 0:
                boxlist_for_class = boxlist_box_voting(
                    boxlist_for_class,
                    boxlist_for_class_old,
                    cfg.TEST.BBOX_VOTE.VOTE_TH,
                    scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD)
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels, ), j, dtype=torch.int64,
                           device=device))
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(),
                number_of_detections - self.detections_per_img + 1)
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
Code example #16
    def forward(self, image_list, feature_maps):
        grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
        anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
        anchors = []
        for i, (image_height,
                image_width) in enumerate(image_list.image_sizes):  # per image
            anchors_in_image = []
            for anchors_per_feature_map in anchors_over_all_feature_maps:
                boxlist = BoxList(anchors_per_feature_map,
                                  (image_width, image_height),
                                  mode="xyxy")
                self.add_visibility_to(boxlist)
                anchors_in_image.append(boxlist)
            anchors.append(anchors_in_image)
        return anchors
Code example #17
File: test.py Project: yf19970118/OPLD-Pytorch
def filter_results(boxlist, nms_thresh=0.5, detections_per_img=100):
    num_classes = cfg.MODEL.NUM_CLASSES
    if not cfg.TEST.SOFT_NMS.ENABLED and not cfg.TEST.BBOX_VOTE.ENABLED:
        result = boxlist_ml_nms(boxlist, nms_thresh)
    else:
        boxes = boxlist.bbox
        scores = boxlist.get_field("scores")
        labels = boxlist.get_field("labels")
        result = []
        for j in range(1, num_classes):  # skip the background
            inds = (labels == j).nonzero().view(-1)
            scores_j = scores[inds]
            boxes_j = boxes[inds, :].view(-1, 4)
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class_old = boxlist_for_class
            if cfg.TEST.SOFT_NMS.ENABLED:
                boxlist_for_class = boxlist_soft_nms(
                    boxlist_for_class,
                    sigma=cfg.TEST.SOFT_NMS.SIGMA,
                    overlap_thresh=nms_thresh,
                    score_thresh=0.0001,
                    method=cfg.TEST.SOFT_NMS.METHOD)
            else:
                boxlist_for_class = boxlist_nms(boxlist_for_class, nms_thresh)
            # Refine the post-NMS boxes using bounding-box voting
            if cfg.TEST.BBOX_VOTE.ENABLED and boxes_j.shape[0] > 0:
                boxlist_for_class = boxlist_box_voting(
                    boxlist_for_class,
                    boxlist_for_class_old,
                    cfg.TEST.BBOX_VOTE.VOTE_TH,
                    scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD)
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels, ),
                           j,
                           dtype=torch.int64,
                           device=scores.device))
            result.append(boxlist_for_class)
        result = cat_boxlist(result)

    # Limit to max_per_image detections **over all classes**
    number_of_detections = len(result)
    if number_of_detections > detections_per_img > 0:
        cls_scores = result.get_field("scores")
        image_thresh, _ = torch.kthvalue(
            cls_scores.cpu(), number_of_detections - detections_per_img + 1)
        keep = cls_scores >= image_thresh.item()
        keep = torch.nonzero(keep).squeeze(1)
        result = result[keep]
    return result
Code example #18
    def forward(self, x, boxes):
        boxes_per_image = [len(box) for box in boxes]
        kpt_prob = x.split(boxes_per_image, dim=0)

        results = []
        for prob, box in zip(kpt_prob, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox.add_field("keypoints", prob.cpu().numpy())
            results.append(bbox)

        return results
Code example #19
def filter_results(boxlist):
    num_classes = cfg.MODEL.NUM_CLASSES
    if not cfg.TEST.SOFT_NMS.ENABLED and not cfg.TEST.BBOX_VOTE.ENABLED:
        # multiclass nms
        scores = boxlist.get_field("scores")
        device = scores.device
        num_repeat = int(boxlist.bbox.shape[0] / num_classes)
        labels = np.tile(np.arange(num_classes), num_repeat)
        boxlist.add_field(
            "labels",
            torch.from_numpy(labels).to(dtype=torch.int64, device=device))
        fg_labels = torch.from_numpy(
            (np.arange(boxlist.bbox.shape[0]) % num_classes !=
             0).astype(int)).to(dtype=torch.uint8, device=device)
        _scores = scores > cfg.FAST_RCNN.SCORE_THRESH
        inds_all = _scores & fg_labels
        result = boxlist_ml_nms(boxlist[inds_all], cfg.FAST_RCNN.NMS)
    else:
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)
        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > cfg.FAST_RCNN.SCORE_THRESH
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4:(j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class_old = boxlist_for_class
            if cfg.TEST.SOFT_NMS.ENABLED:
                boxlist_for_class = boxlist_soft_nms(
                    boxlist_for_class,
                    sigma=cfg.TEST.SOFT_NMS.SIGMA,
                    overlap_thresh=cfg.FAST_RCNN.NMS,
                    score_thresh=0.0001,
                    method=cfg.TEST.SOFT_NMS.METHOD)
            else:
                boxlist_for_class = boxlist_nms(boxlist_for_class,
                                                cfg.FAST_RCNN.NMS)
            # Refine the post-NMS boxes using bounding-box voting
            if cfg.TEST.BBOX_VOTE.ENABLED and boxes_j.shape[0] > 0:
                boxlist_for_class = boxlist_box_voting(
                    boxlist_for_class,
                    boxlist_for_class_old,
                    cfg.TEST.BBOX_VOTE.VOTE_TH,
                    scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD)
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels, ), j, dtype=torch.int64,
                           device=device))
            result.append(boxlist_for_class)

        result = cat_boxlist(result)

    number_of_detections = len(result)

    # Limit to max_per_image detections **over all classes**
    if number_of_detections > cfg.FAST_RCNN.DETECTIONS_PER_IMG > 0:
        cls_scores = result.get_field("scores")
        image_thresh, _ = torch.kthvalue(
            cls_scores.cpu(),
            number_of_detections - cfg.FAST_RCNN.DETECTIONS_PER_IMG + 1)
        keep = cls_scores >= image_thresh.item()
        keep = torch.nonzero(keep).squeeze(1)
        result = result[keep]
    return result
Code example #20
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        if len(anno) > 0:
            if 'iscrowd' in anno[0]:
                anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        if 'segm' in self.ann_types:
            masks = [obj["segmentation"] for obj in anno]
            masks = SegmentationMask(masks, img.size, mode='poly')
            target.add_field("masks", masks)

        if 'semseg' in self.ann_types:
            if 'parsing' in self.ann_types:
                semsegs_anno = get_semseg(
                    self.root,
                    self.coco.loadImgs(self.ids[idx])[0]['file_name'])
                semsegs = SemanticSegmentation(semsegs_anno,
                                               classes,
                                               img.size,
                                               mode='pic')
            else:
                semsegs_anno = [obj["segmentation"] for obj in anno]
                semsegs = SemanticSegmentation(semsegs_anno,
                                               classes,
                                               img.size,
                                               mode='poly')
            target.add_field("semsegs", semsegs)

        if 'parsing' in self.ann_types:
            parsing = [get_parsing(self.root, obj["parsing"]) for obj in anno]
            parsing = Parsing(parsing, img.size)
            target.add_field("parsing", parsing)

        target = target.clip_to_image(remove_empty=True)

        if self._transforms is not None:
            img, target = self._transforms(img, target)

        return img, target, idx
Code example #21
File: loss.py Project: soeaver/Hier-R-CNN
    def prepare_targets(self, proposals, targets):
        positive_proposals = []
        for proposals_per_image, targets_per_image in zip(proposals, targets):
            matched_targets = self.match_targets_to_proposals(
                proposals_per_image, targets_per_image)
            matched_idxs = matched_targets.get_field("matched_idxs")

            labels_per_image = matched_targets.get_field("labels")
            labels_per_image = labels_per_image.to(dtype=torch.int64)

            neg_inds = matched_idxs == Matcher.BELOW_LOW_THRESHOLD
            labels_per_image[neg_inds] = 0

            hier_per_image = matched_targets.get_field("hier")
            within_box = center_within_box(hier_per_image.hier,
                                           matched_targets.bbox)
            vis_hier = hier_per_image.hier[..., 4] > 0
            is_visible = (within_box & vis_hier).sum(1) > 0

            if self.limit_type != 'none':
                if self.limit_type == 'hand_and_foot':
                    has_part = (vis_hier[:, 2:].sum(1) ==
                                (within_box & vis_hier)[:, 2:].sum(1))
                elif self.limit_type == 'all':
                    has_part = (vis_hier[:, 0:].sum(1) ==
                                (within_box & vis_hier)[:, 0:].sum(1))
                else:
                    raise Exception("Limit type not supported: ",
                                    self.limit_type)
                is_visible = has_part & is_visible

            labels_per_image[~is_visible] = -1

            positive_inds = torch.nonzero(labels_per_image > 0).squeeze(1)

            if self.roi_size_per_img > 0:
                if self.roi_size_per_img < positive_inds.shape[0]:
                    _inds = torch.randperm(
                        positive_inds.shape[0])[:self.roi_size_per_img]
                    positive_inds = positive_inds[_inds]

            proposals_per_image = proposals_per_image[positive_inds]
            hier_per_image = hier_per_image[positive_inds]

            hier_gt_per_image = targets_per_image.get_field("hier")
            hier_gt_parts = hier_gt_per_image.hier[:, 2:]
            vis_hier_parts = hier_gt_parts[..., 4].sum(1) > 1
            parts_nonzeros = vis_hier_parts.nonzero()[:, 0]

            if parts_nonzeros.shape[0] > 0 and self.roi_size_per_img > 0:
                gt_parts_batch_size = self.roi_size_per_img - positive_inds.shape[
                    0]
                if gt_parts_batch_size < parts_nonzeros.shape[0]:
                    _inds = torch.randperm(
                        parts_nonzeros.shape[0])[:gt_parts_batch_size]
                    parts_nonzeros = parts_nonzeros[_inds]

            if parts_nonzeros.shape[0] > 0:
                hier_gt_parts = hier_gt_parts[parts_nonzeros]
                parts_boxes = []
                for i in range(parts_nonzeros.shape[0]):
                    hier_gt_part = hier_gt_parts[i, (
                        hier_gt_parts[i, :, 4] > 0).nonzero()[:, 0], :4]
                    x1 = hier_gt_part[:, 0].min()
                    y1 = hier_gt_part[:, 1].min()
                    x2 = hier_gt_part[:, 2].max()
                    y2 = hier_gt_part[:, 3].max()
                    parts_boxes.append(torch.stack([x1, y1, x2, y2], dim=0))
                parts_boxes = torch.stack(parts_boxes, dim=0)
                parts_hier = hier_gt_per_image[parts_nonzeros]

                boxes = torch.cat([proposals_per_image.bbox, parts_boxes],
                                  dim=0)
                hier = torch.cat([hier_per_image.hier, parts_hier.hier], dim=0)

                proposals_per_image = BoxList(boxes,
                                              proposals_per_image.size,
                                              mode=proposals_per_image.mode)
                hier_per_image = Hier(hier, proposals_per_image.size)

            if len(proposals_per_image) == 0:
                hier_gt_per_image = targets_per_image.get_field("hier")
                vis_hier_parts = hier_gt_per_image.hier[..., 4].sum(1) > 0
                parts_nonzeros = vis_hier_parts.nonzero()[:, 0][:1]
                proposals_per_image = BoxList(
                    targets_per_image[parts_nonzeros].bbox,
                    targets_per_image.size,
                    mode=targets_per_image.mode)
                hier_per_image = hier_gt_per_image[parts_nonzeros]

            proposals_per_image.add_field("hier_target", hier_per_image)
            positive_proposals.append(proposals_per_image)
        return positive_proposals
Code example #22
File: coco.py Project: yonghoonkwon/Parsing-R-CNN
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        if len(anno) > 0:
            if 'iscrowd' in anno[0]:
                anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        if 'segm' in self.ann_types:
            masks = [obj["segmentation"] for obj in anno]
            masks = SegmentationMask(masks, img.size, mode='poly')
            target.add_field("masks", masks)

        if 'keypoints' in self.ann_types:
            if anno and "keypoints" in anno[0]:
                keypoints = [obj["keypoints"] for obj in anno]
                keypoints = PersonKeypoints(keypoints, img.size)
                target.add_field("keypoints", keypoints)

        if 'parsing' in self.ann_types:
            parsing = [get_parsing(self.root, obj["parsing"]) for obj in anno]
            parsing = Parsing(parsing, img.size)
            target.add_field("parsing", parsing)

        if 'uv' in self.ann_types:
            uv_ann = []
            for anno_uv in anno:
                if "dp_x" in anno_uv:
                    uv_ann.append([
                        anno_uv['dp_x'], anno_uv['dp_y'], anno_uv['dp_I'],
                        anno_uv['dp_U'], anno_uv['dp_V'], anno_uv['dp_masks']
                    ])
                else:
                    uv_ann.append([])
            uv = DenseposeUVs(uv_ann, img.size)
            target.add_field("uv", uv)

        target = target.clip_to_image(remove_empty=True)

        if self._transforms is not None:
            img, target = self._transforms(img, target)

        return img, target, idx
Code example #23
    def compute(self, limit, area_range):
        gt_overlaps = []
        num_pos = 0
        for image_id, prediction in enumerate(self.cocoDt):
            original_id = self.cocoGt.id_to_img_map[image_id]

            img_info = self.cocoGt.get_img_info(image_id)
            image_width = img_info["width"]
            image_height = img_info["height"]
            prediction = prediction.resize((image_width, image_height))

            # sort predictions in descending order
            inds = prediction.get_field("objectness").sort(descending=True)[1]
            prediction = prediction[inds]

            ann_ids = self.cocoGt.coco.getAnnIds(imgIds=original_id)
            anno = self.cocoGt.coco.loadAnns(ann_ids)
            gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
            gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4)  # guard against no boxes
            gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert("xyxy")
            gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])

            if len(gt_boxes) == 0:
                continue

            valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
            gt_boxes = gt_boxes[valid_gt_inds]

            num_pos += len(gt_boxes)

            if len(gt_boxes) == 0:
                continue

            if len(prediction) == 0:
                continue

            if limit is not None and len(prediction) > limit:
                prediction = prediction[:limit]

            overlaps = boxlist_iou(prediction, gt_boxes)
            _gt_overlaps = torch.zeros(len(gt_boxes))
            for j in range(min(len(prediction), len(gt_boxes))):
                # find which proposal box maximally covers each gt box
                # and get the iou amount of coverage for each gt box
                max_overlaps, argmax_overlaps = overlaps.max(dim=0)

                # find which gt box is 'best' covered (i.e. 'best' = most iou)
                gt_ovr, gt_ind = max_overlaps.max(dim=0)
                assert gt_ovr >= 0
                # find the proposal box that covers the best covered gt box
                box_ind = argmax_overlaps[gt_ind]
                # record the iou coverage of this gt box
                _gt_overlaps[j] = overlaps[box_ind, gt_ind]
                assert _gt_overlaps[j] == gt_ovr
                # mark the proposal box and the gt box as used
                overlaps[box_ind, :] = -1
                overlaps[:, gt_ind] = -1

            # append recorded iou coverage level
            gt_overlaps.append(_gt_overlaps)
        gt_overlaps = torch.cat(gt_overlaps, dim=0)
        gt_overlaps, _ = torch.sort(gt_overlaps)

        if self.thresholds is None:
            step = 0.05
            self.thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
        recalls = torch.zeros_like(self.thresholds)
        # compute recall for each iou threshold
        for i, t in enumerate(self.thresholds):
            recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
        # ar = 2 * np.trapz(recalls, thresholds)
        ar = recalls.mean()
        return {"ar": ar, "recalls": recalls, "thresholds": self.thresholds,
                "gt_overlaps": gt_overlaps, "num_pos": num_pos}
Code example #24
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            if not cfg.TEST.SOFT_NMS.ENABLED and not cfg.TEST.BBOX_VOTE.ENABLED:
                # multiclass nms
                result = boxlist_ml_nms(boxlists[i], self.nms_thresh)
            else:
                scores = boxlists[i].get_field("scores")
                labels = boxlists[i].get_field("labels")
                boxes = boxlists[i].bbox
                boxlist = boxlists[i]
                result = []
                # skip the background
                for j in range(2, self.num_classes + 1):
                    inds = (labels == j).nonzero().view(-1)

                    scores_j = scores[inds]
                    boxes_j = boxes[inds, :].view(-1, 4)
                    boxlist_for_class = BoxList(boxes_j,
                                                boxlist.size,
                                                mode="xyxy")
                    boxlist_for_class.add_field("scores", scores_j)
                    boxlist_for_class_old = boxlist_for_class
                    if cfg.TEST.SOFT_NMS.ENABLED:
                        boxlist_for_class = boxlist_soft_nms(
                            boxlist_for_class,
                            sigma=cfg.TEST.SOFT_NMS.SIGMA,
                            overlap_thresh=self.nms_thresh,
                            score_thresh=0.0001,
                            method=cfg.TEST.SOFT_NMS.METHOD)
                    else:
                        boxlist_for_class = boxlist_nms(boxlist_for_class,
                                                        self.nms_thresh,
                                                        score_field="scores")
                    # Refine the post-NMS boxes using bounding-box voting
                    if cfg.TEST.BBOX_VOTE.ENABLED and boxes_j.shape[0] > 0:
                        boxlist_for_class = boxlist_box_voting(
                            boxlist_for_class,
                            boxlist_for_class_old,
                            cfg.TEST.BBOX_VOTE.VOTE_TH,
                            scoring_method=cfg.TEST.BBOX_VOTE.SCORING_METHOD)
                    num_labels = len(boxlist_for_class)
                    boxlist_for_class.add_field(
                        "labels",
                        torch.full((num_labels, ),
                                   j,
                                   dtype=torch.int64,
                                   device=scores.device))
                    result.append(boxlist_for_class)

                result = cat_boxlist(result)

            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1)
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results