Example #1
    def __getitem__(self, idx):
        img, anno = super(ObstacleCOCOSet, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        # masks = [obj["segmentation"] for obj in anno]
        # masks = SegmentationMask(masks, img.size, mode='poly')
        # target.add_field("masks", masks)

        # if anno and "keypoints" in anno[0]:
        #     keypoints = [obj["keypoints"] for obj in anno]
        #     keypoints = PersonKeypoints(keypoints, img.size)
        #     target.add_field("keypoints", keypoints)

        target = target.clip_to_image(remove_empty=True)

        if self._transforms is not None:
            img, target = self._transforms(img, target)

        return img, target, idx
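
A minimal, self-contained sketch of the same xywh-to-xyxy conversion and "labels" field used in the __getitem__ above (assuming maskrcnn_benchmark is installed; the fcos_core fork used by some later examples exposes the same BoxList class):

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList

# one COCO-style box in [x, y, w, h] order; the image size is given as (width, height)
boxes_xywh = torch.tensor([[10.0, 20.0, 30.0, 40.0]])
target = BoxList(boxes_xywh, (640, 480), mode="xywh").convert("xyxy")
target.add_field("labels", torch.tensor([1]))
target = target.clip_to_image(remove_empty=True)
print(target.bbox)                 # corners in xyxy order
print(target.get_field("labels"))  # tensor([1])
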
Example #2
    def get_groundtruth(self, index):
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        anno = self._preprocess_annotation(anno)

        height, width = anno["im_info"]
        target = BoxList(anno["boxes"], (width, height), mode="xyxy")
        target.add_field("labels", anno["labels"])
        # target.add_field("difficult", anno["difficult"])
        return target
Example #3
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]  # turn the label entries into a plain list, dropping crowd annotations

        boxes = [obj["bbox"] for obj in anno]  # ground-truth boxes of the objects in the image, as [x, y, w, h]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes; yields an n x 4 tensor, n = number of ground-truth boxes, 4 = (x, y, w, h)
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")  # wrap the boxes in the corresponding BoxList structure

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes) #target.extra_fields["labels"] = classes

        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size, mode='poly')
        target.add_field("masks", masks)

        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size)
            target.add_field("keypoints", keypoints)

        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx
Example #4
    def __getitem__(self, idx):
        # from sampled idx to original idx
        # self.sampled_index = [8, 30, 45, ... ] # 15 frames per video
        idx = self.sampled_index[idx]

        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size, mode='poly')
        target.add_field("masks", masks)

        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size)
            target.add_field("keypoints", keypoints)

        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx
Example #5
def im_detect_bbox_aug_vote(arguements, model, images, device):
    # Collect detections computed under different transformations
    boxlists_ts = []
    for _ in range(len(images)):
        boxlists_ts.append([])

    def add_preds_t(boxlists_t):
        for i, boxlist_t in enumerate(boxlists_t):
            if len(boxlists_ts[i]) == 0:
                # The first one comes from the identity transform; no need to resize the boxlist
                boxlists_ts[i].append(boxlist_t)
            else:
                # Resize the boxlist to match the size of the first one
                boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))

    # Compute detections for the original image (identity transform)
    boxlists_i = im_detect_bbox(arguements, model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device)
    add_preds_t(boxlists_i)

    # Perform detection on the horizontally flipped image
    if cfg.TEST.BBOX_AUG.H_FLIP:
        boxlists_hf = im_detect_bbox_hflip(arguements, model, images, cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MAX_SIZE_TEST, device)
        add_preds_t(boxlists_hf)

    for idx, scale in enumerate(cfg.TEST.BBOX_AUG.SCALES):
        max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
        min_range = cfg.TEST.BBOX_AUG.SCALE_RANGES[idx][0]
        max_range = cfg.TEST.BBOX_AUG.SCALE_RANGES[idx][1]
        if scale < 800:
            max_size = cfg.INPUT.MAX_SIZE_TEST

        boxlists_scl = im_detect_bbox_scale(arguements, model, images, scale, max_size, device)
        boxlists_scl = remove_boxes(boxlists_scl, min_range, max_range)
        add_preds_t(boxlists_scl)

        if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
            boxlists_scl_hf = im_detect_bbox_scale(arguements, model, images, scale, max_size, device, hflip=True)
            boxlists_scl_hf = remove_boxes(boxlists_scl_hf, min_range, max_range)
            add_preds_t(boxlists_scl_hf)

    # Merge boxlists detected by different bbox aug params
    boxlists = []
    for _, boxlist_ts in enumerate(boxlists_ts):
        bbox = torch.cat([boxlist_t.bbox for boxlist_t in boxlist_ts])
        scores = torch.cat(
            [boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
        labels = torch.cat(
            [boxlist_t.get_field('labels') for boxlist_t in boxlist_ts])
        boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
        boxlist.add_field('scores', scores)
        boxlist.add_field('labels', labels)
        boxlists.append(boxlist)
    results = merge_result_from_multi_scales(boxlists, cfg.TEST.BBOX_AUG.MERGE_TYPE, cfg.TEST.BBOX_AUG.VOTE_TH)
    return results
Example #6
    def forward_for_single_feature_map(self, anchors, objectness,
                                       box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = objectness.device
        N, A, H, W = objectness.shape

        # put in the same format as anchors
        objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
        objectness = objectness.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n,
                                               dim=1,
                                               sorted=True)

        batch_idx = torch.arange(N, device=device)[:, None]
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        proposals = self.box_coder.decode(box_regression.view(-1, 4),
                                          concat_anchors.view(-1, 4))

        proposals = proposals.view(N, -1, 4)

        result = []
        for proposal, score, im_shape in zip(proposals, objectness,
                                             image_shapes):
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result
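
A hedged, stand-alone sketch of the per-image post-processing chain used in the loop above (clip, drop tiny boxes, NMS), assuming maskrcnn_benchmark and its compiled ops are available:

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_nms, remove_small_boxes

proposals = torch.tensor([[0.0, 0.0, 50.0, 50.0],
                          [1.0, 1.0, 49.0, 49.0],         # heavy overlap with the first box
                          [100.0, 100.0, 100.5, 100.5]])  # tiny box, dropped by remove_small_boxes
objectness = torch.tensor([0.9, 0.8, 0.7])

boxlist = BoxList(proposals, (200, 200), mode="xyxy")
boxlist.add_field("objectness", objectness)
boxlist = boxlist.clip_to_image(remove_empty=False)
boxlist = remove_small_boxes(boxlist, min_size=2)
boxlist = boxlist_nms(boxlist, nms_thresh=0.7, max_proposals=100, score_field="objectness")
print(len(boxlist), boxlist.get_field("objectness"))
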
Example #7
    def get_pos_proposal_indexes(self, locations, box_regression,
                                 matched_idxes, targets):
        locations = torch.cat(locations, dim=0)
        pos_indexes_for_targets = []
        for im in range(len(targets)):
            pos_indexes_for_targets_per_im = []
            box_regression_im = [
                box_regression[l][im].detach().view(4, -1).transpose(
                    0, 1).contiguous() * self.fpn_strides[l]
                for l in range(len(box_regression))
            ]
            box_regression_im = torch.cat(box_regression_im, dim=0)
            for t_id in range(len(targets[im])):
                valid = matched_idxes[im] == t_id
                if valid.sum() == 0:
                    pos_indexes_for_targets_per_im.append(valid.new_tensor([]))
                    continue
                valid_location = locations[valid]
                valid_regression = box_regression_im[valid]
                detections = torch.stack([
                    valid_location[:, 0] - valid_regression[:, 0],
                    valid_location[:, 1] - valid_regression[:, 1],
                    valid_location[:, 0] + valid_regression[:, 2],
                    valid_location[:, 1] + valid_regression[:, 3],
                ],
                                         dim=1)
                detect_boxlist = BoxList(detections,
                                         targets[im].size,
                                         mode="xyxy")
                target_boxlist = BoxList(targets[im].bbox[t_id:t_id + 1],
                                         targets[im].size,
                                         mode="xyxy")
                match_quality_matrix = boxlist_iou(detect_boxlist,
                                                   target_boxlist)

                pos_labels_per_target = torch.zeros_like(valid)
                iou_in_target = match_quality_matrix[:, 0]
                if iou_in_target.max() > self.sample_pos_iou_th:
                    pos_in_target = (iou_in_target > self.sample_pos_iou_th)
                else:
                    pos_in_target = (iou_in_target == iou_in_target.max())
                pos_labels_per_target[valid] = pos_in_target

                pos_indexes_for_targets_per_im.append(
                    pos_labels_per_target.nonzero().squeeze(1))
            pos_indexes_for_targets.append(pos_indexes_for_targets_per_im)

        return pos_indexes_for_targets
Example #8
def _test_feature_extractors(self, extractors, overwrite_cfgs,
                             overwrite_in_channels):
    ''' Make sure roi box feature extractors run '''

    self.assertGreater(len(extractors), 0)

    in_channels_default = 64

    for name, builder in extractors.items():
        print('Testing {}...'.format(name))
        if name in overwrite_cfgs:
            cfg = load_config(overwrite_cfgs[name])
        else:
            # Use default config if config file is not specified
            cfg = copy.deepcopy(g_cfg)

        in_channels = overwrite_in_channels.get(name, in_channels_default)

        fe = builder(cfg, in_channels)
        self.assertIsNotNone(
            getattr(fe, 'out_channels', None),
            'Need to provide out_channels for feature extractor {}'.format(
                name))

        N, C_in, H, W = 2, in_channels, 24, 32
        input = torch.rand([N, C_in, H, W], dtype=torch.float32)
        bboxes = [[1, 1, 10, 10], [5, 5, 8, 8], [2, 2, 3, 4]]
        img_size = [384, 512]
        box_list = BoxList(bboxes, img_size, "xyxy")
        out = fe([input], [box_list] * N)
        self.assertEqual(out.shape[:2],
                         torch.Size([N * len(bboxes), fe.out_channels]))
Example #9
 def prepare_boxlist(self, boxes, scores, image_shape):
     """
     Return a BoxList built from `boxes`, attaching the probability scores as
     an extra field.
     `boxes` has shape (#detections, 4 * #classes), where each row represents
     a list of predicted bounding boxes for each of the object classes in the
     dataset (including the background class). The detections in each row
     originate from the same object proposal.
     `scores` has shape (#detections, #classes), where each row represents a list
     of object detection confidence scores for each of the object classes in the
     dataset (including the background class). `scores[i, j]` corresponds to the
     box at `boxes[i, j * 4:(j + 1) * 4]`.
     """
     boxes = boxes.reshape(-1, 4)
     scores = scores.reshape(-1)
     boxlist = BoxList(boxes, image_shape, mode="xyxy")
     boxlist.add_field("scores", scores)
     return boxlist
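
A small sketch (toy sizes, plain torch only) of the reshape described in the docstring: after flattening, row k = i * num_classes + j of the BoxList still corresponds to scores[i, j] and boxes[i, j*4:(j+1)*4]:

import torch

num_dets, num_classes = 2, 3
boxes = torch.arange(num_dets * num_classes * 4, dtype=torch.float32).view(num_dets, num_classes * 4)
scores = torch.rand(num_dets, num_classes)

flat_boxes = boxes.reshape(-1, 4)   # (num_dets * num_classes, 4)
flat_scores = scores.reshape(-1)    # (num_dets * num_classes,)

i, j = 1, 2
k = i * num_classes + j
assert torch.equal(flat_boxes[k], boxes[i, j * 4:(j + 1) * 4])
assert flat_scores[k] == scores[i, j]
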
Example #10
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            scores = boxlists[i].get_field("scores")
            labels = boxlists[i].get_field("labels")
            diagonals = boxlists[i].get_field("diagonals")
            boxes = boxlists[i].bbox
            boxlist = boxlists[i]
            result = []
            # skip the background
            for j in tqdm(range(1, self.num_classes)):
                inds = (labels == j).nonzero().view(-1)

                scores_j = scores[inds]
                boxes_j = boxes[inds, :].view(-1, 4)
                diagonals_j = diagonals[inds, :].view(-1, 16)
                boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
                boxlist_for_class.add_field("scores", scores_j)
                boxlist_for_class.add_field("diagonals", diagonals_j)
                if scores_j.size()[0] != 0:
                    boxlist_for_class = diagonal_nms(boxlist_for_class,
                                                     self.nms_thresh,
                                                     self.nms_topk,
                                                     score_field="scores")
                    num_labels = len(boxlist_for_class)
                    boxlist_for_class.add_field(
                        "labels",
                        torch.full((num_labels, ),
                                   j,
                                   dtype=torch.int64,
                                   device=scores.device))
                    result.append(boxlist_for_class)

            result = cat_boxlist(result)
            # new_result = all_class_nms(result, self.nms_thresh)
            number_of_detections = len(result)
            print('Number of detections in this image {}'.format(
                number_of_detections))

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1)
                # image_thresh = torch.as_tensor(0.5)
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            result = all_class_nms(result, self.nms_thresh)
            results.append(result)

        return results
Example #11
    def __getitem__(self, item):
        img = Image.open(self.image_lists[item]).convert("RGB")

        # dummy target
        w, h = img.size
        target = BoxList([[0, 0, w, h]], img.size, mode="xyxy")

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target
Example #12
    def forward(self, x, boxes, features):
        """
        Arguments:
            x (Tensor): the attribute logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image
            features (Tensor) : attribute features

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field attribute
        """
        boxes_per_image = [len(box) for box in boxes]
        attribute_probs = F.softmax(x, -1)
        num_classes = attribute_probs.shape[1]

        attribute_probs = attribute_probs.split(boxes_per_image, dim=0)
        features = features.split(boxes_per_image, dim=0)

        results = []
        for box, prob, feature in zip(boxes, attribute_probs, features):
            # copy the current boxes
            boxlist = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                boxlist.add_field(field, box.get_field(field))
            if self.output_feature:
                boxlist.add_field('attr_feature', feature)
            # filter out low-probability and redundant boxes
            boxlist = self.filter_results(boxlist, prob, feature, num_classes)
            results.append(boxlist)

        return results
Example #13
    def forward(self, x, boxes):
        """
        Arguments:
            x (Tensor): the mask logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field mask
        """
        mask_prob = x.sigmoid()

        # select masks corresponding to the predicted classes
        num_masks = x.shape[0]
        labels = [bbox.get_field("labels") for bbox in boxes]
        labels = torch.cat(labels)
        index = torch.arange(num_masks, device=labels.device)
        mask_prob = mask_prob[index, labels][:, None]

        boxes_per_image = [len(box) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)

        if self.masker:
            mask_prob = self.masker(mask_prob, boxes)

        results = []
        for prob, box in zip(mask_prob, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox.add_field("mask", prob)
            results.append(bbox)

        return results
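
The "copy the boxes and carry their fields over" idiom used above (and in Examples #12 and #23) in isolation; a hedged sketch assuming maskrcnn_benchmark's BoxList:

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList

src = BoxList(torch.tensor([[0.0, 0.0, 20.0, 20.0]]), (64, 64), mode="xyxy")
src.add_field("labels", torch.tensor([2]))
src.add_field("scores", torch.tensor([0.8]))

dst = BoxList(src.bbox, src.size, mode="xyxy")
for field in src.fields():
    dst.add_field(field, src.get_field(field))
dst.add_field("mask", torch.zeros(1, 1, 28, 28))  # extra per-box payload, e.g. a mask probability map
print(dst.fields())  # ['labels', 'scores', 'mask']
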
Example #14
 def forward(self, image_list, feature_maps):
     grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
     anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
     anchors = []
     for i, (image_height, image_width) in enumerate(image_list.image_sizes):
         anchors_in_image = []
         for anchors_per_feature_map in anchors_over_all_feature_maps:
             boxlist = BoxList(
                 anchors_per_feature_map, (image_width, image_height), mode="xyxy"
             )
             self.add_visibility_to(boxlist)
             anchors_in_image.append(boxlist)
         anchors.append(anchors_in_image)
     return anchors
Example #15
 def prepare_empty_boxlist(self, boxlist):
     device = boxlist.bbox.device
     boxlist_empty = BoxList(torch.zeros((0, 4)).to(device),
                             boxlist.size,
                             mode='xyxy')
     boxlist_empty.add_field("scores", torch.Tensor([]).to(device))
     boxlist_empty.add_field(
         "labels", torch.full((0, ), -1, dtype=torch.int64, device=device))
     return boxlist_empty
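
One reason for building a correctly-typed empty BoxList like this is that it concatenates with non-empty per-class results without special-casing. A hedged sketch assuming maskrcnn_benchmark's cat_boxlist:

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import cat_boxlist

size = (320, 240)
empty = BoxList(torch.zeros((0, 4)), size, mode="xyxy")
empty.add_field("scores", torch.tensor([]))
empty.add_field("labels", torch.full((0,), -1, dtype=torch.int64))

dets = BoxList(torch.tensor([[5.0, 5.0, 60.0, 80.0]]), size, mode="xyxy")
dets.add_field("scores", torch.tensor([0.95]))
dets.add_field("labels", torch.tensor([3]))

merged = cat_boxlist([empty, dets])
print(len(merged), merged.get_field("labels"))  # 1 tensor([3])
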
Example #16
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            scores = boxlists[i].get_field("scores")
            labels = boxlists[i].get_field("labels")
            masks = boxlists[i].get_field("mask")
            boxes = boxlists[i].bbox
            boxlist = boxlists[i]
            result = []

            # skip the background
            for j in range(1, self.num_classes):
                inds = (labels == j).nonzero().view(-1)

                scores_j = scores[inds]
                boxes_j = boxes[inds, :].view(-1, 4)
                boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
                boxlist_for_class.add_field("scores", scores_j)
                boxlist_for_class, keep = boxlist_nms_with_keep(
                    boxlist_for_class, self.nms_thresh, score_field="scores")
                num_labels = len(boxlist_for_class)

                boxlist_for_class.add_field(
                    "labels",
                    torch.full((num_labels, ),
                               j,
                               dtype=torch.int64,
                               device=scores.device))
                boxlist_for_class.add_field("mask", masks[inds[keep], :, :, :])
                result.append(boxlist_for_class)

            result = cat_boxlist(result)
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                image_thresh, _ = torch.kthvalue(
                    cls_scores.cpu(),
                    number_of_detections - self.fpn_post_nms_top_n + 1)
                keep = cls_scores >= image_thresh.item()
                keep = torch.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results
Example #17
    def get_groundtruth(self, index):
        anno_name = self.img_item_name[index][:-4] + '.xml'
        anno_path = os.path.join(self._annodir, anno_name)
        anno = ET.parse(anno_path).getroot()
        anno = self._preprocess_annotation(anno)

        height, width = anno["im_info"]
        target = BoxList(anno["boxes"], (width, height), mode="xyxy")
        target.add_field("labels", anno["labels"])
        target.add_field("difficult", anno["difficult"])
        return target
Example #18
    def get_groundtruth(self, index):
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        anno = self._preprocess_annotation(anno)

        height, width = anno["im_info"]
        try:
            target = BoxList(anno["boxes"], (width, height), mode="xyxy")
        except ValueError:
            raise ValueError("bbox error in {}, {}".format(
                self._annopath % img_id, anno["boxes"]))
        else:
            target.add_field("labels", anno["labels"])
            target.add_field("difficult", anno["difficult"])
            return target
Example #19
def merge_result_from_multi_scales(boxlists, nms_type='nms', vote_thresh=0.65):
    num_images = len(boxlists)
    results = []
    for i in range(num_images):
        scores = boxlists[i].get_field("scores")
        labels = boxlists[i].get_field("labels")
        boxes = boxlists[i].bbox
        boxlist = boxlists[i]
        result = []
        # skip the background
        for j in range(1, cfg.MODEL.RETINANET.NUM_CLASSES):
            inds = (labels == j).nonzero().view(-1)

            scores_j = scores[inds]
            boxes_j = boxes[inds, :].view(-1, 4)
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(boxlist_for_class,
                                            cfg.MODEL.ATSS.NMS_TH,
                                            score_field="scores",
                                            nms_type=nms_type,
                                            vote_thresh=vote_thresh)
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels, ),
                           j,
                           dtype=torch.int64,
                           device=scores.device))
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > cfg.MODEL.ATSS.PRE_NMS_TOP_N > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(),
                number_of_detections - cfg.MODEL.ATSS.PRE_NMS_TOP_N + 1)
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        results.append(result)
    return results
Example #20
    def _py_bbox_list_to_bbox_list(self, py_bbox_list, im_size):
        '''
        :param py_bbox_list: list of dicts with keys "box", "label_id" and "score"
        :param im_size: (w, h)
        :return: BoxList with "labels" and "scores" fields
        '''
        bboxes = []
        labels = []
        scores = []
        for item in py_bbox_list:
            bboxes.append(item["box"])
            labels.append(item["label_id"])
            scores.append(item["score"])

        box_list = BoxList(torch.tensor(bboxes, dtype=torch.float32), im_size)
        box_list.add_field("labels", torch.tensor(labels, dtype=torch.long))
        box_list.add_field("scores", torch.tensor(scores, dtype=torch.float32))

        return box_list
Example #21
    def __getitem__(self, idx):
        # img, anno = super(COCODataset, self).__getitem__(idx)
        coco = self.coco
        img_id = self.ids[idx]
        ann_ids = coco.getAnnIds(imgIds=img_id)
        anno = coco.loadAnns(ann_ids)
        path = coco.loadImgs(img_id)[0]['file_name']
        img = Image.open(os.path.join(self.root, path)).convert('RGB')

        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        # masks = [obj["segmentation"] for obj in anno]
        # masks = SegmentationMask(masks, img.size, mode='poly')
        # target.add_field("masks", masks)

        if anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size)
            target.add_field("keypoints", keypoints)

        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx
Example #22
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4:(j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms)
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels",
                torch.full((num_labels, ), j, dtype=torch.int64,
                           device=device))
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(),
                number_of_detections - self.detections_per_img + 1)
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
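
The top-N cap at the end of filter_results (also used in several select_over_all_levels variants above) relies on torch.kthvalue to find the score threshold that keeps the highest detections_per_img scores. A stand-alone sketch with hypothetical numbers:

import torch

scores = torch.tensor([0.9, 0.1, 0.8, 0.3, 0.7])
detections_per_img = 3  # hypothetical cap

number_of_detections = scores.numel()
if number_of_detections > detections_per_img > 0:
    # k-th smallest value, with k chosen so that `detections_per_img` scores lie at or above it
    image_thresh, _ = torch.kthvalue(
        scores.cpu(), number_of_detections - detections_per_img + 1)
    keep = torch.nonzero(scores >= image_thresh.item()).squeeze(1)
    print(keep)  # indices of the three highest scores: tensor([0, 2, 4])
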
Example #23
    def forward(self, x, boxes):
        mask_prob = x

        scores = None
        if self.keypointer:
            mask_prob, scores = self.keypointer(x, boxes)

        assert len(boxes) == 1, "Only non-batched inference supported for now"
        boxes_per_image = [box.bbox.size(0) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)
        scores = scores.split(boxes_per_image, dim=0)

        results = []
        for prob, box, score in zip(mask_prob, boxes, scores):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            prob = PersonKeypoints(prob, box.size)
            prob.add_field("logits", score)
            bbox.add_field("keypoints", prob)
            results.append(bbox)

        return results
Example #24
def evaluate_box_proposals(predictions,
                           dataset,
                           thresholds=None,
                           area="all",
                           limit=None):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code. However,
    it produces slightly different results.
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        "all": 0,
        "small": 1,
        "medium": 2,
        "large": 3,
        "96-128": 4,
        "128-256": 5,
        "256-512": 6,
        "512-inf": 7,
    }
    area_ranges = [
        [0**2, 1e5**2],  # all
        [0**2, 32**2],  # small
        [32**2, 96**2],  # medium
        [96**2, 1e5**2],  # large
        [96**2, 128**2],  # 96-128
        [128**2, 256**2],  # 128-256
        [256**2, 512**2],  # 256-512
        [512**2, 1e5**2],
    ]  # 512-inf
    assert area in areas, "Unknown area range: {}".format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0

    for image_id, prediction in enumerate(predictions):
        original_id = dataset.id_to_img_map[image_id]

        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))

        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = prediction.get_field("objectness").sort(descending=True)[1]
        prediction = prediction[inds]

        ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
        anno = dataset.coco.loadAnns(ann_ids)
        gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
        gt_boxes = torch.as_tensor(gt_boxes).reshape(
            -1, 4)  # guard against no boxes
        gt_boxes = BoxList(gt_boxes, (image_width, image_height),
                           mode="xywh").convert("xyxy")
        gt_areas = torch.as_tensor(
            [obj["area"] for obj in anno if obj["iscrowd"] == 0])

        if len(gt_boxes) == 0:
            continue

        valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <=
                                                       area_range[1])
        gt_boxes = gt_boxes[valid_gt_inds]

        num_pos += len(gt_boxes)

        if len(gt_boxes) == 0:
            continue

        if len(prediction) == 0:
            continue

        if limit is not None and len(prediction) > limit:
            prediction = prediction[:limit]

        overlaps = boxlist_iou(prediction, gt_boxes)

        _gt_overlaps = torch.zeros(len(gt_boxes))
        for j in range(min(len(prediction), len(gt_boxes))):
            # find which proposal box maximally covers each gt box
            # and get the iou amount of coverage for each gt box
            max_overlaps, argmax_overlaps = overlaps.max(dim=0)

            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ovr, gt_ind = max_overlaps.max(dim=0)
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1

        # append recorded iou coverage level
        gt_overlaps.append(_gt_overlaps)
    gt_overlaps = torch.cat(gt_overlaps, dim=0)
    gt_overlaps, _ = torch.sort(gt_overlaps)

    if thresholds is None:
        step = 0.05
        thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
    recalls = torch.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        "ar": ar,
        "recalls": recalls,
        "thresholds": thresholds,
        "gt_overlaps": gt_overlaps,
        "num_pos": num_pos,
    }
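
The recall loop above is driven by boxlist_iou, which returns a (num_predictions, num_gt) IoU matrix for two BoxLists on the same image size. A hedged sketch assuming maskrcnn_benchmark's boxlist_ops:

import torch
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou

size = (100, 100)
preds = BoxList(torch.tensor([[0.0, 0.0, 10.0, 10.0],
                              [50.0, 50.0, 70.0, 70.0]]), size, mode="xyxy")
gts = BoxList(torch.tensor([[0.0, 0.0, 10.0, 10.0]]), size, mode="xyxy")

iou = boxlist_iou(preds, gts)  # shape (2, 1)
print(iou)  # first row 1.0 (identical boxes), second row 0.0 (no overlap)
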
Example #25
def main(args):
    annFile = 'datasets/coco/annotations/instances_train2017_0.5.json'
    coco = COCO(annFile)

    with open(annFile, 'r') as f:
        result_json = json.load(f)
    annos_json = result_json['annotations']
    # anno_id = max([ann['id'] for ann in annos_json]) + 1

    output_dir = os.path.join(args.predictions, 'coco_2017_train_partial')
    image_ids = torch.load(os.path.join(output_dir, 'image_ids.pth'))
    predictions = torch.load(os.path.join(output_dir, 'predictions.pth'))
    anno_id = max(torch.load(os.path.join(output_dir, 'box_ids.pth'))) + 1

    imgIds = sorted(coco.getImgIds())

    threshold = args.confidence
    # threshold = torch.tensor([-1.0, 0.46633365750312805, 0.4409848749637604, 0.47267603874206543, 0.4707889258861542, 0.5220812559127808, 0.5358721613883972, 0.5226702690124512, 0.45160290598869324])
    iou_threshold = 0.5

    cpu_device = torch.device("cpu")

    partial_box_num = 0

    N = len(image_ids)
    for i in tqdm(range(N)):
        im_idx = image_ids[i]
        bbox = predictions[i]
        imginfo = coco.loadImgs(imgIds[im_idx])[0]
        image_width = imginfo['width']
        image_height = imginfo['height']

        # load annotations
        partial_anns = coco.loadAnns(coco.getAnnIds(imgIds=(imgIds[im_idx], )))
        # full_anns = coco_full.loadAnns(coco_full.getAnnIds(imgIds=(imgIds[im_idx],), catIds=catIds))

        partial_boxes = [obj["bbox"] for obj in partial_anns]
        partial_boxes_ids = set([obj["id"] for obj in partial_anns])

        partial_boxes = torch.as_tensor(partial_boxes).reshape(
            -1, 4)  # guard against no boxes
        partial_boxes = BoxList(partial_boxes, (image_width, image_height),
                                mode="xywh").convert("xyxy")

        partial_box_num += len(partial_boxes_ids)

        # get predictions
        bbox = bbox.resize((image_width, image_height))
        bbox = bbox.to(cpu_device)

        # generate pseudo labels
        idx = generate_pseudo_label_with_confidence_score(
            bbox, im_idx, threshold)

        if len(idx) > 0:
            pseudo_labels = bbox[idx]
            scores = pseudo_labels.get_field("scores").tolist()

            # compute iou
            overlaps = boxlist_iou(partial_boxes, pseudo_labels)
            matched_id = [True] * len(pseudo_labels)

            # remove predictions for partial labels
            # use a separate index to avoid shadowing the outer image loop variable
            for k in range(len(partial_boxes)):
                matched = np.argmax(overlaps[k])
                if overlaps[k, matched] >= iou_threshold:
                    matched_id[matched] = False

            pseudo_labels = pseudo_labels[matched_id]
            # print(num, len(pseudo_labels))
            pseudo_annos, anno_id = new_annotation_json(
                pseudo_labels, imgIds[im_idx], anno_id)
            annos_json.extend(pseudo_annos)

    print('confidence threshold: {}'.format(threshold))

    result_json['annotations'] = annos_json
    with open(args.annotation, 'w') as f:
        json.dump(result_json, f)

    print(partial_box_num, len(result_json['annotations']))
Example #26
    def forward_for_single_feature_map(
            self, locations, box_cls,
            box_regression, centerness,
            image_sizes):
        """
        Arguments:
            locations: tensor of size H * W, 2
            box_cls: tensor of size N, C, H, W
            box_regression: tensor of size N, 4, H, W
            centerness: tensor of size N, 1, H, W
            image_sizes: list of (h, w), one per image
        """
        N, C, H, W = box_cls.shape

        # put in the same format as locations
        box_cls = box_cls.view(N, C, H, W).permute(0, 2, 3, 1)
        box_cls = box_cls.reshape(N, -1, C).sigmoid()
        box_regression = box_regression.view(N, 4, H, W).permute(0, 2, 3, 1)
        box_regression = box_regression.reshape(N, -1, 4)
        centerness = centerness.view(N, 1, H, W).permute(0, 2, 3, 1)
        centerness = centerness.reshape(N, -1).sigmoid()

        candidate_inds = box_cls > self.pre_nms_thresh
        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)

        # multiply the classification scores with centerness scores
        box_cls = box_cls * centerness[:, :, None]

        results = []
        for i in range(N):
            per_box_cls = box_cls[i]
            per_candidate_inds = candidate_inds[i]
            per_box_cls = per_box_cls[per_candidate_inds]

            per_candidate_nonzeros = per_candidate_inds.nonzero()
            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1] + 1

            per_box_regression = box_regression[i]
            per_box_regression = per_box_regression[per_box_loc]
            per_locations = locations[per_box_loc]

            per_pre_nms_top_n = pre_nms_top_n[i]

            if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
                per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n, sorted=False)
                per_class = per_class[top_k_indices]
                per_box_regression = per_box_regression[top_k_indices]
                per_locations = per_locations[top_k_indices]

            detections = torch.stack([
                per_locations[:, 0] - per_box_regression[:, 0],
                per_locations[:, 1] - per_box_regression[:, 1],
                per_locations[:, 0] + per_box_regression[:, 2],
                per_locations[:, 1] + per_box_regression[:, 3],
            ], dim=1)

            h, w = image_sizes[i]
            boxlist = BoxList(detections, (int(w), int(h)), mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field("scores", torch.sqrt(per_box_cls))
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results
Example #27
    def forward_for_mask(self, boxlists, pixel_embed):
        N, dim, m_h, m_w = pixel_embed.shape
        new_boxlists = []
        stride = self.fpn_strides[0] / self.mask_scale_factor
        for im in range(N):
            boxlist = boxlists[im]
            boxes = boxlist.bbox
            input_w, input_h = boxlist.size
            proposal_embed = boxlist.get_field('proposal_embed')
            if len(proposal_embed) == 0:
                new_boxlist = BoxList(boxes, boxlist.size, mode="xyxy")
                new_boxlist.add_field("labels", boxlist.get_field("labels"))
                new_boxlist.add_field("scores", boxlist.get_field("scores"))
                new_boxlist.add_field('mask', torch.tensor([]))
                if self.post_process_masks:
                    new_boxlist.add_field('stride', torch.tensor(1))
                    new_boxlist.add_field('mask_th', torch.tensor(0.0))
                else:
                    new_boxlist.add_field('stride', torch.tensor(stride))
                    new_boxlist.add_field('mask_th',
                                          torch.tensor(self.mask_th))

                new_boxlists.append(new_boxlist)
                continue

            mask_boxes = boxes / stride
            box_masks = boxes_to_masks(mask_boxes, m_h, m_w)
            proposal_margin = boxlist.get_field('proposal_margin')
            mask_prob = self.compute_mask_prob(pixel_embed[im], proposal_embed,
                                               proposal_margin, mask_boxes)
            masks = mask_prob * box_masks.float()

            if self.post_process_masks:
                masks = torch.nn.functional.interpolate(
                    input=masks.unsqueeze(1).float(),
                    scale_factor=stride,
                    mode="bilinear",
                    align_corners=False).gt(self.mask_th)
                masks = masks[:, 0, :input_h, :input_w]

            new_boxlist = BoxList(boxes, boxlist.size, mode="xyxy")
            new_boxlist.add_field('mask', masks)
            new_boxlist.add_field("labels", boxlist.get_field("labels"))
            new_boxlist.add_field("scores", boxlist.get_field("scores"))
            if self.post_process_masks:
                new_boxlist.add_field('stride', torch.tensor(1))
                new_boxlist.add_field('mask_th', torch.tensor(0.0))
            else:
                new_boxlist.add_field('stride', torch.tensor(stride))
                new_boxlist.add_field('mask_th', torch.tensor(self.mask_th))

            new_boxlists.append(new_boxlist)

        return new_boxlists
Example #28
    def __call__(self, image, target=None):
        if self.to_bgr255:
            image = image[[2, 1, 0]] * 255
        image = F.normalize(image, mean=self.mean, std=self.std)
        if target is None:
            return image
        return image, target


if __name__ == '__main__':
    import cv2
    from fcos_core.structures.bounding_box import BoxList

    image = Image.open("/home/nie/lena.jpg").convert("RGB")
    target = BoxList(torch.tensor([[80, 92, 152, 194]], dtype=torch.float32),
                     image.size,
                     mode="xyxy")
    target.add_field("labels", torch.tensor([1]))
    transform = Compose([
        # Resize(256, 256),
        # RandomHorizontalFlip(0.5),
        ImgJitter(),
        ImgAug_Private()
    ])
    for i in range(10):
        _image, _target = transform(image=image, target=target)
        rgb_image = np.array(_image)
        bboxes = np.array(_target.bbox.numpy(), dtype=int).tolist()
        for bbox in bboxes:
            rgb_image = cv2.rectangle(rgb_image, (bbox[0], bbox[1]),
                                      (bbox[2], bbox[3]), (0, 255, 0), 2)
Example #29
    def forward_for_single_feature_map(self, anchors, box_cls, box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            box_cls: tensor of size N, A * C, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = box_cls.device
        N, _, H, W = box_cls.shape
        A = box_regression.size(1) // 4
        C = box_cls.size(1) // A

        # put in the same format as anchors
        box_cls = permute_and_flatten(box_cls, N, A, C, H, W)
        box_cls = box_cls.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)
        box_regression = box_regression.reshape(N, -1, 4)

        num_anchors = A * H * W

        candidate_inds = box_cls > self.pre_nms_thresh

        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max=self.pre_nms_top_n)

        results = []
        for per_box_cls, per_box_regression, per_pre_nms_top_n, \
        per_candidate_inds, per_anchors in zip(
            box_cls,
            box_regression,
            pre_nms_top_n,
            candidate_inds,
            anchors):

            # Sort and select TopN
            # TODO most of this can be made out of the loop for
            # all images.
            # TODO:Yang: Not easy to do. Because the numbers of detections are
            # different in each image. Therefore, this part needs to be done
            # per image.
            per_box_cls = per_box_cls[per_candidate_inds]

            per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n, sorted=False)

            per_candidate_nonzeros = \
                    per_candidate_inds.nonzero()[top_k_indices, :]

            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1]
            per_class += 1

            detections = self.box_coder.decode(
                per_box_regression[per_box_loc, :].view(-1, 4),
                per_anchors.bbox[per_box_loc, :].view(-1, 4))

            boxlist = BoxList(detections, per_anchors.size, mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field("scores", per_box_cls)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results
Example #30
    def __call__(self, tensor, target):
        if self.ratio >= 1.0:
            return tensor, target

        self.img_pool.append({'tensor': tensor, 'target': target})

        if len(self.img_pool) > self.img_pool_size:
            self.img_pool.pop(0)

        c, h, w = tensor.shape
        if self.size_divisible > 0:
            h = int(math.ceil(h / self.size_divisible) * self.size_divisible)
            w = int(math.ceil(w / self.size_divisible) * self.size_divisible)

        new_h, new_w = int(self.ratio * h), int(self.ratio * w)
        in_tensor, in_target = scale_jitter(tensor, target, (new_h, new_w))

        if len(self.img_pool) < 4:
            tensor_out, target_out = in_tensor, in_target
        else:
            pad_imgs = random.sample(self.img_pool, 3)
            pad_tensors, pad_targets = [], []
            for img in pad_imgs:
                pad_tensor, pad_target = scale_jitter(img['tensor'],
                                                      img['target'],
                                                      (new_h, new_w))
                pad_tensors.append(pad_tensor)
                pad_targets.append(pad_target)

            crop_boxes = [(0, 0, w - new_w, new_h), (0, 0, new_w, h - new_h),
                          (0, 0, w - new_w, h - new_h)]

            tensor_out = in_tensor.new(*(c, h, w)).zero_()
            tensor_out[:c, :new_h, :new_w].copy_(in_tensor)
            tensor_out[:c, :new_h, new_w:].copy_(
                pad_tensors[0][:c, :crop_boxes[0][3], :crop_boxes[0][2]])
            tensor_out[:c, new_h:, :new_w].copy_(
                pad_tensors[1][:c, :crop_boxes[1][3], :crop_boxes[1][2]])
            tensor_out[:c, new_h:, new_w:].copy_(
                pad_tensors[2][:c, :crop_boxes[2][3], :crop_boxes[2][2]])

            crop_targets = []
            for i, pad_target in enumerate(pad_targets):
                crop_target = pad_target.crop(crop_boxes[i])
                ious = crop_target.area() / pad_target.area()
                crop_target = crop_target[ious >= self.iou_threshold]
                crop_targets.append(crop_target)

            offsets_box = [
                torch.Tensor([0.0, 0.0, 0.0, 0.0]),
                torch.Tensor([new_w, 0.0, new_w, 0.0]),
                torch.Tensor([0.0, new_h, 0.0, new_h]),
                torch.Tensor([new_w, new_h, new_w, new_h])
            ]
            target_out = torch.cat([
                target.bbox + offsets_box[i]
                for i, target in enumerate([in_target] + crop_targets)
            ],
                                   dim=0)
            target_out = BoxList(target_out, (w, h), mode='xyxy')
            target_out.add_field(
                'labels',
                torch.cat([
                    target.extra_fields['labels']
                    for target in ([in_target] + crop_targets)
                ],
                          dim=-1))

            polys_list = [[
                poly.polygons[0]
                for poly in target.extra_fields['masks'].instances.polygons
            ] for target in ([in_target] + crop_targets)]
            offsets_mask = [[0.0, 0.0], [new_w, 0.0], [0.0, new_h],
                            [new_w, new_h]]

            syn_mask = []
            for i, polys in enumerate(polys_list):
                syn_mask += [[
                    list(
                        np.array(poly) +
                        np.array(offsets_mask[i] * int(len(poly) / 2)))
                ] for poly in polys]

            syn_mask = SegmentationMask(syn_mask, (w, h), mode='poly')
            target_out.add_field('masks', syn_mask)

        return tensor_out, target_out