Example #1
    def forward_for_single_feature_map(self, anchors, objectness,
                                       box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = objectness.device
        N, A, H, W = objectness.shape

        # put in the same format as anchors
        objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
        objectness = objectness.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n,
                                               dim=1,
                                               sorted=True)

        batch_idx = torch.arange(N, device=device)[:, None]
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        proposals = self.box_coder.decode(box_regression.view(-1, 4),
                                          concat_anchors.view(-1, 4))

        proposals = proposals.view(N, -1, 4)

        result = []
        for proposal, score, im_shape in zip(proposals, objectness,
                                             image_shapes):
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result
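Both RPN examples on this page call a `permute_and_flatten` helper that is not shown here. A minimal sketch of what such a helper typically does, matching how maskrcnn-benchmark lays out the tensor (an assumption, not code from the examples above):

def permute_and_flatten(layer, N, A, C, H, W):
    # (N, A*C, H, W) -> (N, A, C, H, W): separate anchors from channels
    layer = layer.view(N, -1, C, H, W)
    # -> (N, H, W, A, C): spatial positions first, so flattening enumerates
    # locations in the same order as the anchor grid was generated
    layer = layer.permute(0, 3, 4, 1, 2)
    # -> (N, H*W*A, C)
    return layer.reshape(N, -1, C)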
Example #2
    def _test_feature_extractors(self, extractors, overwrite_cfgs,
                                 overwrite_in_channels):
        """Make sure ROI box feature extractors run."""

        self.assertGreater(len(extractors), 0)

        in_channels_default = 64

        for name, builder in extractors.items():
            print('Testing {}...'.format(name))
            if name in overwrite_cfgs:
                cfg = load_config(overwrite_cfgs[name])
            else:
                # Use default config if config file is not specified
                cfg = copy.deepcopy(g_cfg)

            in_channels = overwrite_in_channels.get(name, in_channels_default)

            fe = builder(cfg, in_channels)
            self.assertIsNotNone(
                getattr(fe, 'out_channels', None),
                'Need to provide out_channels for feature extractor {}'.format(
                    name))

            N, C_in, H, W = 2, in_channels, 24, 32
            input = jt.random([N, C_in, H, W]).float32()
            bboxes = [[1, 1, 10, 10], [5, 5, 8, 8], [2, 2, 3, 4]]
            img_size = [384, 512]
            box_list = BoxList(bboxes, img_size, "xyxy")
            out = fe([input], [box_list] * N)
            self.assertEqual(out.shape[:2], [N * len(bboxes), fe.out_channels])
Example #3
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size, mode='poly')
        target.add_field("masks", masks)

        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx
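COCO annotations store each box as xywh (top-left corner plus width and height), which is why the example converts to xyxy corners before clipping and transforms. A small illustration of that conversion, assuming the plain convention x2 = x1 + w, y2 = y1 + h (some BoxList implementations subtract one pixel when converting):

box_xywh = [10.0, 20.0, 30.0, 40.0]   # x, y, w, h
x1, y1, w, h = box_xywh
box_xyxy = [x1, y1, x1 + w, y1 + h]   # [10.0, 20.0, 40.0, 60.0]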
Example #4
    def forward_for_single_feature_map(self, anchors, box_cls, box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            box_cls: tensor of size N, A * C, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        N, _, H, W = box_cls.shape
        A = box_regression.shape[1] // 4
        C = box_cls.shape[1] // A

        # put in the same format as anchors
        box_cls = permute_and_flatten(box_cls, N, A, C, H, W)
        box_cls = box_cls.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        candidate_inds = box_cls > self.pre_nms_thresh

        pre_nms_top_n = candidate_inds.reshape(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max_v=self.pre_nms_top_n)

        results = []
        for i in range(box_cls.shape[0]):
            per_box_cls = box_cls[i]
            per_box_regression = box_regression[i]
            per_pre_nms_top_n = pre_nms_top_n[i]
            per_candidate_inds = candidate_inds[i]
            per_anchors = anchors[i]

            # Sort and select TopN
            # TODO most of this could be moved out of the loop over all images.
            # TODO (Yang): not easy to do, because the number of detections
            # differs per image, so this part has to be done per image.
            per_box_cls = per_box_cls[per_candidate_inds]

            per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n, sorted=False)

            per_candidate_nonzeros = \
                    per_candidate_inds.nonzero()[top_k_indices, :]

            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1]
            if per_class.numel() > 0:
                per_class += 1

            detections = self.box_coder.decode(
                per_box_regression[per_box_loc, :].view(-1, 4),
                per_anchors.bbox[per_box_loc, :].view(-1, 4))

            boxlist = BoxList(detections, per_anchors.size, mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field("scores", per_box_cls)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results
Example #5
    def prepare_boxlist(self, boxes, scores, image_shape):
        """
        Returns a BoxList from `boxes` and adds probability scores as an
        extra field.

        `boxes` has shape (#detections, 4 * #classes), where each row
        represents a list of predicted bounding boxes for each of the object
        classes in the dataset (including the background class). The
        detections in each row originate from the same object proposal.

        `scores` has shape (#detections, #classes), where each row represents
        a list of object detection confidence scores for each of the object
        classes in the dataset (including the background class).
        `scores[i, j]` corresponds to the box at `boxes[i, j * 4:(j + 1) * 4]`.
        """
        boxes = boxes.reshape(-1, 4)
        scores = scores.reshape(-1)
        boxlist = BoxList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        return boxlist
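A quick shape walkthrough of `prepare_boxlist`, with hypothetical sizes of 2 proposals and 3 classes (background included); the reshapes line every class-specific box up with its score:

import numpy as np

num_dets, num_classes = 2, 3
boxes = np.zeros((num_dets, 4 * num_classes))   # (2, 12)
scores = np.zeros((num_dets, num_classes))      # (2, 3)

flat_boxes = boxes.reshape(-1, 4)   # (6, 4), detection-major, class-minor
flat_scores = scores.reshape(-1)    # (6,)
# flat_scores[i * num_classes + j] scores the box boxes[i, j*4:(j+1)*4],
# i.e. flat row i * num_classes + j of flat_boxes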
Example #6
    def __getitem__(self, item):
        img = Image.open(self.image_lists[item]).convert("RGB")

        # dummy target
        w, h = img.size
        target = BoxList([[0, 0, w, h]], img.size, mode="xyxy")

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target
Example #7
    def execute(self, x, boxes):
        """
        Arguments:
            x (Tensor): the mask logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field mask
        """
        mask_prob = x.sigmoid()

        # select masks corresponding to the predicted classes
        num_masks = x.shape[0]
        labels = [bbox.get_field("labels") for bbox in boxes]
        labels = jt.contrib.concat(labels, dim=0)
        index = jt.arange(num_masks)
        mask_prob = mask_prob[index, labels].unsqueeze(1)

        boxes_per_image = [len(box) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)
        if self.masker:
            mask_prob = self.masker(mask_prob, boxes)

        results = []
        for prob, box in zip(mask_prob, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox.add_field("mask", prob)
            results.append(bbox)

        return results
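The gather `mask_prob[index, labels]` keeps, for each ROI, only the mask channel of its predicted class: from (num_masks, num_classes, H, W) down to (num_masks, 1, H, W) after the unsqueeze. A standalone sketch with hypothetical sizes:

import jittor as jt

num_masks, num_classes, H, W = 4, 81, 28, 28
mask_prob = jt.random([num_masks, num_classes, H, W])
labels = jt.array([3, 7, 7, 12])        # predicted class per ROI

index = jt.arange(num_masks)
per_class = mask_prob[index, labels]    # (4, 28, 28)
per_class = per_class.unsqueeze(1)      # (4, 1, 28, 28)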
Example #8
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)
        if not self.is_train:
            target = None
            if self._transforms is not None:
                img, target = self._transforms(img, None)
            return img, target, idx
        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = np.array([obj["bbox"] for obj in anno], dtype=np.float32)
        boxes = boxes.reshape(-1, 4)
        target = BoxList(boxes, img.size, mode="xywh", to_jittor=False)
        target = target.convert("xyxy")
        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = np.array(classes, dtype=np.int32)
        target.add_field("labels", classes)

        if self.with_masks and anno and "segmentation" in anno[0]:
            masks = [obj["segmentation"] for obj in anno]
            masks = SegmentationMask(masks,
                                     img.size,
                                     mode='poly',
                                     to_jittor=False)
            target.add_field("masks", masks)

        if self.with_masks and anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size, to_jittor=False)
            target.add_field("keypoints", keypoints)

        target = target.clip_to_image(remove_empty=True)
        if self._transforms is not None:
            img, target = self._transforms(img, target)
        img = img.astype(np.float32)
        return img, target, idx
Example #9
    def forward(self, image_list, feature_maps):
        grid_sizes = [feature_map.shape[-2:] for feature_map in feature_maps]
        anchors_over_all_feature_maps = self.grid_anchors(grid_sizes)
        anchors = []
        for image_height, image_width in image_list.image_sizes:
            anchors_in_image = []
            for anchors_per_feature_map in anchors_over_all_feature_maps:
                boxlist = BoxList(anchors_per_feature_map,
                                  (image_width, image_height),
                                  mode="xyxy")
                self.add_visibility_to(boxlist)
                anchors_in_image.append(boxlist)
            anchors.append(anchors_in_image)
        return anchors
Example #10
    def get_groundtruth(self, index):
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        anno = self._preprocess_annotation(anno)

        height, width = anno["im_info"]
        target = BoxList(anno["boxes"], (width, height), mode="xyxy")
        target.add_field("labels", anno["labels"])
        target.add_field("difficult", anno["difficult"])
        return target
Example #11
    def filter_results_v2(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes, 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        # Apply threshold on detection probabilities, then apply NMS.
        # Drop column 0, because it's the background class.
        scores = scores[:, 1:]
        inds_all = (scores > self.score_thresh).nonzero()
        labels = inds_all[:, 1] + 1
        ind_scores = scores[inds_all[:, 0], inds_all[:, 1]]
        # +1 on the class index: `boxes` still contains the background slot
        ind_boxes = boxes[inds_all[:, 0], inds_all[:, 1] + 1, :]
        ind_boxes = ind_boxes.reshape(-1, 4)

        result = BoxList(ind_boxes, boxlist.size, mode="xyxy")
        result.add_field("scores", ind_scores)
        result.add_field("labels", labels)
        result = boxlist_ml_nms(result, self.nms)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes** by keeping
        # the highest-scoring detections.
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            scores, indices = jt.topk(cls_scores, self.detections_per_img)
            result = result[indices]
        return result
Example #12
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
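The final block caps detections per image: `kthvalue` at position n - k + 1 returns the smallest score that still belongs to the top k, so the `>=` filter keeps exactly k detections (a few more if there are ties). A small check of the arithmetic:

import torch

scores = torch.tensor([0.9, 0.3, 0.7, 0.5, 0.8])
k = 3
image_thresh, _ = torch.kthvalue(scores, scores.numel() - k + 1)  # 0.7
keep = torch.nonzero(scores >= image_thresh).squeeze(1)
# keep == tensor([0, 2, 4]): the three highest scores survive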
Example #13
    def execute(self, x, boxes):
        mask_prob = x

        scores = None
        if self.keypointer:
            mask_prob, scores = self.keypointer(x, boxes)

        assert len(boxes) == 1, "Only non-batched inference supported for now"
        boxes_per_image = [box.bbox.size(0) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)
        scores = scores.split(boxes_per_image, dim=0)

        results = []
        for prob, box, score in zip(mask_prob, boxes, scores):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            prob = PersonKeypoints(prob, box.size)
            prob.add_field("logits", score)
            bbox.add_field("keypoints", prob)
            results.append(bbox)

        return results
Example #14
    def __getitem__(self, idx):
        img_path = self.img_paths[idx]
        ann_path = self.ann_paths[idx]

        if self.mode == "mask":
            ann = jt.array(np.asarray(Image.open(ann_path)))
            # masks are represented with tensors
            boxes, segmentations, labels = self._processBinayMasks(ann)
        else:
            with open(ann_path, "r") as ann_file:
                ann = json.load(ann_file)
            # masks are represented with polygons
            boxes, segmentations, labels = self._processPolygons(ann)

        boxes, segmentations, labels = self._filterGT(boxes, segmentations,
                                                      labels)

        if len(segmentations) == 0:
            empty_ann_path = self.get_img_info(idx)["ann_path"]
            print("EMPTY ENTRY:", empty_ann_path)
            # just override this image with the next one
            img, target, _ = self[(idx + 1) % len(self)]
            return img, target, idx

        img = Image.open(img_path)
        # Compose all into a BoxList instance
        target = BoxList(boxes, img.size, mode="xyxy")
        target.add_field("labels", jt.array(labels))
        masks = SegmentationMask(segmentations, img.size, mode=self.mode)
        target.add_field("masks", masks)
        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx
Example #15
    def forward_for_single_feature_map(self, anchors, objectness,
                                       box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        N, A, H, W = objectness.shape

        # put in the same format as anchors
        objectness = permute_and_flatten(objectness, N, A, 1, H,
                                         W).reshape(N, -1)
        objectness = objectness.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n,
                                               dim=1,
                                               sorted=True)

        batch_idx = jt.arange(N).unsqueeze(1)
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        concat_anchors = jt.contrib.concat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        proposals = self.box_coder.decode(box_regression.reshape(-1, 4),
                                          concat_anchors.reshape(-1, 4))

        proposals = proposals.reshape(N, -1, 4)

        result = []
        for i in range(len(image_shapes)):
            proposal = proposals[i]
            score = objectness[i]
            im_shape = image_shapes[i]
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result
Example #16
def evaluate_box_proposals(
    predictions, dataset, thresholds=None, area="all", limit=None
):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code. However,
    it produces slightly different results.
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        "all": 0,
        "small": 1,
        "medium": 2,
        "large": 3,
        "96-128": 4,
        "128-256": 5,
        "256-512": 6,
        "512-inf": 7,
    }
    area_ranges = [
        [0 ** 2, 1e5 ** 2],  # all
        [0 ** 2, 32 ** 2],  # small
        [32 ** 2, 96 ** 2],  # medium
        [96 ** 2, 1e5 ** 2],  # large
        [96 ** 2, 128 ** 2],  # 96-128
        [128 ** 2, 256 ** 2],  # 128-256
        [256 ** 2, 512 ** 2],  # 256-512
        [512 ** 2, 1e5 ** 2],  # 512-inf
    ]
    assert area in areas, "Unknown area range: {}".format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = []
    num_pos = 0

    for image_id, prediction in predictions.items():
        original_id = dataset.id_to_img_map[image_id]

        img_info = dataset.get_img_info(image_id)
        image_width = img_info["width"]
        image_height = img_info["height"]
        prediction = prediction.resize((image_width, image_height))

        # sort predictions in descending order
        # TODO maybe remove this and make it explicit in the documentation
        inds = prediction.get_field("objectness").sort(descending=True)[1]
        prediction = prediction[inds]

        ann_ids = dataset.coco.getAnnIds(imgIds=original_id)
        anno = dataset.coco.loadAnns(ann_ids)
        gt_boxes = [obj["bbox"] for obj in anno if obj["iscrowd"] == 0]
        gt_boxes = jt.array(gt_boxes).reshape(-1, 4)  # guard against no boxes
        gt_boxes = BoxList(gt_boxes, (image_width, image_height), mode="xywh").convert(
            "xyxy"
        )
        gt_areas = jt.array([obj["area"] for obj in anno if obj["iscrowd"] == 0])

        if len(gt_boxes) == 0:
            continue

        valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
        gt_boxes = gt_boxes[valid_gt_inds]

        num_pos += len(gt_boxes)

        if len(gt_boxes) == 0:
            continue

        if len(prediction) == 0:
            continue

        if limit is not None and len(prediction) > limit:
            prediction = prediction[:limit]

        overlaps = boxlist_iou(prediction, gt_boxes)

        _gt_overlaps = jt.zeros(len(gt_boxes))
        for j in range(min(len(prediction), len(gt_boxes))):
            # find which proposal box maximally covers each gt box
            # and get the iou amount of coverage for each gt box
            max_overlaps, argmax_overlaps = overlaps.max(dim=0)

            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ovr, gt_ind = max_overlaps.max(dim=0)
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1

        # append recorded iou coverage level
        gt_overlaps.append(_gt_overlaps)
    gt_overlaps = jt.contrib.concat(gt_overlaps, dim=0)
    _, gt_overlaps = jt.argsort(gt_overlaps)

    if thresholds is None:
        step = 0.05
        thresholds = jt.array(np.arange(0.5, 0.95 + 1e-5, step)).float32()
    recalls = jt.zeros(thresholds.shape, dtype=thresholds.dtype)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        "ar": ar,
        "recalls": recalls,
        "thresholds": thresholds,
        "gt_overlaps": gt_overlaps,
        "num_pos": num_pos,
    }
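As the commented-out `np.trapz` line hints, the returned `ar` is the mean recall over the IoU thresholds 0.5, 0.55, ..., 0.95, the usual discrete form of average recall. A tiny numeric illustration with a made-up recall curve:

import numpy as np

thresholds = np.arange(0.5, 0.95 + 1e-5, 0.05)     # 10 IoU thresholds
recalls = np.linspace(0.8, 0.4, len(thresholds))   # hypothetical recall curve
ar = recalls.mean()                                # 0.6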
Example #17
    def forward_for_single_feature_map(self, locations, box_cls,
                                       box_regression, centerness,
                                       image_sizes):
        """
        Arguments:
            anchors: list[BoxList]
            box_cls: tensor of size N, A * C, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        N, C, H, W = box_cls.shape

        # put in the same format as locations
        box_cls = box_cls.view(N, C, H, W).permute(0, 2, 3, 1)
        box_cls = box_cls.reshape(N, -1, self.num_classes - 1).sigmoid()
        box_regression = box_regression.view(N, self.dense_points * 4, H,
                                             W).permute(0, 2, 3, 1)
        box_regression = box_regression.reshape(N, -1, 4)
        centerness = centerness.view(N, self.dense_points, H,
                                     W).permute(0, 2, 3, 1)
        centerness = centerness.reshape(N, -1).sigmoid()

        candidate_inds = box_cls > self.pre_nms_thresh
        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max_v=self.pre_nms_top_n)

        # multiply the classification scores with centerness scores
        box_cls = box_cls * centerness.unsqueeze(2)
        results = []
        for i in range(N):
            per_box_cls = box_cls[i]

            per_candidate_inds = candidate_inds[i]
            per_box_cls = per_box_cls[per_candidate_inds]

            per_candidate_nonzeros = per_candidate_inds.nonzero()
            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1] + 1

            per_box_regression = box_regression[i]
            per_box_regression = per_box_regression[per_box_loc]
            per_locations = locations[per_box_loc]

            per_pre_nms_top_n = pre_nms_top_n[i]

            if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
                per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n.item(), sorted=False)
                per_class = per_class[top_k_indices]
                per_box_regression = per_box_regression[top_k_indices]
                per_locations = per_locations[top_k_indices]

            # decode (l, t, r, b) distances relative to each location
            detections = jt.stack([
                per_locations[:, 0] - per_box_regression[:, 0],
                per_locations[:, 1] - per_box_regression[:, 1],
                per_locations[:, 0] + per_box_regression[:, 2],
                per_locations[:, 1] + per_box_regression[:, 3],
            ], dim=1)

            h, w = image_sizes[i]
            boxlist = BoxList(detections, (int(w), int(h)), mode="xyxy")
            boxlist.add_field("labels", per_class)
            if self.is_sqrt:
                boxlist.add_field("scores", per_box_cls.sqrt())
            else:
                boxlist.add_field("scores", per_box_cls)
            if boxlist.bbox.numel() > 0:
                boxlist = boxlist.clip_to_image(remove_empty=False)
                boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results
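The `jt.stack` call decodes FCOS-style regression: each location (x, y) predicts distances (l, t, r, b) to the four sides of its box, giving (x - l, y - t, x + r, y + b). A minimal numeric check of that decoding:

import jittor as jt

locations = jt.array([[100.0, 80.0]])               # one (x, y) location
regression = jt.array([[10.0, 20.0, 30.0, 40.0]])   # (l, t, r, b)

boxes = jt.stack([
    locations[:, 0] - regression[:, 0],   # x1 = 90
    locations[:, 1] - regression[:, 1],   # y1 = 60
    locations[:, 0] + regression[:, 2],   # x2 = 130
    locations[:, 1] + regression[:, 3],   # y2 = 120
], dim=1)                                 # [[90., 60., 130., 120.]]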
Example #18
def im_detect_bbox_aug(model, images):
    # Collect detections computed under different transformations
    boxlists_ts = []
    for _ in range(len(images)):
        boxlists_ts.append([])

    def add_preds_t(boxlists_t):
        for i, boxlist_t in enumerate(boxlists_t):
            if len(boxlists_ts[i]) == 0:
                # The first one is identity transform, no need to resize the boxlist
                boxlists_ts[i].append(boxlist_t)
            else:
                # Resize the boxlist as the first one
                boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))

    # Compute detections for the original image (identity transform)
    boxlists_i = im_detect_bbox(
        model,
        images,
        cfg.INPUT.MIN_SIZE_TEST,
        cfg.INPUT.MAX_SIZE_TEST,
    )
    add_preds_t(boxlists_i)

    # Perform detection on the horizontally flipped image
    if cfg.TEST.BBOX_AUG.H_FLIP:
        boxlists_hf = im_detect_bbox_hflip(
            model,
            images,
            cfg.INPUT.MIN_SIZE_TEST,
            cfg.INPUT.MAX_SIZE_TEST,
        )
        add_preds_t(boxlists_hf)

    # Compute detections at different scales
    for scale in cfg.TEST.BBOX_AUG.SCALES:
        max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
        boxlists_scl = im_detect_bbox_scale(
            model,
            images,
            scale,
            max_size,
        )
        add_preds_t(boxlists_scl)

        if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
            boxlists_scl_hf = im_detect_bbox_scale(model,
                                                   images,
                                                   scale,
                                                   max_size,
                                                   hflip=True)
            add_preds_t(boxlists_scl_hf)

    # Merge boxlists detected by different bbox aug params
    boxlists = []
    for i, boxlist_ts in enumerate(boxlists_ts):
        bbox = jt.contrib.concat([boxlist_t.bbox for boxlist_t in boxlist_ts])
        scores = jt.contrib.concat(
            [boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
        boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
        boxlist.add_field('scores', scores)
        boxlists.append(boxlist)

    # Apply NMS and limit the final detections
    results = []
    post_processor = make_roi_box_post_processor(cfg)
    for boxlist in boxlists:
        results.append(
            post_processor.filter_results(boxlist,
                                          cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES))

    return results
Example #19
def calcIoU(box1, box2, image_size=(600, 600)):
    boxlist1 = BoxList(box1, image_size)
    boxlist2 = BoxList(box2, image_size)
    iou = boxlist_iou(boxlist1, boxlist2)
    return iou
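A hypothetical call to `calcIoU`: two 10x10 boxes whose overlap is a 5x5 region intersect with area 25 and have union 100 + 100 - 25 = 175, so the IoU is about 0.143 (implementations that pad widths and heights by one pixel will report a slightly different value):

box1 = [[0, 0, 10, 10]]     # xyxy
box2 = [[5, 5, 15, 15]]
iou = calcIoU(box1, box2)   # 1x1 matrix, ~0.143 under continuous-area IoU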
Example #20
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        result = []
        # Apply threshold on detection probabilities, then apply NMS.
        # Skip j = 0, because it's the background class.
        inds_all = scores > self.score_thresh
        inds_nonzeros = [inds_all[:, j].nonzero() for j in range(1, num_classes)]
        jt.sync(inds_nonzeros)

        for j in range(1, num_classes):
            inds = inds_nonzeros[j - 1]
            if inds.shape[0] == 0:
                continue
            inds = inds.squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms)
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", jt.full((num_labels,), j).int32()
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        if not result.has_field('labels'):
            result.add_field('labels', jt.empty((0,)))
        if not result.has_field('scores'):
            result.add_field('scores', jt.empty((0,)))
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = jt.kthvalue(
                cls_scores, number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh
            keep = jt.nonzero(keep).squeeze(1)
            result = result[keep]
        return result