Example #1
    def forward_for_single_feature_map(self, anchors, box_cls, box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            box_cls: tensor of size N, A * C, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        N, _, H, W = box_cls.shape
        A = box_regression.shape[1] // 4
        C = box_cls.shape[1] // A

        # put in the same format as anchors
        box_cls = permute_and_flatten(box_cls, N, A, C, H, W)
        box_cls = box_cls.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        candidate_inds = box_cls > self.pre_nms_thresh

        pre_nms_top_n = candidate_inds.reshape(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max_v=self.pre_nms_top_n)

        results = []
        for i in range(box_cls.shape[0]):
            per_box_cls, per_box_regression, per_pre_nms_top_n, per_candidate_inds, per_anchors = \
                box_cls[i], box_regression[i], pre_nms_top_n[i], candidate_inds[i], anchors[i]

            # Sort and select TopN
            # TODO most of this can be made out of the loop for
            # all images.
            # TODO:Yang: Not easy to do. Because the numbers of detections are
            # different in each image. Therefore, this part needs to be done
            # per image.
            per_box_cls = per_box_cls[per_candidate_inds]

            per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n, sorted=False)

            per_candidate_nonzeros = \
                    per_candidate_inds.nonzero()[top_k_indices, :]

            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1]
            if per_class.numel() > 0:
                per_class += 1

            detections = self.box_coder.decode(
                per_box_regression[per_box_loc, :].view(-1, 4),
                per_anchors.bbox[per_box_loc, :].view(-1, 4))

            boxlist = BoxList(detections, per_anchors.size, mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field("scores", per_box_cls)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results
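Both this example and the RPN example further below rely on a permute_and_flatten helper that reorders the raw head output into the per-anchor layout used by the anchors. In maskrcnn-benchmark it is defined essentially as the sketch below; Jittor ports keep the same reshape/permute pattern.

def permute_and_flatten(layer, N, A, C, H, W):
    # (N, A * C, H, W) -> (N, A, C, H, W) -> (N, H, W, A, C) -> (N, H * W * A, C)
    layer = layer.view(N, -1, C, H, W)
    layer = layer.permute(0, 3, 4, 1, 2)
    layer = layer.reshape(N, -1, C)
    return layer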
Example #2
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)

        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = [obj["bbox"] for obj in anno]
        boxes = torch.as_tensor(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh").convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        classes = torch.tensor(classes)
        target.add_field("labels", classes)

        masks = [obj["segmentation"] for obj in anno]
        masks = SegmentationMask(masks, img.size, mode='poly')
        target.add_field("masks", masks)

        target = target.clip_to_image(remove_empty=True)

        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx
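The json_category_id_to_contiguous_id lookup used above maps COCO's sparse category ids onto a contiguous label range. It is typically built once in the dataset constructor; a sketch, assuming a pycocotools COCO handle stored as self.coco:

        # COCO category ids run from 1 to 90 with gaps; remap them to 1..80,
        # keeping 0 free for the background class
        self.json_category_id_to_contiguous_id = {
            v: i + 1 for i, v in enumerate(self.coco.getCatIds())
        }
        self.contiguous_category_id_to_json_id = {
            v: k for k, v in self.json_category_id_to_contiguous_id.items()
        }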
Example #3
    def execute(self, x, boxes):
        """
        Arguments:
            x (Tensor): the mask logits
            boxes (list[BoxList]): bounding boxes that are used as
                reference, one for each image

        Returns:
            results (list[BoxList]): one BoxList for each image, containing
                the extra field mask
        """
        mask_prob = x.sigmoid()

        # select masks corresponding to the predicted classes
        num_masks = x.shape[0]
        labels = [bbox.get_field("labels") for bbox in boxes]
        labels = jt.contrib.concat(labels, dim=0)
        index = jt.arange(num_masks)
        mask_prob = mask_prob[index, labels].unsqueeze(1)

        boxes_per_image = [len(box) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)
        if self.masker:
            mask_prob = self.masker(mask_prob, boxes)

        results = []
        for prob, box in zip(mask_prob, boxes):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            bbox.add_field("mask", prob)
            results.append(bbox)

        return results
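The gather mask_prob[index, labels] picks, for each detection, the mask channel of its predicted class. A minimal shape check of that indexing pattern, assuming Jittor (shapes and labels are made up for illustration):

import jittor as jt

# 3 detections, 5 class channels of 28x28 mask logits
mask_logits = jt.randn(3, 5, 28, 28)
labels = jt.array([2, 0, 4])          # predicted class per detection
index = jt.arange(3)
selected = mask_logits.sigmoid()[index, labels].unsqueeze(1)
print(selected.shape)                  # 3 x 1 x 28 x 28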
Example #4
    def get_groundtruth(self, index):
        img_id = self.ids[index]
        anno = ET.parse(self._annopath % img_id).getroot()
        anno = self._preprocess_annotation(anno)

        height, width = anno["im_info"]
        target = BoxList(anno["boxes"], (width, height), mode="xyxy")
        target.add_field("labels", anno["labels"])
        target.add_field("difficult", anno["difficult"])
        return target
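A hypothetical usage, where dataset stands in for an instance of this VOC-style dataset class:

# hypothetical usage: inspect the ground truth of the first image
target = dataset.get_groundtruth(0)
print(target.bbox.shape)               # (num_objects, 4), xyxy format
print(target.get_field("labels"))      # class index per object
print(target.get_field("difficult"))   # VOC "difficult" flag per object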
Example #5
    def forward_for_single_feature_map(self, anchors, objectness,
                                       box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        device = objectness.device
        N, A, H, W = objectness.shape

        # put in the same format as anchors
        objectness = permute_and_flatten(objectness, N, A, 1, H, W).view(N, -1)
        objectness = objectness.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n,
                                               dim=1,
                                               sorted=True)

        batch_idx = torch.arange(N, device=device)[:, None]
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        concat_anchors = torch.cat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        proposals = self.box_coder.decode(box_regression.view(-1, 4),
                                          concat_anchors.view(-1, 4))

        proposals = proposals.view(N, -1, 4)

        result = []
        for proposal, score, im_shape in zip(proposals, objectness,
                                             image_shapes):
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result
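remove_small_boxes, used here and in most of the other post-processing examples, drops proposals whose width or height falls below min_size. The maskrcnn-benchmark reference is roughly the sketch below (PyTorch-style; a Jittor port replaces unbind/nonzero with the corresponding ops):

def remove_small_boxes(boxlist, min_size):
    # convert to xywh so widths and heights are direct columns
    xywh_boxes = boxlist.convert("xywh").bbox
    _, _, ws, hs = xywh_boxes.unbind(dim=1)
    keep = ((ws >= min_size) & (hs >= min_size)).nonzero().squeeze(1)
    return boxlist[keep]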
Example #6
    def filter_results_v2(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes,4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        # Apply the score threshold, then a single multi-class NMS.
        # The background column (j = 0) is dropped before thresholding.
        scores = scores[:, 1:]
        inds_all = scores > self.score_thresh

        inds_all = inds_all.nonzero()
        labels = inds_all[:, 1] + 1
        ind_scores = scores[inds_all[:, 0], inds_all[:, 1]]
        # +1 keeps the box column aligned with the label, since the background
        # column was removed from `scores` but is still present in `boxes`
        ind_boxes = boxes[inds_all[:, 0], inds_all[:, 1] + 1, :]
        ind_boxes = ind_boxes.reshape(-1, 4)
        result = BoxList(ind_boxes, boxlist.size, mode="xyxy")
        result.add_field("scores", ind_scores)
        result.add_field("labels", labels)
        result = boxlist_ml_nms(result, self.nms)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**, keeping the
        # top-scoring boxes via a single topk
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            scores, indices = jt.topk(
                cls_scores, self.detections_per_img
            )
            result = result[indices]
        return result
Example #7
    def prepare_boxlist(self, boxes, scores, image_shape):
        """
        Returns BoxList from `boxes` and adds probability scores information
        as an extra field
        `boxes` has shape (#detections, 4 * #classes), where each row represents
        a list of predicted bounding boxes for each of the object classes in the
        dataset (including the background class). The detections in each row
        originate from the same object proposal.
        `scores` has shape (#detections, #classes), where each row represents a
        list of object detection confidence scores for each of the object classes
        in the dataset (including the background class). `scores[i, j]` corresponds
        to the box at `boxes[i, j * 4:(j + 1) * 4]`.
        """
        boxes = boxes.reshape(-1, 4)
        scores = scores.reshape(-1)
        boxlist = BoxList(boxes, image_shape, mode="xyxy")
        boxlist.add_field("scores", scores)
        return boxlist
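A hypothetical call sequence inside the post-processor's forward pass; boxes, scores, image_shapes and results are stand-in names for the per-image head outputs and the output list:

        # hypothetical usage: wrap one image's raw predictions, then threshold + NMS
        for boxes_per_img, scores_per_img, image_shape in zip(boxes, scores, image_shapes):
            boxlist = self.prepare_boxlist(boxes_per_img, scores_per_img, image_shape)
            boxlist = self.filter_results(boxlist, num_classes)
            results.append(boxlist)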
Example #8
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        device = scores.device
        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        for j in range(1, num_classes):
            inds = inds_all[:, j].nonzero().squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(
                boxlist_for_class, self.nms
            )
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", torch.full((num_labels,), j, dtype=torch.int64, device=device)
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = torch.kthvalue(
                cls_scores.cpu(), number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh.item()
            keep = torch.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
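cat_boxlist, used to merge the per-class results above, concatenates BoxLists that share the same image size, mode and fields. A simplified sketch of the reference helper (validation asserts omitted):

def cat_boxlist(bboxes):
    # all inputs are assumed to share size, mode and field names
    size = bboxes[0].size
    mode = bboxes[0].mode
    fields = bboxes[0].fields()

    cat_boxes = BoxList(torch.cat([b.bbox for b in bboxes], dim=0), size, mode)
    for field in fields:
        data = torch.cat([b.get_field(field) for b in bboxes], dim=0)
        cat_boxes.add_field(field, data)
    return cat_boxes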
Example #9
    def select_over_all_levels(self, boxlists):
        num_images = len(boxlists)
        results = []
        for i in range(num_images):
            scores = boxlists[i].get_field("scores")
            labels = boxlists[i].get_field("labels")
            boxes = boxlists[i].bbox
            boxlist = boxlists[i]
            result = []
            # skip the background
            for j in range(1, self.num_classes):
                inds = (labels == j).nonzero().view(-1)

                scores_j = scores[inds]
                boxes_j = boxes[inds, :].view(-1, 4)
                boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
                boxlist_for_class.add_field("scores", scores_j)
                boxlist_for_class = boxlist_nms(boxlist_for_class,
                                                self.nms_thresh,
                                                score_field="scores")
                num_labels = len(boxlist_for_class)
                boxlist_for_class.add_field("labels",
                                            jt.full((num_labels, ), j).int32())
                result.append(boxlist_for_class)

            result = cat_boxlist(result)
            number_of_detections = len(result)

            # Limit to max_per_image detections **over all classes**
            if number_of_detections > self.fpn_post_nms_top_n > 0:
                cls_scores = result.get_field("scores")
                image_thresh, _ = jt.kthvalue(
                    cls_scores,
                    number_of_detections - self.fpn_post_nms_top_n + 1)
                keep = cls_scores >= image_thresh
                keep = jt.nonzero(keep).squeeze(1)
                result = result[keep]
            results.append(result)
        return results
Example #10
    def execute(self, x, boxes):
        mask_prob = x

        scores = None
        if self.keypointer:
            mask_prob, scores = self.keypointer(x, boxes)

        assert len(boxes) == 1, "Only non-batched inference supported for now"
        boxes_per_image = [box.bbox.size(0) for box in boxes]
        mask_prob = mask_prob.split(boxes_per_image, dim=0)
        scores = scores.split(boxes_per_image, dim=0)

        results = []
        for prob, box, score in zip(mask_prob, boxes, scores):
            bbox = BoxList(box.bbox, box.size, mode="xyxy")
            for field in box.fields():
                bbox.add_field(field, box.get_field(field))
            prob = PersonKeypoints(prob, box.size)
            prob.add_field("logits", score)
            bbox.add_field("keypoints", prob)
            results.append(bbox)

        return results
Example #11
    def __getitem__(self, idx):
        img_path = self.img_paths[idx]
        ann_path = self.ann_paths[idx]

        if self.mode == "mask":
            ann = jt.array(np.asarray(Image.open(ann_path)))
            # masks are represented with tensors
            boxes, segmentations, labels = self._processBinayMasks(ann)
        else:
            with open(ann_path, "r") as ann_file:
                ann = json.load(ann_file)
            # masks are represented with polygons
            boxes, segmentations, labels = self._processPolygons(ann)

        boxes, segmentations, labels = self._filterGT(boxes, segmentations,
                                                      labels)

        if len(segmentations) == 0:
            empty_ann_path = self.get_img_info(idx)["ann_path"]
            print("EMPTY ENTRY:", empty_ann_path)
            # self.img_paths.pop(idx)
            # self.ann_paths.pop(idx)
            img, target, _ = self[(idx + 1) % len(self)]

            # just override this image with the next
            return img, target, idx

        img = Image.open(img_path)
        # Compose all into a BoxList instance
        target = BoxList(boxes, img.size, mode="xyxy")
        target.add_field("labels", jt.array(labels))
        masks = SegmentationMask(segmentations, img.size, mode=self.mode)
        target.add_field("masks", masks)
        if self.transforms is not None:
            img, target = self.transforms(img, target)

        return img, target, idx
Example #12
    def __getitem__(self, idx):
        img, anno = super(COCODataset, self).__getitem__(idx)
        if not self.is_train:
            # keep `target` defined even when no transforms are configured
            target = None
            if self._transforms is not None:
                img, target = self._transforms(img, None)
            return img, target, idx
        # filter crowd annotations
        # TODO might be better to add an extra field
        anno = [obj for obj in anno if obj["iscrowd"] == 0]

        boxes = np.array([obj["bbox"] for obj in anno])
        boxes = boxes.reshape(-1, 4)
        # boxes = jt.array(boxes).reshape(-1, 4)  # guard against no boxes
        target = BoxList(boxes, img.size, mode="xywh",
                         to_jittor=False)  #.convert("xyxy")

        classes = [obj["category_id"] for obj in anno]
        classes = [self.json_category_id_to_contiguous_id[c] for c in classes]
        # classes = jt.array(classes)
        target.add_field("labels", classes)

        if self.with_masks and anno and "segmentation" in anno[0]:
            masks = [obj["segmentation"] for obj in anno]
            masks = SegmentationMask(masks,
                                     img.size,
                                     mode='poly',
                                     to_jittor=False)
            target.add_field("masks", masks)

        if self.with_masks and anno and "keypoints" in anno[0]:
            keypoints = [obj["keypoints"] for obj in anno]
            keypoints = PersonKeypoints(keypoints, img.size)
            target.add_field("keypoints", keypoints)

        #target = target.clip_to_image(remove_empty=True)

        if self._transforms is not None:
            img, target = self._transforms(img, target)
        return img, target, idx
Example #13
    def forward_for_mask(self, boxlists, pixel_embed):
        N, dim, m_h, m_w = pixel_embed.shape
        new_boxlists = []
        stride = self.fpn_strides[0] / self.mask_scale_factor
        for im in range(N):
            boxlist = boxlists[im]
            boxes = boxlist.bbox
            input_w, input_h = boxlist.size
            proposal_embed = boxlist.get_field('proposal_embed')
            if proposal_embed.shape[0] == 0:
                new_boxlist = BoxList(boxes, boxlist.size, mode="xyxy")
                new_boxlist.add_field("labels", boxlist.get_field("labels"))
                new_boxlist.add_field("scores", boxlist.get_field("scores"))
                new_boxlist.add_field('mask', jt.array([]))
                if self.post_process_masks:
                    new_boxlist.add_field('stride', jt.array([1]))
                    new_boxlist.add_field('mask_th', jt.array([0.0]))
                else:
                    new_boxlist.add_field('stride', jt.array([stride]))
                    new_boxlist.add_field('mask_th', jt.array([self.mask_th]))

                new_boxlists.append(new_boxlist)
                continue

            mask_boxes = boxes / stride
            box_masks = boxes_to_masks(mask_boxes, m_h, m_w)
            proposal_margin = boxlist.get_field('proposal_margin')
            mask_prob = self.compute_mask_prob(pixel_embed[im], proposal_embed,
                                               proposal_margin, mask_boxes)
            masks = mask_prob * box_masks.float()

            if self.post_process_masks:
                masks = nn.interpolate(X=masks.unsqueeze(1).float(),
                                       scale_factor=stride,
                                       mode="bilinear",
                                       align_corners=False) > self.mask_th
                masks = masks[:, 0, :input_h, :input_w]
            new_boxlist = BoxList(boxes, boxlist.size, mode="xyxy")
            new_boxlist.add_field('mask', masks)
            new_boxlist.add_field("labels", boxlist.get_field("labels"))
            new_boxlist.add_field("scores", boxlist.get_field("scores"))
            if self.post_process_masks:
                new_boxlist.add_field('stride', jt.array([1]))
                new_boxlist.add_field('mask_th', jt.array([0.0]))
            else:
                new_boxlist.add_field('stride', jt.array([stride]))
                new_boxlist.add_field('mask_th', jt.array([self.mask_th]))

            new_boxlists.append(new_boxlist)

        return new_boxlists
Example #14
def im_detect_bbox_aug(model, images):
    # Collect detections computed under different transformations
    boxlists_ts = []
    for _ in range(len(images)):
        boxlists_ts.append([])

    def add_preds_t(boxlists_t):
        for i, boxlist_t in enumerate(boxlists_t):
            if len(boxlists_ts[i]) == 0:
                # The first one is identity transform, no need to resize the boxlist
                boxlists_ts[i].append(boxlist_t)
            else:
                # Resize the boxlist as the first one
                boxlists_ts[i].append(boxlist_t.resize(boxlists_ts[i][0].size))

    # Compute detections for the original image (identity transform)
    boxlists_i = im_detect_bbox(
        model,
        images,
        cfg.INPUT.MIN_SIZE_TEST,
        cfg.INPUT.MAX_SIZE_TEST,
    )
    add_preds_t(boxlists_i)

    # Perform detection on the horizontally flipped image
    if cfg.TEST.BBOX_AUG.H_FLIP:
        boxlists_hf = im_detect_bbox_hflip(
            model,
            images,
            cfg.INPUT.MIN_SIZE_TEST,
            cfg.INPUT.MAX_SIZE_TEST,
        )
        add_preds_t(boxlists_hf)

    # Compute detections at different scales
    for scale in cfg.TEST.BBOX_AUG.SCALES:
        max_size = cfg.TEST.BBOX_AUG.MAX_SIZE
        boxlists_scl = im_detect_bbox_scale(
            model,
            images,
            scale,
            max_size,
        )
        add_preds_t(boxlists_scl)

        if cfg.TEST.BBOX_AUG.SCALE_H_FLIP:
            boxlists_scl_hf = im_detect_bbox_scale(model,
                                                   images,
                                                   scale,
                                                   max_size,
                                                   hflip=True)
            add_preds_t(boxlists_scl_hf)

    # Merge boxlists detected by different bbox aug params
    boxlists = []
    for i, boxlist_ts in enumerate(boxlists_ts):
        bbox = jt.contrib.concat([boxlist_t.bbox for boxlist_t in boxlist_ts])
        scores = jt.contrib.concat(
            [boxlist_t.get_field('scores') for boxlist_t in boxlist_ts])
        boxlist = BoxList(bbox, boxlist_ts[0].size, boxlist_ts[0].mode)
        boxlist.add_field('scores', scores)
        boxlists.append(boxlist)

    # Apply NMS and limit the final detections
    results = []
    post_processor = make_roi_box_post_processor(cfg)
    for boxlist in boxlists:
        results.append(
            post_processor.filter_results(boxlist,
                                          cfg.MODEL.ROI_BOX_HEAD.NUM_CLASSES))

    return results
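The behaviour above is driven by the TEST.BBOX_AUG options of a yacs-style config. A sketch with illustrative values only (ENABLED is the usual gate for this augmentation path, the remaining names appear in the code above):

# illustrative settings only
cfg.TEST.BBOX_AUG.ENABLED = True
cfg.TEST.BBOX_AUG.H_FLIP = True
cfg.TEST.BBOX_AUG.SCALES = (400, 600, 800, 1000, 1200)
cfg.TEST.BBOX_AUG.MAX_SIZE = 2000
cfg.TEST.BBOX_AUG.SCALE_H_FLIP = True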
Example #15
    def forward_for_single_feature_map(self, locations, box_cls,
                                       box_regression, centerness,
                                       image_sizes):
        """
        Arguments:
            locations: tensor of size H * W, 2
            box_cls: tensor of size N, C, H, W
            box_regression: tensor of size N, dense_points * 4, H, W
            centerness: tensor of size N, dense_points, H, W
            image_sizes: list of (h, w) image sizes
        """
        N, C, H, W = box_cls.shape

        # put in the same format as locations
        box_cls = box_cls.view(N, C, H, W).permute(0, 2, 3, 1)
        box_cls = box_cls.reshape(N, -1, self.num_classes - 1).sigmoid()
        box_regression = box_regression.view(N, self.dense_points * 4, H,
                                             W).permute(0, 2, 3, 1)
        box_regression = box_regression.reshape(N, -1, 4)
        centerness = centerness.view(N, self.dense_points, H,
                                     W).permute(0, 2, 3, 1)
        centerness = centerness.reshape(N, -1).sigmoid()

        candidate_inds = box_cls > self.pre_nms_thresh
        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max_v=self.pre_nms_top_n)

        # multiply the classification scores with centerness scores
        box_cls = box_cls * centerness[:, :].unsqueeze(2)
        results = []
        for i in range(N):
            per_box_cls = box_cls[i]
            per_candidate_inds = candidate_inds[i]
            per_box_cls = per_box_cls[per_candidate_inds]

            per_candidate_nonzeros = per_candidate_inds.nonzero()
            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1] + 1

            per_box_regression = box_regression[i]
            per_box_regression = per_box_regression[per_box_loc]
            per_locations = locations[per_box_loc]

            per_pre_nms_top_n = pre_nms_top_n[i]

            if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
                per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n.item(), sorted=False)
                per_class = per_class[top_k_indices]
                per_box_regression = per_box_regression[top_k_indices]
                per_locations = per_locations[top_k_indices]

            # decode the regressed (l, t, r, b) distances into xyxy corners
            detections = jt.stack([
                per_locations[:, 0] - per_box_regression[:, 0],
                per_locations[:, 1] - per_box_regression[:, 1],
                per_locations[:, 0] + per_box_regression[:, 2],
                per_locations[:, 1] + per_box_regression[:, 3],
            ], dim=1)

            h, w = image_sizes[i]
            boxlist = BoxList(detections, (int(w), int(h)), mode="xyxy")
            boxlist.add_field("labels", per_class)
            if self.is_sqrt:
                boxlist.add_field("scores", per_box_cls.sqrt())
            else:
                boxlist.add_field("scores", per_box_cls)
            if boxlist.bbox.numel() > 0:
                boxlist = boxlist.clip_to_image(remove_empty=False)
                boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results
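The jt.stack above turns the regressed (l, t, r, b) distances back into xyxy corners around each location. A toy check of that arithmetic, assuming Jittor:

import jittor as jt

# one location at (100, 50) with distances l, t, r, b = 10, 5, 20, 15
per_locations = jt.array([[100.0, 50.0]])
per_box_regression = jt.array([[10.0, 5.0, 20.0, 15.0]])
detections = jt.stack([
    per_locations[:, 0] - per_box_regression[:, 0],  # x1 = 90
    per_locations[:, 1] - per_box_regression[:, 1],  # y1 = 45
    per_locations[:, 0] + per_box_regression[:, 2],  # x2 = 120
    per_locations[:, 1] + per_box_regression[:, 3],  # y2 = 65
], dim=1)
print(detections)  # a single box: x1, y1, x2, y2 = 90, 45, 120, 65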
Example #16
    def forward_for_single_feature_map(self, locations, box_cls,
                                       box_regression, centerness,
                                       proposal_embed, proposal_margin,
                                       image_sizes, level):
        """
        Arguments:
            anchors: list[BoxList]
            box_cls: tensor of size N, A * C, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        N, C, H, W = box_cls.shape

        # put in the same format as locations
        box_cls = box_cls.view(N, C, H, W).transpose(0, 2, 3, 1)
        box_cls = box_cls.reshape(N, -1, C).sigmoid()
        box_regression = box_regression.view(N, 4, H, W).transpose(0, 2, 3, 1)
        box_regression = box_regression.reshape(N, -1, 4)
        centerness = centerness.view(N, 1, H, W).transpose(0, 2, 3, 1)
        centerness = centerness.reshape(N, -1).sigmoid()

        proposal_embed = proposal_embed.view(N, -1, H, W).transpose(0, 2, 3, 1)
        proposal_embed = proposal_embed.reshape(N, H * W, -1)
        proposal_margin = proposal_margin.view(N, 1, H,
                                               W).transpose(0, 2, 3, 1)
        proposal_margin = proposal_margin.reshape(N, -1)

        candidate_inds = box_cls > self.pre_nms_thresh
        pre_nms_top_n = candidate_inds.view(N, -1).sum(1)
        pre_nms_top_n = pre_nms_top_n.clamp(max_v=self.pre_nms_top_n)

        # multiply the classification scores with centerness scores
        box_cls = box_cls * centerness[:, :].unsqueeze(2)

        results = []
        for i in range(N):
            per_box_cls = box_cls[i]
            per_candidate_inds = candidate_inds[i]
            per_box_cls = per_box_cls[per_candidate_inds]

            per_candidate_nonzeros = per_candidate_inds.nonzero()
            per_box_loc = per_candidate_nonzeros[:, 0]
            per_class = per_candidate_nonzeros[:, 1]
            if per_candidate_nonzeros.numel() > 0:
                per_class = per_candidate_nonzeros[:, 1] + 1

            per_box_regression = box_regression[i]
            per_box_regression = per_box_regression[per_box_loc]
            per_locations = locations[per_box_loc]

            per_proposal_embed = proposal_embed[i]
            per_proposal_embed = per_proposal_embed[per_box_loc, :]
            per_proposal_margin = proposal_margin[i][per_box_loc]

            per_pre_nms_top_n = pre_nms_top_n[i]

            if per_candidate_inds.sum().item() > per_pre_nms_top_n.item():
                per_box_cls, top_k_indices = \
                    per_box_cls.topk(per_pre_nms_top_n.item(), sorted=False)
                per_class = per_class[top_k_indices]
                per_box_regression = per_box_regression[top_k_indices]
                per_locations = per_locations[top_k_indices]
                per_proposal_embed = per_proposal_embed[top_k_indices]
                per_proposal_margin = per_proposal_margin[top_k_indices]

            detections = jt.stack([
                per_locations[:, 0] - per_box_regression[:, 0],
                per_locations[:, 1] - per_box_regression[:, 1],
                per_locations[:, 0] + per_box_regression[:, 2],
                per_locations[:, 1] + per_box_regression[:, 3],
            ],
                                  dim=1)

            h, w = image_sizes[i]
            boxlist = BoxList(detections, (int(w), int(h)), mode="xyxy")
            boxlist.add_field("labels", per_class)
            boxlist.add_field("scores", per_box_cls)
            boxlist.add_field("proposal_embed", per_proposal_embed)
            boxlist.add_field("proposal_margin", per_proposal_margin)
            if boxlist.bbox.numel() > 0:
                boxlist = boxlist.clip_to_image(remove_empty=False)
                boxlist = remove_small_boxes(boxlist, self.min_size)
            results.append(boxlist)

        return results
Example #17
    def filter_results(self, boxlist, num_classes):
        """Returns bounding-box detection results by thresholding on scores and
        applying non-maximum suppression (NMS).
        """
        # unwrap the boxlist to avoid additional overhead.
        # if we had multi-class NMS, we could perform this directly on the boxlist
        boxes = boxlist.bbox.reshape(-1, num_classes * 4)
        scores = boxlist.get_field("scores").reshape(-1, num_classes)

        result = []
        # Apply threshold on detection probabilities and apply NMS
        # Skip j = 0, because it's the background class
        inds_all = scores > self.score_thresh
        # compute all per-class nonzero indices first and evaluate them with a
        # single sync, rather than one host round-trip per class in the loop
        inds_nonzeros = [inds_all[:, j].nonzero() for j in range(1, num_classes)]
        jt.sync(inds_nonzeros)

        for j in range(1, num_classes):
            inds = inds_nonzeros[j - 1]
            if inds.shape[0] == 0:
                continue
            inds = inds.squeeze(1)
            scores_j = scores[inds, j]
            boxes_j = boxes[inds, j * 4 : (j + 1) * 4]
            boxlist_for_class = BoxList(boxes_j, boxlist.size, mode="xyxy")
            boxlist_for_class.add_field("scores", scores_j)
            boxlist_for_class = boxlist_nms(boxlist_for_class, self.nms)
            num_labels = len(boxlist_for_class)
            boxlist_for_class.add_field(
                "labels", jt.full((num_labels,), j).int32()
            )
            result.append(boxlist_for_class)

        result = cat_boxlist(result)
        if not result.has_field('labels'):
            result.add_field('labels', jt.empty((0,)))
        if not result.has_field('scores'):
            result.add_field('scores', jt.empty((0,)))
        number_of_detections = len(result)

        # Limit to max_per_image detections **over all classes**
        if number_of_detections > self.detections_per_img > 0:
            cls_scores = result.get_field("scores")
            image_thresh, _ = jt.kthvalue(
                cls_scores, number_of_detections - self.detections_per_img + 1
            )
            keep = cls_scores >= image_thresh
            keep = jt.nonzero(keep).squeeze(1)
            result = result[keep]
        return result
Example #18
    def forward_for_single_feature_map(self, anchors, objectness,
                                       box_regression):
        """
        Arguments:
            anchors: list[BoxList]
            objectness: tensor of size N, A, H, W
            box_regression: tensor of size N, A * 4, H, W
        """
        N, A, H, W = objectness.shape

        # put in the same format as anchors
        objectness = permute_and_flatten(objectness, N, A, 1, H,
                                         W).reshape(N, -1)
        objectness = objectness.sigmoid()

        box_regression = permute_and_flatten(box_regression, N, A, 4, H, W)

        num_anchors = A * H * W

        pre_nms_top_n = min(self.pre_nms_top_n, num_anchors)
        objectness, topk_idx = objectness.topk(pre_nms_top_n,
                                               dim=1,
                                               sorted=True)

        batch_idx = jt.arange(N).unsqueeze(1)
        box_regression = box_regression[batch_idx, topk_idx]

        image_shapes = [box.size for box in anchors]
        concat_anchors = jt.contrib.concat([a.bbox for a in anchors], dim=0)
        concat_anchors = concat_anchors.reshape(N, -1, 4)[batch_idx, topk_idx]

        proposals = self.box_coder.decode(box_regression.reshape(-1, 4),
                                          concat_anchors.reshape(-1, 4))

        proposals = proposals.reshape(N, -1, 4)

        result = []
        for i in range(len(image_shapes)):
            proposal, score, im_shape = proposals[i], objectness[i], image_shapes[i]
            boxlist = BoxList(proposal, im_shape, mode="xyxy")
            boxlist.add_field("objectness", score)
            boxlist = boxlist.clip_to_image(remove_empty=False)
            boxlist = remove_small_boxes(boxlist, self.min_size)
            boxlist = boxlist_nms(
                boxlist,
                self.nms_thresh,
                max_proposals=self.post_nms_top_n,
                score_field="objectness",
            )
            result.append(boxlist)
        return result
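In the surrounding RPN post-processor, this per-level routine is typically applied to every FPN level and the per-level BoxLists are then regrouped per image. A hedged sketch of that driver, following the maskrcnn-benchmark structure (details may differ in a given port):

    def execute(self, anchors, objectness, box_regression):
        # anchors: list[list[BoxList]], one inner list per image, one entry per level
        sampled_boxes = []
        anchors = list(zip(*anchors))  # regroup as [level][image]
        for a, o, b in zip(anchors, objectness, box_regression):
            sampled_boxes.append(self.forward_for_single_feature_map(a, o, b))

        # transpose back to [image][level] and merge the levels for each image
        boxlists = list(zip(*sampled_boxes))
        boxlists = [cat_boxlist(list(boxlist)) for boxlist in boxlists]
        return boxlists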