Example 1
    def test_paste_mask_scriptable(self):
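        # Scripting paste_masks_in_image with TorchScript must produce the same output as eager execution.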
        scripted_f = torch.jit.script(paste_masks_in_image)
        N = 10
        masks = torch.rand(N, 28, 28)
        boxes = Boxes(random_boxes(N, 100)).tensor
        image_shape = (150, 150)

        out = paste_masks_in_image(masks, boxes, image_shape)
        scripted_out = scripted_f(masks, boxes, image_shape)
        self.assertTrue(torch.equal(out, scripted_out))
Example 2
    def process_annotation(self, ann, mask_side_len=28):
        # Parse annotation data
        img_info = self.coco.loadImgs(ids=[ann["image_id"]])[0]
        height, width = img_info["height"], img_info["width"]
        gt_polygons = [
            np.array(p, dtype=np.float64) for p in ann["segmentation"]
        ]
        gt_bbox = BoxMode.convert(ann["bbox"], BoxMode.XYWH_ABS,
                                  BoxMode.XYXY_ABS)
        gt_bit_mask = polygons_to_bitmask(gt_polygons, height, width)

        # Rasterize the GT into box-aligned masks with three different methods.
        torch_gt_bbox = torch.tensor(gt_bbox).to(dtype=torch.float32).reshape(-1, 4)
        box_bitmasks = {
            "polygon": PolygonMasks([gt_polygons]).crop_and_resize(
                torch_gt_bbox, mask_side_len
            )[0],
            "gridsample": rasterize_polygons_with_grid_sample(
                gt_bit_mask, gt_bbox, mask_side_len
            ),
            "roialign": BitMasks(torch.from_numpy(gt_bit_mask[None, :, :])).crop_and_resize(
                torch_gt_bbox, mask_side_len
            )[0],
        }

        # Paste each box mask back to full image resolution with both the legacy and the aligned implementation.
        results = defaultdict(dict)
        for k, box_bitmask in box_bitmasks.items():
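            # paste_mask_in_image_old expects a padded mask together with a box scaled to match the padding.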
            padded_bitmask, scale = pad_masks(box_bitmask[None, :, :], 1)
            scaled_boxes = scale_boxes(torch_gt_bbox, scale)

            r = results[k]
            r["old"] = paste_mask_in_image_old(padded_bitmask[0],
                                               scaled_boxes[0],
                                               height,
                                               width,
                                               threshold=0.5)
            r["aligned"] = paste_masks_in_image(box_bitmask[None, :, :],
                                                Boxes(torch_gt_bbox),
                                                (height, width))[0]

        table = []
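        # Score every rasterize/paste combination by IoU against the full-image GT bitmask.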
        for rasterize_method, r in results.items():
            for paste_method, mask in r.items():
                mask = np.asarray(mask)
                iou = iou_between_full_image_bit_masks(
                    gt_bit_mask.astype("uint8"), mask)
                table.append((rasterize_method, paste_method, iou))
        return table
Example 3
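# Timed inner loop of a paste_masks_in_image micro-benchmark; n, m (masks), b (boxes),
# image_shape and device are provided by the enclosing setup code (not shown).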
def bench():
    for _ in range(n):
        paste_masks_in_image(m, b, image_shape)
    if device.type == "cuda":
        torch.cuda.synchronize()
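The fragment above is only the timed closure; the setup that defines n, m, b, image_shape and device is not shown. Below is a minimal, self-contained sketch of how such a closure could be driven, assuming detectron2 is installed. The tensor shapes, box generation, iteration count, and the simple perf_counter timing are illustrative choices, not the original benchmark harness.

import time

import torch

from detectron2.layers.mask_ops import paste_masks_in_image
from detectron2.structures import Boxes


def benchmark_paste(device_str="cpu", n=10, num_masks=50, image_shape=(800, 800)):
    device = torch.device(device_str)

    # Illustrative inputs: random 28x28 mask probabilities and random well-formed
    # boxes that stay inside the target image.
    m = torch.rand(num_masks, 28, 28, device=device)
    xy = torch.rand(num_masks, 2, device=device) * 500
    wh = torch.rand(num_masks, 2, device=device) * 200 + 1
    b = Boxes(torch.cat([xy, xy + wh], dim=1))

    def bench():
        for _ in range(n):
            paste_masks_in_image(m, b, image_shape)
        if device.type == "cuda":
            # CUDA kernels are asynchronous; wait for them before reading the clock.
            torch.cuda.synchronize()

    bench()  # warm-up run
    start = time.perf_counter()
    bench()
    elapsed = time.perf_counter() - start
    print(f"{device_str}: {elapsed / n * 1000:.3f} ms per call")


if __name__ == "__main__":
    benchmark_paste("cpu")
    if torch.cuda.is_available():
        benchmark_paste("cuda")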