Example #1
    def test_nms(self, model_type, tmp_path):
        torch.random.manual_seed(42)
        image_size = 512
        num_classes = 1
        batch_size = 2
        strides = [8, 16, 32, 64, 128]
        sizes = [(image_size // stride, ) * 2 for stride in strides]

        img = torch.zeros(batch_size, 1, image_size, image_size)
        pred_cls = [
            torch.rand(batch_size, num_classes, *size).sub_(0.45).clamp_min_(0)
            for size in sizes
        ]
        pred_cls[0].clamp_min_(0.111)  # in-place: ensure the first feature level has nonzero scores
        pred_reg = [
            torch.rand(batch_size, 4,
                       *size).mul_(image_size / 4).round_().clamp_min_(24)
            for size in sizes
        ]
        pred_centerness = [torch.rand(batch_size, 1, *size) for size in sizes]

        final_pred = model_type.postprocess(pred_cls,
                                            pred_reg,
                                            pred_centerness,
                                            strides,
                                            threshold=0.5,
                                            nms_threshold=None)
        final_boxes = final_pred[..., :4]
        final_scores = final_pred[..., 4:5]
        final_cls = final_pred[..., 5:]

        img_with_box = visualize_bbox(img, final_boxes, final_cls,
                                      final_scores)

        subpath = Path(
            self.DEST,
            "fcos_targets") if self.DEST is not None else Path(tmp_path)
        subpath.mkdir(parents=True, exist_ok=True)

        for i, item in enumerate(img_with_box):
            filename = os.path.join(subpath, f"created_targets_no_nms_{i}.png")
            self.save(filename, item)

        final_pred = model_type.postprocess(pred_cls,
                                            pred_reg,
                                            pred_centerness,
                                            strides,
                                            threshold=0.5,
                                            nms_threshold=0.01)
        final_boxes = final_pred[..., :4]
        final_scores = final_pred[..., 4:5]
        final_cls = final_pred[..., 5:]

        img_with_box = visualize_bbox(img, final_boxes, final_cls,
                                      final_scores)

        for i, item in enumerate(img_with_box):
            filename = os.path.join(subpath, f"created_targets_nms_{i}.png")
            self.save(filename, item)
Example #2
    def test_class_names(self, img, label, bbox):
        if label is None:
            pytest.skip()
        class_names = {1: "foo", 2: "bar"}
        no_names = visualize_bbox(img, bbox, label)
        names = visualize_bbox(img, bbox, label, class_names=class_names)
        assert names.shape == no_names.shape and not torch.allclose(
            torch.as_tensor(names), torch.as_tensor(no_names))

        if self.DEST is not None and img.ndim == 3:
            dest = os.path.join(self.DEST, "test_class_names.png")
            self.save(dest, names)
Example #3
    def test_multiple_scores(self, img, label, bbox, scores):
        torch.random.manual_seed(42)
        if scores is None:
            pytest.skip()
        tensor1 = torch.rand_like(torch.as_tensor(scores))
        tensor2 = torch.rand_like(torch.as_tensor(scores))
        tensor = torch.cat([tensor1, tensor2], dim=-1)

        scores1 = visualize_bbox(img, bbox, label, scores=tensor1)
        scores2 = visualize_bbox(img, bbox, label, scores=tensor)
        assert scores1.shape == scores2.shape and not torch.allclose(
            torch.as_tensor(scores1), torch.as_tensor(scores2))

        if self.DEST is not None and img.ndim == 3:
            dest = os.path.join(self.DEST, "test_multiple_scores.png")
            self.save(dest, scores2)
Example #4
from typing import Dict

from torch import Tensor


def bbox_overlay(img: Tensor, keypoint_dict: Dict[str, Tensor], **kwargs) -> Tensor:
    coords = keypoint_dict["coords"]
    cls = keypoint_dict.get("class", None)
    score = keypoint_dict.get("score", None)

    result = visualize_bbox(img, coords, cls, score, thickness=1, **kwargs)
    return result
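A minimal usage sketch, assuming the `bbox_overlay` definition above and `visualize_bbox` are already in scope; the image size, box coordinates (taken as (x1, y1, x2, y2)), class index, and score below are made up for illustration:

import torch

# Hypothetical inputs: one RGB image and a single box with class and score.
img = torch.zeros(3, 128, 128)
keypoint_dict = {
    "coords": torch.tensor([[16.0, 16.0, 96.0, 96.0]]),  # assumed (x1, y1, x2, y2)
    "class": torch.tensor([[0]]),
    "score": torch.tensor([[0.9]]),
}

overlay = bbox_overlay(img, keypoint_dict)  # delegates to visualize_bbox(..., thickness=1)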
Example #5
    def test_save_output(self, center_radius, tmp_path):
        image_size = 512
        num_classes = 2
        target_bbox = torch.tensor(
            [
                [140, 140, 144, 144],
                [10, 10, 128, 128],
                [32, 64, 128, 256],
                [250, 10, 250 + 31, 10 + 19],
                [256, 256, 400, 512],
            ]
        )
        img = torch.zeros(1, image_size, image_size)
        target_cls = torch.tensor([1, 0, 1, 1, 0]).unsqueeze_(-1)

        strides = (8, 16, 32, 64, 128)
        sizes: Tuple[Tuple[int, int]] = tuple((image_size // stride,) * 2 for stride in strides)  # type: ignore

        criterion = FCOSLoss(strides, num_classes, radius=center_radius)
        targets = criterion.create_targets(target_bbox, target_cls, sizes)
        cls_targets = [t[0] for t in targets]
        reg_targets = [t[1] for t in targets]
        centerness_targets = [t[2] for t in targets]

        reg_targets = [torch.linalg.norm(x.float().clamp_min(0), dim=-3, keepdim=True) for x in reg_targets]
        reg_targets = [x.div(x.amax(dim=(-1, -2, -3), keepdim=True).clamp_min_(1)) for x in reg_targets]
        centerness_targets = [x.clamp_min_(0) for x in centerness_targets]

        img_with_box = visualize_bbox(img, target_bbox, target_cls)[None]

        subpath = Path(self.DEST, "fcos_targets") if self.DEST is not None else Path(tmp_path)
        subpath.mkdir(exist_ok=True)

        subpath = Path(subpath, f"radius_{center_radius}")
        subpath.mkdir(exist_ok=True)

        for level in range(len(strides)):
            image_path = str(subpath)
            c = cls_targets[level][None]
            r = reg_targets[level][None]
            cent = centerness_targets[level][None]

            filename = os.path.join(image_path, f"reg_level_{level}.png")
            self.blend_and_save(filename, r, img_with_box)

            filename = os.path.join(image_path, f"centerness_level_{level}.png")
            self.blend_and_save(filename, cent, img_with_box)

            for cls_idx in range(c.shape[1]):
                filename = os.path.join(image_path, f"cls_{cls_idx}_level_{level}.png")
                self.blend_and_save(filename, c[..., cls_idx, :, :][None], img_with_box)
Example #6
    def test_visualize_bbox(self, img, label, bbox, class_names, scores):
        if not isinstance(img, torch.Tensor):
            pytest.skip()

        result = visualize_bbox(img, bbox, label, scores, class_names)
        assert isinstance(result, torch.Tensor)
        assert result.shape[-2:] == img.shape[-2:]
        assert result.shape[-3] == 3
        if img.ndim != 2:
            assert result.ndim == img.ndim

        if self.DEST is not None and img.ndim == 3:
            dest = os.path.join(self.DEST, "test_visualize_bbox.png")
            self.save(dest, result)
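Taken together, the tests above suggest a call signature of visualize_bbox(img, bbox, classes, scores, class_names) returning a 3-channel overlay with the same spatial size as the input. A minimal sketch of that contract; the import path is a guess and the tensors are made up:

import torch
from combustion.vision import visualize_bbox  # assumed import path

img = torch.zeros(1, 64, 64)                   # CHW grayscale input
bbox = torch.tensor([[8.0, 8.0, 48.0, 48.0]])  # one (x1, y1, x2, y2) box
label = torch.tensor([[0]])

out = visualize_bbox(img, bbox, label)
assert out.shape[-3:] == (3, 64, 64)           # RGB overlay, same spatial size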
Example #7
    def test_save_output(self, model_type, tmp_path):
        torch.random.manual_seed(42)
        image_size = 512
        num_classes = 2
        batch_size = 3
        center_radius = 2
        target_bbox = (torch.tensor([
            [10, 10, 128, 128],
            [12, 12, 130, 130],
            [32, 64, 128, 256],
            [256, 256, 400, 512],
        ]).unsqueeze_(0).repeat(batch_size, 1, 1))
        img = torch.zeros(batch_size, 1, image_size, image_size)
        target_cls = torch.tensor([0, 0, 1,
                                   1]).unsqueeze_(-1).repeat(batch_size, 1, 1)

        strides = (8, 16, 32, 64, 128)
        sizes: Tuple[Tuple[int, int]] = tuple(
            (image_size // stride, ) * 2 for stride in strides)  # type: ignore

        criterion = FCOSLoss(strides, num_classes, radius=center_radius)
        cls_targets, reg_targets, centerness_targets = criterion.create_targets(
            target_bbox, target_cls, sizes)

        final_pred = model_type.postprocess(cls_targets,
                                            reg_targets,
                                            centerness_targets,
                                            strides,
                                            threshold=0.5)
        final_boxes = final_pred[..., :4]
        final_scores = final_pred[..., 4:5]
        final_cls = final_pred[..., 5:]

        img_with_box = visualize_bbox(img, final_boxes, final_cls,
                                      final_scores)

        subpath = Path(
            self.DEST,
            "fcos_targets") if self.DEST is not None else Path(tmp_path)
        subpath.mkdir(parents=True, exist_ok=True)

        for i, item in enumerate(img_with_box):
            filename = os.path.join(subpath, f"created_targets_{i}.png")
            self.save(filename, item)
Example #8
    def test_cuda(self, img, label, bbox, class_names, scores):
        if not isinstance(img, torch.Tensor):
            pytest.skip()

        img = img.cuda()
        bbox = bbox.cuda()
        label = label.cuda() if label is not None else None
        scores = scores.cuda() if scores is not None else None
        result = visualize_bbox(img, bbox, label, scores, class_names)
        assert isinstance(result, torch.Tensor)
        assert result.dtype == torch.uint8
        assert result.shape[-2:] == img.shape[-2:]
        assert result.shape[-3] == 3
        if img.ndim != 2:
            assert result.ndim == img.ndim

        if self.DEST is not None and img.ndim == 3:
            dest = os.path.join(self.DEST, "test_visualize_bbox.png")
            self.save(dest, result)
Example #9
    def test_save_output(self, model_type):
        torch.random.manual_seed(42)
        image_size = 512
        num_classes = 2
        batch_size = 3
        center_radius = 2
        target_bbox = (torch.tensor([
            [10, 10, 128, 128],
            [12, 12, 130, 130],
            [32, 64, 128, 256],
            [256, 256, 400, 512],
        ]).unsqueeze_(0).repeat(batch_size, 1, 1))
        img = torch.zeros(batch_size, 1, image_size, image_size)
        target_cls = torch.tensor([0, 0, 1,
                                   1]).unsqueeze_(-1).repeat(batch_size, 1, 1)

        strides = [8, 16, 32, 64, 128]
        sizes = [(image_size // stride, ) * 2 for stride in strides]

        criterion = FCOSLoss(strides, num_classes, radius=center_radius)
        cls_targets, reg_targets, centerness_targets = criterion.create_targets(
            target_bbox, target_cls, sizes)

        final_pred, _ = model_type.create_boxes(cls_targets,
                                                reg_targets,
                                                centerness_targets,
                                                strides,
                                                threshold=0.5)
        final_boxes = final_pred[..., :4]
        final_scores = final_pred[..., 4:5]
        final_cls = final_pred[..., 5:]

        img_with_box = visualize_bbox(img, final_boxes, final_cls,
                                      final_scores)

        subpath = os.path.join(self.DEST, "fcos_targets")
        if not os.path.exists(subpath):
            os.makedirs(subpath)

        for i, item in enumerate(img_with_box):
            filename = os.path.join(subpath, f"created_targets_{i}.png")
            self.save(filename, item)
Example #10
from typing import Any, Dict

from torch import Tensor


def bbox_overlay(img: Tensor, keypoint_dict: Dict[str, Any],
                 **kwargs) -> Tensor:
    coords = keypoint_dict["coords"]
    cls = keypoint_dict.get("class", None)
    score = keypoint_dict.get("score", None)
    names = keypoint_dict.get("names", None)

    assert coords is None or isinstance(coords, Tensor)
    assert cls is None or isinstance(cls, Tensor)
    assert score is None or isinstance(score, Tensor)
    assert names is None or isinstance(names, dict)

    result = visualize_bbox(img,
                            coords,
                            cls,
                            score,
                            names,
                            thickness=1,
                            **kwargs)
    return result
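As with Example #4, a small usage sketch under the same assumptions (the definition above and `visualize_bbox` in scope), this time exercising the optional "names" mapping; all values are illustrative:

import torch

keypoint_dict = {
    "coords": torch.tensor([[10.0, 10.0, 80.0, 80.0]]),
    "class": torch.tensor([[1]]),
    "score": torch.tensor([[0.75]]),
    "names": {0: "foo", 1: "bar"},  # class index -> display name
}
overlay = bbox_overlay(torch.zeros(3, 128, 128), keypoint_dict)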
Example #11
    def test_inputs_unchanged(self, img, label, bbox, class_names, scores):
        def copy(x):
            return torch.as_tensor(x).clone()

        img_c = copy(img)
        bbox_c = copy(bbox)
        if label is not None:
            label_c = copy(label)
        if scores is not None:
            scores_c = copy(scores)

        result = visualize_bbox(img,
                                bbox=bbox,
                                classes=label,
                                scores=scores,
                                class_names=class_names)

        assert torch.allclose(torch.as_tensor(img), img_c)
        assert torch.allclose(torch.as_tensor(bbox), bbox_c)
        if label is not None:
            assert torch.allclose(torch.as_tensor(label), label_c)
        if scores is not None:
            assert torch.allclose(torch.as_tensor(scores), scores_c)