Example 1
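These snippets are methods of larger test classes (hence `self`) and omit their imports. A minimal preamble they appear to assume is sketched below; the module paths for FCOSLoss, FCOSDecoder, and visualize_bbox are a guess based on the combustion reference in Example 12, so verify them against your own project layout.

import os
from pathlib import Path
from typing import List, Optional, Tuple

import torch
from torch import Tensor

# Assumed module paths -- FCOSLoss/FCOSDecoder/visualize_bbox look like the
# combustion library, but adjust these imports to wherever they live for you.
from combustion.nn import FCOSLoss, FCOSDecoder
from combustion.vision import visualize_bbox

# Example 12 additionally relies on effdet.create_model, torchmetrics.MetricCollection,
# and a BoxAveragePrecision metric (project-specific).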
    def test_compute_loss(self):
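        # Ground-truth boxes in (x1, y1, x2, y2) pixel coordinates, paired one-to-one with the class labels below.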
        target_bbox = torch.tensor(
            [
                [0, 0, 9, 9],
                [3, 4, 8, 6],
                [4, 4, 6, 6],
                [32, 32, 88, 88],
            ]
        )
        target_cls = torch.tensor([0, 0, 1, 0]).unsqueeze_(-1)

        num_classes = 2
        strides = (8, 16, 32, 64, 128)
        base_size = 512
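        # One square (H, W) feature map per FPN level: stride 8 at base_size 512 gives (64, 64), down to (4, 4) at stride 128.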
        sizes = [(base_size // stride,) * 2 for stride in strides]

        pred_cls = [torch.rand(num_classes, *size, requires_grad=True) for size in sizes]
        pred_reg = [torch.rand(4, *size, requires_grad=True).mul(512).round() for size in sizes]
        pred_centerness = [torch.rand(1, *size, requires_grad=True) for size in sizes]

        criterion = FCOSLoss(strides, num_classes)
        cls_loss, reg_loss, centerness_loss = criterion.compute_from_box_target(
            pred_cls, pred_reg, pred_centerness, target_bbox, target_cls
        )

        assert isinstance(cls_loss, Tensor)
        assert isinstance(reg_loss, Tensor)
        assert isinstance(centerness_loss, Tensor)

        assert cls_loss.numel() == 1
        assert reg_loss.numel() == 1
        assert centerness_loss.numel() == 1

        loss = cls_loss + reg_loss + centerness_loss
        loss.backward()
Example 2
    def test_create_targets(self, center_radius):
        num_classes = 2
        target_bbox = torch.randint(0, 100, (2, 10, 4))
        target_cls = torch.randint(0, num_classes, (2, 10, 1))

        strides = (8, 16, 32, 64, 128)
        base_size = 512
        sizes: Tuple[Tuple[int, int], ...] = tuple((base_size // stride,) * 2 for stride in strides)  # type: ignore

        criterion = FCOSLoss(strides, num_classes)
        criterion.create_targets(target_bbox, target_cls, sizes)
Example 3
    def test_save_output(self, center_radius, tmp_path):
        image_size = 512
        num_classes = 2
        target_bbox = torch.tensor(
            [
                [140, 140, 144, 144],
                [10, 10, 128, 128],
                [32, 64, 128, 256],
                [250, 10, 250 + 31, 10 + 19],
                [256, 256, 400, 512],
            ]
        )
        img = torch.zeros(1, image_size, image_size)
        target_cls = torch.tensor([1, 0, 1, 1, 0]).unsqueeze_(-1)

        strides = (8, 16, 32, 64, 128)
        sizes: Tuple[Tuple[int, int], ...] = tuple((image_size // stride,) * 2 for stride in strides)  # type: ignore

        criterion = FCOSLoss(strides, num_classes, radius=center_radius)
        targets = criterion.create_targets(target_bbox, target_cls, sizes)
        cls_targets = [t[0] for t in targets]
        reg_targets = [t[1] for t in targets]
        centerness_targets = [t[2] for t in targets]

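        # Collapse the 4-channel LTRB regression targets into a single normalized magnitude map so they can be blended with the image.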
        reg_targets = [torch.linalg.norm(x.float().clamp_min(0), dim=-3, keepdim=True) for x in reg_targets]
        reg_targets = [x.div(x.amax(dim=(-1, -2, -3), keepdim=True).clamp_min_(1)) for x in reg_targets]
        centerness_targets = [x.clamp_min_(0) for x in centerness_targets]

        img_with_box = visualize_bbox(img, target_bbox, target_cls)[None]

        subpath = Path(self.DEST, "fcos_targets") if self.DEST is not None else Path(tmp_path)
        subpath.mkdir(exist_ok=True)

        subpath = Path(subpath, f"radius_{center_radius}")
        subpath.mkdir(exist_ok=True)

        for level in range(len(strides)):
            image_path = str(subpath)
            c = cls_targets[level][None]
            r = reg_targets[level][None]
            cent = centerness_targets[level][None]

            filename = os.path.join(image_path, f"reg_level_{level}.png")
            self.blend_and_save(filename, r, img_with_box)

            filename = os.path.join(image_path, f"centerness_level_{level}.png")
            self.blend_and_save(filename, cent, img_with_box)

            for cls_idx in range(c.shape[1]):
                filename = os.path.join(image_path, f"cls_{cls_idx}_level_{level}.png")
                self.blend_and_save(filename, c[..., cls_idx, :, :][None], img_with_box)
Example 4
    def test_forward_backward(self):
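        # Rows of [-1, -1, -1, -1] (with class -1) pad images that have fewer boxes; the loss is expected to ignore them.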
        target_bbox = torch.tensor(
            [
                [
                    [0, 0, 9, 9],
                    [10, 10, 490, 490],
                    [-1, -1, -1, -1],
                ],
                [
                    [32, 32, 88, 88],
                    [42, 32, 84, 96],
                    [-1, -1, -1, -1],
                ],
                [
                    [10, 20, 50, 60],
                    [10, 20, 500, 600],
                    [20, 20, 84, 84],
                ],
                [
                    [-1, -1, -1, -1],
                    [-1, -1, -1, -1],
                    [-1, -1, -1, -1],
                ],
            ]
        )

        target_cls = torch.tensor(
            [
                [0, 1, -1],
                [0, 0, -1],
                [0, 0, 1],
                [-1, -1, -1],
            ]
        ).unsqueeze_(-1)

        num_classes = 2
        strides = (8, 16, 32, 64, 128)
        base_size = 512
        sizes: Tuple[Tuple[int, int], ...] = tuple((base_size // stride,) * 2 for stride in strides)  # type: ignore

        criterion = FCOSLoss(strides, num_classes, radius=1.5)
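        # Recycle the created targets as near-perfect predictions, mapping probability maps back to logits for the loss and decoder.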
        pred_cls, pred_reg, pred_centerness = criterion.create_targets(target_bbox, target_cls, sizes)
        pred_cls = [torch.logit(x, 1e-4) for x in pred_cls]
        pred_centerness = [torch.logit(x.clamp_(min=0, max=1), 1e-4) for x in pred_centerness]
        pred_reg = [x.clamp_min(0) for x in pred_reg]

        output = FCOSDecoder.postprocess(pred_cls, pred_reg, pred_centerness, list(strides), from_logits=True)
        criterion(pred_cls, pred_reg, pred_centerness, target_bbox, target_cls)
Example 5
    def test_create_classification_target(self, stride, center_radius, size_target):
        bbox = torch.tensor(
            [
                [0, 0, 9, 9],
                [3, 4, 8, 6],
                [4, 4, 6, 6],
            ]
        )
        cls = torch.tensor([0, 0, 1]).unsqueeze_(-1)
        mask = FCOSLoss.bbox_to_mask(bbox, stride, size_target, center_radius)
        num_classes = 2

        result = FCOSLoss.create_classification_target(bbox, cls, mask, num_classes, size_target)

        assert isinstance(result, Tensor)
        assert result.shape == torch.Size([num_classes, *size_target])
Example 6
    def test_save_output(self, model_type, tmp_path):
        torch.random.manual_seed(42)
        image_size = 512
        num_classes = 2
        batch_size = 3
        center_radius = 2
        target_bbox = torch.tensor(
            [
                [10, 10, 128, 128],
                [12, 12, 130, 130],
                [32, 64, 128, 256],
                [256, 256, 400, 512],
            ]
        ).unsqueeze_(0).repeat(batch_size, 1, 1)
        img = torch.zeros(batch_size, 1, image_size, image_size)
        target_cls = torch.tensor([0, 0, 1, 1]).unsqueeze_(-1).repeat(batch_size, 1, 1)

        strides = (8, 16, 32, 64, 128)
        sizes: Tuple[Tuple[int, int], ...] = tuple((image_size // stride,) * 2 for stride in strides)  # type: ignore

        criterion = FCOSLoss(strides, num_classes, radius=center_radius)
        cls_targets, reg_targets, centerness_targets = criterion.create_targets(target_bbox, target_cls, sizes)

        final_pred = model_type.postprocess(
            cls_targets, reg_targets, centerness_targets, strides, threshold=0.5
        )
        final_boxes = final_pred[..., :4]
        final_scores = final_pred[..., 4:5]
        final_cls = final_pred[..., 5:]

        img_with_box = visualize_bbox(img, final_boxes, final_scores, final_cls)

        subpath = Path(self.DEST, "fcos_targets") if self.DEST is not None else Path(tmp_path)
        subpath.mkdir(parents=True, exist_ok=True)

        for i, item in enumerate(img_with_box):
            filename = os.path.join(subpath, f"created_targets_{i}.png")
            self.save(filename, item)
Example 7
    def test_create_coordinate_grid(self, height, width, stride, indexing):
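        # coordinate_grid returns the input-image coordinates of each feature cell center, so cell (0, 0) sits at (stride / 2, stride / 2).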
        grid = FCOSLoss.coordinate_grid(height, width, stride, indexing)
        assert tuple(grid.shape[-2:]) == (height, width)
        assert grid.shape[0] == 2
        assert torch.allclose(grid[:, 0, 0], torch.tensor([stride / 2, stride / 2]))

        expected = torch.tensor([width, height]).float().mul_(stride).sub_(stride / 2)
        if indexing == "hw":
            expected = expected.roll(1)
        assert torch.allclose(grid[:, -1, -1], expected)
Example 8
    def test_save_output(self, model_type):
        torch.random.manual_seed(42)
        image_size = 512
        num_classes = 2
        batch_size = 3
        center_radius = 2
        target_bbox = torch.tensor(
            [
                [10, 10, 128, 128],
                [12, 12, 130, 130],
                [32, 64, 128, 256],
                [256, 256, 400, 512],
            ]
        ).unsqueeze_(0).repeat(batch_size, 1, 1)
        img = torch.zeros(batch_size, 1, image_size, image_size)
        target_cls = torch.tensor([0, 0, 1, 1]).unsqueeze_(-1).repeat(batch_size, 1, 1)

        strides = [8, 16, 32, 64, 128]
        sizes = [(image_size // stride,) * 2 for stride in strides]

        criterion = FCOSLoss(strides, num_classes, radius=center_radius)
        cls_targets, reg_targets, centerness_targets = criterion.create_targets(target_bbox, target_cls, sizes)

        final_pred, _ = model_type.create_boxes(
            cls_targets, reg_targets, centerness_targets, strides, threshold=0.5
        )
        final_boxes = final_pred[..., :4]
        final_scores = final_pred[..., 4:5]
        final_cls = final_pred[..., 5:]

        img_with_box = visualize_bbox(img, final_boxes, final_scores, final_cls)

        subpath = os.path.join(self.DEST, "fcos_targets")
        os.makedirs(subpath, exist_ok=True)

        for i, item in enumerate(img_with_box):
            filename = os.path.join(subpath, f"created_targets_{i}.png")
            self.save(filename, item)
Example 9
    def test_call(self):
        target_bbox = torch.tensor(
            [
                [
                    [0, 0, 9, 9],
                    [3, 4, 8, 6],
                    [-1, -1, -1, -1],
                ],
                [
                    [32, 32, 88, 88],
                    [-1, -1, -1, -1],
                    [-1, -1, -1, -1],
                ],
                [
                    [-1, -1, -1, -1],
                    [-1, -1, -1, -1],
                    [-1, -1, -1, -1],
                ],
            ]
        )

        target_cls = torch.tensor(
            [
                [0, 1, -1],
                [0, -1, -1],
                [-1, -1, -1],
            ]
        ).unsqueeze_(-1)

        batch_size = target_bbox.shape[0]
        num_classes = 2
        strides = (8, 16, 32, 64, 128)
        base_size = 512
        sizes = [(base_size // stride,) * 2 for stride in strides]

        pred_cls = [torch.rand(batch_size, num_classes, *size, requires_grad=True) for size in sizes]
        pred_reg = [torch.rand(batch_size, 4, *size, requires_grad=True).mul(512).round() for size in sizes]
        pred_centerness = [torch.rand(batch_size, 1, *size, requires_grad=True) for size in sizes]

        criterion = FCOSLoss(strides, num_classes)
        cls_loss, reg_loss, centerness_loss = criterion(pred_cls, pred_reg, pred_centerness, target_bbox, target_cls)

        assert isinstance(cls_loss, Tensor)
        assert isinstance(reg_loss, Tensor)
        assert isinstance(centerness_loss, Tensor)

        assert cls_loss.numel() == 1
        assert reg_loss.numel() == 1
        assert centerness_loss.numel() == 1

        loss = cls_loss + reg_loss + centerness_loss
        assert not loss.isnan().any()
        loss.backward()
Example 10
    def test_create_regression_target(self, size_target, stride):
        bbox = torch.tensor(
            [
                [0, 0, 9, 9],
                [2, 3, 8, 7],
            ]
        ).mul_(stride)
        result = FCOSLoss.create_regression_target(bbox, stride, size_target)

        assert isinstance(result, Tensor)
        assert result.shape == torch.Size([bbox.shape[-2], 4, *size_target])

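        # Regression channels are ordered (left, top, right, bottom): distances from each cell center to the box edges.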
        for box, res in zip(bbox, result):
            h1, w1, h2, w2 = box[1], box[0], box[3], box[2]
            hs1 = h1.floor_divide(stride)
            ws1 = w1.floor_divide(stride)
            hs2 = h2.floor_divide(stride)
            ws2 = w2.floor_divide(stride)

            pos_region = res[..., hs1:hs2, ws1:ws2]
            if pos_region.numel():
                assert (pos_region >= 0).all()
                assert pos_region.max() <= box.max()

            def discretize(x):
                return x.float().floor_divide(stride).mul_(stride).add_(stride / 2)

            # left
            assert res[0, hs1, ws1] == stride / 2, "left target at top left corner"
            assert res[0, hs2, ws1] == stride / 2, "left target at bottom left corner"
            assert res[0, hs1, ws2] == discretize(w2 - w1), "left target at top right corner"
            assert res[0, hs2, ws2] == discretize(w2 - w1), "left target at bottom right corner"

            # top
            assert res[1, hs1, ws1] == stride / 2, "top target at top left corner"
            assert res[1, hs2, ws1] == discretize(h2 - h1), "top target at bottom left corner"
            assert res[1, hs1, ws2] == stride / 2, "top target at top right corner"
            assert res[1, hs2, ws2] == discretize(h2 - h1), "top target at bottom right corner"

            # right
            assert res[2, hs1, ws1] == w2 - w1 - stride / 2, "right target at top left corner"
            assert res[2, hs2, ws1] == w2 - w1 - stride / 2, "right target at bottom left corner"
            assert res[2, hs1, ws2] == stride / 2, "right target at top right corner"
            assert res[2, hs2, ws2] == stride / 2, "right target at bottom right corner"

            # bottom
            assert res[3, hs1, ws1] == h2 - h1 - stride / 2, "bottom target at top left corner"
            assert res[3, hs2, ws1] == stride / 2, "bottom target at bottom left corner"
            assert res[3, hs1, ws2] == h2 - h1 - stride / 2, "bottom target at top right corner"
            assert res[3, hs2, ws2] == stride / 2, "bottom target at bottom right corner"
Example 11
    def test_bbox_to_mask(self, stride, center_radius, size_target):
        bbox = torch.tensor(
            [
                [0, 0, 9, 9],
                [2, 2, 5, 5],
                [1, 1, 2, 2],
            ]
        )
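        # bbox_to_mask marks which feature cells each box supervises; center_radius restricts positives to a window of radius * stride around the box center.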
        result = FCOSLoss.bbox_to_mask(bbox, stride, size_target, center_radius)

        assert isinstance(result, Tensor)
        assert result.shape == torch.Size([bbox.shape[-2], *size_target])

        for box, res in zip(bbox, result):
            center_x = (box[0] + box[2]).true_divide(2)
            center_y = (box[1] + box[3]).true_divide(2)
            radius_x = (box[2] - box[0]).true_divide(2)
            radius_y = (box[3] - box[1]).true_divide(2)

            if center_radius is not None:
                x1 = center_x - center_radius * stride
                x2 = center_x + center_radius * stride
                y1 = center_y - center_radius * stride
                y2 = center_y + center_radius * stride
            else:
                x1 = center_x - radius_x
                x2 = center_x + radius_x
                y1 = center_y - radius_y
                y2 = center_y + radius_y

            x1.clamp_min_(center_x - radius_x)
            x2.clamp_max_(center_x + radius_x)
            y1.clamp_min_(center_y - radius_y)
            y2.clamp_max_(center_y + radius_y)

            h = torch.arange(res.shape[-2], dtype=torch.float, device=box.device)
            w = torch.arange(res.shape[-1], dtype=torch.float, device=box.device)

            mesh = torch.stack(torch.meshgrid(h, w, indexing="ij"), 0).mul_(stride).add_(stride / 2)
            lower_bound = torch.stack([x1, y1]).view(2, 1, 1)
            upper_bound = torch.stack([x2, y2]).view(2, 1, 1)
            mask = (mesh >= lower_bound).logical_and_(mesh <= upper_bound).all(dim=-3)
            pos_region = res[mask]

            assert res.any()
            assert pos_region.all()
            assert res.sum() - pos_region.sum() == 0
Example 12
    def __init__(
        self,
        num_classes: int,
        effdet_backbone: str = "tf_efficientdet_d4",
        strides: List[int] = [8, 16, 32, 64, 128],
        sizes: List[Tuple[int, int]] = [(-1, 64), (64, 128), (128, 256), (256, 512), (512, 10000000)],
        threshold: Optional[float] = None,
        nms_threshold: Optional[float] = None,
        *args,
        **kwargs,
    ):
        super().__init__(*args, **kwargs)
        self.save_hyperparameters()
        self.num_classes = int(num_classes)
        self.strides = [int(x) for x in strides]
        self.sizes = [(int(x), int(y)) for x, y in sizes]

        # TODO train this from scratch using combustion EfficientDet
        # self._model = EffDetFCOS.from_predefined(
        #    compound_coeff, self.num_classes, fpn_levels=[3, 5, 7, 8, 9], strides=self.strides
        # )

        self._model = create_model(effdet_backbone, pretrained=True)
        del self._model.box_net
        del self._model.class_net
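        # Drop the stock EfficientDet box/class heads; the FCOS decoder constructed below replaces them.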

        fpn_filters = self._model.config.fpn_channels
        num_repeats = 4

        self.fcos = FCOSDecoder(fpn_filters, self.num_classes, num_repeats, strides)

        self.threshold = float(threshold) if threshold is not None else 0.05
        self.nms_threshold = float(nms_threshold) if nms_threshold is not None else 0.1
        self._criterion = FCOSLoss(self.strides, self.num_classes, radius=1, interest_range=self.sizes)

        # metrics
        metrics = MetricCollection({
            f"ap{thresh}": BoxAveragePrecision(iou_threshold=thresh / 100, compute_on_step=True)
            for thresh in (25, 50, 75)
        })
        self.val_metrics = metrics.clone(prefix="val/")
        self.test_metrics = metrics.clone(prefix="test/")

        # freeze backbone
        for param in self._model.backbone.parameters():
            param.requires_grad = False
Example 13
    def test_assign_boxes_to_level(self, inclusive):
        bounds = (
            (-1, 64),
            (64, 128),
            (128, 256),
            (256, 512),
            (512, 10000000),
        )
        bounds = torch.tensor(bounds)

        batch_size = 2
        bbox = (
            torch.tensor([0, 0, 1, 1])
            .unsqueeze_(0)
            .repeat(len(bounds), 1)
            .mul_(bounds[..., 1].unsqueeze(-1))
            .clamp_max_(1024)
        )
        bbox = torch.cat([torch.tensor([0, 0, 10, 10]).unsqueeze_(0), bbox], dim=0)
        bbox = bbox.unsqueeze(0).repeat(batch_size, 1, 1)
        assignments = FCOSLoss.assign_boxes_to_levels(bbox, bounds, inclusive)

        has_assignment = assignments.any(dim=-1)
        assert has_assignment.all(), "one or more boxes was not assigned a level"

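        # Expected pattern: each synthetic box's longest side equals a level boundary, so it matches the adjacent level(s) selected by 'inclusive'.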
        diag = torch.eye(bbox.shape[-2] - 1, bounds.shape[-2]).bool()
        upper = torch.cat((diag[0:1], diag), dim=-2)
        lower = torch.cat((diag, diag[-1:]), dim=-2)
        both = upper.logical_or(lower)

        if inclusive == "lower":
            expected = lower
        elif inclusive == "upper":
            expected = upper
        elif inclusive == "both":
            expected = both
        else:
            raise ValueError(f"{inclusive}")

        assert (expected == assignments).all()
Example 14
    def test_create_target_for_level(self, stride, center_radius, size_target):
        bbox = torch.tensor(
            [
                [0, 0, 9, 9],
                [3, 4, 8, 6],
                [4, 4, 6, 6],
            ]
        )
        cls = torch.tensor([0, 0, 1]).unsqueeze_(-1)
        num_classes = 2

        cls, reg, centerness = FCOSLoss.create_target_for_level(
            bbox, cls, num_classes, stride, size_target, (-1, 64), center_radius
        )

        assert cls.shape == torch.Size([num_classes, *size_target])
        assert reg.shape == torch.Size([4, *size_target])
        assert centerness.shape == torch.Size([1, *size_target])

        # TODO expand on this test

        assert centerness.max() <= 1.0
        assert ((centerness >= 0) | (centerness == -1)).all()