def test_transform_boxes_wh(self, device, dtype):
    # Boxes in (x, y, w, h) format.
    boxes = torch.tensor(
        [
            [139.2640, 103.0150, 258.0480, 307.5075],
            [1.0240, 80.5547, 510.9760, 431.4453],
            [165.2053, 262.1440, 345.4293, 246.7840],
            [119.8080, 144.2067, 137.2160, 265.9225],
        ],
        device=device,
        dtype=dtype,
    )
    # A horizontal flip over a 512-wide image maps x to 512 - x, so the left
    # corner lands at the larger x and the widths come out negative when the
    # coordinates are not restored.
    expected = torch.tensor(
        [
            [372.7360, 103.0150, -258.0480, 307.5075],
            [510.9760, 80.5547, -510.9760, 431.4453],
            [346.7947, 262.1440, -345.4293, 246.7840],
            [392.1920, 144.2067, -137.2160, 265.9225],
        ],
        device=device,
        dtype=dtype,
    )
    trans_mat = torch.tensor(
        [[[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype
    )

    out = transform_bbox(trans_mat, boxes, mode='xywh')
    assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
def test_transform_boxes_wh_restore_coordinates(self, device, dtype):
    # Same boxes as above, but renamed so it does not shadow the previous test.
    boxes = torch.tensor(
        [
            [139.2640, 103.0150, 258.0480, 307.5075],
            [1.0240, 80.5547, 510.9760, 431.4453],
            [165.2053, 262.1440, 345.4293, 246.7840],
            [119.8080, 144.2067, 137.2160, 265.9225],
        ],
        device=device,
        dtype=dtype,
    )
    # With restore_coordinates=True the (x, y) anchor is moved back to the
    # top-left corner of the warped box, so widths and heights stay positive.
    expected = torch.tensor(
        [
            [114.6880, 103.0150, 258.0480, 307.5075],
            [0.0000, 80.5547, 510.9760, 431.4453],
            [1.3654, 262.1440, 345.4293, 246.7840],
            [254.9760, 144.2067, 137.2160, 265.9225],
        ],
        device=device,
        dtype=dtype,
    )
    trans_mat = torch.tensor(
        [[[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype
    )

    out = transform_bbox(trans_mat, boxes, mode='xywh', restore_coordinates=True)
    assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
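# For reference only: a minimal, self-contained sketch of the behavior the two xywh
# tests above encode. This is NOT the library implementation; the helper name
# `transform_bbox_xywh_reference` and its signature are assumptions for illustration.
# It tracks only the two opposite corners (sufficient for the flip matrix used here),
# applies the 3x3 homography, and either keeps the signed width/height (first test)
# or re-anchors at the top-left corner when `restore_coordinates` is set (second test).
def transform_bbox_xywh_reference(
    trans_mat: torch.Tensor, boxes: torch.Tensor, restore_coordinates: bool = False
) -> torch.Tensor:
    # Assumes trans_mat has shape (1, 3, 3) and boxes has shape (N, 4) in xywh.
    x, y, w, h = boxes.unbind(-1)
    ones = torch.ones_like(x)
    # Opposite corners in homogeneous coordinates, shape (N, 2, 3).
    corners = torch.stack(
        [torch.stack([x, y, ones], dim=-1), torch.stack([x + w, y + h, ones], dim=-1)], dim=1
    )
    warped = corners @ trans_mat[0].transpose(0, 1)
    warped = warped[..., :2] / warped[..., 2:3]
    p1, p2 = warped[:, 0], warped[:, 1]
    if restore_coordinates:
        # Re-anchor at the top-left corner so width and height stay positive.
        p1, p2 = torch.minimum(p1, p2), torch.maximum(p1, p2)
    return torch.cat([p1, p2 - p1], dim=-1)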
def test_transform_boxes(self, device, dtype):
    boxes = torch.tensor([[139.2640, 103.0150, 397.3120, 410.5225]], device=device, dtype=dtype)
    expected = torch.tensor([[372.7360, 103.0150, 114.6880, 410.5225]], device=device, dtype=dtype)

    trans_mat = torch.tensor(
        [[[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]], device=device, dtype=dtype
    )

    out = transform_bbox(trans_mat, boxes)
    assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
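# Sanity check on the expected values above: the flip matrix maps x to 512 - x and
# leaves y untouched, so the warped x-coordinates are just 512 minus the inputs
# (which is why x1 > x2 in the expected xyxy box). A tiny arithmetic sketch, not
# part of the test suite:
def _flip_arithmetic_sketch() -> None:
    assert abs((512.0 - 139.2640) - 372.7360) < 1e-4  # x1' = 512 - x1
    assert abs((512.0 - 397.3120) - 114.6880) < 1e-4  # x2' = 512 - x2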
def inverse_bbox(
    self,
    input: torch.Tensor,
    module: nn.Module,
    param: Optional[ParamItem] = None,
    mode: str = "xyxy",
) -> torch.Tensor:
    if isinstance(module, GeometricAugmentationBase2D):
        # Invert the module's forward transformation and apply it to the boxes.
        transform = module.compute_inverse_transformation(
            module.get_transformation_matrix(input, None if param is None else cast(Dict, param.data))
        )
        input = transform_bbox(
            torch.as_tensor(transform, device=input.device, dtype=input.dtype), input, mode
        )
    return input
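# A minimal sketch of the round-trip property inverse_bbox relies on: warping boxes
# with a transformation and then with its inverse should recover the original boxes.
# It reuses the flip matrix from the tests above and assumes transform_bbox keeps the
# corner order shown in test_transform_boxes; not part of the test suite.
def _inverse_roundtrip_sketch() -> None:
    trans_mat = torch.tensor([[[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]])
    boxes = torch.tensor([[139.2640, 103.0150, 397.3120, 410.5225]])

    warped = transform_bbox(trans_mat, boxes)
    restored = transform_bbox(torch.inverse(trans_mat), warped)
    assert torch.allclose(restored, boxes, atol=1e-4)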
def test_transform_multiple_boxes(self, device, dtype):
    boxes = torch.tensor(
        [
            [139.2640, 103.0150, 397.3120, 410.5225],
            [1.0240, 80.5547, 512.0000, 512.0000],
            [165.2053, 262.1440, 510.6347, 508.9280],
            [119.8080, 144.2067, 257.0240, 410.1292],
        ],
        device=device,
        dtype=dtype,
    )
    boxes = boxes.repeat(2, 1, 1)  # (2, 4, 4): two images with four boxes each

    # First image is flipped horizontally (512-wide), second is left untouched.
    expected = torch.tensor(
        [
            [
                [372.7360, 103.0150, 114.6880, 410.5225],
                [510.9760, 80.5547, 0.0000, 512.0000],
                [346.7947, 262.1440, 1.3653, 508.9280],
                [392.1920, 144.2067, 254.9760, 410.1292],
            ],
            [
                [139.2640, 103.0150, 397.3120, 410.5225],
                [1.0240, 80.5547, 512.0000, 512.0000],
                [165.2053, 262.1440, 510.6347, 508.9280],
                [119.8080, 144.2067, 257.0240, 410.1292],
            ],
        ],
        device=device,
        dtype=dtype,
    )
    trans_mat = torch.tensor(
        [
            [[-1.0, 0.0, 512.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
            [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]],
        ],
        device=device,
        dtype=dtype,
    )

    out = transform_bbox(trans_mat, boxes)
    assert_allclose(out, expected, atol=1e-4, rtol=1e-4)
def apply_to_bbox(
    self,
    input: torch.Tensor,
    module: nn.Module,
    param: Optional[ParamItem] = None,
    mode: str = "xyxy",
) -> torch.Tensor:
    if param is not None:
        _param = cast(Dict[str, torch.Tensor], param.data)
    else:
        _param = None  # type: ignore

    if isinstance(module, GeometricAugmentationBase2D) and _param is None:
        raise ValueError(f"Transformation matrix for {module} has not been computed.")
    if isinstance(module, GeometricAugmentationBase2D) and _param is not None:
        # Apply the module's forward transformation matrix to the boxes.
        input = transform_bbox(module.get_transformation_matrix(input, _param), input, mode)
    else:
        pass  # Non-geometric augmentations leave the boxes untouched.
    return input