def compute_perspective_transformation3d(
    input: torch.Tensor, params: Dict[str, torch.Tensor]
) -> torch.Tensor:
    r"""Compute the applied transformation matrix :math:`(*, 4, 4)`.

    Args:
        input (torch.Tensor): Tensor to be transformed with shape (D, H, W),
            (C, D, H, W), (B, C, D, H, W).
        params (Dict[str, torch.Tensor]):
            - params['start_points']: Tensor containing [top-left, top-right,
              bottom-right, bottom-left] of the original image with shape Bx8x3.
            - params['end_points']: Tensor containing [top-left, top-right,
              bottom-right, bottom-left] of the transformed image with shape Bx8x3.

    Returns:
        torch.Tensor: The applied transformation matrix :math:`(*, 4, 4)`.
    """
    input = _transform_input3d(input)
    _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
    # Solve for the 4x4 projective matrix mapping start_points onto end_points,
    # cast to the (validated) dtype of the input. The original code also built a
    # K.eye_like(4, input) identity that was immediately overwritten — removed
    # here as dead work.
    transform: torch.Tensor = get_perspective_transform3d(
        params['start_points'], params['end_points']).type_as(input)
    return transform
def compute_crop_transformation3d(
    input: torch.Tensor, params: Dict[str, torch.Tensor], flags: Dict[str, torch.Tensor]
) -> torch.Tensor:
    r"""Compute the cropping transformation matrix :math:`(*, 4, 4)`.

    Args:
        input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.
        params (Dict[str, torch.Tensor]):
            - params['src']: The applied cropping src matrix :math:`(*, 8, 3)`.
            - params['dst']: The applied cropping dst matrix :math:`(*, 8, 3)`.

    Returns:
        torch.Tensor: The cropping transformation matrix :math:`(*, 4, 4)`.
    """
    # Match the point sets to the input dtype before solving the system.
    src_pts = params['src'].to(input.dtype)
    dst_pts = params['dst'].to(input.dtype)
    crop_mat: torch.Tensor = get_perspective_transform3d(src_pts, dst_pts)
    # Broadcast across the batch and move to the input's device/dtype.
    return crop_mat.expand(input.shape[0], -1, -1).to(input)
def compute_crop_transformation3d(
    input: torch.Tensor, params: Dict[str, torch.Tensor], flags: Dict[str, torch.Tensor]
):
    r"""Compute the applied transformation matrix :math:`(*, 4, 4)`.

    Args:
        input (torch.Tensor): Tensor to be transformed with shape (D, H, W),
            (C, D, H, W), (B, C, D, H, W).
        params (Dict[str, torch.Tensor]):
            - params['src']: The applied cropping src matrix :math:`(*, 8, 3)`.
            - params['dst']: The applied cropping dst matrix :math:`(*, 8, 3)`.

    Returns:
        torch.Tensor: The applied transformation matrix :math:`(*, 4, 4)`.
    """
    # NOTE: the original docstring advertised 2D shapes ((H, W), (C, H, W),
    # (B, C, H, W)); this is a 3D op using 8x3 point sets, so the shapes above
    # are corrected to the depth-bearing forms handled by _transform_input3d.
    input = _transform_input3d(input)
    _validate_input_dtype(input, accepted_dtypes=[torch.float16, torch.float32, torch.float64])
    transform: torch.Tensor = get_perspective_transform3d(
        params['src'].to(input.dtype), params['dst'].to(input.dtype))
    transform = transform.expand(input.shape[0], -1, -1).type_as(input)
    return transform
def compute_perspective_transformation3d(
    input: torch.Tensor, params: Dict[str, torch.Tensor]
) -> torch.Tensor:
    r"""Compute the perspective transformation matrix :math:`(*, 4, 4)`.

    Args:
        input (torch.Tensor): Tensor to be transformed with shape :math:`(*, C, D, H, W)`.
        params (Dict[str, torch.Tensor]):
            - params['start_points']: Tensor containing [top-left, top-right,
              bottom-right, bottom-left] of the original image with shape Bx8x3.
            - params['end_points']: Tensor containing [top-left, top-right,
              bottom-right, bottom-left] of the transformed image with shape Bx8x3.

    Returns:
        torch.Tensor: The perspective transformation matrix :math:`(*, 4, 4)`.
    """
    # Solve for the 4x4 projective matrix mapping start_points onto end_points,
    # moved to the input's device/dtype. The original code also built a
    # K.eye_like(4, input) identity that was immediately overwritten — removed
    # here as dead work.
    transform: torch.Tensor = get_perspective_transform3d(
        params['start_points'], params['end_points']).to(input)
    return transform
def compute_transformation(self, input: Tensor, params: Dict[str, Tensor]) -> Tensor:
    """Solve the 3D perspective transform from the sampled start/end point sets."""
    start_pts = params["start_points"]
    end_pts = params["end_points"]
    # Move the result onto input's device and dtype.
    return get_perspective_transform3d(start_pts, end_pts).to(input)
def compute_transformation(self, input: Tensor, params: Dict[str, Tensor]) -> Tensor:
    """Solve the crop transform from src/dst points and broadcast to the batch."""
    src_pts = params["src"].to(input)
    dst_pts = params["dst"].to(input)
    crop_mat: Tensor = get_perspective_transform3d(src_pts, dst_pts)
    # Expand (no copy) so every batch element shares the same matrix view.
    return crop_mat.expand(input.shape[0], -1, -1)
def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:
    """Solve the crop transform from src/dst points and broadcast to the batch."""
    src_pts = params['src'].to(input)
    dst_pts = params['dst'].to(input)
    crop_mat: torch.Tensor = get_perspective_transform3d(src_pts, dst_pts)
    # Expand (no copy) so every batch element shares the same matrix view.
    return crop_mat.expand(input.shape[0], -1, -1)
def compute_transformation(self, input: torch.Tensor, params: Dict[str, torch.Tensor]) -> torch.Tensor:
    """Solve the 3D perspective transform from the sampled start/end point sets."""
    start_pts = params['start_points']
    end_pts = params['end_points']
    # Move the result onto input's device and dtype.
    return get_perspective_transform3d(start_pts, end_pts).to(input)