Example #1
    def test_stacked_transformation_assembly(self):
        first_matrix = torch.tensor([[[2.0, 0.0, 1.0], [0.0, 3.0, 2.0]]])
        second_matrix = torch.tensor([[[4.0, 0.0, 3.0], [0.0, 5.0, 4.0]]])
        trafo = StackedAffine([first_matrix, second_matrix])

        sample = {"data": torch.rand(1, 3, 25, 25)}

        matrix = trafo.assemble_matrix(**sample)

        target_matrix = matrix_to_cartesian(
            torch.bmm(matrix_to_homogeneous(first_matrix),
                      matrix_to_homogeneous(second_matrix)))

        self.assertTrue(torch.allclose(matrix, target_matrix))
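The expected composition can be checked by hand. A short sketch in plain torch of the homogeneous product the test asserts, independent of the helpers used above:

import torch

# Pad the 2x3 cartesian matrices to homogeneous 3x3 form, multiply, and
# drop the last row to recover the expected 2x3 result.
first = torch.tensor([[2.0, 0.0, 1.0], [0.0, 3.0, 2.0]])
second = torch.tensor([[4.0, 0.0, 3.0], [0.0, 5.0, 4.0]])
pad = torch.tensor([[0.0, 0.0, 1.0]])
composed = torch.cat([first, pad]) @ torch.cat([second, pad])
print(composed[:2])  # tensor([[ 8.,  0.,  7.], [ 0., 15., 14.]])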
Example #2
def create_rotation(rotation: AffineParamType,
                    batchsize: int,
                    ndim: int,
                    degree: bool = False,
                    device: Optional[Union[torch.device, str]] = None,
                    dtype: Optional[Union[torch.dtype, str]] = None,
                    image_transform: bool = True) -> torch.Tensor:
    """
    Formats the given rotation parameters to a homogeneous transformation
    matrix

    Args:
        rotation: the rotation factor(s). Supported are:
            * a single parameter (as float or int), which will be replicated
              for all dimensions and batch samples
            * a parameter per sample, which will be replicated for all
              dimensions
            * a parameter per dimension, which will be replicated for all
              batch samples
            * a parameter per sample per dimension
            * None, which will be treated as a rotation angle of 0
        batchsize: the number of samples per batch
        ndim: the dimensionality of the transform
        degree: whether the given rotation(s) are in degrees. Only valid for
            rotation parameters that are not passed as a full transformation
            matrix.
        device: the device to put the resulting tensor on.
            Defaults to the torch default device
        dtype: the dtype of the resulting tensor.
            Defaults to the torch default dtype
        image_transform: whether to invert the rotation matrix to match the
            expected behavior when applied to an image, e.g. a rotation > 0
            should rotate the image counter-clockwise but the grid clockwise

    Returns:
        torch.Tensor: the homogeneous transformation matrix
            [N, NDIM + 1, NDIM + 1], N is the batch size and NDIM
            is the number of spatial dimensions

    """
    if rotation is None:
        rotation = 0
    num_rot_params = 1 if ndim == 2 else ndim

    rotation = expand_scalar_param(rotation, batchsize,
                                   num_rot_params).to(device=device,
                                                      dtype=dtype)
    if degree:
        rotation = deg_to_rad(rotation)

    matrix_fn = create_rotation_2d if ndim == 2 else create_rotation_3d
    sin, cos = torch.sin(rotation), torch.cos(rotation)
    rotation_matrix = torch.stack([matrix_fn(s, c) for s, c in zip(sin, cos)])

    homo_rotation_matrix = matrix_to_homogeneous(rotation_matrix)

    if image_transform:
        homo_rotation_matrix = orthogonal_inverse(homo_rotation_matrix)

    return homo_rotation_matrix
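A minimal usage sketch, assuming create_rotation and the helpers it calls (expand_scalar_param, deg_to_rad, create_rotation_2d, matrix_to_homogeneous, orthogonal_inverse) are in scope:

import torch

# A single angle, replicated for a batch of two 2D samples; the result is
# a batch of homogeneous matrices of shape [N, NDIM + 1, NDIM + 1].
matrix = create_rotation(90, batchsize=2, ndim=2, degree=True,
                         dtype=torch.float32)
assert matrix.shape == (2, 3, 3)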
Example #3
    def assemble_matrix(self, **data) -> torch.Tensor:
        """
        Assembles the matrix (and takes care of batching and having it on the
        right device and in the correct dtype and dimensionality).

        Args:
            **data: the data to be transformed. Will be used to determine
                batchsize, dimensionality, dtype and device

        Returns:
            torch.Tensor: the (batched) transformation matrix
        """
        if self.matrix is None:
            raise ValueError("Matrix needs to be initialized or overwritten.")
        if not torch.is_tensor(self.matrix):
            self.matrix = torch.tensor(self.matrix)
        self.matrix = self.matrix.to(data[self.keys[0]])

        batchsize = data[self.keys[0]].shape[0]
        ndim = len(data[self.keys[0]].shape) - 2  # minus batch and channel dims

        # batch dimension missing -> Replicate for each sample in batch
        if len(self.matrix.shape) == 2:
            self.matrix = self.matrix[None].expand(batchsize, -1, -1).clone()
        if self.matrix.shape == (batchsize, ndim, ndim + 1):
            return self.matrix
        elif self.matrix.shape == (batchsize, ndim, ndim):
            return matrix_to_homogeneous(self.matrix)[:, :-1]
        elif self.matrix.shape == (batchsize, ndim + 1, ndim + 1):
            return matrix_to_cartesian(self.matrix)

        raise ValueError(
            "Invalid Shape for affine transformation matrix. "
            "Got %s but expected %s"
            % (str(tuple(self.matrix.shape)), str((batchsize, ndim, ndim + 1)))
        )
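The square-matrix branch can be spelled out in plain torch. A minimal sketch with hypothetical values, showing why matrix_to_homogeneous(...)[:, :-1] yields the canonical [N, NDIM, NDIM + 1] form:

import torch

# A [N, NDIM, NDIM] matrix carries no translation, so homogenizing it and
# dropping the last row amounts to appending a zero translation column.
batchsize, ndim = 4, 2
m = torch.eye(ndim)[None].expand(batchsize, -1, -1).clone()
m = torch.cat([m, m.new_zeros(batchsize, ndim, 1)], dim=-1)
assert m.shape == (batchsize, ndim, ndim + 1)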
Example #4
    def test_matrix_to_homogeneous(self):
        inputs = [
            torch.tensor([[[1, 2], [3, 4]]]),  # single sample, no trans, 2d
            torch.tensor([[[1, 2, 5], [3, 4, 6]]]),  # single sample, trans, 2d
            torch.tensor([[[1, 2, 3], [4, 5, 6], [7, 8, 9]]]),  # single sample, no trans, 3d
            torch.tensor([[[1, 2, 3, 10], [4, 5, 6, 11], [7, 8, 9, 12]]]),  # single sample, trans, 3d
            torch.tensor([[[1, 2], [3, 4]], [[5, 6], [7, 8]]]),  # multiple samples, no trans, 2d
            torch.tensor([[[1, 2, 10], [3, 4, 11]], [[5, 6, 12], [7, 8, 13]]]),  # multiple samples, trans, 2d
            # multiple samples, no trans, 3d
            torch.tensor([[[1, 2, 3], [4, 5, 6], [7, 8, 9]], [[10, 11, 12], [13, 14, 15], [16, 17, 18]]]),
            # multiple samples, trans, 3d
            torch.tensor([[[1, 2, 3, 21], [4, 5, 6, 22], [7, 8, 9, 23]],
                          [[10, 11, 12, 24], [13, 14, 15, 25], [16, 17, 18, 26]]])
        ]

        expectations = [
            torch.tensor([[[1, 2, 0], [3, 4, 0], [0, 0, 1]]]),
            torch.tensor([[[1, 2, 5], [3, 4, 6], [0, 0, 1]]]),
            torch.tensor([[[1, 2, 3, 0], [4, 5, 6, 0], [7, 8, 9, 0], [0, 0, 0, 1]]]),
            torch.tensor([[[1, 2, 3, 10], [4, 5, 6, 11], [7, 8, 9, 12], [0, 0, 0, 1]]]),
            torch.tensor([[[1, 2, 0], [3, 4, 0], [0, 0, 1]], [[5, 6, 0], [7, 8, 0], [0, 0, 1]]]),
            torch.tensor([[[1, 2, 10], [3, 4, 11], [0, 0, 1]], [[5, 6, 12], [7, 8, 13], [0, 0, 1]]]),
            torch.tensor([[[1, 2, 3, 0], [4, 5, 6, 0], [7, 8, 9, 0], [0, 0, 0, 1]],
                          [[10, 11, 12, 0], [13, 14, 15, 0], [16, 17, 18, 0], [0, 0, 0, 1]]]),
            torch.tensor([[[1, 2, 3, 21], [4, 5, 6, 22], [7, 8, 9, 23], [0, 0, 0, 1]],
                          [[10, 11, 12, 24], [13, 14, 15, 25], [16, 17, 18, 26], [0, 0, 0, 1]]])
        ]

        for inp, exp in zip(inputs, expectations):
            with self.subTest(input=inp, expected=exp):
                self.assertTrue(torch.allclose(matrix_to_homogeneous(inp), exp))
Example #5
def create_translation(offset: AffineParamType,
                       batchsize: int,
                       ndim: int,
                       device: Optional[Union[torch.device, str]] = None,
                       dtype: Optional[Union[torch.dtype, str]] = None,
                       image_transform: bool = True) -> torch.Tensor:
    """
    Formats the given translation parameters to a homogeneous transformation
    matrix

    Args:
        offset: the translation offset(s). Supported are:
            * a single parameter (as float or int), which will be replicated
              for all dimensions and batch samples
            * a parameter per sample, which will be replicated for all
              dimensions
            * a parameter per dimension, which will be replicated for all
              batch samples
            * a parameter per sample per dimension
            * None, which will be treated as a translation offset of 0
        batchsize: the number of samples per batch
        ndim: the dimensionality of the transform
        device: the device to put the resulting tensor on.
            Defaults to the torch default device
        dtype: the dtype of the resulting tensor.
            Defaults to the torch default dtype
        image_transform: whether to invert the translation matrix to match
            the expected behavior when applied to an image, e.g. a
            translation > 0 should move the image in the positive direction
            of an axis but the grid in the negative direction

    Returns:
        torch.Tensor: the homogeneous transformation matrix [N, NDIM + 1, NDIM + 1],
            N is the batch size and NDIM is the number of spatial dimensions
    """
    if offset is None:
        offset = 0
    offset = expand_scalar_param(offset, batchsize, ndim).to(device=device,
                                                             dtype=dtype)
    eye_batch = get_batched_eye(batchsize=batchsize,
                                ndim=ndim,
                                device=device,
                                dtype=dtype)
    translation_matrix = torch.stack([
        torch.cat([eye, o.view(-1, 1)], dim=1)
        for eye, o in zip(eye_batch, offset)
    ])
    if image_transform:
        translation_matrix[..., -1] = -translation_matrix[..., -1]
    return matrix_to_homogeneous(translation_matrix)
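A minimal usage sketch, assuming create_translation and its helpers from above are in scope:

import torch

# One offset per dimension, replicated for a batch of three 2D samples.
# With image_transform=True (the default) the stored offsets are negated.
matrix = create_translation([1.0, 2.0], batchsize=3, ndim=2,
                            dtype=torch.float32)
assert matrix.shape == (3, 3, 3)  # [N, NDIM + 1, NDIM + 1]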
Example #6
def create_scale(scale: AffineParamType,
                 batchsize: int,
                 ndim: int,
                 device: Optional[Union[torch.device, str]] = None,
                 dtype: Optional[Union[torch.dtype, str]] = None,
                 image_transform: bool = True) -> torch.Tensor:
    """
    Formats the given scale parameters to a homogeneous transformation matrix

    Args:
        scale: the scale factor(s). Supported are:
            * a single parameter (as float or int), which will be replicated
              for all dimensions and batch samples
            * a parameter per sample, which will be replicated for all
              dimensions
            * a parameter per dimension, which will be replicated for all
              batch samples
            * a parameter per sample per dimension
            * None, which will be treated as a scaling factor of 1
        batchsize: the number of samples per batch
        ndim: the dimensionality of the transform
        device: the device to put the resulting tensor on.
            Defaults to the torch default device
        dtype: the dtype of the resulting tensor.
            Defaults to the torch default dtype
        image_transform: whether to invert the scale matrix to match the
            expected behavior when applied to an image, e.g. a scale > 1
            increases the size of the image but decreases the size of the grid

    Returns:
        torch.Tensor: the homogeneous transformation matrix
            [N, NDIM + 1, NDIM + 1], N is the batch size and NDIM is the
            number of spatial dimensions
    """
    if scale is None:
        scale = 1

    scale = expand_scalar_param(scale, batchsize, ndim).to(device=device,
                                                           dtype=dtype)
    if image_transform:
        scale = 1 / scale
    scale_matrix = torch.stack([
        eye * s for eye, s in zip(
            get_batched_eye(
                batchsize=batchsize, ndim=ndim, device=device, dtype=dtype),
            scale)
    ])
    return matrix_to_homogeneous(scale_matrix)
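A minimal usage sketch, assuming create_scale and its helpers from above are in scope:

import torch

# A single factor, replicated for all dimensions of a batch of two 3D
# samples; with image_transform=True (the default) the diagonal holds 1/2,
# so the sampling grid shrinks while the image content appears enlarged.
matrix = create_scale(2.0, batchsize=2, ndim=3, dtype=torch.float32)
assert matrix.shape == (2, 4, 4)  # [N, NDIM + 1, NDIM + 1]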
Example #7
    def assemble_matrix(self, **data) -> torch.Tensor:
        """
        Handles the matrix assembly and stacking

        Args:
            **data: the data to be transformed.
                Will be used to determine batchsize,
                dimensionality, dtype and device

        Returns:
            torch.Tensor: the (batched) transformation matrix

        """
        whole_trafo = None
        for trafo in self.transforms:
            matrix = matrix_to_homogeneous(trafo.assemble_matrix(**data))
            if whole_trafo is None:
                whole_trafo = matrix
            else:
                whole_trafo = torch.bmm(whole_trafo, matrix)
        return matrix_to_cartesian(whole_trafo)
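Because matrix multiplication is not commutative, the order of self.transforms matters. A short sketch with hypothetical 2D homogeneous matrices:

import torch

# Scaling after translating differs from translating after scaling: the
# translation column of the product depends on the order of the factors.
scale = torch.tensor([[[2.0, 0.0, 0.0], [0.0, 2.0, 0.0], [0.0, 0.0, 1.0]]])
trans = torch.tensor([[[1.0, 0.0, 5.0], [0.0, 1.0, 5.0], [0.0, 0.0, 1.0]]])
print(torch.bmm(scale, trans)[0, :2, -1])  # tensor([10., 10.])
print(torch.bmm(trans, scale)[0, :2, -1])  # tensor([5., 5.])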
Example #8
def affine_point_transform(point_batch: torch.Tensor, matrix_batch: torch.Tensor) -> torch.Tensor:
    """
    Function to perform an affine transformation onto point batches

    Args:
        point_batch: a point batch of shape [N, NP, NDIM];
            ``NP`` is the number of points,
            ``N`` is the batch size,
            ``NDIM`` is the number of spatial dimensions
        matrix_batch: a batch of affine matrices of shape [N, NDIM, NDIM + 1];
            ``N`` is the batch size and ``NDIM`` is the number of spatial
            dimensions

    Returns:
        torch.Tensor: the batch of transformed points in cartesian coordinates
            of shape [N, NP, NDIM]; ``NP`` is the number of points, ``N`` is
            the batch size, ``NDIM`` is the number of spatial dimensions
    """
    point_batch = points_to_homogeneous(point_batch)
    matrix_batch = matrix_to_homogeneous(matrix_batch)
    transformed_points = torch.bmm(point_batch, matrix_batch.permute(0, 2, 1))
    return points_to_cartesian(transformed_points)
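A minimal usage sketch, assuming affine_point_transform and the point helpers from above are in scope:

import torch

# Translate a single 2D point by (1, 2): the matrix is an identity with an
# extra translation column, in the expected [N, NDIM, NDIM + 1] layout.
points = torch.tensor([[[0.0, 0.0]]])  # [N, NP, NDIM]
matrix = torch.tensor([[[1.0, 0.0, 1.0],
                        [0.0, 1.0, 2.0]]])  # [N, NDIM, NDIM + 1]
print(affine_point_transform(points, matrix))  # tensor([[[1., 2.]]])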
Example #9
    def test_check_image_size(self):
        images = [
            torch.rand(11, 2, 3, 4, 5),
            torch.rand(11, 2, 3, 4),
            torch.rand(11, 2, 3, 3),
        ]
        img_sizes = [[3, 4, 5], [3, 4], 3]

        scales = [
            torch.tensor([2.0, 3.0, 4.0]),
            torch.tensor([2.0, 3.0]),
            torch.tensor([2.0, 3.0])
        ]
        rots = [[45.0, 90.0, 135.0], [45.0], [45.0]]
        trans = [[0.0, 10.0, 20.0], [10.0, 20.0], [10.0, 20.0]]

        edges = [
            [
                [0.0, 0.0, 0.0, 1.0],
                [0.0, 0.0, 5.0, 1.0],
                [0.0, 4.0, 0.0, 1.0],
                [0.0, 4.0, 5.0, 1.0],
                [3.0, 0.0, 0.0, 1.0],
                [3.0, 0.0, 5.0, 1.0],
                [3.0, 4.0, 0.0, 1.0],
                [3.0, 4.0, 5.0, 1.0],
            ],
            [[0.0, 0.0, 1.0], [0.0, 4.0, 1.0], [3.0, 0.0, 1.0],
             [3.0, 4.0, 1.0]],
            [[0.0, 0.0, 1.0], [0.0, 3.0, 1.0], [3.0, 0.0, 1.0],
             [3.0, 3.0, 1.0]],
        ]

        for img, size, scale, rot, tran, edge_pts in zip(
                images, img_sizes, scales, rots, trans, edges):
            ndim = scale.size(-1)
            with self.subTest(ndim=ndim):
                affine = matrix_to_homogeneous(
                    parametrize_matrix(
                        scale=scale,
                        rotation=rot,
                        translation=tran,
                        degree=True,
                        batchsize=1,
                        ndim=ndim,
                        dtype=torch.float,
                        image_transform=False,
                    ))

                edge_pts = torch.tensor(edge_pts, dtype=torch.float)
                img = img.to(torch.float)
                new_edges = torch.bmm(edge_pts.unsqueeze(0),
                                      affine.clone().permute(0, 2, 1))

                img_size_zero_border = new_edges.max(dim=1)[0][0]
                img_size_non_zero_border = (new_edges.max(dim=1)[0] -
                                            new_edges.min(dim=1)[0])[0]

                fn_result_zero_border = _check_new_img_size(
                    size,
                    matrix_to_cartesian(
                        affine.expand(img.size(0), -1, -1).clone()),
                    zero_border=True,
                )
                fn_result_non_zero_border = _check_new_img_size(
                    size,
                    matrix_to_cartesian(
                        affine.expand(img.size(0), -1, -1).clone()),
                    zero_border=False,
                )

                self.assertTrue(
                    torch.allclose(img_size_zero_border[:-1],
                                   fn_result_zero_border))
                self.assertTrue(
                    torch.allclose(img_size_non_zero_border[:-1],
                                   fn_result_non_zero_border))