Example #1
    def test_affine_transform_2d(self):
        t = np.pi / 3
        affine = [[np.cos(t), -np.sin(t), 0], [np.sin(t), np.cos(t), 0], [0, 0, 1]]
        affine = torch.as_tensor(affine, device=torch.device("cpu:0"), dtype=torch.float32)
        image = torch.arange(24.0).view(1, 1, 4, 6).to(device=torch.device("cpu:0"))
        xform = AffineTransform((3, 4), padding_mode="border", align_corners=True, mode="bilinear")
        out = xform(image, affine)
        out = out.detach().cpu().numpy()
        expected = [
            [
                [
                    [7.1525574e-07, 4.9999994e-01, 1.0000000e00, 1.4999999e00],
                    [3.8660259e00, 1.3660253e00, 1.8660252e00, 2.3660252e00],
                    [7.7320518e00, 3.0358994e00, 2.7320509e00, 3.2320507e00],
                ]
            ]
        ]
        np.testing.assert_allclose(out, expected, atol=1e-5)

        if torch.cuda.is_available():
            affine = torch.as_tensor(affine, device=torch.device("cuda:0"), dtype=torch.float32)
            image = torch.arange(24.0).view(1, 1, 4, 6).to(device=torch.device("cuda:0"))
            xform = AffineTransform(padding_mode="border", align_corners=True, mode="bilinear")
            out = xform(image, affine, (3, 4))
            out = out.detach().cpu().numpy()
            expected = [
                [
                    [
                        [7.1525574e-07, 4.9999994e-01, 1.0000000e00, 1.4999999e00],
                        [3.8660259e00, 1.3660253e00, 1.8660252e00, 2.3660252e00],
                        [7.7320518e00, 3.0358994e00, 2.7320509e00, 3.2320507e00],
                    ]
                ]
            ]
            np.testing.assert_allclose(out, expected, atol=1e-4)
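The two halves of the test above exercise the same transform in two ways: the output spatial size can be fixed when the AffineTransform is constructed, or supplied per call. A minimal sketch of that equivalence (assuming AffineTransform is importable from monai.networks.layers; the identity affine is illustrative only):

import torch
from monai.networks.layers import AffineTransform

image = torch.arange(24.0).view(1, 1, 4, 6)
affine = torch.eye(3)  # identity affine for a 2D image
a = AffineTransform((3, 4))(image, affine)  # output size fixed at construction
b = AffineTransform()(image, affine, (3, 4))  # output size supplied per call
print(torch.allclose(a, b))  # expected: True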
Example #2
 def test_affine_shift_2(self):
     affine = torch.as_tensor([[1.0, 0.0, -1.0], [0.0, 1.0, 0.0]])
     image = torch.as_tensor([[[[4.0, 1.0, 3.0, 2.0], [7.0, 6.0, 8.0, 5.0], [3.0, 5.0, 3.0, 6.0]]]])
     out = AffineTransform()(image, affine)
     out = out.detach().cpu().numpy()
     expected = [[[[0, 0, 0, 0], [4, 1, 3, 2], [7, 6, 8, 5]]]]
     np.testing.assert_allclose(out, expected, atol=1e-5)
Example #3
 def test_affine_shift_1(self):
     affine = torch.as_tensor([[1.0, 0.0, -1.0], [0.0, 1.0, -1.0]])
     image = torch.as_tensor([[[[4.0, 1.0, 3.0, 2.0], [7.0, 6.0, 8.0, 5.0], [3.0, 5.0, 3.0, 6.0]]]])
     out = AffineTransform()(image, affine)
     out = out.detach().cpu().numpy()
     expected = [[[[0, 0, 0, 0], [0, 4, 1, 3], [0, 7, 6, 8]]]]
     np.testing.assert_allclose(out, expected, atol=1e-5)
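Both shift tests rely on the convention that the affine maps output pixel indices to input pixel indices, so a translation of -1 pulls input pixel (i - 1, j - 1) into output pixel (i, j), shifting the visible content down and right and zero-padding the leading edge. A minimal sketch of that convention (assuming AffineTransform is importable from monai.networks.layers; the single-pixel image is illustrative only):

import torch
from monai.networks.layers import AffineTransform

image = torch.zeros(1, 1, 3, 3)
image[0, 0, 0, 0] = 1.0  # one bright pixel at index (0, 0)
shift = torch.as_tensor([[1.0, 0.0, -1.0], [0.0, 1.0, -1.0]])
out = AffineTransform()(image, shift)
print(out[0, 0])  # expected: the bright pixel moves to index (1, 1)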
Example #4
    def test_forward_3d(self):
        x = torch.rand(2, 1, 4, 4, 4)
        theta = torch.Tensor([[[0, 0, -1, 0], [1, 0, 0, 0],
                               [0, 0, 1, 0]]]).repeat(2, 1, 1)
        grid = torch.nn.functional.affine_grid(theta,
                                               x.size(),
                                               align_corners=False)
        expected = torch.nn.functional.grid_sample(x,
                                                   grid,
                                                   align_corners=False)
        expected = expected.detach().cpu().numpy()

        actual = AffineTransform(normalized=True,
                                 reverse_indexing=False)(x, theta)
        actual = actual.detach().cpu().numpy()
        np.testing.assert_allclose(actual, expected)
        np.testing.assert_allclose(list(theta.shape), [2, 3, 4])

        theta = torch.Tensor([[0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 1, 0]])
        actual = AffineTransform(normalized=True,
                                 reverse_indexing=False)(x, theta)
        actual = actual.detach().cpu().numpy()
        np.testing.assert_allclose(actual, expected)
        np.testing.assert_allclose(list(theta.shape), [3, 4])

        theta = torch.Tensor([[[0, 0, -1, 0], [1, 0, 0, 0], [0, 0, 1, 0]]])
        actual = AffineTransform(normalized=True,
                                 reverse_indexing=False)(x, theta)
        actual = actual.detach().cpu().numpy()
        np.testing.assert_allclose(actual, expected)
        np.testing.assert_allclose(list(theta.shape), [1, 3, 4])
Example #5
    def test_affine_transform_3d(self):
        t = np.pi / 3
        affine = [[1, 0, 0, 0], [0.0, np.cos(t), -np.sin(t), 0], [0, np.sin(t), np.cos(t), 0], [0, 0, 0, 1]]
        affine = torch.as_tensor(affine, device=torch.device("cpu:0"), dtype=torch.float32)
        image = torch.arange(48.0).view(2, 1, 4, 2, 3).to(device=torch.device("cpu:0"))
        xform = AffineTransform((3, 4, 2), padding_mode="border", align_corners=False, mode="bilinear")
        out = xform(image, affine)
        out = out.detach().cpu().numpy()
        expected = [
            [
                [
                    [[0.00000006, 0.5000001], [2.3660254, 1.3660254], [4.732051, 2.4019241], [5.0, 3.9019237]],
                    [[6.0, 6.5], [8.366026, 7.3660254], [10.732051, 8.401924], [11.0, 9.901924]],
                    [[12.0, 12.5], [14.366026, 13.366025], [16.732052, 14.401924], [17.0, 15.901923]],
                ]
            ],
            [
                [
                    [[24.0, 24.5], [26.366024, 25.366024], [28.732052, 26.401924], [29.0, 27.901924]],
                    [[30.0, 30.5], [32.366028, 31.366026], [34.732048, 32.401924], [35.0, 33.901924]],
                    [[36.0, 36.5], [38.366024, 37.366024], [40.73205, 38.401924], [41.0, 39.901924]],
                ]
            ],
        ]
        np.testing.assert_allclose(out, expected, atol=1e-4)

        if torch.cuda.is_available():
            affine = torch.as_tensor(affine, device=torch.device("cuda:0"), dtype=torch.float32)
            image = torch.arange(48.0).view(2, 1, 4, 2, 3).to(device=torch.device("cuda:0"))
            xform = AffineTransform(padding_mode="border", align_corners=False, mode="bilinear")
            out = xform(image, affine, (3, 4, 2))
            out = out.detach().cpu().numpy()
            expected = [
                [
                    [
                        [[0.00000006, 0.5000001], [2.3660254, 1.3660254], [4.732051, 2.4019241], [5.0, 3.9019237]],
                        [[6.0, 6.5], [8.366026, 7.3660254], [10.732051, 8.401924], [11.0, 9.901924]],
                        [[12.0, 12.5], [14.366026, 13.366025], [16.732052, 14.401924], [17.0, 15.901923]],
                    ]
                ],
                [
                    [
                        [[24.0, 24.5], [26.366024, 25.366024], [28.732052, 26.401924], [29.0, 27.901924]],
                        [[30.0, 30.5], [32.366028, 31.366026], [34.732048, 32.401924], [35.0, 33.901924]],
                        [[36.0, 36.5], [38.366024, 37.366024], [40.73205, 38.401924], [41.0, 39.901924]],
                    ]
                ],
            ]
            np.testing.assert_allclose(out, expected, atol=1e-4)
Example #6
 def test_zoom_1(self):
     affine = torch.as_tensor([[2.0, 0.0, 0.0], [0.0, 1.0, 0.0]])
     image = torch.arange(1.0,
                          13.0).view(1, 1, 3,
                                     4).to(device=torch.device("cpu:0"))
     out = AffineTransform()(image, affine, (1, 4))
     expected = [[[[1, 2, 3, 4]]]]
     np.testing.assert_allclose(out, expected, atol=1e-5, rtol=_rtol)
Example #7
 def test_zoom(self):
     affine = torch.as_tensor([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
     image = torch.arange(1.0,
                          13.0).view(1, 1, 3,
                                     4).to(device=torch.device("cpu:0"))
     out = AffineTransform((3, 2))(image, affine)
     expected = [[[[1, 3], [5, 7], [9, 11]]]]
     np.testing.assert_allclose(out, expected, atol=1e-5, rtol=_rtol)
Example #8
 def test_affine_transform_minimum(self):
     t = np.pi / 3
     affine = [[np.cos(t), -np.sin(t), 0], [np.sin(t),
                                            np.cos(t), 0], [0, 0, 1]]
     affine = torch.as_tensor(affine,
                              device=torch.device("cpu:0"),
                              dtype=torch.float32)
     image = torch.arange(24.0).view(1, 1, 4,
                                     6).to(device=torch.device("cpu:0"))
     out = AffineTransform()(image, affine)
     out = out.detach().cpu().numpy()
     expected = [[[
         [0.0, 0.06698727, 0.0, 0.0, 0.0, 0.0],
         [3.8660254, 0.86602557, 0.0, 0.0, 0.0, 0.0],
         [7.732051, 3.035899, 0.73205125, 0.0, 0.0, 0.0],
         [11.598076, 6.901923, 2.7631402, 0.0, 0.0, 0.0],
     ]]]
     np.testing.assert_allclose(out, expected, atol=1e-3, rtol=_rtol)
Example #9
 def test_zoom_zero_center(self):
     affine = torch.as_tensor([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0]],
                              dtype=torch.float32)
     image = torch.arange(1.0,
                          13.0).view(1, 1, 3,
                                     4).to(device=torch.device("cpu:0"))
     out = AffineTransform((1, 2), zero_centered=True)(image, affine)
     expected = [[[[3, 5]]]]
     np.testing.assert_allclose(out, expected, atol=1e-5, rtol=_rtol)
Example #10
    def test_ill_affine_transform(self):
        with self.assertRaises(ValueError):  # image too small
            t = np.pi / 3
            affine = [[1, 0, 0, 0], [0.0, np.cos(t), -np.sin(t), 0], [0, np.sin(t), np.cos(t), 0], [0, 0, 0, 1]]
            affine = torch.as_tensor(affine, device=torch.device("cpu:0"), dtype=torch.float32)
            xform = AffineTransform((3, 4, 2), padding_mode="border", align_corners=False, mode="bilinear")
            xform(torch.as_tensor([1.0, 2.0, 3.0]), affine)

        with self.assertRaises(ValueError):  # output shape too small
            t = np.pi / 3
            affine = [[1, 0, 0, 0], [0.0, np.cos(t), -np.sin(t), 0], [0, np.sin(t), np.cos(t), 0], [0, 0, 0, 1]]
            affine = torch.as_tensor(affine, device=torch.device("cpu:0"), dtype=torch.float32)
            image = torch.arange(48).view(2, 1, 4, 2, 3).to(device=torch.device("cpu:0"))
            xform = AffineTransform((3, 4), padding_mode="border", align_corners=False, mode="bilinear")
            xform(image, affine)

        with self.assertRaises(ValueError):  # incorrect affine
            t = np.pi / 3
            affine = [[1, 0, 0, 0], [0.0, np.cos(t), -np.sin(t), 0], [0, np.sin(t), np.cos(t), 0], [0, 0, 0, 1]]
            affine = torch.as_tensor(affine, device=torch.device("cpu:0"), dtype=torch.float32)
            affine = affine.unsqueeze(0).unsqueeze(0)
            image = torch.arange(48).view(2, 1, 4, 2, 3).to(device=torch.device("cpu:0"))
            xform = AffineTransform((2, 3, 4), padding_mode="border", align_corners=False, mode="bilinear")
            xform(image, affine)

        with self.assertRaises(ValueError):  # batch doesn't match
            t = np.pi / 3
            affine = [[1, 0, 0, 0], [0.0, np.cos(t), -np.sin(t), 0], [0, np.sin(t), np.cos(t), 0], [0, 0, 0, 1]]
            affine = torch.as_tensor(affine, device=torch.device("cpu:0"), dtype=torch.float32)
            affine = affine.unsqueeze(0)
            affine = affine.repeat(3, 1, 1)
            image = torch.arange(48).view(2, 1, 4, 2, 3).to(device=torch.device("cpu:0"))
            xform = AffineTransform((2, 3, 4), padding_mode="border", align_corners=False, mode="bilinear")
            xform(image, affine)

        with self.assertRaises(RuntimeError):  # input grid dtypes different
            t = np.pi / 3
            affine = [[1, 0, 0, 0], [0.0, np.cos(t), -np.sin(t), 0], [0, np.sin(t), np.cos(t), 0], [0, 0, 0, 1]]
            affine = torch.as_tensor(affine, device=torch.device("cpu:0"), dtype=torch.float32)
            affine = affine.unsqueeze(0)
            affine = affine.repeat(2, 1, 1)
            image = torch.arange(48).view(2, 1, 4, 2, 3).to(device=torch.device("cpu:0"), dtype=torch.int32)
            xform = AffineTransform((2, 3, 4), padding_mode="border", mode="bilinear", normalized=True)
            xform(image, affine)

        with self.assertRaises(ValueError):  # wrong affine
            affine = torch.as_tensor([[1, 0, 0, 0], [0, 0, 0, 1]])
            image = torch.arange(48).view(2, 1, 4, 2, 3).to(device=torch.device("cpu:0"))
            xform = AffineTransform((2, 3, 4), padding_mode="border", align_corners=False, mode="bilinear")
            xform(image, affine)

        with self.assertRaises(RuntimeError):  # dtype doesn't match
            affine = torch.as_tensor([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0]], dtype=torch.float64)
            image = torch.arange(1.0, 13.0).view(1, 1, 3, 4).to(device=torch.device("cpu:0"))
            out = AffineTransform((1, 2))(image, affine)
Example #11
 def __init__(self, is_ref=True, reverse_indexing=False):
     super().__init__()
     self.is_ref = is_ref
     self.localization = nn.Sequential(
         nn.Conv2d(1, 8, kernel_size=7),
         nn.MaxPool2d(2, stride=2),
         nn.ReLU(True),
         nn.Conv2d(8, 10, kernel_size=5),
         nn.MaxPool2d(2, stride=2),
         nn.ReLU(True),
     )
     # Regressor for the 3 * 2 affine matrix
     self.fc_loc = nn.Sequential(nn.Linear(10 * 3 * 3, 32), nn.ReLU(True), nn.Linear(32, 3 * 2))
     # Initialize the weights/bias with identity transformation
     self.fc_loc[2].weight.data.zero_()
     self.fc_loc[2].bias.data.copy_(torch.tensor([1, 0, 0, 0, 1, 0], dtype=torch.float))
     if not self.is_ref:
         self.xform = AffineTransform(normalized=True, reverse_indexing=reverse_indexing)
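Example #11 defines only the localization head; the forward pass that consumes it is not shown. A minimal sketch of the usual spatial-transformer forward method under the same attributes (the 10 * 3 * 3 flattening size follows from the layers in __init__; the method name and the use of torch.nn.functional for the reference branch are assumptions):

 def forward(self, x):
     import torch.nn.functional as F  # assumed import for the reference branch
     xs = self.localization(x)
     xs = xs.view(-1, 10 * 3 * 3)  # flatten the 10-channel 3x3 feature map
     theta = self.fc_loc(xs).view(-1, 2, 3)  # one 2x3 affine per batch item
     if self.is_ref:
         # reference branch: plain pytorch grid sampling
         grid = F.affine_grid(theta, x.size(), align_corners=False)
         return F.grid_sample(x, grid, align_corners=False)
     # monai branch: the normalized AffineTransform built in __init__
     return self.xform(x, theta, spatial_size=x.size()[2:])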
Example #12
def write_nifti(
    data: np.ndarray,
    file_name: str,
    affine: Optional[np.ndarray] = None,
    target_affine: Optional[np.ndarray] = None,
    resample: bool = True,
    output_spatial_shape: Optional[Sequence[int]] = None,
    mode: Union[GridSampleMode, str] = GridSampleMode.BILINEAR,
    padding_mode: Union[GridSamplePadMode, str] = GridSamplePadMode.BORDER,
    align_corners: bool = False,
    dtype: Optional[np.dtype] = np.float64,
    output_dtype: Optional[np.dtype] = np.float32,
) -> None:
    """
    Write numpy data into NIfTI files to disk.  This function converts data
    into the coordinate system defined by `target_affine` when `target_affine`
    is specified.

    If the coordinate transform between `affine` and `target_affine` could be
    achieved by simply transposing and flipping `data`, no resampling will
    happen.  Otherwise, this function will resample `data` using the coordinate
    transform computed from `affine` and `target_affine`.  Note that the shape
    of the resampled `data` may be subject to some rounding errors. For example,
    resampling a 20x20-pixel image from pixel size (1.5, 1.5)-mm to (3.0,
    3.0)-mm space will return a 10x10-pixel image.  However, resampling a
    20x20-pixel image from pixel size (2.0, 2.0)-mm to (3.0, 3.0)-mm space
    will output a 14x14-pixel image, where the image shape is rounded from
    13.333x13.333 pixels. In this case `output_spatial_shape` could be specified so
    that this function writes image data to a designated shape.

    When `affine` and `target_affine` are None, the data will be saved with an
    identity matrix as the image affine.

    This function assumes the NIfTI dimension notations.
    Spatially it supports up to three dimensions, that is, H, HW, HWD for
    1D, 2D, 3D respectively.
    When saving multiple time steps or multiple channels `data`, time and/or
    modality axes should be appended after the first three dimensions.  For
    example, shape of 2D eight-class segmentation probabilities to be saved
    could be `(64, 64, 1, 8)`. Also, data in shape (64, 64, 8), (64, 64, 8, 1)
    will be considered as a single-channel 3D image.

    Args:
        data: input data to write to file.
        file_name: expected file name that saved on disk.
        affine: the current affine of `data`. Defaults to `np.eye(4)`.
        target_affine: before saving
            the (`data`, `affine`) as a Nifti1Image,
            transform the data into the coordinates defined by `target_affine`.
        resample: whether to run resampling when the target affine
            could not be achieved by swapping/flipping data axes.
        output_spatial_shape: spatial shape of the output image.
            This option is used when resample = True.
        mode: {``"bilinear"``, ``"nearest"``}
            This option is used when ``resample = True``.
            Interpolation mode to calculate output values. Defaults to ``"bilinear"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
        padding_mode: {``"zeros"``, ``"border"``, ``"reflection"``}
            This option is used when ``resample = True``.
            Padding mode for outside grid values. Defaults to ``"border"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
        align_corners: Geometrically, we consider the pixels of the input as squares rather than points.
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
        dtype: data type for resampling computation. Defaults to ``np.float64`` for best precision.
            If None, use the data type of input data. To be compatible with other modules,
            the output data type is always ``np.float32``.
        output_dtype: data type for saving data. Defaults to ``np.float32``.
    """
    assert isinstance(data, np.ndarray), "input data must be numpy array."
    dtype = dtype or data.dtype
    sr = min(data.ndim, 3)
    if affine is None:
        affine = np.eye(4, dtype=np.float64)
    affine = to_affine_nd(sr, affine)

    if target_affine is None:
        target_affine = affine
    target_affine = to_affine_nd(sr, target_affine)

    if np.allclose(affine, target_affine, atol=1e-3):
        # no affine changes, save (data, affine)
        results_img = nib.Nifti1Image(data.astype(output_dtype), to_affine_nd(3, target_affine))
        nib.save(results_img, file_name)
        return

    # resolve orientation
    start_ornt = nib.orientations.io_orientation(affine)
    target_ornt = nib.orientations.io_orientation(target_affine)
    ornt_transform = nib.orientations.ornt_transform(start_ornt, target_ornt)
    data_shape = data.shape
    data = nib.orientations.apply_orientation(data, ornt_transform)
    _affine = affine @ nib.orientations.inv_ornt_aff(ornt_transform, data_shape)
    if np.allclose(_affine, target_affine, atol=1e-3) or not resample:
        results_img = nib.Nifti1Image(data.astype(output_dtype), to_affine_nd(3, target_affine))
        nib.save(results_img, file_name)
        return

    # need resampling
    affine_xform = AffineTransform(
        normalized=False, mode=mode, padding_mode=padding_mode, align_corners=align_corners, reverse_indexing=True
    )
    transform = np.linalg.inv(_affine) @ target_affine
    if output_spatial_shape is None:
        output_spatial_shape, _ = compute_shape_offset(data.shape, _affine, target_affine)
    output_spatial_shape_ = list(output_spatial_shape)
    if data.ndim > 3:  # multi channel, resampling each channel
        while len(output_spatial_shape_) < 3:
            output_spatial_shape_ = output_spatial_shape_ + [1]
        spatial_shape, channel_shape = data.shape[:3], data.shape[3:]
        data_np = data.reshape(list(spatial_shape) + [-1])
        data_np = np.moveaxis(data_np, -1, 0)  # channel first for pytorch
        data_torch = affine_xform(
            torch.as_tensor(np.ascontiguousarray(data_np).astype(dtype)).unsqueeze(0),
            torch.as_tensor(np.ascontiguousarray(transform).astype(dtype)),
            spatial_size=output_spatial_shape_[:3],
        )
        data_np = data_torch.squeeze(0).detach().cpu().numpy()
        data_np = np.moveaxis(data_np, 0, -1)  # channel last for nifti
        data_np = data_np.reshape(list(data_np.shape[:3]) + list(channel_shape))
    else:  # single channel image, need to expand to have batch and channel
        while len(output_spatial_shape_) < len(data.shape):
            output_spatial_shape_ = output_spatial_shape_ + [1]
        data_torch = affine_xform(
            torch.as_tensor(np.ascontiguousarray(data).astype(dtype)[None, None]),
            torch.as_tensor(np.ascontiguousarray(transform).astype(dtype)),
            spatial_size=output_spatial_shape_[: len(data.shape)],
        )
        data_np = data_torch.squeeze(0).squeeze(0).detach().cpu().numpy()

    results_img = nib.Nifti1Image(data_np.astype(output_dtype), to_affine_nd(3, target_affine))
    nib.save(results_img, file_name)
    return
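A short usage sketch for the writer above (the file name and affines are illustrative; the 20x20-pixel, (1.5, 1.5)-mm to (3.0, 3.0)-mm case follows the rounding example in the docstring):

import numpy as np

img = np.random.rand(20, 20).astype(np.float32)  # 2D image at (1.5, 1.5)-mm spacing
affine = np.diag([1.5, 1.5, 1.0, 1.0])
target = np.diag([3.0, 3.0, 1.0, 1.0])  # resample into (3.0, 3.0)-mm space
# per the docstring, the resampled image is expected to be 10x10 pixels
write_nifti(img, "resampled.nii.gz", affine=affine, target_affine=target)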
Example #13
def write_nifti(
    data,
    file_name: str,
    affine=None,
    target_affine=None,
    resample: bool = True,
    output_shape=None,
    interp_order: str = "bilinear",
    mode: str = "border",
    dtype=None,
):
    """
    Write numpy data into NIfTI files to disk.  This function converts data
    into the coordinate system defined by `target_affine` when `target_affine`
    is specified.

    If the coordinate transform between `affine` and `target_affine` could be
    achieved by simply transposing and flipping `data`, no resampling will
    happen.  Otherwise, this function will resample `data` using the coordinate
    transform computed from `affine` and `target_affine`.  Note that the shape
    of the resampled `data` may be subject to some rounding errors. For example,
    resampling a 20x20-pixel image from pixel size (1.5, 1.5)-mm to (3.0,
    3.0)-mm space will return a 10x10-pixel image.  However, resampling a
    20x20-pixel image from pixel size (2.0, 2.0)-mm to (3.0, 3.0)-mm space
    will output a 14x14-pixel image, where the image shape is rounded from
    13.333x13.333 pixels. In this case `output_shape` could be specified so
    that this function writes image data to a designated shape.

    When `affine` and `target_affine` are None, the data will be saved with an
    identity matrix as the image affine.

    This function assumes the NIfTI dimension notations.
    Spatially it supports up to three dimensions, that is, H, HW, HWD for
    1D, 2D, 3D respectively.
    When saving multiple time steps or multiple channels `data`, time and/or
    modality axes should be appended after the first three dimensions.  For
    example, shape of 2D eight-class segmentation probabilities to be saved
    could be `(64, 64, 1, 8)`. Also, data in shape (64, 64, 8), (64, 64, 8, 1)
    will be considered as a single-channel 3D image.

    Args:
        data (numpy.ndarray): input data to write to file.
        file_name: expected file name that saved on disk.
        affine (numpy.ndarray): the current affine of `data`. Defaults to `np.eye(4)`.
        target_affine (numpy.ndarray, optional): before saving
            the (`data`, `affine`) as a Nifti1Image,
            transform the data into the coordinates defined by `target_affine`.
        resample: whether to run resampling when the target affine
            could not be achieved by swapping/flipping data axes.
        output_shape (None or tuple of ints): output image shape.
            This option is used when resample = True.
        interp_order (`nearest|bilinear`): the interpolation mode, default is "bilinear".
            See also: https://pytorch.org/docs/stable/nn.functional.html#grid-sample
            This option is used when `resample = True`.
        mode (`zeros|border|reflection`):
            The mode parameter determines how the input array is extended beyond its boundaries.
            Defaults to "border". This option is used when `resample = True`.
        dtype (np.dtype, optional): convert the image to save to this data type.
    """
    assert isinstance(data, np.ndarray), "input data must be numpy array."
    sr = min(data.ndim, 3)
    if affine is None:
        affine = np.eye(4, dtype=np.float64)
    affine = to_affine_nd(sr, affine)

    if target_affine is None:
        target_affine = affine
    target_affine = to_affine_nd(sr, target_affine)

    if np.allclose(affine, target_affine, atol=1e-3):
        # no affine changes, save (data, affine)
        results_img = nib.Nifti1Image(data.astype(dtype),
                                      to_affine_nd(3, target_affine))
        nib.save(results_img, file_name)
        return

    # resolve orientation
    start_ornt = nib.orientations.io_orientation(affine)
    target_ornt = nib.orientations.io_orientation(target_affine)
    ornt_transform = nib.orientations.ornt_transform(start_ornt, target_ornt)
    data_shape = data.shape
    data = nib.orientations.apply_orientation(data, ornt_transform)
    _affine = affine @ nib.orientations.inv_ornt_aff(ornt_transform,
                                                     data_shape)
    if np.allclose(_affine, target_affine, atol=1e-3) or not resample:
        results_img = nib.Nifti1Image(data.astype(dtype),
                                      to_affine_nd(3, target_affine))
        nib.save(results_img, file_name)
        return

    # need resampling
    affine_xform = AffineTransform(normalized=False,
                                   mode=interp_order,
                                   padding_mode=mode,
                                   align_corners=True,
                                   reverse_indexing=True)
    transform = np.linalg.inv(_affine) @ target_affine
    if output_shape is None:
        output_shape, _ = compute_shape_offset(data.shape, _affine,
                                               target_affine)
    if data.ndim > 3:  # multi channel, resampling each channel
        while len(output_shape) < 3:
            output_shape = list(output_shape) + [1]
        spatial_shape, channel_shape = data.shape[:3], data.shape[3:]
        data_ = data.reshape(list(spatial_shape) + [-1])
        data_ = np.moveaxis(data_, -1, 0)  # channel first for pytorch
        data_ = affine_xform(
            torch.from_numpy((data_.astype(np.float64))[None]),
            torch.from_numpy(transform.astype(np.float64)),
            spatial_size=output_shape[:3],
        )
        data_ = data_.squeeze(0).detach().cpu().numpy()
        data_ = np.moveaxis(data_, 0, -1)  # channel last for nifti
        data_ = data_.reshape(list(data_.shape[:3]) + list(channel_shape))
    else:  # single channel image, need to expand to have batch and channel
        while len(output_shape) < len(data.shape):
            output_shape = list(output_shape) + [1]
        data_ = affine_xform(
            torch.from_numpy((data.astype(np.float64))[None, None]),
            torch.from_numpy(transform.astype(np.float64)),
            spatial_size=output_shape[:len(data.shape)],
        )
        data_ = data_.squeeze(0).squeeze(0).detach().cpu().numpy()
    dtype = dtype or data.dtype
    results_img = nib.Nifti1Image(data_.astype(dtype),
                                  to_affine_nd(3, target_affine))
    nib.save(results_img, file_name)
    return
Example #14
 def test_zoom_2(self):
     affine = torch.as_tensor([[2.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
     image = torch.arange(1.0, 13.0).view(1, 1, 3, 4).to(device=torch.device("cpu:0"))
     out = AffineTransform((1, 2))(image, affine)
     expected = [[[[1, 3]]]]
     np.testing.assert_allclose(out, expected, atol=1e-5)