Code example #1
File: array.py  Project: Nic-Ma/MONAI
    def __call__(self,
                 img: NdarrayOrTensor,
                 randomize: bool = True,
                 device: Optional[torch.device] = None) -> NdarrayOrTensor:
        img = convert_to_tensor(img, track_meta=get_track_meta())
        if randomize:
            self.randomize()

        if not self._do_transform:
            return img

        device = device if device is not None else self.device

        # sample the random smooth deformation field
        field = self.sfield()

        # offset the regular sampling grid with the field, then move the
        # coordinate axis to the end as expected by ``grid_sample``
        dgrid = self.grid + field.to(self.grid_dtype)
        dgrid = moveaxis(dgrid, 1, -1)  # type: ignore

        # add a batch dimension and resample the image along the deformed grid
        img_t = convert_to_tensor(img[None], torch.float32, device)

        out = grid_sample(
            input=img_t,
            grid=dgrid,
            mode=look_up_option(self.grid_mode, GridSampleMode),
            align_corners=self.grid_align_corners,
            padding_mode=look_up_option(self.grid_padding_mode, GridSamplePadMode),
        )

        # drop the batch dimension and convert back to the input's array type
        out_t, *_ = convert_to_dst_type(out.squeeze(0), img)

        return out_t
Code example #2
    def test_blend(self, image, label):
        blended = blend_images(image[None], label[None])
        self.assertEqual(type(image), type(blended))
        if isinstance(blended, torch.Tensor):
            self.assertEqual(blended.device, image.device)
            blended = blended.cpu().numpy()
        self.assertEqual((3, ) + image.shape, blended.shape)

        blended = moveaxis(blended, 0, -1)  # move RGB component to end
        if blended.ndim > 3:  # volumetric data: show the middle slice along the first spatial axis
            blended = blended[blended.shape[0] // 2]
        plt.imshow(blended)
Code example #3
    def convert_to_channel_last(
        cls,
        data: NdarrayOrTensor,
        channel_dim: Union[None, int, Sequence[int]] = 0,
        squeeze_end_dims: bool = True,
        spatial_ndim: Optional[int] = 3,
        contiguous: bool = False,
    ):
        """
        Rearrange the data array axes to make the `channel_dim`-th dim the last
        dimension and ensure there are ``spatial_ndim`` number of spatial
        dimensions.

        When ``squeeze_end_dims`` is ``True``, a postprocessing step will be
        applied to remove any trailing singleton dimensions.

        Args:
            data: input data to be converted to "channel-last" format.
            channel_dim: specifies the channel axes of the data array to move to the last dimension.
                ``None`` indicates there is no channel dimension; a new axis will be appended as the channel dimension.
                A sequence of integers indicates multiple non-spatial dimensions.
            squeeze_end_dims: if ``True``, any trailing singleton dimensions will be removed (after the channel
                has been moved to the end). So if the input is `(H,W,D,C)` and C==1, then it will be saved as `(H,W,D)`.
                If D is also 1, it will be saved as `(H,W)`. If ``False``, the image will always be saved as `(H,W,D,C)`.
            spatial_ndim: modify the spatial dims if needed, so that the output has at least
                this number of spatial dims. If ``None``, the output will have the same number of
                spatial dimensions as the input.
            contiguous: if ``True``, the output will be contiguous.
        """
        # change data to "channel last" format
        if channel_dim is not None:
            _chns = ensure_tuple(channel_dim)
            data = moveaxis(data, _chns, tuple(range(-len(_chns), 0)))
        else:  # adds a channel dimension
            data = data[..., None]
        # To ensure at least ``spatial_ndim`` number of spatial dims
        if spatial_ndim:
            while len(data.shape) < spatial_ndim + 1:  # assuming the data has spatial + channel dims
                data = data[..., None, :]
            while len(data.shape) > spatial_ndim + 1:
                data = data[..., 0, :]
        # if desired, remove trailing singleton dimensions
        while squeeze_end_dims and data.shape[-1] == 1:
            data = np.squeeze(data, -1)
        if contiguous:
            data = ascontiguousarray(data)
        return data
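
The behaviour described in the docstring can be traced with a small standalone sketch (plain NumPy, shapes chosen for illustration; this is not part of the MONAI source):

import numpy as np

# a channel-first 2D image: (C=1, H=4, W=5)
img = np.zeros((1, 4, 5))

# move the channel axis to the end -> (4, 5, 1)
out = np.moveaxis(img, 0, -1)

# pad until there are spatial_ndim=3 spatial dims plus the channel -> (4, 5, 1, 1)
while out.ndim < 3 + 1:
    out = out[..., None, :]

# squeeze_end_dims=True removes trailing singleton dims -> (4, 5)
while out.shape[-1] == 1:
    out = np.squeeze(out, -1)

print(out.shape)  # (4, 5)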
Code example #4
    def __call__(self, img: NdarrayOrTensor) -> NdarrayOrTensor:
        """
        Apply the transform to `img`.
        """
        return moveaxis(img, self.channel_dim, -1)
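
For reference, a minimal standalone sketch of the same axis move (the shape and ``channel_dim`` value are illustrative, not taken from the MONAI source):

import numpy as np

x = np.zeros((4, 3, 5))    # hypothetical array with the channel axis at position 1
y = np.moveaxis(x, 1, -1)  # move channel_dim=1 to the end
print(y.shape)             # (4, 5, 3)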
Code example #5
def to_onehot(x):
    # one-hot encode the integer labels (classes end up on the last axis),
    # drop the leading dim, then move the class axis to the front (channel-first)
    out = moveaxis(F.one_hot(torch.as_tensor(x).long())[0], -1, 0)
    # convert back to the same array/tensor type as the input
    out, *_ = convert_to_dst_type(out, x)
    return out
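
To see what this helper produces, here is a small standalone sketch in plain PyTorch (the label values are arbitrary; this is not part of the MONAI source):

import torch
import torch.nn.functional as F

x = torch.tensor([[0, 1, 2]])           # label indices, shape (1, 3)
onehot = F.one_hot(x.long())            # shape (1, 3, 3); classes on the last axis
out = torch.moveaxis(onehot[0], -1, 0)  # drop the leading dim, classes first: (3, 3)
print(out)
# tensor([[1, 0, 0],
#         [0, 1, 0],
#         [0, 0, 1]])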
Code example #6

import unittest

import torch
import torch.nn.functional as F
from parameterized import parameterized

from monai.transforms.utils import get_unique_labels
from monai.transforms.utils_pytorch_numpy_unification import moveaxis
from tests.utils import TEST_NDARRAYS

grid_raw = [[0, 0, 0], [0, 0, 1], [2, 2, 3], [5, 5, 6], [3, 6, 2], [5, 6, 6]]
grid = torch.Tensor(grid_raw).unsqueeze(0).to(torch.int64)
grid_onehot = moveaxis(F.one_hot(grid)[0], -1, 0)

TESTS = []
for p in TEST_NDARRAYS:
    for o_h in (False, True):
        im = grid_onehot if o_h else grid
        TESTS.append([dict(img=p(im), is_onehot=o_h), {0, 1, 2, 3, 5, 6}])
        TESTS.append(
            [dict(img=p(im), is_onehot=o_h, discard=0), {1, 2, 3, 5, 6}])
        TESTS.append(
            [dict(img=p(im), is_onehot=o_h, discard=[1, 2]), {0, 3, 5, 6}])


class TestGetUniqueLabels(unittest.TestCase):
    @parameterized.expand(TESTS)
    def test_correct_results(self, args, expected):
        # compare the labels found by get_unique_labels against the expected set
        result = get_unique_labels(**args)
        self.assertEqual(result, expected)
Code example #7
File: test_as_channel_first.py  Project: Nic-Ma/MONAI
    def test_value(self, in_type, input_param, expected_shape):
        test_data = in_type(np.random.randint(0, 2, size=[1, 2, 3, 4]))
        result = AsChannelFirst(**input_param)(test_data)
        self.assertTupleEqual(result.shape, expected_shape)
        # AsChannelFirst should be equivalent to moving ``channel_dim`` to position 0
        expected = moveaxis(test_data, input_param["channel_dim"], 0)
        assert_allclose(result, expected, type_test="tensor")