Code Example #1
File: test_compose.py Project: mhubii/MONAI
    def test_randomize_warn(self):
        class _RandomClass(Randomizable):
            def randomize(self, foo):
                pass

        c = Compose([_RandomClass(), _RandomClass()])
        with self.assertWarns(Warning):
            c.randomize()
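
The warning asserted here comes from Compose.randomize swallowing signature mismatches in child transforms. A minimal sketch of that behavior (not the verbatim MONAI implementation; the message wording is illustrative):

import warnings

def randomize(self):
    # Try to randomize every Randomizable child; a TypeError raised by an
    # incompatible randomize() signature becomes a warning instead of an error.
    for _transform in self.transforms:
        if not isinstance(_transform, Randomizable):
            continue
        try:
            _transform.randomize()
        except TypeError as type_error:
            warnings.warn(f'Transform "{type(_transform).__name__}" in Compose not randomized: {type_error}.')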
Code Example #2
    def test_compose_flatten_does_not_affect_one_of(self):
        p = Compose([A(), B(), OneOf([C(), Inv(KEYS), Compose([X(), Y()])])])
        f = p.flatten()
        # in this case the flattened transform should be the same.

        def _match(a, b):
            self.assertEqual(type(a), type(b))
            for a_, b_ in zip(a.transforms, b.transforms):
                self.assertEqual(type(a_), type(b_))
                if isinstance(a_, (Compose, OneOf)):
                    _match(a_, b_)

        _match(p, f)
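
The invariant checked above, that nested Compose instances are expanded while OneOf containers survive untouched, falls out of a flatten that recurses only into plain Compose nodes. A minimal sketch of that recursion (assuming, as in MONAI, that OneOf subclasses Compose):

def flatten(self):
    # Expand nested Compose transforms into a single flat list, but treat
    # OneOf (and everything inside it) as one opaque unit.
    new_transforms = []
    for t in self.transforms:
        if isinstance(t, Compose) and not isinstance(t, OneOf):
            new_transforms.extend(t.flatten().transforms)
        else:
            new_transforms.append(t)
    return Compose(new_transforms)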
Code Example #3
File: tta.py Project: Project-MONAI/MONAILabel
    def pre_transforms(self):
        t = [
            LoadImaged(keys="image", reader="nibabelreader"),
            AddChanneld(keys="image"),
            # Spacing might not be needed as resize transform is used later.
            # Spacingd(keys="image", pixdim=self.spacing),
            RandAffined(
                keys="image",
                prob=1,
                rotate_range=(np.pi / 4, np.pi / 4, np.pi / 4),
                padding_mode="zeros",
                as_tensor_output=False,
            ),
            RandFlipd(keys="image", prob=0.5, spatial_axis=0),
            RandRotated(keys="image", range_x=(-5, 5), range_y=(-5, 5), range_z=(-5, 5)),
            Resized(keys="image", spatial_size=self.spatial_size),
        ]
        # If using TTA for deepedit
        if self.deepedit:
            t.append(DiscardAddGuidanced(keys="image"))
        t.append(ToTensord(keys="image"))
        return Compose(t)
Code Example #4
File: test_dataset_summary.py Project: Nic-Ma/MONAI
    def test_anisotropic_spacing(self):
        with tempfile.TemporaryDirectory() as tempdir:

            pixdims = [[1.0, 1.0, 5.0], [1.0, 1.0, 4.0], [1.0, 1.0, 4.5], [1.0, 1.0, 2.0], [1.0, 1.0, 1.0]]
            for i in range(5):
                im, seg = create_test_image_3d(32, 32, 32, num_seg_classes=1, num_objs=3, rad_max=6, channel_dim=0)
                n = nib.Nifti1Image(im, np.eye(4))
                n.header["pixdim"][1:4] = pixdims[i]
                nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
                n = nib.Nifti1Image(seg, np.eye(4))
                n.header["pixdim"][1:4] = pixdims[i]
                nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

            train_images = sorted(glob.glob(os.path.join(tempdir, "img*.nii.gz")))
            train_labels = sorted(glob.glob(os.path.join(tempdir, "seg*.nii.gz")))
            data_dicts = [
                {"image": image_name, "label": label_name} for image_name, label_name in zip(train_images, train_labels)
            ]

            t = Compose([LoadImaged(keys=["image", "label"]), FromMetaTensord(keys=["image", "label"])])
            dataset = Dataset(data=data_dicts, transform=t)

            calculator = DatasetSummary(dataset, num_workers=4, meta_key_postfix=PostFix.meta())

            target_spacing = calculator.get_target_spacing(anisotropic_threshold=4.0, percentile=20.0)
            np.testing.assert_allclose(target_spacing, (1.0, 1.0, 1.8))
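
The expected (1.0, 1.0, 1.8) can be reproduced by hand. The z-spacings [5.0, 4.0, 4.5, 2.0, 1.0] are strongly anisotropic relative to x/y, and, assuming get_target_spacing falls back to the requested percentile along the out-of-threshold axis, the z target is simply the 20th percentile of the observed z-spacings:

import numpy as np

z_spacings = [5.0, 4.0, 4.5, 2.0, 1.0]
# Linear interpolation at rank 0.2 * (5 - 1) = 0.8, between 1.0 and 2.0:
np.testing.assert_allclose(np.percentile(z_spacings, 20), 1.8)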
Code Example #5
File: dataset.py Project: zimaxeg/MONAI
    def __init__(self, data, transform, cache_num=sys.maxsize, cache_rate=1.0):
        """
        Args:
            data (Iterable): input data to load and transform to generate dataset for model.
            transform (Callable): transforms to execute operations on input data.
            cache_num (int): number of items to be cached. Default is `sys.maxsize`.
                will take the minimum of (cache_num, data_length x cache_rate, data_length).
            cache_rate (float): percentage of cached data in total, default is 1.0 (cache all).
                will take the minimum of (cache_num, data_length x cache_rate, data_length).
        """
        if not isinstance(transform, Compose):
            transform = Compose(transform)
        super().__init__(data, transform)
        self.cache_num = min(cache_num, int(len(self) * cache_rate), len(self))
        self._cache = []
        print("Load and cache transformed data...")
        for i in range(self.cache_num):
            process_bar(i + 1, self.cache_num)
            item = data[i]
            for _transform in transform.transforms:
                # execute all the deterministic transforms before the first random transform
                if isinstance(_transform, Randomizable):
                    break
                item = apply_transform(_transform, item)
            self._cache.append(item)
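
A minimal usage sketch of the caching strategy above, assuming the enclosing class is a CacheDataset-style dataset (LoadImaged and RandAffined are stand-ins for any deterministic/random pair): everything before the first Randomizable transform is executed once at construction time and cached, while the random tail runs on every fetch.

transform = Compose([
    LoadImaged(keys=["image"]),             # deterministic: executed once, result cached
    RandAffined(keys=["image"], prob=1.0),  # Randomizable: applied on every item access
])
dataset = CacheDataset(data=data_dicts, transform=transform, cache_rate=1.0)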
Code Example #6
    def test_spacing_intensity(self):
        set_determinism(seed=0)
        with tempfile.TemporaryDirectory() as tempdir:

            for i in range(5):
                im, seg = create_test_image_3d(32,
                                               32,
                                               32,
                                               num_seg_classes=1,
                                               num_objs=3,
                                               rad_max=6,
                                               channel_dim=0)
                n = nib.Nifti1Image(im, np.eye(4))
                nib.save(n, os.path.join(tempdir, f"img{i:d}.nii.gz"))
                n = nib.Nifti1Image(seg, np.eye(4))
                nib.save(n, os.path.join(tempdir, f"seg{i:d}.nii.gz"))

            train_images = sorted(
                glob.glob(os.path.join(tempdir, "img*.nii.gz")))
            train_labels = sorted(
                glob.glob(os.path.join(tempdir, "seg*.nii.gz")))
            data_dicts = [{
                "image": image_name,
                "label": label_name
            } for image_name, label_name in zip(train_images, train_labels)]

            t = Compose([
                LoadImaged(keys=["image", "label"]),
                ToNumpyd(keys=[
                    "image", "label", "image_meta_dict", "label_meta_dict"
                ]),
            ])
            dataset = Dataset(data=data_dicts, transform=t)

            # test **kwargs of `DatasetSummary` for `DataLoader`
            calculator = DatasetSummary(dataset,
                                        num_workers=4,
                                        meta_key="image_meta_dict",
                                        collate_fn=test_collate)

            target_spacing = calculator.get_target_spacing(
                spacing_key="pixdim")
            self.assertEqual(target_spacing, (1.0, 1.0, 1.0))
            calculator.calculate_statistics()
            np.testing.assert_allclose(calculator.data_mean,
                                       0.892599,
                                       rtol=1e-5,
                                       atol=1e-5)
            np.testing.assert_allclose(calculator.data_std,
                                       0.131731,
                                       rtol=1e-5,
                                       atol=1e-5)
            calculator.calculate_percentiles(sampling_flag=True, interval=2)
            self.assertEqual(calculator.data_max_percentile, 1.0)
            np.testing.assert_allclose(calculator.data_min_percentile,
                                       0.556411,
                                       rtol=1e-5,
                                       atol=1e-5)
Code Example #7
    def test_non_dict_compose(self):
        def a(i):
            return i + 'a'

        def b(i):
            return i + 'b'

        c = Compose([a, b, a, b])
        self.assertEqual(c(''), 'abab')
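
This works because Compose simply threads the input through its callables from left to right; conceptually the call path is no more than the following sketch:

def __call__(self, input_):
    # Fold the input through every transform in order.
    for _transform in self.transforms:
        input_ = apply_transform(_transform, input_)
    return input_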
Code Example #8
    def test_inverse_compose(self):
        transform = Compose(
            [
                Resized(keys="img", spatial_size=[100, 100, 100]),
                OneOf(
                    [
                        RandScaleIntensityd(keys="img", factors=0.5, prob=1.0),
                        RandShiftIntensityd(keys="img", offsets=0.5, prob=1.0),
                    ]
                ),
            ]
        )
        transform.set_random_state(seed=0)
        result = transform({"img": np.ones((1, 101, 102, 103))})

        result = transform.inverse(result)
        # invert to the original spatial shape
        self.assertTupleEqual(result["img"].shape, (1, 101, 102, 103))
Code Example #9
    def __call__(self, batch: Any):
        """
        Args:
            batch: batch of data to pad-collate
        """
        # data is either list of dicts or list of lists
        is_list_of_dicts = isinstance(batch[0], dict)
        # loop over items inside of each element in a batch
        for key_or_idx in batch[0].keys() if is_list_of_dicts else range(
                len(batch[0])):
            # calculate max size of each dimension
            max_shapes = []
            for elem in batch:
                if not isinstance(elem[key_or_idx],
                                  (torch.Tensor, np.ndarray)):
                    break
                max_shapes.append(elem[key_or_idx].shape[1:])
            # len > 0 if objects were arrays, else skip as no padding to be done
            if not max_shapes:
                continue
            max_shape = np.array(max_shapes).max(axis=0)
            # If all same size, skip
            if np.all(np.array(max_shapes).min(axis=0) == max_shape):
                continue
            # Do we need to convert output to Tensor?
            output_to_tensor = isinstance(batch[0][key_or_idx], torch.Tensor)

            # Use `SpatialPadd` or `SpatialPad` to match sizes
            # Default params are central padding, padding with 0's
            # If input is dictionary, use the dictionary version so that the transformation is recorded

            padder = SpatialPad(spatial_size=max_shape,
                                method=self.method,
                                mode=self.mode,
                                **self.np_kwargs)
            transform = padder if not output_to_tensor else Compose(
                [padder, ToTensor()])

            for idx, batch_i in enumerate(batch):
                im = batch_i[key_or_idx]
                orig_size = im.shape[1:]
                padded = transform(batch_i[key_or_idx])
                batch = replace_element(padded, batch, idx, key_or_idx)

                # If we have a dictionary of data, append to list
                if is_list_of_dicts:
                    self.push_transform(batch[idx],
                                        key_or_idx,
                                        orig_size=orig_size)

        # After padding, use default list collator
        return list_data_collate(batch)
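
Assuming the __call__ above belongs to a collate class such as MONAI's PadListDataCollate, the typical hookup is to pass an instance as the DataLoader's collate_fn so samples of different spatial sizes can still be batched:

from torch.utils.data import DataLoader

# `dataset` is any dataset whose images may differ in spatial size per item.
loader = DataLoader(dataset, batch_size=4, collate_fn=PadListDataCollate())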
Code Example #10
    def test_dict_compose(self):
        def a(d):
            d = dict(d)
            d['a'] += 1
            return d

        def b(d):
            d = dict(d)
            d['b'] += 1
            return d

        c = Compose([a, b, a, b, a])
        self.assertDictEqual(c({'a': 0, 'b': 0}), {'a': 3, 'b': 2})
Code Example #11
File: utils.py Project: mxochicale/MONAI
@contextmanager
def allow_missing_keys_mode(transform: Union[MapTransform, Compose, Tuple[MapTransform], Tuple[Compose]]):
    """Temporarily set all MapTransforms to not throw an error if keys are missing. After, revert to original states.

    Args:
        transform: either MapTransform or a Compose

    Example:

    .. code-block:: python

        data = {"image": np.arange(16, dtype=float).reshape(1, 4, 4)}
        t = SpatialPadd(["image", "label"], 10, allow_missing_keys=False)
        _ = t(data)  # would raise exception
        with allow_missing_keys_mode(t):
            _ = t(data)  # OK!
    """
    # If given a sequence of transforms, Compose them to get a single list
    if issequenceiterable(transform):
        transform = Compose(transform)

    # Get list of MapTransforms
    transforms = []
    if isinstance(transform, MapTransform):
        transforms = [transform]
    elif isinstance(transform, Compose):
        # Only keep contained MapTransforms
        transforms = [
            t for t in transform.flatten().transforms
            if isinstance(t, MapTransform)
        ]
    if len(transforms) == 0:
        raise TypeError(
            "allow_missing_keys_mode expects either MapTransform(s) or Compose(s) containing MapTransform(s)"
        )

    # Get the state of each `allow_missing_keys`
    orig_states = [t.allow_missing_keys for t in transforms]

    try:
        # Set all to True
        for t in transforms:
            t.allow_missing_keys = True
        yield
    finally:
        # Revert
        for t, o_s in zip(transforms, orig_states):
            t.allow_missing_keys = o_s
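
The same pattern extends to a whole pipeline: because the context manager flattens the Compose and flips allow_missing_keys on every contained MapTransform, a dict that lacks some declared keys passes through cleanly. A usage sketch building on the docstring example:

pipeline = Compose([SpatialPadd(["image", "label"], 10)])
data = {"image": np.arange(16, dtype=float).reshape(1, 4, 4)}  # no "label" key
with allow_missing_keys_mode(pipeline):
    padded = pipeline(data)  # "label" is skipped instead of raising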
Code Example #12
File: test_compose.py Project: mhubii/MONAI
    def test_random_compose(self):
        class _Acc(Randomizable):
            rand = 0.0

            def randomize(self):
                self.rand = self.R.rand()

            def __call__(self, data):
                self.randomize()
                return self.rand + data

        c = Compose([_Acc(), _Acc()])
        self.assertNotAlmostEqual(c(0), c(0))
        c.set_random_state(123)
        self.assertAlmostEqual(c(1), 2.39293837)
        c.set_random_state(223)
        c.randomize()
        self.assertAlmostEqual(c(1), 2.57673391)
Code Example #13
File: test_compose.py Project: mhubii/MONAI
    def test_list_dict_compose(self):
        def a(d):  # transform to handle dict data
            d = dict(d)
            d['a'] += 1
            return d

        def b(d):  # transform to generate a batch list of data
            d = dict(d)
            d['b'] += 1
            d = [d] * 5
            return d

        def c(d):  # transform to handle dict data
            d = dict(d)
            d['c'] += 1
            return d

        transforms = Compose([a, a, b, c, c])
        value = transforms({'a': 0, 'b': 0, 'c': 0})
        for item in value:
            self.assertDictEqual(item, {'a': 2, 'b': 1, 'c': 2})
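
The reason c still runs on every element after b returns a list is the per-item dispatch in the transform applicator: list data is mapped element-wise. A minimal sketch of that rule (mirroring MONAI's apply_transform, error handling omitted):

def apply_transform(transform, data):
    # Apply the transform to each element of list/tuple data, otherwise to the whole.
    if isinstance(data, (list, tuple)):
        return [transform(item) for item in data]
    return transform(data)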
Code Example #14
File: tta.py Project: Project-MONAI/MONAILabel
    def post_transforms(self):
        return Compose([
            Activations(sigmoid=True),
            AsDiscrete(threshold_values=True),
        ])
Code Example #15
class InvB(Inv):
    def __init__(self, keys):
        super().__init__(keys)
        self.fwd_fn = lambda x: x + 100
        self.inv_fn = lambda x: x - 100


TESTS = [((X(), Y(), X()), (1, 2, 1), (0.25, 0.5, 0.25))]

KEYS = ["x", "y"]
TEST_INVERSES = [
    (OneOf((InvA(KEYS), InvB(KEYS))), True),
    (OneOf((OneOf((InvA(KEYS), InvB(KEYS))), OneOf((InvB(KEYS), InvA(KEYS))))), True),
    (OneOf((Compose((InvA(KEYS), InvB(KEYS))), Compose((InvB(KEYS), InvA(KEYS))))), True),
    (OneOf((NonInv(KEYS), NonInv(KEYS))), False),
]


class TestOneOf(unittest.TestCase):
    @parameterized.expand(TESTS)
    def test_normalize_weights(self, transforms, input_weights,
                               expected_weights):
        tr = OneOf(transforms, input_weights)
        self.assertTupleEqual(tr.weights, expected_weights)

    def test_no_weights_arg(self):
        p = OneOf((X(), Y(), X(), Y()))
        expected_weights = (0.25,) * 4
        self.assertTupleEqual(p.weights, expected_weights)
Code Example #16
    def test_empty_compose(self):
        c = Compose()
        i = 1
        self.assertEqual(c(i), 1)