Code example #1
    def __call__(self, data):
        d = dict(data)
        meta_dict: Dict = d[f"{self.ref_image}_{self.meta_key_postfix}"]

        for key, mode, align_corners, meta_key in self.key_iterator(
                d, self.mode, self.align_corners, self.meta_keys):
            image = d[key]

            # Undo Resize
            current_shape = image.shape
            cropped_shape = meta_dict[self.cropped_shape_key]
            if np.any(np.not_equal(current_shape, cropped_shape)):
                resizer = Resize(spatial_size=cropped_shape[1:], mode=mode)
                image = resizer(image, mode=mode, align_corners=align_corners)

            # Undo Crop
            original_shape = meta_dict[self.original_shape_key]
            result = np.zeros(original_shape, dtype=np.float32)
            box_start = meta_dict[self.start_coord_key]
            box_end = meta_dict[self.end_coord_key]

            spatial_dims = min(len(box_start), len(image.shape[1:]))
            slices = [slice(None)] + [
                slice(s, e) for s, e in zip(box_start[:spatial_dims],
                                            box_end[:spatial_dims])
            ]
            slices = tuple(slices)
            result[slices] = image

            # Undo Spacing
            current_size = result.shape[1:]
            # change spatial_shape from HWD to DHW
            spatial_shape = list(np.roll(meta_dict["spatial_shape"], 1))
            spatial_size = spatial_shape[-len(current_size):]

            if np.any(np.not_equal(current_size, spatial_size)):
                resizer = Resize(spatial_size=spatial_size, mode=mode)
                result = resizer(result,
                                 mode=mode,
                                 align_corners=align_corners)

            # Undo Slicing
            slice_idx = meta_dict.get("slice_idx")
            if slice_idx is None or self.slice_only:
                final_result = result if len(result.shape) <= 3 else result[0]
            else:
                slice_idx = meta_dict["slice_idx"][0]
                final_result = np.zeros(tuple(spatial_shape))
                final_result[slice_idx] = result
            d[key] = final_result

            meta_key = meta_key or f"{key}_{self.meta_key_postfix}"
            meta = d.get(meta_key)
            if meta is None:
                meta = dict()
                d[meta_key] = meta
            meta["slice_idx"] = slice_idx
            meta["affine"] = meta_dict["original_affine"]
        return d
Code example #2
    def test_invalid_inputs(self):
        with self.assertRaises(ValueError):
            resize = Resize(spatial_size=(128, 128, 3), mode="order")
            resize(self.imt[0])

        with self.assertRaises(ValueError):
            resize = Resize(spatial_size=(128, ), mode="order")
            resize(self.imt[0])
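
Both cases above raise ValueError: one for an unsupported mode string, one for a spatial_size whose rank does not match the input. For contrast, a minimal valid call (my own sketch, not part of the test suite; mode must be one of the torch.nn.functional.interpolate modes):

import numpy as np
from monai.transforms import Resize

resize = Resize(spatial_size=(128, 128), mode="bilinear")
out = resize(np.random.rand(1, 64, 64).astype(np.float32))  # channel-first (C, H, W)
print(out.shape)  # (1, 128, 128)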
Code example #3
    def test_longest_shape(self, input_param, expected_shape):
        input_data = np.random.randint(0, 2, size=[3, 4, 7, 10])
        input_param["size_mode"] = "longest"
        result = Resize(**input_param)(input_data)
        np.testing.assert_allclose(result.shape[1:], expected_shape)

        set_track_meta(False)
        result = Resize(**input_param)(input_data)
        self.assertNotIsInstance(result, MetaTensor)
        np.testing.assert_allclose(result.shape[1:], expected_shape)
        set_track_meta(True)
Code example #4
    def __init__(self,
                 data: str,
                 split: str,
                 extension: str,
                 length: Optional[int] = None):

        self.datapath = wsl_data_dir / data
        self.data = data

        if data in known_extensions.keys():
            self.extension = known_extensions[data]
        else:
            self.extension = extension

        self.names = pd.read_csv(wsl_csv_dir / data /
                                 f'{split}.csv').Id.tolist()

        if length is not None:
            self.names = random.sample(self.names, min(len(self.names),
                                                       length))

        self.new_size = (224, 224)
        self.image_transforms = Compose([
            Resize(self.new_size),
            RepeatChannel(repeats=3),
            CastToType(dtype=np.float32),
            ToTensor()
        ])
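
A minimal sketch (my own, not from the loader above) of what this pipeline does to one channel-first image:

import numpy as np
from monai.transforms import CastToType, Compose, RepeatChannel, Resize, ToTensor

image_transforms = Compose([
    Resize((224, 224)),           # (1, H, W) -> (1, 224, 224)
    RepeatChannel(repeats=3),     # (1, 224, 224) -> (3, 224, 224)
    CastToType(dtype=np.float32),
    ToTensor(),
])
img = np.random.rand(1, 512, 512).astype(np.float32)
print(image_transforms(img).shape)  # torch.Size([3, 224, 224])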
Code example #5
 def test_longest_infinite_decimals(self):
     resize = Resize(spatial_size=1008,
                     size_mode="longest",
                     mode="bilinear",
                     align_corners=False)
     ret = resize(np.random.randint(0, 2, size=[1, 2544, 3032]))
     self.assertTupleEqual(ret.shape, (1, 846, 1008))
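
The expected shape follows from scaling the longest edge to 1008; a back-of-the-envelope check (the exact rounding rule inside MONAI may differ slightly):

scale = 1008 / 3032            # longest edge 3032 -> 1008
print(round(2544 * scale))     # 846   (2544 * 1008 / 3032 = 845.76...)
print(round(3032 * scale))     # 1008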
Code example #6
def write_png(data,
              file_name: str,
              output_shape=None,
              interp_order: str = "bicubic",
              scale=None):
    """
    Write numpy data into png files to disk.
    Spatially it supports HW for 2D: (H, W), (H, W, 3) or (H, W, 4).
    If `scale` is None, expect the input data in `np.uint8` or `np.uint16` type.
    It's based on the Image module in PIL library:
    https://pillow.readthedocs.io/en/stable/reference/Image.html

    Args:
        data (numpy.ndarray): input data to write to file.
        file_name: expected file name that saved on disk.
        output_shape (None or tuple of ints): output image shape.
        interp_order (`nearest|linear|bilinear|bicubic|trilinear|area`):
            the interpolation mode. Default="bicubic".
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
        scale (255, 65535): postprocess data by clipping to [0, 1] and scaling to
            [0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling.

    """
    assert isinstance(data, np.ndarray), "input data must be numpy array."
    if len(data.shape) == 3 and data.shape[2] == 1:  # PIL Image can't save image with 1 channel
        data = data.squeeze(2)
    if output_shape is not None:
        output_shape = ensure_tuple_rep(output_shape, 2)
        align_corners = False if interp_order in ("linear", "bilinear", "bicubic", "trilinear") else None
        xform = Resize(spatial_size=output_shape,
                       interp_order=interp_order,
                       align_corners=align_corners)
        _min, _max = np.min(data), np.max(data)
        if len(data.shape) == 3:
            data = np.moveaxis(data, -1, 0)  # to channel first
            data = xform(data)
            data = np.moveaxis(data, 0, -1)
        else:  # (H, W)
            data = np.expand_dims(data, 0)  # make a channel
            data = xform(data)[0]  # first channel
        if interp_order != "nearest":
            data = np.clip(data, _min, _max)

    if scale is not None:
        data = np.clip(data, 0.0, 1.0)  # png writer only can scale data in range [0, 1]
        if scale == np.iinfo(np.uint8).max:
            data = (scale * data).astype(np.uint8)
        elif scale == np.iinfo(np.uint16).max:
            data = (scale * data).astype(np.uint16)
        else:
            raise ValueError(f"unsupported scale value: {scale}.")

    img = Image.fromarray(data)
    img.save(file_name, "PNG")
    return
Code example #7
def write_png(
    data: np.ndarray,
    file_name: str,
    output_spatial_shape: Optional[Sequence[int]] = None,
    mode: Union[InterpolateMode, str] = InterpolateMode.BICUBIC,
    scale: Optional[int] = None,
) -> None:
    """
    Write numpy data into png files to disk.
    Spatially it supports HW for 2D: (H, W), (H, W, 3) or (H, W, 4).
    If `scale` is None, expect the input data in `np.uint8` or `np.uint16` type.
    It's based on the Image module in PIL library:
    https://pillow.readthedocs.io/en/stable/reference/Image.html

    Args:
        data: input data to write to file.
        file_name: expected file name that saved on disk.
        output_spatial_shape: spatial shape of the output image.
        mode: {``"nearest"``, ``"linear"``, ``"bilinear"``, ``"bicubic"``, ``"trilinear"``, ``"area"``}
            The interpolation mode. Defaults to ``"bicubic"``.
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
        scale: {``255``, ``65535``} postprocess data by clipping to [0, 1] and scaling to
            [0, 255] (uint8) or [0, 65535] (uint16). Default is None to disable scaling.

    Raises:
        ValueError: When ``scale`` is not one of [255, 65535].

    """
    if not isinstance(data, np.ndarray):
        raise AssertionError("input data must be numpy array.")
    if len(data.shape) == 3 and data.shape[2] == 1:  # PIL Image can't save image with 1 channel
        data = data.squeeze(2)
    if output_spatial_shape is not None:
        output_spatial_shape_ = ensure_tuple_rep(output_spatial_shape, 2)
        mode = InterpolateMode(mode)
        align_corners = None if mode in (InterpolateMode.NEAREST, InterpolateMode.AREA) else False
        xform = Resize(spatial_size=output_spatial_shape_, mode=mode, align_corners=align_corners)
        _min, _max = np.min(data), np.max(data)
        if len(data.shape) == 3:
            data = np.moveaxis(data, -1, 0)  # to channel first
            data = xform(data)
            data = np.moveaxis(data, 0, -1)
        else:  # (H, W)
            data = np.expand_dims(data, 0)  # make a channel
            data = xform(data)[0]  # first channel
        if mode != InterpolateMode.NEAREST:
            data = np.clip(data, _min, _max)  # type: ignore

    if scale is not None:
        data = np.clip(data, 0.0, 1.0)  # type: ignore # png writer only can scale data in range [0, 1]
        if scale == np.iinfo(np.uint8).max:
            data = (scale * data).astype(np.uint8)
        elif scale == np.iinfo(np.uint16).max:
            data = (scale * data).astype(np.uint16)
        else:
            raise ValueError(f"Unsupported scale: {scale}, available options are [255, 65535]")

    img = Image.fromarray(data)
    img.save(file_name, "PNG")
    return
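
A minimal usage sketch for the write_png above (my own example; the file names are hypothetical):

import numpy as np

gray = np.random.rand(64, 64).astype(np.float32)          # float data in [0, 1]
write_png(gray, "gray.png", output_spatial_shape=(128, 128), scale=255)

rgb = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)  # already uint8, no scaling
write_png(rgb, "rgb.png")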
Code example #8
File: post.py Project: Project-MONAI/MONAILabel
    def __call__(self, data):
        d = dict(data)
        meta_dict = d[f"{self.ref_image}_{self.meta_key_postfix}"]
        for idx, key in enumerate(self.keys):
            result = d[key]
            current_size = result.shape[1:] if self.has_channel else result.shape
            spatial_shape = meta_dict["spatial_shape"]
            spatial_size = spatial_shape[-len(current_size):]

            # Undo Spacing
            if np.any(np.not_equal(current_size, spatial_size)):
                resizer = Resize(spatial_size=spatial_size, mode=self.mode[idx])
                result = resizer(result, mode=self.mode[idx], align_corners=self.align_corners[idx])

            d[key] = result if len(result.shape) <= 3 else (result[0] if result.shape[0] == 1 else result)

            meta = d.get(f"{key}_{self.meta_key_postfix}")
            if meta is None:
                meta = dict()
                d[f"{key}_{self.meta_key_postfix}"] = meta
            meta["affine"] = meta_dict.get("original_affine")
        return d
Code example #9
def get_diffusion_label_preprocess() -> Compose:
    return Compose([
        NormalizeIntensity(nonzero=True),
        Unsqueeze(),
        Resize((IMAGESIZE, IMAGESIZE, IMAGESIZE)),
        ToTensor(),
    ])
Code example #10
 def test_correct_results(self, spatial_size, order, mode, cval, clip,
                          preserve_range, anti_aliasing):
     resize = Resize(
         spatial_size,
         order=order,
         mode=mode,
         cval=cval,
         clip=clip,
         preserve_range=preserve_range,
         anti_aliasing=anti_aliasing,
     )
     expected = list()
     for channel in self.imt[0]:
         expected.append(
             skimage.transform.resize(
                 channel,
                 spatial_size,
                 order=order,
                 mode=mode,
                 cval=cval,
                 clip=clip,
                 preserve_range=preserve_range,
                 anti_aliasing=anti_aliasing,
             ))
     expected = np.stack(expected).astype(np.float32)
     self.assertTrue(np.allclose(resize(self.imt[0]), expected))
Code example #11
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    images = [
        '/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz',
        '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz'
    ]
    # 2 binary labels for gender classification: man and woman
    labels = np.array([
        0, 0, 1, 0, 1, 0, 1, 0, 1, 0
    ])

    # Define transforms for image
    val_transforms = Compose([
        ScaleIntensity(),
        AddChannel(),
        Resize((96, 96, 96)),
        ToTensor()
    ])

    # Define nifti dataset
    val_ds = NiftiDataset(image_files=images, labels=labels, transform=val_transforms, image_only=False)
    # create a validation data loader
    val_loader = DataLoader(val_ds, batch_size=2, num_workers=4, pin_memory=torch.cuda.is_available())

    # Create DenseNet121
    device = torch.device('cuda:0')
    model = monai.networks.nets.densenet.densenet121(
        spatial_dims=3,
        in_channels=1,
        out_channels=2,
    ).to(device)

    model.load_state_dict(torch.load('best_metric_model.pth'))
    model.eval()
    with torch.no_grad():
        num_correct = 0.
        metric_count = 0
        saver = CSVSaver(output_dir='./output')
        for val_data in val_loader:
            val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
            val_outputs = model(val_images).argmax(dim=1)
            value = torch.eq(val_outputs, val_labels)
            metric_count += len(value)
            num_correct += value.sum().item()
            saver.save_batch(val_outputs, val_data[2])
        metric = num_correct / metric_count
        print('evaluation metric:', metric)
        saver.finalize()
Code example #12
def resize(x, size_list=None, mode='nearest'):
    if size_list:
        resize_data = Resize(spatial_size=(size_list[0], size_list[1], size_list[2]), mode=mode)
        x = resize_data(x)
        print(x.shape)
    return x
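
A quick usage sketch (my own) for this wrapper; it expects a channel-first array and returns the input unchanged when size_list is falsy:

import numpy as np

x = np.random.rand(1, 64, 64, 64).astype(np.float32)  # channel-first 3D volume
x = resize(x, size_list=[32, 32, 32])                 # prints (1, 32, 32, 32)
x = resize(x)                                         # size_list=None: unchanged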
Code example #13
 def __init__(self, dicom_folders):
     self.dicom_folders = dicom_folders
     self.transforms = get_validation_augmentation()
     self.preprocessing = get_preprocessing(
         functools.partial(preprocess_input, **formatted_settings))
     self.transform3d = Compose(
         [ScaleIntensity(),
          Resize((160, 160, 160)),
          ToTensor()])
Code example #14
    def test_correct_results(self, spatial_size, mode, anti_aliasing):
        """resize 'spatial_size' and 'mode'"""
        resize = Resize(spatial_size, mode=mode, anti_aliasing=anti_aliasing)
        _order = 0
        if mode.endswith("linear"):
            _order = 1
        if spatial_size == (32, -1):
            spatial_size = (32, 64)

        expected = [
            skimage.transform.resize(channel,
                                     spatial_size,
                                     order=_order,
                                     clip=False,
                                     preserve_range=False,
                                     anti_aliasing=anti_aliasing)
            for channel in self.imt[0]
        ]

        expected = np.stack(expected).astype(np.float32)
        for p in TEST_NDARRAYS_ALL:
            im = p(self.imt[0])
            out = resize(im)
            if isinstance(im, MetaTensor):
                if not out.applied_operations:
                    return  # skipped because good shape
                im_inv = resize.inverse(out)
                self.assertTrue(not im_inv.applied_operations)
                assert_allclose(im_inv.shape, im.shape)
                assert_allclose(im_inv.affine, im.affine, atol=1e-3, rtol=1e-3)
            if not anti_aliasing:
                assert_allclose(out, expected, type_test=False, atol=0.9)
                return
            # skimage uses reflect padding for anti-aliasing filter.
            # Our implementation reuses GaussianSmooth() as anti-aliasing filter, which uses zero padding instead.
            # Thus their results near the image boundary will be different.
            if isinstance(out, torch.Tensor):
                out = out.cpu().detach().numpy()
            good = np.sum(np.isclose(expected, out, atol=0.9))
            self.assertLessEqual(
                np.abs(good - expected.size) / float(expected.size), diff_t,
                f"at most {diff_t} percent mismatch ")
Code example #15
def get_longitudinal_preprocess(is_label: bool) -> List[Transform]:
    # identical to the full preprocess but without cropping; somehow there is not much left to crop in this dataset...
    if not is_label:
        return [
            NormalizeIntensity(nonzero=True),
            Unsqueeze(),
            SpatialPad(spatial_size=[215, 215, 215],
                       method="symmetric",
                       mode="constant"),
            Resize((IMAGESIZE, IMAGESIZE, IMAGESIZE)),
        ]
    else:
        return [
            NormalizeIntensity(nonzero=True),
            Unsqueeze(),
            SpatialPad(spatial_size=[215, 215, 215],
                       method="symmetric",
                       mode="constant"),
            Resize((IMAGESIZE, IMAGESIZE, IMAGESIZE)),
        ]
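
Both branches above return the same pipeline. A minimal sketch of running it, assuming IMAGESIZE is a project-specific constant (a hypothetical value is used here) and approximating the project's custom Unsqueeze as "add a leading channel axis":

import numpy as np
from monai.transforms import Compose, NormalizeIntensity, Resize, SpatialPad, Transform

IMAGESIZE = 96  # hypothetical; the real constant lives in the surrounding project

class Unsqueeze(Transform):
    """Stand-in for the project's Unsqueeze: (H, W, D) -> (1, H, W, D)."""
    def __call__(self, img):
        return img[None]

pipeline = Compose(get_longitudinal_preprocess(is_label=False))
vol = np.random.rand(180, 200, 170).astype(np.float32)
print(pipeline(vol).shape)  # (1, 96, 96, 96): padded to 215^3, then resized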
Code example #16
File: aug_3d.py Project: dodler/kgl
def get_rsna_train_aug(name=None, image_size=160):
    return Compose([
        ScaleIntensity(),
        Resize((image_size, image_size, image_size)),
        RandAffine(prob=0.5,
                   translate_range=(5, 5, 5),
                   rotate_range=(np.pi * 4, np.pi * 4, np.pi * 4),
                   scale_range=(0.15, 0.15, 0.15),
                   padding_mode='border'),
        ToTensor()
    ])
Code example #17
    def create_transforms_fn(self, train_kws):
        from monai.transforms import Resize, Compose, ToTensor
        func_list = []
        common_resize = Resize((train_kws['target_size'], train_kws['target_size']))

        # if 'resolution' in train_kws.keys():
        #     func_list.append(
        #         Resize((train_kws['resolution'], train_kws['resolution']))
        #     )
        func_list.append(common_resize)
        func_list.append(ToTensor())
        return Compose(func_list)
Code example #18
File: numpy_dataset.py Project: folliefollie721/mood
def get_transforms_3d(target_size=128):
    """Returns a Transform which resizes 3D samples (1xZxYxX) to a target_size (1 x target_size x target_size x target_size)
    and then converts them to a pytorch tensor.

    Args:
        target_size (int, optional): [New spatial dimension of the input data]. Defaults to 128.

    Returns:
        [Transform]
    """
    transforms = Compose([Resize((target_size, target_size, target_size)), ToTensor()])
    return transforms
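
A quick usage sketch (my own) for get_transforms_3d:

import numpy as np

t = get_transforms_3d(target_size=64)
sample = np.random.rand(1, 128, 128, 128).astype(np.float32)  # 1 x Z x Y x X
print(t(sample).shape)  # torch.Size([1, 64, 64, 64])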
Code example #19
def get_preprocess(is_label: bool) -> List[Transform]:
    if not is_label:
        return [
            Crop(),
            NormalizeIntensity(nonzero=True),
            # Channel
            Unsqueeze(),
            SpatialPad(spatial_size=[193, 193, 193],
                       method="symmetric",
                       mode="constant"),
            Resize((IMAGESIZE, IMAGESIZE, IMAGESIZE)),
        ]
    else:
        return [
            Crop(),
            NormalizeIntensity(nonzero=True),
            Unsqueeze(),
            SpatialPad(spatial_size=[193, 193, 193],
                       method="symmetric",
                       mode="constant"),
            Resize((IMAGESIZE, IMAGESIZE, IMAGESIZE)),
        ]
Code example #20
def write_png(
    data,
    file_name: str,
    output_shape=None,
    interp_order: str = "bicubic",
    scale: bool = False,
    plugin: Optional[str] = None,
    **plugin_args,
):
    """
    Write numpy data into png files to disk.
    Spatially it supports HW for 2D: (H, W), (H, W, 3) or (H, W, 4).
    It's based on skimage library: https://scikit-image.org/docs/dev/api/skimage

    Args:
        data (numpy.ndarray): input data to write to file.
        file_name: expected file name that saved on disk.
        output_shape (None or tuple of ints): output image shape.
        interp_order (`nearest|linear|bilinear|bicubic|trilinear|area`):
            the interpolation mode. Default="bicubic".
            See also: https://pytorch.org/docs/stable/nn.functional.html#interpolate
        scale: whether to postprocess data by clipping to [0, 1] and scaling [0, 255] (uint8).
        plugin: name of plugin to use in `imsave`. By default, the different plugins
            are tried (starting with imageio) until a suitable candidate is found.
        plugin_args (keywords): arguments passed to the given plugin.

    """
    assert isinstance(data, np.ndarray), "input data must be numpy array."

    if output_shape is not None:
        output_shape = ensure_tuple_rep(output_shape, 2)
        xform = Resize(spatial_size=output_shape, interp_order=interp_order)
        _min, _max = np.min(data), np.max(data)
        if len(data.shape) == 3:
            data = np.moveaxis(data, -1, 0)  # to channel first
            data = xform(data)
            data = np.moveaxis(data, 0, -1)
        else:  # (H, W)
            data = np.expand_dims(data, 0)  # make a channel
            data = xform(data)[0]  # first channel
        if interp_order != "nearest":
            data = np.clip(data, _min, _max)

    if scale:
        data = np.clip(data, 0.0, 1.0)  # png writer only can scale data in range [0, 1].
        data = 255 * data
    data = data.astype(np.uint8)
    io.imsave(file_name, data, plugin=plugin, **plugin_args)
    return
Code example #21
File: loaders.py Project: bbearce/wsl
    def __init__(self, data: str, split: str, extension: str,
                 classes: int, col_name: str,
                 regression: bool, debug: bool = False):

        if regression and classes != 1:
            print('Support for multi-class regression is not available.')
            sys.exit(1)

        self.datapath = wsl_data_dir / data
        self.data = data
        self.classes = classes

        known_extensions = {'rsna': 'dcm', 'chexpert': 'jpg'}
        if data in known_extensions.keys():
            self.extension = known_extensions[data]
        else:
            self.extension = extension

        df = pd.read_csv(wsl_csv_dir / data / 'info.csv', converters={col_name: literal_eval})
        self.df = df
        df = df.drop_duplicates(subset='Id', keep='first', ignore_index=True)
        Ids = pd.read_csv(wsl_csv_dir / data / f'{split}.csv').Id.tolist()
        df = df[df.Id.isin(Ids)]

        self.names = df.Id.to_list()
        self.labels = df[col_name].tolist()

        if debug:
            self.names = self.names[0:100]
            self.labels = self.labels[0:100]

        self.image_transforms = Compose([
            Resize((224, 224)),
            RepeatChannel(repeats=3),
            CastToType(dtype=np.float32),
            ToTensor()])

        if regression:
            self.lmax = df[col_name].max()
            self.lmin = df[col_name].min()
            self.labels = [[round((x - self.lmin) / self.lmax, 2)] for x in self.labels]
        else:
            if classes == 1:
                self.labels = [[x] for x in self.labels]
            else:
                self.class_names = self.labels[0].keys()
                self.labels = [list(x.values()) for x in self.labels]

            self.pos_weight = [round((len(col) - sum(col)) / sum(col), 2) for col in zip(*self.labels)]
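
The pos_weight line computes negatives/positives per class; a toy check of the formula (my own numbers, not from the project):

labels = [[1, 0], [0, 0], [1, 1], [0, 0]]  # 4 samples, 2 classes
pos_weight = [round((len(col) - sum(col)) / sum(col), 2) for col in zip(*labels)]
print(pos_weight)  # [1.0, 3.0]: class 0 has 2 neg / 2 pos, class 1 has 3 neg / 1 pos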
Code example #22
 def test_correct_results(self, spatial_size, mode):
     resize = Resize(spatial_size, mode=mode)
     _order = 0
     if mode.endswith("linear"):
         _order = 1
     if spatial_size == (32, -1):
         spatial_size = (32, 64)
     expected = []
     for channel in self.imt[0]:
         expected.append(
             skimage.transform.resize(channel,
                                      spatial_size,
                                      order=_order,
                                      clip=False,
                                      preserve_range=False,
                                      anti_aliasing=False))
     expected = np.stack(expected).astype(np.float32)
     out = resize(self.imt[0])
     np.testing.assert_allclose(out, expected, atol=0.9)
Code example #23
    def create_to_transforms_fn(self, train_kws):
        # func_list = []

        # if 'resolution' in train_kws.keys():
        #     func_list.append(
        #         torch.nn.Upsample((train_kws['resolution'], train_kws['resolution']), mode="bilinear")
        #     )
            
        # func_list.append(
        #     torch.nn.Upsample((train_kws['target_size'], train_kws['target_size']), mode="bilinear")
        # )
        
        # def to_transforms(data):
        #     for func in func_list:
        #         data = func(data)
        #     return data

        from monai.transforms import Resize
        to_transforms = Resize((train_kws['target_size'], train_kws['target_size']))

        return to_transforms
Code example #24
    def test_correct_results(self, spatial_size, mode):
        resize = Resize(spatial_size, mode=mode)
        _order = 0
        if mode.endswith("linear"):
            _order = 1
        if spatial_size == (32, -1):
            spatial_size = (32, 64)
        expected = [
            skimage.transform.resize(channel,
                                     spatial_size,
                                     order=_order,
                                     clip=False,
                                     preserve_range=False,
                                     anti_aliasing=False)
            for channel in self.imt[0]
        ]

        expected = np.stack(expected).astype(np.float32)
        for p in TEST_NDARRAYS:
            out = resize(p(self.imt[0]))
            assert_allclose(out, expected, type_test=False, atol=0.9)
Code example #25
    def score_pixel_2d(self, np_array, **kwargs):
        from monai.transforms import Resize

        origin_size = np_array.shape[-1]
        from_transforms = Resize((origin_size, origin_size))
        to_transforms = self.to_transforms

        np_array = self.transpose(np_array)
        np_array = to_transforms(np_array)
        data_tensor = torch.from_numpy(np_array).float().cuda()

        result = self.get_pixel_score(self.model, data_tensor, **kwargs)

        for key in result.keys():
            if key == 'sp': continue
            tensor = result[key]
            array = tensor.detach().cpu().numpy()
            array = from_transforms(array)
            array = self.revert_transpose(array)
            result[key] = array

        return result
Code example #26
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # create a temporary directory and 40 random image/mask pairs
    tempdir = tempfile.mkdtemp()
    print('generating synthetic data to {} (this may take a while)'.format(tempdir))
    for i in range(40):
        im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)

        n = nib.Nifti1Image(im, np.eye(4))
        nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))

        n = nib.Nifti1Image(seg, np.eye(4))
        nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))

    images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
    segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))

    # define transforms for image and segmentation
    train_imtrans = Compose([
        ScaleIntensity(),
        AddChannel(),
        RandSpatialCrop((96, 96, 96), random_size=False),
        ToTensor()
    ])
    train_segtrans = Compose([
        AddChannel(),
        RandSpatialCrop((96, 96, 96), random_size=False),
        ToTensor()
    ])
    val_imtrans = Compose([
        ScaleIntensity(),
        AddChannel(),
        Resize((96, 96, 96)),
        ToTensor()
    ])
    val_segtrans = Compose([
        AddChannel(),
        Resize((96, 96, 96)),
        ToTensor()
    ])

    # define nifti dataset, data loader
    check_ds = NiftiDataset(images, segs, transform=train_imtrans, seg_transform=train_segtrans)
    check_loader = DataLoader(check_ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())
    im, seg = monai.utils.misc.first(check_loader)
    print(im.shape, seg.shape)

    # create a training data loader
    train_ds = NiftiDataset(images[:20], segs[:20], transform=train_imtrans, seg_transform=train_segtrans)
    train_loader = DataLoader(train_ds, batch_size=5, shuffle=True, num_workers=8, pin_memory=torch.cuda.is_available())
    # create a validation data loader
    val_ds = NiftiDataset(images[-20:], segs[-20:], transform=val_imtrans, seg_transform=val_segtrans)
    val_loader = DataLoader(val_ds, batch_size=5, num_workers=8, pin_memory=torch.cuda.is_available())

    # create UNet, DiceLoss and Adam optimizer
    net = monai.networks.nets.UNet(
        dimensions=3,
        in_channels=1,
        out_channels=1,
        channels=(16, 32, 64, 128, 256),
        strides=(2, 2, 2, 2),
        num_res_units=2,
    )
    loss = monai.losses.DiceLoss(do_sigmoid=True)
    lr = 1e-3
    opt = torch.optim.Adam(net.parameters(), lr)
    device = torch.device('cuda:0')

    # ignite trainer expects batch=(img, seg) and returns output=loss at every iteration,
    # user can add output_transform to return other values, like: y_pred, y, etc.
    trainer = create_supervised_trainer(net, opt, loss, device, False)

    # adding checkpoint handler to save models (network params and optimizer stats) during training
    checkpoint_handler = ModelCheckpoint('./runs/', 'net', n_saved=10, require_empty=False)
    trainer.add_event_handler(event_name=Events.EPOCH_COMPLETED,
                              handler=checkpoint_handler,
                              to_save={'net': net, 'opt': opt})

    # StatsHandler prints loss at every iteration and print metrics at every epoch,
    # we don't set metrics for trainer here, so just print loss, user can also customize print functions
    # and can use output_transform to convert engine.state.output if it's not a loss value
    train_stats_handler = StatsHandler(name='trainer')
    train_stats_handler.attach(trainer)

    # TensorBoardStatsHandler plots loss at every iteration and plots metrics at every epoch, same as StatsHandler
    train_tensorboard_stats_handler = TensorBoardStatsHandler()
    train_tensorboard_stats_handler.attach(trainer)

    validation_every_n_epochs = 1
    # Set parameters for validation
    metric_name = 'Mean_Dice'
    # add evaluation metric to the evaluator engine
    val_metrics = {metric_name: MeanDice(add_sigmoid=True, to_onehot_y=False)}

    # ignite evaluator expects batch=(img, seg) and returns output=(y_pred, y) at every iteration,
    # user can add output_transform to return other values
    evaluator = create_supervised_evaluator(net, val_metrics, device, True)


    @trainer.on(Events.EPOCH_COMPLETED(every=validation_every_n_epochs))
    def run_validation(engine):
        evaluator.run(val_loader)


    # add early stopping handler to evaluator
    early_stopper = EarlyStopping(patience=4,
                                  score_function=stopping_fn_from_metric(metric_name),
                                  trainer=trainer)
    evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=early_stopper)

    # add stats event handler to print validation stats via evaluator
    val_stats_handler = StatsHandler(
        name='evaluator',
        output_transform=lambda x: None,  # no need to print loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.epoch)  # fetch global epoch number from trainer
    val_stats_handler.attach(evaluator)

    # add handler to record metrics to TensorBoard at every validation epoch
    val_tensorboard_stats_handler = TensorBoardStatsHandler(
        output_transform=lambda x: None,  # no need to plot loss value, so disable per iteration output
        global_epoch_transform=lambda x: trainer.state.epoch)  # fetch global epoch number from trainer
    val_tensorboard_stats_handler.attach(evaluator)

    # add handler to draw the first image and the corresponding label and model output in the last batch
    # here we draw the 3D output as GIF format along Depth axis, at every validation epoch
    val_tensorboard_image_handler = TensorBoardImageHandler(
        batch_transform=lambda batch: (batch[0], batch[1]),
        output_transform=lambda output: predict_segmentation(output[0]),
        global_iter_transform=lambda x: trainer.state.epoch
    )
    evaluator.add_event_handler(event_name=Events.EPOCH_COMPLETED, handler=val_tensorboard_image_handler)

    train_epochs = 30
    state = trainer.run(train_loader, train_epochs)
    shutil.rmtree(tempdir)
Code example #27
    '/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz',
    '/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz',
    '/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz',
    '/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz',
    '/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz',
    '/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz',
    '/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz'
]
# 2 binary labels for gender classification: man and woman
labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0])

# Define transforms
train_transforms = Compose([
    ScaleIntensity(),
    AddChannel(),
    Resize((96, 96, 96)),
    RandRotate90(),
    ToTensor()
])
val_transforms = Compose(
    [ScaleIntensity(),
     AddChannel(),
     Resize((96, 96, 96)),
     ToTensor()])

# Define nifti dataset, data loader
check_ds = NiftiDataset(image_files=images,
                        labels=labels,
                        transform=train_transforms)
check_loader = DataLoader(check_ds,
                          batch_size=2,
Code example #28

# In[ ]:


def default_collate(batch):
    data = torch.stack([item[0] for item in batch])
    target = torch.stack([item[1] for item in batch])  # image labels.
    return data, target


# In[ ]:


train_transforms = Compose([ScaleIntensity(), 
                            Resize((image_size, image_size, image_size)), 
                            RandAffine( 
                                      prob=0.5,
#                                       rotate_range=(np.pi * 2, np.pi * 2, np.pi * 2),
                                      scale_range=(0.15, 0.15, 0.15),
                                      padding_mode='border'),
                            ToTensor()])
val_transforms = Compose([ScaleIntensity(),Resize((image_size, image_size, image_size)),ToTensor()])


# In[ ]:


dataset_show = RSNADataset3D(df_study.head(5), 'train', transform=val_transforms)
dataset_show_aug = RSNADataset3D(df_study.head(5), 'train', transform=train_transforms)
# from pylab import rcParams
Code example #29
def main():
    monai.config.print_config()
    logging.basicConfig(stream=sys.stdout, level=logging.INFO)

    # IXI dataset as a demo, downloadable from https://brain-development.org/ixi-dataset/
    # the path of ixi IXI-T1 dataset
    data_path = os.sep.join(
        [".", "workspace", "data", "medical", "ixi", "IXI-T1"])
    images = [
        "IXI607-Guys-1097-T1.nii.gz",
        "IXI175-HH-1570-T1.nii.gz",
        "IXI385-HH-2078-T1.nii.gz",
        "IXI344-Guys-0905-T1.nii.gz",
        "IXI409-Guys-0960-T1.nii.gz",
        "IXI584-Guys-1129-T1.nii.gz",
        "IXI253-HH-1694-T1.nii.gz",
        "IXI092-HH-1436-T1.nii.gz",
        "IXI574-IOP-1156-T1.nii.gz",
        "IXI585-Guys-1130-T1.nii.gz",
    ]
    images = [os.sep.join([data_path, f]) for f in images]

    # 2 binary labels for gender classification: man and woman
    labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0], dtype=np.int64)

    # Define transforms for image
    val_transforms = Compose(
        [ScaleIntensity(),
         AddChannel(),
         Resize((96, 96, 96)),
         EnsureType()])

    # Define image dataset
    val_ds = ImageDataset(image_files=images,
                          labels=labels,
                          transform=val_transforms,
                          image_only=False)
    # create a validation data loader
    val_loader = DataLoader(val_ds,
                            batch_size=2,
                            num_workers=4,
                            pin_memory=torch.cuda.is_available())

    # Create DenseNet121
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = monai.networks.nets.DenseNet121(spatial_dims=3,
                                            in_channels=1,
                                            out_channels=2).to(device)

    model.load_state_dict(
        torch.load("best_metric_model_classification3d_array.pth"))
    model.eval()
    with torch.no_grad():
        num_correct = 0.0
        metric_count = 0
        saver = CSVSaver(output_dir="./output")
        for val_data in val_loader:
            val_images, val_labels = val_data[0].to(device), val_data[1].to(device)
            val_outputs = model(val_images).argmax(dim=1)
            value = torch.eq(val_outputs, val_labels)
            metric_count += len(value)
            num_correct += value.sum().item()
            saver.save_batch(val_outputs, val_data[2])
        metric = num_correct / metric_count
        print("evaluation metric:", metric)
        saver.finalize()
Code example #30
# print("use monai model:", monai_model_file)

target_cols = [
    'rv_lv_ratio_gte_1',  # exam level
    "central_pe",
    "leftsided_pe",
    "rightsided_pe",
    "acute_and_chronic_pe",
    "chronic_pe"
]
out_dim = len(target_cols)
image_size = 100

val_transforms = Compose([
    ScaleIntensity(),
    Resize((image_size, image_size, image_size)),
    ToTensor()
])
val_transforms.set_random_state(seed=42)


def monai_preprocess(imgs512):
    imgs = imgs512[:, :, 43:-55, 43:-55]
    img_monai = imgs[int(imgs.shape[0] * 0.25):int(imgs.shape[0] * 0.75)]
    img_monai = np.transpose(img_monai, (1, 2, 3, 0))
    img_monai = apply_transform(val_transforms, img_monai)
    img_monai = np.expand_dims(img_monai, axis=0)
    img_monai = torch.from_numpy(img_monai).cuda()
    return img_monai
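
A shape walk-through of monai_preprocess (my assumption, not stated in the source: imgs512 is laid out as (num_slices, channels, 512, 512)):

import numpy as np

imgs512 = np.random.rand(80, 3, 512, 512).astype(np.float32)
imgs = imgs512[:, :, 43:-55, 43:-55]                              # crop H, W: 512 -> 414
mid = imgs[int(imgs.shape[0] * 0.25):int(imgs.shape[0] * 0.75)]   # middle 50% of slices
vol = np.transpose(mid, (1, 2, 3, 0))                             # (3, 414, 414, 40)
print(vol.shape)
# Resize((100, 100, 100)) then treats axis 0 as channels: -> (3, 100, 100, 100)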