def test_value_errors(self):
    """NormalizeIntensity must raise ValueError when channel-wise subtrahend/divisor lengths mismatch the channels."""
    data = np.array([[0.0, 3.0, 0.0, 4.0], [0.0, 4.0, 0.0, 5.0]])
    # subtrahend has a single entry while the input has two channels
    short_subtrahend = NormalizeIntensity(nonzero=True, channel_wise=True, subtrahend=[1])
    with self.assertRaises(ValueError):
        short_subtrahend(data)
    # subtrahend matches (two entries) but divisor is one entry short
    short_divisor = NormalizeIntensity(nonzero=True, channel_wise=True, subtrahend=[1, 2], divisor=[1])
    with self.assertRaises(ValueError):
        short_divisor(data)
def get_diffusion_label_preprocess() -> Compose:
    """Build the preprocessing pipeline applied to diffusion labels."""
    steps = [
        NormalizeIntensity(nonzero=True),
        Unsqueeze(),
        Resize((IMAGESIZE, IMAGESIZE, IMAGESIZE)),
        ToTensor(),
    ]
    return Compose(steps)
def test_channel_wise(self, im_type):
    """Channel-wise nonzero normalization should map each channel to the expected values."""
    transform = NormalizeIntensity(nonzero=True, channel_wise=True)
    raw = np.array([[0.0, 3.0, 0.0, 4.0], [0.0, 4.0, 0.0, 5.0]])
    want = np.array([[0.0, -1.0, 0.0, 1.0], [0.0, -1.0, 0.0, 1.0]])
    result = transform(im_type(raw))
    assert_allclose(result, im_type(want), type_test="tensor")
def test_default(self, im_type):
    """Default NormalizeIntensity matches a manual (x - mean) / std in float32."""
    img = im_type(self.imt.copy())
    result = NormalizeIntensity()(img)
    self.assertIn(result.dtype, (np.float32, torch.float32))
    reference = (self.imt - np.mean(self.imt)) / np.std(self.imt)
    assert_allclose(result, reference, type_test="tensor", rtol=1e-3)
def test_default(self, im_type):
    """Default normalization preserves container type (and device for tensors) and matches (x - mean) / std."""
    img = im_type(self.imt.copy())
    result = NormalizeIntensity()(img)
    # output container type must match the input container type
    self.assertEqual(type(img), type(result))
    if isinstance(result, torch.Tensor):
        # torch outputs must stay on the input's device
        self.assertEqual(img.device, result.device)
    self.assertIn(result.dtype, (np.float32, torch.float32))
    reference = (self.imt - np.mean(self.imt)) / np.std(self.imt)
    assert_allclose(result, reference, type_test=False, rtol=1e-3)
def get_longitudinal_preprocess(is_label: bool) -> List[Transform]:
    """Return the preprocessing chain for the longitudinal dataset.

    ``is_label`` is kept for interface compatibility with callers, but both
    branches of the original implementation returned identical pipelines, so
    the duplicated ``if``/``else`` has been collapsed into a single return.
    """
    # only without cropping, somehow, there is not much left to crop in this dataset...
    return [
        NormalizeIntensity(nonzero=True),
        Unsqueeze(),
        SpatialPad(spatial_size=[215, 215, 215], method="symmetric", mode="constant"),
        Resize((IMAGESIZE, IMAGESIZE, IMAGESIZE)),
    ]
def get_preprocess(is_label: bool) -> List[Transform]:
    """Return the standard preprocessing chain.

    ``is_label`` is retained for API compatibility; the original function
    returned identical pipelines for both images and labels (only a stray
    comment differed), so the duplicated branch has been removed.
    """
    return [
        Crop(),
        NormalizeIntensity(nonzero=True),
        Unsqueeze(),
        SpatialPad(spatial_size=[193, 193, 193], method="symmetric", mode="constant"),
        Resize((IMAGESIZE, IMAGESIZE, IMAGESIZE)),
    ]
def __init__(
    self,
    keys,
    clip_values,
    pixdim,
    normalize_values,
    model_mode,
) -> None:
    """Store preprocessing parameters and construct the constituent transforms."""
    super().__init__(keys)
    self.keys = keys
    # intensity clipping window
    self.low = clip_values[0]
    self.high = clip_values[1]
    self.target_spacing = pixdim
    # normalization statistics
    self.mean = normalize_values[0]
    self.std = normalize_values[1]
    self.crop_foreg = CropForegroundd(keys=["image", "label"], source_key="image")
    self.normalize_intensity = NormalizeIntensity(nonzero=True, channel_wise=True)
    # training-specific behaviour is enabled only in "train" mode
    self.training = model_mode in ["train"]
def predict_mask(model, images):
    """Run sliding-window inference over a stack of image slices and return a C-contiguous uint8 mask."""
    # stack the per-slice pixel arrays into one float32 volume with two leading singleton dims
    slices = [image.pixel_array for image in images]
    volume = np.expand_dims(
        np.transpose(np.array(slices).astype('float32')), (0, 1))
    transforms = Compose([AddChannel(), NormalizeIntensity(), ToTensor()])
    dataset = monai.data.Dataset(data=volume, transform=transforms)
    print(dataset[0].shape)
    prediction = sliding_window_inference(dataset[0], roi_size=[128, 128, 16],
                                          sw_batch_size=1, predictor=model)
    # class index per voxel, moved to host memory
    mask = prediction.argmax(1).detach().cpu().numpy()
    mask = np.transpose(np.squeeze(mask, 0))
    mask = mask.astype('uint8')
    return np.asarray(mask, order='C')
def get_2D_diffusion_preprocess() -> Compose:
    """Build the preprocessing pipeline for 2D diffusion inputs."""
    steps = [
        NormalizeIntensity(nonzero=True),
        Transpose2DInput(),
    ]
    return Compose(steps)
def test_channel_wise(self):
    """Channel-wise nonzero normalization should produce the expected per-channel values."""
    transform = NormalizeIntensity(nonzero=True, channel_wise=True)
    raw = np.array([[0.0, 3.0, 0.0, 4.0], [0.0, 4.0, 0.0, 5.0]])
    want = np.array([[0.0, -1.0, 0.0, 1.0], [0.0, -1.0, 0.0, 1.0]])
    np.testing.assert_allclose(want, transform(raw))
def test_nonzero(self, input_param, input_data, expected_data):
    """Parameterized check of NormalizeIntensity against precomputed expectations."""
    result = NormalizeIntensity(**input_param)(input_data)
    np.testing.assert_allclose(expected_data, result)
def test_default(self):
    """Default NormalizeIntensity equals (x - mean) / std and yields float32."""
    result = NormalizeIntensity()(self.imt)
    self.assertEqual(result.dtype, np.float32)
    reference = (self.imt - np.mean(self.imt)) / np.std(self.imt)
    np.testing.assert_allclose(result, reference, rtol=1e-6)
def test_image_normalize_intensity(self):
    """NormalizeIntensity should reproduce a manual mean/std normalization."""
    result = NormalizeIntensity()(self.imt)
    reference = (self.imt - np.mean(self.imt)) / np.std(self.imt)
    self.assertTrue(np.allclose(result, reference))
def test_nonzero(self, in_type, input_param, input_data, expected_data):
    """Parameterized nonzero-normalization check across input container types."""
    transform = NormalizeIntensity(**input_param)
    result = transform(in_type(input_data))
    assert_allclose(result, in_type(expected_data), type_test="tensor")
def normalize_img(in_img):
    """Apply default NormalizeIntensity and return the result as a torch.Tensor."""
    normalized = NormalizeIntensity()(in_img)
    return torch.Tensor(normalized)