Example #1
    def __call__(self, data: Any):
        if self.depth:
            img, seg = create_test_image_3d(
                self.height,
                self.width,
                self.depth,
                self.num_objs,
                self.rad_max,
                self.rad_min,
                self.noise_max,
                self.num_seg_classes,
                self.channel_dim,
                self.random_state,
            )
        else:
            img, seg = create_test_image_2d(
                self.height,
                self.width,
                self.num_objs,
                self.rad_max,
                self.rad_min,
                self.noise_max,
                self.num_seg_classes,
                self.channel_dim,
                self.random_state,
            )

        return img, seg
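
For reference, a minimal standalone sketch of calling the synthetic-data generators directly (illustrative only; the keyword names follow the calls used in these examples, and the defaults can differ between MONAI versions):

import numpy as np

from monai.data.synthetic import create_test_image_2d, create_test_image_3d

# Illustrative sketch: a 64x64x64 volume with 4 objects and a single-class
# segmentation, using a fixed random state for repeatability.
img3, seg3 = create_test_image_3d(64, 64, 64, num_objs=4, rad_max=10,
                                  noise_max=0.5, num_seg_classes=1,
                                  random_state=np.random.RandomState(42))
print(img3.shape, seg3.shape)  # both (64, 64, 64); float image, labelled segmentation

# The 2D variant has the same keyword interface minus the depth argument.
img2, seg2 = create_test_image_2d(64, 64, num_objs=4, rad_max=10,
                                  num_seg_classes=1)
print(img2.shape, seg2.shape)  # both (64, 64)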
Example #2
    def setUp(self):
        im, msk = create_test_image_3d(self.im_shape[0], self.im_shape[1],
                                       self.im_shape[2], 4, 20, 0,
                                       self.num_classes)

        self.imt = im[None, None]
        self.seg1 = (msk[None, None] > 0).astype(np.float32)
        self.segn = msk[None, None]
Example #3
def run_test(batch_size=2, device=torch.device("cpu:0")):

    im, seg = create_test_image_3d(25,
                                   28,
                                   63,
                                   rad_max=10,
                                   noise_max=1,
                                   num_objs=4,
                                   num_seg_classes=1)
    input_shape = im.shape
    img_name = make_nifti_image(im)
    seg_name = make_nifti_image(seg)
    ds = NiftiDataset([img_name], [seg_name],
                      transform=AddChannel(),
                      seg_transform=AddChannel(),
                      image_only=False)
    loader = DataLoader(ds, batch_size=1, pin_memory=torch.cuda.is_available())

    net = UNet(
        dimensions=3,
        in_channels=1,
        num_classes=1,
        channels=(4, 8, 16, 32),
        strides=(2, 2, 2),
        num_res_units=2,
    )
    roi_size = (16, 32, 48)
    sw_batch_size = batch_size

    def _sliding_window_processor(_engine, batch):
        net.eval()
        img, seg, meta_data = batch
        with torch.no_grad():
            seg_probs = sliding_window_inference(img, roi_size, sw_batch_size,
                                                 lambda x: net(x)[0], device)
            return predict_segmentation(seg_probs)

    infer_engine = Engine(_sliding_window_processor)

    with tempfile.TemporaryDirectory() as temp_dir:
        SegmentationSaver(output_path=temp_dir,
                          output_ext='.nii.gz',
                          output_postfix='seg').attach(infer_engine)

        infer_engine.run(loader)

        basename = os.path.basename(img_name)[:-len('.nii.gz')]
        saved_name = os.path.join(temp_dir, basename,
                                  '{}_seg.nii.gz'.format(basename))
        testing_shape = nib.load(saved_name).get_fdata().shape

    if os.path.exists(img_name):
        os.remove(img_name)
    if os.path.exists(seg_name):
        os.remove(seg_name)

    return testing_shape == input_shape
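
A hypothetical driver for the round-trip check above (not part of the original snippet):

# Hypothetical usage: run the test on GPU when available, otherwise on CPU,
# and report whether the saved segmentation kept the input spatial shape.
if __name__ == "__main__":
    dev = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("saved shape matches input:", run_test(batch_size=2, device=dev))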
Example #4
from monai.handlers.stats_handler import StatsHandler
from monai.handlers.mean_dice import MeanDice
from monai.visualize import img2tensorboard
from monai.data.synthetic import create_test_image_3d
from monai.handlers.utils import stopping_fn_from_metric

# assumes the framework is found here, change as necessary
sys.path.append("..")

config.print_config()

# Create a temporary directory and 50 random image, mask pairs
tempdir = tempfile.mkdtemp()

for i in range(50):
    im, seg = create_test_image_3d(256, 256, 256)

    n = nib.Nifti1Image(im, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))

    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))

images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))

# Define transforms for image and segmentation
imtrans = transforms.Compose(
    [Rescale(),
     AddChannel(),
     UniformRandomPatch((64, 64, 64)),
Example #5
from monai.networks.nets.unet import UNet
from monai.networks.utils import predict_segmentation
from monai.data.synthetic import create_test_image_3d
from monai.utils.sliding_window_inference import sliding_window_inference
from monai.handlers.stats_handler import StatsHandler
from monai.handlers.mean_dice import MeanDice

config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

tempdir = tempfile.mkdtemp()
# tempdir = './temp'
print(
    'generating synthetic data to {} (this may take a while)'.format(tempdir))
for i in range(5):
    im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)

    n = nib.Nifti1Image(im, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))

    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))

images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))

# Define transforms for image and segmentation
imtrans = transforms.Compose([Rescale(), AddChannel()])
segtrans = transforms.Compose([AddChannel()])
ds = NiftiDataset(images,
                  segs,
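
The snippet above is cut off mid-call. Purely as an illustrative sketch (not the original code), the dataset and loader are commonly completed along these lines, reusing the NiftiDataset keywords shown in Example #3:

import torch
from torch.utils.data import DataLoader

# Hypothetical completion of the truncated call above; with image_only=False
# each batch yields (image, segmentation, metadata) as in Example #3.
ds = NiftiDataset(images, segs,
                  transform=imtrans,
                  seg_transform=segtrans,
                  image_only=False)
loader = DataLoader(ds, batch_size=2, num_workers=2,
                    pin_memory=torch.cuda.is_available())
img_batch, seg_batch, meta = next(iter(loader))
print(img_batch.shape, seg_batch.shape)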
Example #6
from monai.data.nifti_reader import NiftiDataset
from monai.transforms import (AddChannel, Rescale, ToTensor, UniformRandomPatch)
from monai.handlers.stats_handler import StatsHandler
from monai.handlers.mean_dice import MeanDice
from monai.visualize import img2tensorboard
from monai.data.synthetic import create_test_image_3d
from monai.handlers.utils import stopping_fn_from_metric

monai.config.print_config()

# Create a temporary directory and 50 random image, mask pairs
tempdir = tempfile.mkdtemp()

for i in range(50):
    im, seg = create_test_image_3d(128, 128, 128)

    n = nib.Nifti1Image(im, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))

    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))

images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))

# Define transforms for image and segmentation
imtrans = transforms.Compose([
    Rescale(),
    AddChannel(),
    UniformRandomPatch((96, 96, 96)),
Example #7
from monai.handlers.checkpoint_loader import CheckpointLoader
from monai.handlers.stats_handler import StatsHandler
from monai.handlers.mean_dice import MeanDice
from monai import config

config.print_config()
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

tempdir = tempfile.mkdtemp()
# tempdir = './temp'
print(
    'generating synthetic data to {} (this may take a while)'.format(tempdir))
for i in range(5):
    im, seg = create_test_image_3d(128,
                                   128,
                                   128,
                                   num_seg_classes=1,
                                   channel_dim=-1)

    n = nib.Nifti1Image(im, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))

    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))

images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))
val_files = [{'img': img, 'seg': seg} for img, seg in zip(images, segs)]

# Define transforms for image and segmentation
val_transforms = transforms.Compose([
Example #8
from parameterized import parameterized

from monai.data.synthetic import create_test_image_2d, create_test_image_3d
from monai.transforms.utils_pytorch_numpy_unification import moveaxis
from monai.utils.module import optional_import
from monai.visualize.utils import blend_images
from tests.utils import TEST_NDARRAYS

plt, has_matplotlib = optional_import("matplotlib.pyplot")

TESTS = []
for p in TEST_NDARRAYS:
    image, label = create_test_image_2d(100, 101)
    TESTS.append((p(image), p(label)))

    image, label = create_test_image_3d(100, 101, 102)
    TESTS.append((p(image), p(label)))


@skipUnless(has_matplotlib, "Matplotlib required")
class TestBlendImages(unittest.TestCase):
    @parameterized.expand(TESTS)
    def test_blend(self, image, label):
        blended = blend_images(image[None], label[None])
        self.assertEqual(type(image), type(blended))
        if isinstance(blended, torch.Tensor):
            self.assertEqual(blended.device, image.device)
            blended = blended.cpu().numpy()
        self.assertEqual((3, ) + image.shape, blended.shape)

        blended = moveaxis(blended, 0, -1)  # move RGB component to end
Example #9
from tests.utils import TEST_NDARRAYS

plt, has_matplotlib = optional_import("matplotlib.pyplot")


def get_alpha(img):
    return 0.5 * np.arange(img.size).reshape(img.shape) / img.size


TESTS = []
for p in TEST_NDARRAYS:
    image, label = create_test_image_2d(100, 101, channel_dim=0)
    TESTS.append((p(image), p(label), 0.5))
    TESTS.append((p(image), p(label), p(get_alpha(image))))

    image, label = create_test_image_3d(100, 101, 102, channel_dim=0)
    TESTS.append((p(image), p(label), 0.5))
    TESTS.append((p(image), p(label), p(get_alpha(image))))


@skipUnless(has_matplotlib, "Matplotlib required")
class TestBlendImages(unittest.TestCase):
    @parameterized.expand(TESTS)
    def test_blend(self, image, label, alpha):
        blended = blend_images(image, label, alpha)
        self.assertEqual(type(image), type(blended))
        if isinstance(blended, torch.Tensor):
            self.assertEqual(blended.device, image.device)
            blended = blended.cpu().numpy()
        self.assertEqual((3,) + image[0].shape, blended.shape)
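
Outside the unittest harness, a minimal sketch of blending a synthetic image with its label map and displaying the result (it assumes only the blend_images(image, label, alpha) interface exercised above):

import matplotlib.pyplot as plt
import numpy as np

from monai.data.synthetic import create_test_image_2d
from monai.visualize.utils import blend_images

# Channel-first image/label pair, as in the TESTS construction above.
image, label = create_test_image_2d(100, 101, channel_dim=0)
blended = blend_images(image, label, 0.5)   # RGB overlay, shape (3, 100, 101)
plt.imshow(np.moveaxis(blended, 0, -1))     # channel-last layout for imshow
plt.axis("off")
plt.show()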