# Script chunk (MONAI-style example): generate 5 synthetic 3D image/seg pairs as
# NIfTI files, build dataset/transforms, and start constructing a 3D UNet.
# NOTE(review): original formatting was collapsed onto one line; line breaks
# restored here, code tokens unchanged. Chunk is truncated mid `UNet(...)` call
# at the end — the remaining constructor arguments live outside this view.
print(
    'generating synthetic data to {} (this may take a while)'.format(tempdir))
for i in range(5):
    # create_test_image_3d presumably returns (image, segmentation) arrays — TODO confirm
    im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)
    n = nib.Nifti1Image(im, np.eye(4))  # identity affine: no spatial metadata
    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))
    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))
# sorted() keeps image/seg lists aligned by filename index
images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))

# Define transforms for image and segmentation
imtrans = transforms.Compose([Rescale(), AddChannel()])
segtrans = transforms.Compose([AddChannel()])  # no Rescale on labels
ds = NiftiDataset(images, segs, transform=imtrans, seg_transform=segtrans, image_only=False)
device = torch.device("cuda:0")  # assumes a CUDA device is available — TODO confirm
net = UNet(
    dimensions=3,
    in_channels=1,
    out_channels=1,
    channels=(16, 32, 64, 128, 256),
    strides=(2, 2, 2, 2),
    num_res_units=2,
    # (call continues beyond this chunk)
# Script chunk: write 50 synthetic 3D image/seg NIfTI pairs, build random-patch
# transforms and a DataLoader, and print the shape of one batch as a smoke check.
# NOTE(review): original formatting was collapsed onto one line; line breaks
# restored here, code tokens unchanged. `tempdir`, `create_test_image_3d`, etc.
# are defined/imported outside this view.
for i in range(50):
    im, seg = create_test_image_3d(256, 256, 256)
    n = nib.Nifti1Image(im, np.eye(4))  # identity affine: no spatial metadata
    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))
    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))
# sorted() keeps image/seg lists aligned by filename index
images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))

# Define transforms for image and segmentation
imtrans = transforms.Compose(
    [Rescale(), AddChannel(), UniformRandomPatch((64, 64, 64)), ToTensor()])
segtrans = transforms.Compose(
    [AddChannel(), UniformRandomPatch((64, 64, 64)), ToTensor()])  # no Rescale on labels

# Define nifti dataset, dataloader.
ds = NiftiDataset(images, segs, transform=imtrans, seg_transform=segtrans)
# pin_memory only helps (and is only enabled) when CUDA is available
loader = DataLoader(ds, batch_size=10, num_workers=2, pin_memory=torch.cuda.is_available())
# Pull one batch to verify the pipeline end-to-end
im, seg = monai.utils.misc.first(loader)
print(im.shape, seg.shape)
# Script chunk: tail of an IXI T1 image-path list, binary gender labels, dict-style
# validation transforms, and a DenseNet121 classifier for evaluation.
# NOTE(review): original formatting was collapsed onto one line; line breaks
# restored here, code tokens unchanged. Chunk begins mid list literal — the
# opening bracket and earlier entries are outside this view.
    "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz"
]

# 2 binary labels for gender classification: man and woman
labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0])
# Pair each image path with its label as a dict record for the dict transforms
val_files = [{
    'img': img,
    'label': label
} for img, label in zip(images, labels)]

# Define transforms for image
val_transforms = transforms.Compose([
    LoadNiftid(keys=['img']),
    AddChanneld(keys=['img']),
    Rescaled(keys=['img']),
    Resized(keys=['img'], output_spatial_shape=(96, 96, 96))
])

# Create DenseNet121
net = monai.networks.nets.densenet3d.densenet121(
    in_channels=1,
    out_channels=2,  # two output logits for the binary gender classes
)
device = torch.device("cuda:0")  # assumes a CUDA device is available — TODO confirm


def prepare_batch(batch, device=None, non_blocking=False):
    # Adapt the dict batch to the (input, target) tuple expected by the
    # ignite-style `_prepare_batch` helper (imported outside this view).
    return _prepare_batch((batch['img'], batch['label']), device, non_blocking)
# Script chunk: tail of a synthetic-data generation loop, then dict-style
# validation transforms, dataset, and a 3D UNet moved to GPU.
# NOTE(review): original formatting was collapsed onto one line; line breaks
# restored here, code tokens unchanged. Chunk begins mid call — presumably the
# tail of `create_test_image_3d(..., channel_dim=-1)` inside a loop whose
# header (and `i`, `im`, `seg`) is outside this view.
                                 channel_dim=-1)
    n = nib.Nifti1Image(im, np.eye(4))  # identity affine: no spatial metadata
    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))
    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))
# sorted() keeps image/seg lists aligned by filename index
images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))
val_files = [{'img': img, 'seg': seg} for img, seg in zip(images, segs)]

# Define transforms for image and segmentation
val_transforms = transforms.Compose([
    LoadNiftid(keys=['img', 'seg']),
    # channel_dim=-1 matches the channel-last layout produced above
    AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1),
    Rescaled(keys=['img', 'seg'])
])
val_ds = monai.data.Dataset(data=val_files, transform=val_transforms)
device = torch.device("cuda:0")  # assumes a CUDA device is available — TODO confirm
net = UNet(
    dimensions=3,
    in_channels=1,
    out_channels=1,
    channels=(16, 32, 64, 128, 256),
    strides=(2, 2, 2, 2),
    num_res_units=2,
)
net.to(device)
# 2 binary labels for gender classification: man and woman labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0]) train_files = [{ 'img': img, 'label': label } for img, label in zip(images[:10], labels[:10])] val_files = [{ 'img': img, 'label': label } for img, label in zip(images[-10:], labels[-10:])] # Define transforms for image train_transforms = transforms.Compose([ LoadNiftid(keys=['img']), AddChanneld(keys=['img']), Rescaled(keys=['img']), Resized(keys=['img'], output_spatial_shape=(96, 96, 96)), RandRotate90d(keys=['img'], prob=0.8, spatial_axes=[0, 2]) ]) val_transforms = transforms.Compose([ LoadNiftid(keys=['img']), AddChanneld(keys=['img']), Rescaled(keys=['img']), Resized(keys=['img'], output_spatial_shape=(96, 96, 96)) ]) # Define dataset, dataloader check_ds = monai.data.Dataset(data=train_files, transform=train_transforms) check_loader = DataLoader(check_ds, batch_size=2, num_workers=4,
# Script chunk: tail of an IXI T1 image-path list, gender labels, array-style
# train/val transforms, and a smoke check that one batch loads correctly.
# NOTE(review): original formatting was collapsed onto one line; line breaks
# restored here, code tokens unchanged. Chunk begins mid list literal — the
# opening bracket and earlier entries (labels has 20 entries, so presumably 11
# more paths) are outside this view.
    "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz"
]

# 2 binary labels for gender classification: man and woman
labels = np.array([0, 0, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 1, 0, 1, 0, 1, 0, 1, 0])

# Define transforms
train_transforms = transforms.Compose(
    [Rescale(), AddChannel(), Resize((96, 96, 96)), RandRotate90()])
val_transforms = transforms.Compose(
    [Rescale(), AddChannel(), Resize((96, 96, 96))])  # no augmentation for validation

# Define nifti dataset, dataloader
check_ds = NiftiDataset(image_files=images, labels=labels, transform=train_transforms)
# pin_memory only helps (and is only enabled) when CUDA is available
check_loader = DataLoader(check_ds, batch_size=2, num_workers=2,
                          pin_memory=torch.cuda.is_available())
# Pull one batch to verify the pipeline end-to-end
im, label = monai.utils.misc.first(check_loader)
print(type(im), im.shape, label)
# Script chunk: tail of a synthetic-data loop, 20/20 train/val split of dict
# records, dict-style transforms with positive/negative-balanced random crops,
# and a check DataLoader using a list-flattening collate function.
# NOTE(review): original formatting was collapsed onto one line; line breaks
# restored here, code tokens unchanged. Chunk begins inside a loop body — the
# loop header (and `i`, `im`, `seg`) is outside this view.
    n = nib.Nifti1Image(im, np.eye(4))  # identity affine: no spatial metadata
    nib.save(n, os.path.join(tempdir, 'img%i.nii.gz' % i))
    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))
# sorted() keeps image/seg lists aligned by filename index
images = sorted(glob(os.path.join(tempdir, 'img*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))
train_files = [{'img': img, 'seg': seg} for img, seg in zip(images[:20], segs[:20])]
val_files = [{'img': img, 'seg': seg} for img, seg in zip(images[-20:], segs[-20:])]

# Define transforms for image and segmentation
train_transforms = transforms.Compose([
    LoadNiftid(keys=['img', 'seg']),
    AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1),
    Rescaled(keys=['img', 'seg']),
    # pos=1/neg=1 balances crops centered on foreground vs background;
    # num_samples=4 yields 4 patches per loaded volume
    RandCropByPosNegLabeld(keys=['img', 'seg'], label_key='seg', size=[96, 96, 96],
                           pos=1, neg=1, num_samples=4),
    RandRotate90d(keys=['img', 'seg'], prob=0.8, spatial_axes=[0, 2])
])
val_transforms = transforms.Compose([
    LoadNiftid(keys=['img', 'seg']),
    AsChannelFirstd(keys=['img', 'seg'], channel_dim=-1),
    Rescaled(keys=['img', 'seg'])
])

# Define dataset, dataloader
check_ds = monai.data.Dataset(data=train_files, transform=train_transforms)
# use batch_size=2 to load images and use RandCropByPosNegLabeld to generate 2 x 4 images for network training
check_loader = DataLoader(check_ds, batch_size=2, num_workers=4,
                          collate_fn=list_data_collate,
                          pin_memory=torch.cuda.is_available())
check_data = monai.utils.misc.first(check_loader)
# Script chunk: generate 40 synthetic image/seg pairs, define separate
# train (random patch) and val (resize) transforms, and begin building a
# check DataLoader.
# NOTE(review): original formatting was collapsed onto one line; line breaks
# restored here, code tokens unchanged. Chunk begins mid statement —
# presumably the argument of a `print(` opened outside this view — and ends
# mid `DataLoader(...)` call.
    'generating synthetic data to {} (this may take a while)'.format(tempdir))
for i in range(40):
    im, seg = create_test_image_3d(128, 128, 128, num_seg_classes=1)
    n = nib.Nifti1Image(im, np.eye(4))  # identity affine: no spatial metadata
    nib.save(n, os.path.join(tempdir, 'im%i.nii.gz' % i))
    n = nib.Nifti1Image(seg, np.eye(4))
    nib.save(n, os.path.join(tempdir, 'seg%i.nii.gz' % i))
# sorted() keeps image/seg lists aligned by filename index
images = sorted(glob(os.path.join(tempdir, 'im*.nii.gz')))
segs = sorted(glob(os.path.join(tempdir, 'seg*.nii.gz')))

# Define transforms for image and segmentation
train_imtrans = transforms.Compose(
    [Rescale(), AddChannel(), UniformRandomPatch((96, 96, 96))])
train_segtrans = transforms.Compose(
    [AddChannel(), UniformRandomPatch((96, 96, 96))])  # no Rescale on labels
val_imtrans = transforms.Compose(
    [Rescale(), AddChannel(), Resize((96, 96, 96))])
val_segtrans = transforms.Compose([AddChannel(), Resize((96, 96, 96))])

# Define nifti dataset, dataloader
check_ds = NiftiDataset(images, segs, transform=train_imtrans,
                        seg_transform=train_segtrans)
check_loader = DataLoader(check_ds, batch_size=10, num_workers=2,
                          # (call continues beyond this chunk)
# Script chunk: tail of an IXI T1 image-path list, gender labels, array-style
# validation transforms/dataset, a DenseNet121 classifier, and an Accuracy
# metric for an (ignite-style) evaluator engine.
# NOTE(review): original formatting was collapsed onto one line; line breaks
# restored here, code tokens unchanged. Chunk begins mid list literal — the
# opening bracket and any earlier entries are outside this view.
    "/workspace/data/medical/ixi/IXI-T1/IXI607-Guys-1097-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI175-HH-1570-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI385-HH-2078-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI344-Guys-0905-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI409-Guys-0960-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI584-Guys-1129-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI253-HH-1694-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI092-HH-1436-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI574-IOP-1156-T1.nii.gz",
    "/workspace/data/medical/ixi/IXI-T1/IXI585-Guys-1130-T1.nii.gz"
]

# 2 binary labels for gender classification: man and woman
labels = np.array([0, 0, 1, 0, 1, 0, 1, 0, 1, 0])

# Define transforms for image
val_transforms = transforms.Compose(
    [Rescale(), AddChannel(), Resize((96, 96, 96))])

# Define nifti dataset
val_ds = NiftiDataset(image_files=images, labels=labels,
                      transform=val_transforms, image_only=False)

# Create DenseNet121
net = monai.networks.nets.densenet3d.densenet121(
    in_channels=1,
    out_channels=2,  # two output logits for the binary gender classes
)
device = torch.device("cuda:0")  # assumes a CUDA device is available — TODO confirm

metric_name = 'Accuracy'
# add evaluation metric to the evaluator engine
val_metrics = {metric_name: Accuracy()}