Example #1
    def test_no_rotation(self):
        transform = tio.RandomAffine(
            scales=(1, 1),
            degrees=(0, 0),
            default_pad_value=0,
            center='image',
        )
        transformed = transform(self.sample_subject)
        self.assertTensorAlmostEqual(
            self.sample_subject.t1.data,
            transformed.t1.data,
        )

        transform = tio.RandomAffine(
            scales=(1, 1),
            degrees=(180, 180),
            default_pad_value=0,
            center='image',
        )
        transformed = transform(self.sample_subject)
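        # Applying the fixed 180-degree transform twice should recover the original image.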
        transformed = transform(transformed)
        self.assertTensorAlmostEqual(
            self.sample_subject.t1.data,
            transformed.t1.data,
        )
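Fixing both endpoints of each parameter range makes the "random" transform deterministic, which is what lets these assertions expect exact recovery. A minimal standalone sketch of the same idea (synthetic tensor; the tolerance is an assumption):

import torch
import torchio as tio

subject = tio.Subject(t1=tio.ScalarImage(tensor=torch.rand(1, 16, 16, 16)))
identity = tio.RandomAffine(scales=(1, 1), degrees=(0, 0))  # fixed ranges: a no-op affine
output = identity(subject)
assert torch.allclose(subject.t1.data, output.t1.data, atol=1e-4)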
Example #2
 def test_different_spaces(self):
     t1 = self.sample_subject.t1
     label = tio.Resample(2)(self.sample_subject.label)
     new_subject = tio.Subject(t1=t1, label=label)
     with self.assertRaises(RuntimeError):
         tio.RandomAffine()(new_subject)
     tio.RandomAffine(check_shape=False)(new_subject)
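For context, the RuntimeError comes from TorchIO's consistency check on the shapes and spatial metadata of the images in a subject. A sketch of how such a mismatch arises (synthetic data):

import torch
import torchio as tio

t1 = tio.ScalarImage(tensor=torch.rand(1, 16, 16, 16))  # 16^3 voxels at 1 mm
t2 = tio.Resample(2)(tio.ScalarImage(tensor=torch.rand(1, 16, 16, 16)))  # 8^3 voxels at 2 mm
subject = tio.Subject(t1=t1, t2=t2)
# tio.RandomAffine()(subject) would raise RuntimeError; check_shape=False skips the check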
Example #3
 def test_transforms(self):
     landmarks_dict = dict(
         t1=np.linspace(0, 100, 13),
         t2=np.linspace(0, 100, 13),
     )
     elastic = torchio.RandomElasticDeformation(max_displacement=1)
     transforms = (
         torchio.CropOrPad((9, 21, 30)),
         torchio.ToCanonical(),
         torchio.Resample((1, 1.1, 1.25)),
         torchio.RandomFlip(axes=(0, 1, 2), flip_probability=1),
         torchio.RandomMotion(),
         torchio.RandomGhosting(axes=(0, 1, 2)),
         torchio.RandomSpike(),
         torchio.RandomNoise(),
         torchio.RandomBlur(),
         torchio.RandomSwap(patch_size=2, num_iterations=5),
         torchio.Lambda(lambda x: 2 * x, types_to_apply=torchio.INTENSITY),
         torchio.RandomBiasField(),
         torchio.RescaleIntensity((0, 1)),
         torchio.ZNormalization(masking_method='label'),
         torchio.HistogramStandardization(landmarks_dict=landmarks_dict),
         elastic,
         torchio.RandomAffine(),
         torchio.OneOf({
             torchio.RandomAffine(): 3,
             elastic: 1
         }),
         torchio.Pad((1, 2, 3, 0, 5, 6), padding_mode='constant', fill=3),
         torchio.Crop((3, 2, 8, 0, 1, 4)),
     )
     transform = torchio.Compose(transforms)
     transform(self.sample)
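Note that the integer values in the torchio.OneOf dictionary are relative weights, not probabilities: they are normalized internally, so the mapping above applies the affine about 75% of the time and the elastic deformation about 25% of the time. An equivalent spelling (sketch):

one_of = torchio.OneOf({torchio.RandomAffine(): 0.75, elastic: 0.25})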
Example #4
    def test_parse_scales(self):
        def do_assert(transform):
            self.assertEqual(transform.scales, 3 * (0.9, 1.1))

        do_assert(tio.RandomAffine(scales=0.1))
        do_assert(tio.RandomAffine(scales=(0.9, 1.1)))
        do_assert(tio.RandomAffine(scales=3 * (0.1, )))
        do_assert(tio.RandomAffine(scales=3 * [0.9, 1.1]))
Example #5
    def test_parse_degrees(self):
        def do_assert(transform):
            self.assertEqual(transform.degrees, 3 * (-10, 10))

        do_assert(tio.RandomAffine(degrees=10))
        do_assert(tio.RandomAffine(degrees=(-10, 10)))
        do_assert(tio.RandomAffine(degrees=3 * (10, )))
        do_assert(tio.RandomAffine(degrees=3 * [-10, 10]))
Example #6
    def test_parse_translation(self):
        def do_assert(transform):
            self.assertEqual(transform.translation, 3 * (-10, 10))

        do_assert(tio.RandomAffine(translation=10))
        do_assert(tio.RandomAffine(translation=(-10, 10)))
        do_assert(tio.RandomAffine(translation=3 * (10, )))
        do_assert(tio.RandomAffine(translation=3 * [-10, 10]))
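The three parsing tests above document one rule: a scalar x expands to a symmetric range ((1 - x, 1 + x) for scales, (-x, x) for degrees and translation), a single range is replicated across the three axes, and six values are taken as per-axis bounds. A quick interactive check (sketch):

import torchio as tio

assert tio.RandomAffine(scales=0.1).scales == 3 * (0.9, 1.1)
assert tio.RandomAffine(degrees=10).degrees == 3 * (-10, 10)
assert tio.RandomAffine(translation=10).translation == 3 * (-10, 10)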
Example #7
 def get_transform(self, channels, is_3d=True, labels=True):
     landmarks_dict = {
         channel: np.linspace(0, 100, 13)
         for channel in channels
     }
     disp = 1 if is_3d else (1, 1, 0.01)
     elastic = tio.RandomElasticDeformation(max_displacement=disp)
     cp_args = (9, 21, 30) if is_3d else (21, 30, 1)
     resize_args = (10, 20, 30) if is_3d else (10, 20, 1)
     flip_axes = axes_downsample = (0, 1, 2) if is_3d else (0, 1)
     swap_patch = (2, 3, 4) if is_3d else (3, 4, 1)
     pad_args = (1, 2, 3, 0, 5, 6) if is_3d else (0, 0, 3, 0, 5, 6)
     crop_args = (3, 2, 8, 0, 1, 4) if is_3d else (0, 0, 8, 0, 1, 4)
     remapping = {1: 2, 2: 1, 3: 20, 4: 25}
     transforms = [
         tio.CropOrPad(cp_args),
         tio.EnsureShapeMultiple(2, method='crop'),
         tio.Resize(resize_args),
         tio.ToCanonical(),
         tio.RandomAnisotropy(downsampling=(1.75, 2), axes=axes_downsample),
         tio.CopyAffine(channels[0]),
         tio.Resample((1, 1.1, 1.25)),
         tio.RandomFlip(axes=flip_axes, flip_probability=1),
         tio.RandomMotion(),
         tio.RandomGhosting(axes=(0, 1, 2)),
         tio.RandomSpike(),
         tio.RandomNoise(),
         tio.RandomBlur(),
         tio.RandomSwap(patch_size=swap_patch, num_iterations=5),
         tio.Lambda(lambda x: 2 * x, types_to_apply=tio.INTENSITY),
         tio.RandomBiasField(),
         tio.RescaleIntensity(out_min_max=(0, 1)),
         tio.ZNormalization(),
         tio.HistogramStandardization(landmarks_dict),
         elastic,
         tio.RandomAffine(),
         tio.OneOf({
             tio.RandomAffine(): 3,
             elastic: 1,
         }),
         tio.RemapLabels(remapping=remapping, masking_method='Left'),
         tio.RemoveLabels([1, 3]),
         tio.SequentialLabels(),
         tio.Pad(pad_args, padding_mode=3),
         tio.Crop(crop_args),
     ]
     if labels:
         transforms.append(tio.RandomLabelsToImage(label_key='label'))
     return tio.Compose(transforms)
Example #8
def byol_aug(filename):
    """
        BYOL minimizes the distance between representations of each sample and a transformation of that sample.
        Examples of transformations include: translation, rotation, blurring, color inversion, color jitter, gaussian noise.

        Return an augmented dataset that consisted the above mentioned transformation. Will be used in the training.
        """
    image = tio.ScalarImage(filename)
    get_foreground = tio.ZNormalization.mean
    training_transform = tio.Compose([
        tio.CropOrPad((180, 220, 170)),  # zero mean, unit variance of foreground
        tio.ZNormalization(
            masking_method=get_foreground),
        tio.RandomBlur(p=0.25),  # blur 25% of times
        tio.RandomNoise(p=0.25),  # Gaussian noise 25% of times
        tio.OneOf({  # either
            tio.RandomAffine(): 0.8,  # random affine
            tio.RandomElasticDeformation(): 0.2,  # or random elastic deformation
        }, p=0.8),  # applied to 80% of images
        tio.RandomBiasField(p=0.3),  # magnetic field inhomogeneity 30% of times
        tio.OneOf({  # either
            tio.RandomMotion(): 1,  # random motion artifact
            tio.RandomSpike(): 2,  # or spikes
            tio.RandomGhosting(): 2,  # or ghosts
        }, p=0.5),  # applied to 50% of images
    ])

    tfs_image = training_transform(image)
    return tfs_image
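A possible call site (the file paths are hypothetical):

augmented = byol_aug('sub-01_T1w.nii.gz')
augmented.save('sub-01_T1w_aug.nii.gz')  # a tio.ScalarImage can be written back to disk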
Example #9
def get_train_transform(landmarks_path, resection_params=None):
    spatial_transform = tio.Compose((
        tio.OneOf({
            tio.RandomAffine(): 0.9,
            tio.RandomElasticDeformation(): 0.1,
        }),
        tio.RandomFlip(),
    ))
    resolution_transform = tio.OneOf(
        (
            tio.RandomAnisotropy(),
            tio.RandomBlur(),
        ),
        p=0.75,
    )
    transforms = []
    if resection_params is not None:
        transforms.append(get_simulation_transform(resection_params))
    if landmarks_path is not None:
        transforms.append(
            tio.HistogramStandardization({'image': landmarks_path}))
    transforms.extend([
        # tio.RandomGamma(p=0.2),
        resolution_transform,
        tio.RandomGhosting(p=0.2),
        tio.RandomSpike(p=0.2),
        tio.RandomMotion(p=0.2),
        tio.RandomBiasField(p=0.5),
        tio.ZNormalization(masking_method=tio.ZNormalization.mean),
        tio.RandomNoise(p=0.75),  # always after ZNorm and after blur!
        spatial_transform,
        get_tight_crop(),
    ])
    return tio.Compose(transforms)
Example #10
    def __getitem__(self, idx):
        # Generate one image/mask sample.
        # ScalarImage expects a 4D tensor, so add a singleton channel dimension.
        image = self.CT_partition[idx].unsqueeze(0)
        mask = self.mask_partition[idx].unsqueeze(0)
        if self.augment:
            aug = tio.Compose([
                tio.OneOf({
                    tio.RandomAffine(
                        scales=(0.9, 1.1, 0.9, 1.1, 1, 1),
                        degrees=(5.0, 5.0, 0),
                    ): 0.35,
                    tio.RandomElasticDeformation(
                        num_control_points=9,
                        max_displacement=(0.1, 0.1, 0.1),
                        locked_borders=2,
                        image_interpolation='linear',
                    ): 0.35,
                    tio.RandomFlip(axes=(2,)): 0.3,
                }),
            ])
            # Wrap the mask as a LabelMap so spatial transforms use
            # nearest-neighbor interpolation and keep its values integer.
            subject = tio.Subject(ct=tio.ScalarImage(tensor=image),
                                  mask=tio.LabelMap(tensor=mask))
            output = aug(subject)
            augmented_image = output['ct']
            augmented_mask = output['mask']
            image = augmented_image.data
            mask = augmented_mask.data
        # Note that the mask is integer.
        mask = mask.type(torch.IntTensor)
        image = image.type(torch.FloatTensor)

        # The tensor we pass into ScalarImage is C x W x H x D, so permute axes
        # to C x D x H x W. At the end we have N x 1 x D x H x W.
        image = image.permute(0, 3, 2, 1)
        mask = mask.permute(0, 3, 2, 1)

        # Return the image and mask pair of tensors.
        return image, mask
Example #11
    def test_different_interpolation(self):
        def model_probs(subject):
            subject = copy.deepcopy(subject)
            subject.im.set_data(torch.rand_like(subject.im.data))
            return subject

        def model_label(subject):
            subject = model_probs(subject)
            subject.im.set_data(torch.bernoulli(subject.im.data))
            return subject

        transform = tio.RandomAffine(image_interpolation='bspline')
        subject = copy.deepcopy(self.sample_subject)
        tensor = (torch.rand(1, 20, 20, 20) > 0.5).float()  # 0s and 1s
        subject = tio.Subject(im=tio.ScalarImage(tensor=tensor))
        transformed = transform(subject)
        assert transformed.im.data.min() < 0
        assert transformed.im.data.max() > 1

        subject_probs = model_probs(transformed)
        transformed_back = subject_probs.apply_inverse_transform()
        assert transformed_back.im.data.min() < 0
        assert transformed_back.im.data.max() > 1
        transformed_back_linear = subject_probs.apply_inverse_transform(
            image_interpolation='linear',
        )
        assert transformed_back_linear.im.data.min() >= 0
        assert transformed_back_linear.im.data.max() <= 1

        subject_label = model_label(transformed)
        transformed_back = subject_label.apply_inverse_transform()
        assert transformed_back.im.data.min() < 0
        assert transformed_back.im.data.max() > 1
        transformed_back_linear = subject_label.apply_inverse_transform(
            image_interpolation='nearest',
        )
        assert transformed_back_linear.im.data.unique().tolist() == [0, 1]
Example #12
 def test_default_value_label_map(self):
     # From https://github.com/fepegar/torchio/issues/626
     a = torch.tensor([[1, 0, 0], [0, 1, 0], [0, 0, 1]]).reshape(1, 3, 3, 1)
     image = tio.LabelMap(tensor=a)
     aff = tio.RandomAffine(translation=(0, 1, 1), default_pad_value='otsu')
     transformed = aff(image)
     assert all(n in (0, 1) for n in transformed.data.flatten())
Example #13
 def test_rotation_origin(self):
     # Rotation about the far-away world origin: all content leaves the field of view, so the image should be empty
     transform = tio.RandomAffine(
         degrees=(90, 90),
         default_pad_value=0,
         center='origin',
     )
     transformed = transform(self.sample_subject)
     total = transformed.t1.data.sum()
     self.assertEqual(total, 0)
Example #14
 def test_rotation_image(self):
     # Rotation around image center
     transform = tio.RandomAffine(
         degrees=(90, 90),
         default_pad_value=0,
         center='image',
     )
     transformed = transform(self.sample_subject)
     total = transformed.t1.data.sum()
     self.assertNotEqual(total, 0)
Example #15
 def get_transform(self, channels, is_3d=True, labels=True):
     landmarks_dict = {
         channel: np.linspace(0, 100, 13)
         for channel in channels
     }
     disp = 1 if is_3d else (1, 1, 0.01)
     elastic = torchio.RandomElasticDeformation(max_displacement=disp)
     cp_args = (9, 21, 30) if is_3d else (21, 30, 1)
     flip_axes = axes_downsample = (0, 1, 2) if is_3d else (0, 1)
     swap_patch = (2, 3, 4) if is_3d else (3, 4, 1)
     pad_args = (1, 2, 3, 0, 5, 6) if is_3d else (0, 0, 3, 0, 5, 6)
     crop_args = (3, 2, 8, 0, 1, 4) if is_3d else (0, 0, 8, 0, 1, 4)
     transforms = [
         torchio.CropOrPad(cp_args),
         torchio.ToCanonical(),
         torchio.RandomDownsample(downsampling=(1.75, 2),
                                  axes=axes_downsample),
         torchio.Resample((1, 1.1, 1.25)),
         torchio.RandomFlip(axes=flip_axes, flip_probability=1),
         torchio.RandomMotion(),
         torchio.RandomGhosting(axes=(0, 1, 2)),
         torchio.RandomSpike(),
         torchio.RandomNoise(),
         torchio.RandomBlur(),
         torchio.RandomSwap(patch_size=swap_patch, num_iterations=5),
         torchio.Lambda(lambda x: 2 * x, types_to_apply=torchio.INTENSITY),
         torchio.RandomBiasField(),
         torchio.RescaleIntensity((0, 1)),
         torchio.ZNormalization(),
         torchio.HistogramStandardization(landmarks_dict),
         elastic,
         torchio.RandomAffine(),
         torchio.OneOf({
             torchio.RandomAffine(): 3,
             elastic: 1,
         }),
         torchio.Pad(pad_args, padding_mode=3),
         torchio.Crop(crop_args),
     ]
     if labels:
         transforms.append(torchio.RandomLabelsToImage(label_key='label'))
     return torchio.Compose(transforms)
Example #16
    def __init__(self, use_tio_flip=True):

        self.flip = tio.RandomFlip(p=0.5) if use_tio_flip else Flip3D()
        self.affine = tio.RandomAffine(p=0.5,
                                       scales=0.1,
                                       degrees=5,
                                       translation=0,
                                       image_interpolation="nearest")
        self.random_noise = tio.RandomNoise(
            p=0.5, std=(0, 0.1), include=["x"])  # don't apply noise to mask
        self.transform = tio.Compose(
            [self.flip, self.affine, self.random_noise], include=["x", "y"])
Example #17
def get_transform(augmentation, landmarks_path):
    import datasets
    import torchio as tio
    if augmentation:
        return datasets.get_train_transform(landmarks_path)
    else:
        preprocess = datasets.get_test_transform(landmarks_path)
        augment = tio.Compose((tio.RandomFlip(),
                               tio.OneOf({
                                   tio.RandomAffine(): 0.8,
                                   tio.RandomElasticDeformation(): 0.2,
                               })))
        return tio.Compose((preprocess, augment))
Example #18
def get_dataset(
    input_path,
    tta_iterations=0,
    interpolation='bspline',
    tolerance=0.1,
    mni_transform_path=None,
):
    if mni_transform_path is None:
        image = tio.ScalarImage(input_path)
    else:
        affine = tio.io.read_matrix(mni_transform_path)
        image = tio.ScalarImage(input_path, **{TO_MNI: affine})
    subject = tio.Subject({IMAGE_NAME: image})
    landmarks = np.array([
        0., 0.31331614, 0.61505419, 0.76732501, 0.98887953, 1.71169384,
        3.21741126, 13.06931455, 32.70817796, 40.87807389, 47.83508873,
        63.4408591, 100.
    ])
    hist_std = tio.HistogramStandardization({IMAGE_NAME: landmarks})
    preprocess_transforms = [
        tio.ToCanonical(),
        hist_std,
        tio.ZNormalization(masking_method=tio.ZNormalization.mean),
    ]
    zooms = nib.load(input_path).header.get_zooms()
    pixdim = np.array(zooms)
    diff_to_1_iso = np.abs(pixdim - 1)
    if np.any(diff_to_1_iso > tolerance) or mni_transform_path is not None:
        kwargs = {'image_interpolation': interpolation}
        if mni_transform_path is not None:
            kwargs['pre_affine_name'] = TO_MNI
            kwargs['target'] = tio.datasets.Colin27().t1.path
        resample_transform = tio.Resample(**kwargs)
        preprocess_transforms.append(resample_transform)
    preprocess_transforms.append(tio.EnsureShapeMultiple(8, method='crop'))
    preprocess_transform = tio.Compose(preprocess_transforms)
    no_aug_dataset = tio.SubjectsDataset([subject],
                                         transform=preprocess_transform)

    aug_subjects = tta_iterations * [subject]
    if not aug_subjects:
        return no_aug_dataset
    augment_transform = tio.Compose((
        preprocess_transform,
        tio.RandomFlip(),
        tio.RandomAffine(image_interpolation=interpolation),
    ))
    aug_dataset = tio.SubjectsDataset(aug_subjects,
                                      transform=augment_transform)
    dataset = torch.utils.data.ConcatDataset((no_aug_dataset, aug_dataset))
    return dataset
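A possible call site (the input path is hypothetical):

dataset = get_dataset('t1.nii.gz', tta_iterations=4)
loader = torch.utils.data.DataLoader(dataset, batch_size=1)
# one preprocessed subject plus four randomly augmented copies, for test-time augmentation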
Example #19
def main(hdf_file, plot_dir):
    os.makedirs(plot_dir, exist_ok=True)

    # setup the datasource
    extractor = extr.DataExtractor(categories=(defs.KEY_IMAGES, defs.KEY_LABELS))
    indexing_strategy = extr.SliceIndexing()
    dataset = extr.PymiaDatasource(hdf_file, indexing_strategy, extractor)

    seed = 1
    np.random.seed(seed)
    sample_idx = 55

    # set up transformations without augmentation
    transforms_augmentation = []
    transforms_before_augmentation = [tfm.Permute(permutation=(2, 0, 1)), ]  # to have the channel-dimension first
    transforms_after_augmentation = [tfm.Squeeze(entries=(defs.KEY_LABELS,)), ]  # get rid of the channel-dimension for the labels
    train_transforms = tfm.ComposeTransform(transforms_before_augmentation + transforms_augmentation + transforms_after_augmentation)
    dataset.set_transform(train_transforms)
    sample = dataset[sample_idx]
    plot_sample(plot_dir, 'none', sample)

    # augmentation with pymia
    transforms_augmentation = [augm.RandomRotation90(axes=(-2, -1)), augm.RandomMirror()]
    train_transforms = tfm.ComposeTransform(
        transforms_before_augmentation + transforms_augmentation + transforms_after_augmentation)
    dataset.set_transform(train_transforms)
    sample = dataset[sample_idx]
    plot_sample(plot_dir, 'pymia', sample)

    # augmentation with batchgenerators
    transforms_augmentation = [BatchgeneratorsTransform([
        bg_tfm.spatial_transforms.MirrorTransform(axes=(0, 1), data_key=defs.KEY_IMAGES, label_key=defs.KEY_LABELS),
        bg_tfm.noise_transforms.GaussianBlurTransform(blur_sigma=(0.2, 1.0), data_key=defs.KEY_IMAGES, label_key=defs.KEY_LABELS),
    ])]
    train_transforms = tfm.ComposeTransform(
        transforms_before_augmentation + transforms_augmentation + transforms_after_augmentation)
    dataset.set_transform(train_transforms)
    sample = dataset[sample_idx]
    plot_sample(plot_dir, 'batchgenerators', sample)

    # augmentation with TorchIO
    transforms_augmentation = [TorchIOTransform(
        [tio.RandomFlip(axes=('LR'), flip_probability=1.0, keys=(defs.KEY_IMAGES, defs.KEY_LABELS), seed=seed),
         tio.RandomAffine(scales=(0.9, 1.2), degrees=(10), isotropic=False, default_pad_value='otsu',
                          image_interpolation='NEAREST', keys=(defs.KEY_IMAGES, defs.KEY_LABELS), seed=seed),
         ])]
    train_transforms = tfm.ComposeTransform(
        transforms_before_augmentation + transforms_augmentation + transforms_after_augmentation)
    dataset.set_transform(train_transforms)
    sample = dataset[sample_idx]
    plot_sample(plot_dir, 'torchio', sample)
Example #20
 def test_keep_original(self):
     subject = copy.deepcopy(self.sample_subject)
     old, new = 't1', 't1_original'
     transformed = tio.RandomAffine(keep={old: new})(subject)
     assert old in transformed
     assert new in transformed
     self.assertTensorEqual(
         transformed[new].data,
         subject[old].data,
     )
     self.assertTensorNotEqual(
         transformed[new].data,
         transformed[old].data,
     )
Example #21
    def _ml_logic(self, X, train=True):
        # Transformations: random scaling, translation, and rotation
        x = X[0].squeeze()
        batch_size = x.size(0)
        tr = tio.RandomAffine(
            scales=(0.95, 1.05),
            translation=(-5, 5),
            degrees=(-5, 5),
            isotropic=True,
            image_interpolation='nearest',
        )
        x1 = tr(x)  # two independently augmented views of the same input
        x2 = tr(x)

        x1 = x1.unsqueeze(1)
        x2 = x2.unsqueeze(1)

        x1 = x1.to(self.device)
        x2 = x2.to(self.device)
        
        loss_dict = self._ml_logic_per_pair(x1, x2)        
        return loss_dict
Example #22
 def test_batch_history(self):
     # https://github.com/fepegar/torchio/discussions/743
     subject = self.sample_subject
     transform = tio.Compose([
         tio.RandomAffine(),
         tio.CropOrPad(5),
         tio.OneHot(),
     ])
     dataset = tio.SubjectsDataset([subject], transform=transform)
     loader = torch.utils.data.DataLoader(
         dataset,
         collate_fn=tio.utils.history_collate
     )
     batch = tio.utils.get_first_item(loader)
     transformed: tio.Subject = tio.utils.get_subjects_from_batch(batch)[0]
     inverse = transformed.apply_inverse_transform()
     images1 = subject.get_images(intensity_only=False)
     images2 = inverse.get_images(intensity_only=False)
     for image1, image2 in zip(images1, images2):
         assert image1.shape == image2.shape
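The same round trip outside a test would read as follows (names as in the example; sketch):

for batch in loader:
    for subject in tio.utils.get_subjects_from_batch(batch):
        restored = subject.apply_inverse_transform()  # undoes OneHot, CropOrPad and RandomAffine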
Example #23
import os
from typing import Tuple

import numpy as np
import pandas as pd
import torch
import torchio as tio
from torch.utils.data import DataLoader
from torch.utils.data import Dataset
from torchio import Image
from torchvision.transforms import Compose

transforms_dict = {
    tio.RandomAffine(): 0.55,
    tio.RandomElasticDeformation(): 0.25
}

transforms_dict2 = {tio.RandomBlur(): 0.25, tio.RandomMotion(): 0.25}
# for augmentation
transform_flip = tio.OneOf(transforms_dict)


class ADNIDataloaderAllData(Dataset):
    def __init__(self, df, root_dir, transform):
        self.df = df
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.df)
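The snippet ends before __getitem__; a minimal sketch of what it could look like (the 'filename' and 'label' column names are assumptions):

    def __getitem__(self, idx):
        row = self.df.iloc[idx]
        path = os.path.join(self.root_dir, row['filename'])
        image = tio.ScalarImage(path)
        if self.transform is not None:
            image = self.transform(image)
        return image.data, row['label']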
Example #24
    def __init__(self):
        # ID and Name
        self.id = "506b95"
        self.experiment_name = "ma_crosstr_v{}".format(self.id)
        self.debug = False

        # System
        self.checkpointsBasePath = "./checkpoints/"
        self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
        self.labelpath = '/local/DEEPLEARNING/MULTI_ATLAS/MULTI_ATLAS/nnUNet_preprocessed/Task017_BCV/nnUNetData_plans_v2.1_stage1/'
        self.datapath = self.labelpath


        self.input_shape = [512,512,256]
        # self.filters = [16, 32, 64, 128]
        # self.filters = [64, 192, 448, 704]
        # self.filters = [16, 32, 64, 128, 256]
        self.filters = [32, 64, 128, 256, 512]
        d_model = self.filters[-1]

        # skip_idx = [1,3,5,6]
        # self.patch_size=(128,128,128)
        self.patch_size=(192,192,48)
        # n_layers=6
        self.clip = False
        self.patched = True
        # GPU
        self.gpu = '1'
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu
        # torch.backends.cudnn.benchmark = False

        # Model
        number_of_cross_heads = 8
        number_of_self_heads = 8
        number_of_self_layer = 6

        self.n_classes = 14
        self.net = CrossPatch3DTr(filters=self.filters,patch_size=[1,1,1],
                                d_model=d_model,n_classes=self.n_classes,
                                n_cheads=1,n_sheads=number_of_self_heads,
                                bn=True,up_mode='deconv',
                                n_strans=number_of_self_layer, do_cross=True,
                                enc_grad=False)
        self.net.inference_apply_nonlin = softmax_helper
        self.n_parameters = count_parameters(self.net)
        print("N PARAMS : {}".format(self.n_parameters))

        # self.model_path = './checkpoints/models/deep_crosstr.pth'
        self.model_path = './checkpoints/models/506/modlast.pt'

        max_displacement = 5, 5, 5
        deg = (0, 5, 10)
        scales = 0
        self.transform = tio.Compose([
            tio.RandomElasticDeformation(max_displacement=max_displacement),
            tio.RandomAffine(scales=scales, degrees=deg),
        ])


        # Training
        self.start_epoch = 1000
        self.epoch = 2000

        # self.loss = torch.nn.CrossEntropyLoss()

        self.loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})
        self.ds_scales = ((1, 1, 1), (0.5, 0.5, 0.5), (0.25, 0.25, 0.25), (0.125,0.125,0.125))
        ################# Here we wrap the loss for deep supervision ############
        # we need to know the number of outputs of the network
        net_numpool = 4

        # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
        # this gives higher resolution outputs more weight in the loss
        weights = np.array([1 / (2 ** i) for i in range(net_numpool)])

        # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
        mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
        weights[~mask] = 0
        weights = weights / weights.sum()
        self.ds_loss_weights = weights
        # now wrap the loss
        self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
        ################# END ###################

        self.batchsize = 2
        self.lr_rate = 1e-3

        self.load_lr = False
        self.load_model()
        self.net.reinit_decoder()
        self.net.reinit_crostrans(dim=d_model, depth=1, heads=number_of_cross_heads, dim_head=1024, mlp_dim=1024, dropout = 0.1)
        self.optimizer = optim.SGD(self.net.parameters(), lr = self.lr_rate, weight_decay=3e-5, momentum=0.99, nesterov=True)
        self.optimizer.zero_grad()
        self.validate_every_k_epochs = 1
        # self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
        self.lr_scheduler = get_scheduler(self.optimizer, "poly", self.lr_rate, max_epochs=self.epoch)

        # Other
        self.classes_name = ['background','spleen','right kidney','left kidney','gallbladder','esophagus','liver','stomach','aorta','inferior vena cava','portal vein and splenic vein','pancreas','right adrenal gland','left adrenal gland']
Example #25
    def __init__(self):
        # ID and Name
        self.id = 206
        self.experiment_name = "tcia_revunet_03_d3_e1000_CE_adam_wd0_da_f1_lr5_gr1_id{}".format(self.id)
        self.debug = False

        # System
        self.checkpointsBasePath = "./checkpoints/"
        self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
        self.labelpath = "/local/SSD_DEEPLEARNING/PANCREAS_MULTI_RES/160_160_64/"
        self.datapath = self.labelpath
        self.im_dim = (160,160,64)

        
        # GPU
        self.gpu = '2'
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu

        # Model
        self.channels = [64, 128, 256, 512, 1024]
        self.channels = [int(x) for x in self.channels]
        self.n_classes = 2
        self.n_groups = 1
        self.net = RevUnet3D(1, self.channels, self.n_classes , depth = 3 ,interpolation = None, groups = self.n_groups)#(512,512,198))
        # self.net = RevUnet3D(1, self.channels, 12, interpolation = (256,256,99))
        self.n_parameters = count_parameters(self.net)
        print("N PARAMS : {}".format(self.n_parameters))

        self.model_path = './checkpoints/models/revunet_tcia_160_160_64_d3_gr1.pth'
        self.load_model()
        self.split = 1

        max_displacement = 5, 5, 5
        deg = (0, 5, 10)
        scales = 0
        self.transform = tio.Compose([
            tio.RandomElasticDeformation(max_displacement=max_displacement),
            tio.RandomAffine(scales=scales, degrees=deg),
        ])


        # Training
        self.train_original_classes = False
        self.start_epoch = 0
        self.epoch = 1000

        self.loss = torch.nn.CrossEntropyLoss()
        # self.loss =  SoftDiceLoss(self.n_classes)

        self.hot = 0
        self.batchsize = 2
        self.lr_rate = 5e-5 #5e-4 # 1e-2 #5e-5
        self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate, weight_decay=0)
        self.optimizer.zero_grad()
        self.validate_every_k_epochs = 1
        # Scheduler list : [lambdarule_1]
        # self.lr_scheduler = get_scheduler(self.optimizer, "multistep")
        self.lr_scheduler = get_scheduler(self.optimizer, "constant", self.lr_rate)
        # self.lr_scheduler = get_scheduler(self.optimizer, "lambdarule_1", self.lr_rate)

        # Other
        self.classes_name = ['background','pancreas']
        self.look_small = False
Example #26
        )
    if in_channels == 3:
        subject = tio.Subject(
            hr=tio.ScalarImage(t2_file),
            lr_1=tio.ScalarImage(t2_file),
            lr_2=tio.ScalarImage(t2_file),
            lr_3=tio.ScalarImage(t2_file),
        )

    subjects.append(subject)

print('DHCP Dataset size:', len(subjects), 'subjects')

# DATA AUGMENTATION
normalization = tio.ZNormalization()
spatial = tio.RandomAffine(scales=0.1, degrees=10, translation=0, p=0.75)
flip = tio.RandomFlip(axes=('LR', ), flip_probability=0.5)

tocanonical = tio.ToCanonical()

b1 = tio.Blur(std=(0.001, 0.001, 1), include='lr_1')  # blur
d1 = tio.Resample((0.8, 0.8, 2), include='lr_1')  # downsampling
u1 = tio.Resample(target='hr', include='lr_1')  # upsampling

if in_channels == 3:
    b2 = tio.Blur(std=(0.001, 1, 0.001), include='lr_2')  # blur
    d2 = tio.Resample((0.8, 2, 0.8), include='lr_2')  # downsampling
    u2 = tio.Resample(target='hr', include='lr_2')  # upsampling

    b3 = tio.Blur(std=(1, 0.001, 0.001), include='lr_3')  # blur
    d3 = tio.Resample((2, 0.8, 0.8), include='lr_3')  # downsampling
Example #27
    def __init__(self):
        # ID and Name
        self.id = 601
        self.experiment_name = "ma_cotr_v{}".format(self.id)
        self.debug = False

        # System
        self.checkpointsBasePath = "./checkpoints/"
        self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
        # self.labelpath = "/local/DEEPLEARNING/MULTI_ATLAS/multi_atlas//512_512_256/"
        self.labelpath = '/local/DEEPLEARNING/VP_multiorgan_v2/'
        self.datapath = self.labelpath


        self.input_shape = [512,512,256]
        # filters = [4, 8, 16, 32]
        # skip_idx = [1,3,5,6]
        # self.patch_size=(128,128,128)
        self.patch_size=(192,192,48)
        # n_layers=6
        self.clip = True
        self.patched = True
        # GPU
        self.gpu = '0'
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu

        # Model
        self.n_classes = 8
        self.net = ResTranUnet(norm_cfg='IN', activation_cfg='LeakyReLU', img_size=self.patch_size, num_classes=self.n_classes, weight_std=False, deep_supervision=True)
        self.net.inference_apply_nonlin = softmax_helper
        self.n_parameters = count_parameters(self.net)
        print("N PARAMS : {}".format(self.n_parameters))

        self.model_path = './checkpoints/models/vp_cotr.pth'
        # self.model_path = './checkpoints/models/403/mod.pt'

        max_displacement = 5, 5, 5
        deg = (0, 5, 10)
        scales = 0
        self.transform = tio.Compose([
            tio.RandomElasticDeformation(max_displacement=max_displacement),
            tio.RandomAffine(scales=scales, degrees=deg),
        ])


        # Training
        self.start_epoch = 0
        self.epoch = 1000

        # self.loss = torch.nn.CrossEntropyLoss()

        self.loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})

        self.ds_scales = ((1, 1, 1), (0.5, 0.5, 1), (0.25, 0.25, 0.5))
        ################# Here we wrap the loss for deep supervision ############
        # we need to know the number of outputs of the network
        net_numpool = 4

        # we give each output a weight which decreases exponentially (division by 2) as the resolution decreases
        # this gives higher resolution outputs more weight in the loss
        weights = np.array([1 / (2 ** i) for i in range(net_numpool)])

        # we don't use the lowest 2 outputs. Normalize weights so that they sum to 1
        mask = np.array([True] + [True if i < net_numpool - 1 else False for i in range(1, net_numpool)])
        weights[~mask] = 0
        weights = weights / weights.sum()
        self.ds_loss_weights = weights
        # now wrap the loss
        self.loss = MultipleOutputLoss2(self.loss, self.ds_loss_weights)
        ################# END ###################


        self.batchsize = 2
        self.lr_rate = 2e-2
        # self.final_lr_rate = 1e-5
        # self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate)
        self.optimizer = optim.SGD(self.net.parameters(), lr = self.lr_rate, weight_decay=3e-5, momentum=0.99)

        self.optimizer.zero_grad()
        self.validate_every_k_epochs = 10
        # self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
        self.lr_scheduler = get_scheduler(self.optimizer, "poly", self.lr_rate, max_epochs=self.epoch)


        self.load_model()
        # Other
        self.classes_name = ["Background", "Liver","Gallbladder","Spleen","Left_Kidney","Right_Kidney","Pancreas","Stomach"]
    def __init__(self):
        # ID and Name
        self.id = 100
        self.experiment_name = "multi_atlas_unet_016_e1000_CE_adam_wd6_da_id{}".format(self.id)
        self.debug = False

        # System
        self.checkpointsBasePath = "./checkpoints/"
        self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
#        self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_512_512_198_res_1.0_1.0_1.0.hdf5"
        # self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_256_256_99_res_0.5_0.5.hdf5"
        self.labelpath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_80_80_32_res_0.16.hdf5"
        self.datapath = "/local/SSD_DEEPLEARNING/MULTI_ATLAS/multi_atlas/data_3D_size_80_80_32_res_0.16.hdf5"
        
        # GPU
        self.gpu = '1'
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu

        # Model
        self.channels = [64, 128, 256, 512, 1024]
        self.channels = [int(x) for x in self.channels]
        self.net = unet_3D(self.channels, n_classes=14, is_batchnorm=False, in_channels=1, interpolation = None)#1, self.channels, 12, interpolation = (512,512,198))
        # self.net = RevUnet3D(1, self.channels, 12, interpolation = (256,256,99))
        self.n_parameters = count_parameters(self.net)
        print("N PARAMS : {}".format(self.n_parameters))

        self.n_classes = 14
        max_displacement = 5, 5, 5
        deg = (0, 5, 10)
        scales = 0
        self.transform = tio.Compose([
            tio.RandomElasticDeformation(max_displacement=max_displacement),
            tio.RandomAffine(scales=scales, degrees=deg),
        ])


        # Training
        self.train_original_classes = False
        self.epoch = 1000
        # def loss(outputs, labels):
        #     return atlasUtils.atlasDiceLoss(outputs, labels, n_classe = self.n_classes)
        # self.loss = loss
        # self.loss =  SoftDiceLoss(self.n_classes)
        self.loss = torch.nn.CrossEntropyLoss()
        self.hot = 0

        self.batchsize = 1
        # self.optimizer = optim.Ada(self.net.parameters(),
        #                       lr= 0.01, #to do
        #                       momentum=0.9,
        #                       nesterov=True,
        #                       weight_decay=1e-5) #todo
        self.lr_rate = 5e-4
        self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate, weight_decay=1e-6)
        
        # self.optimizer = optim.SGD(self.net.parameters(),
        #                             lr=self.lr_rate)
        self.optimizer.zero_grad()
        self.validate_every_k_epochs = 1
        # Scheduler list : [lambdarule_1]
        # self.lr_scheduler = get_scheduler(self.optimizer, "multistep")
        self.lr_scheduler = get_scheduler(self.optimizer, "multistep", self.lr_rate)
        # self.lr_scheduler = get_scheduler(self.optimizer, "lambdarule_1", self.lr_rate)

        # Other
        self.classes_name = ['background','spleen','right kidney','left kidney','gallbladder','esophagus','liver','stomach','aorta','inferior vena cava','portal vein and splenic vein','pancreas','right adrenal gland','left adrenal gland']
        self.look_small = False
Example #29
    def __init__(self):
        # ID and Name
        self.id = -1
        self.experiment_name = "ma_cotr_pred_v{}".format(self.id)
        self.debug = False

        # System
        self.checkpointsBasePath = "./checkpoints/"
        self.checkpointsBasePathMod = self.checkpointsBasePath + 'models/'
        self.labelpath = "/local/DEEPLEARNING/MULTI_ATLAS/multi_atlas//512_512_256/"
        self.datapath = self.labelpath


        self.input_shape = [512,512,256]
        # filters = [4, 8, 16, 32]
        # skip_idx = [1,3,5,6]
        self.patch_size=(128,128,128)
        # self.patch_size=(192,192,48)
        # n_layers=6
        self.clip = True
        self.patched = True
        # GPU
        self.gpu = '0'
        os.environ["CUDA_VISIBLE_DEVICES"] = self.gpu

        # Model
        self.n_classes = 14
        self.net = ResTranUnet(norm_cfg='IN', activation_cfg='LeakyReLU', img_size=self.patch_size, num_classes=self.n_classes, weight_std=False, deep_supervision=False)
        self.net.inference_apply_nonlin = softmax_helper
        self.n_parameters = count_parameters(self.net)
        print("N PARAMS : {}".format(self.n_parameters))

        # self.model_path = './checkpoints/models/cotr.pth'
        self.model_path = './checkpoints/models/400/mod.pt'

        max_displacement = 5, 5, 5
        deg = (0, 5, 10)
        scales = 0
        self.transform = tio.Compose([
            tio.RandomElasticDeformation(max_displacement=max_displacement),
            tio.RandomAffine(scales=scales, degrees=deg),
        ])


        # Training
        self.start_epoch = 1000
        self.epoch = 1000

        self.loss = torch.nn.CrossEntropyLoss()

        self.loss = DC_and_CE_loss({'batch_dice': True, 'smooth': 1e-5, 'do_bg': False}, {})

        self.batchsize = 2
        self.lr_rate = 2e-2
        # self.final_lr_rate = 1e-5
        # self.optimizer = optim.Adam(self.net.parameters(), lr = self.lr_rate)
        self.optimizer = optim.SGD(self.net.parameters(), lr = self.lr_rate, weight_decay=3e-5, momentum=0.99)

        self.optimizer.zero_grad()
        self.validate_every_k_epochs = 10
        # self.decay = (self.lr_rate/self.final_lr_rate - 1)/self.epoch
        self.lr_scheduler = get_scheduler(self.optimizer, "poly", self.lr_rate, max_epochs=self.epoch)


        self.load_model()
        # Other
        self.classes_name = ['background','spleen','right kidney','left kidney','gallbladder','esophagus','liver','stomach','aorta','inferior vena cava','portal vein and splenic vein','pancreas','right adrenal gland','left adrenal gland']
Example #30
"""
Exclude images from transform
=============================

In this example we show how the kwargs ``include`` and ``exclude`` can be
used to apply a transform to only some of the images within a subject.
"""

import torch
import torchio as tio


torch.manual_seed(0)

subject = tio.datasets.Pediatric(years=(4.5, 8.5))
subject.plot()
transform = tio.Compose([
    tio.RandomAffine(degrees=(20, 30), exclude='t1'),
    tio.RandomBlur(std=(3, 4), include='t2'),
])
transformed = transform(subject)
transformed.plot()
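Because 't1' is excluded from the affine and the blur only includes 't2', the 't1' image should pass through unchanged; a quick check (sketch):

assert torch.equal(subject.t1.data, transformed.t1.data)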