def initialize_transforms_simple(p=0.8):
    transforms = [
        RandomFlip(axes=(0, 1, 2), flip_probability=1, p=p),

        #RandomAffine(scales=(0.9, 1.1), degrees=(10), isotropic=False,
        #             default_pad_value='otsu', image_interpolation=Interpolation.LINEAR,
        #             p = p, seed=None),

        # *** SLOWS DOWN DATALOADER ***
        #RandomElasticDeformation(num_control_points = 7, max_displacement = 7.5,
        #                         locked_borders = 2, image_interpolation = Interpolation.LINEAR,
        #                         p = 0.5, seed = None),
        RandomMotion(degrees=10,
                     translation=10,
                     num_transforms=2,
                     image_interpolation='linear',
                     p=p),
        RandomAnisotropy(axes=(0, 1, 2), downsampling=2),
        RandomBiasField(coefficients=0.5, order=3, p=p),
        RandomBlur(std=(0, 2), p=p),
        RandomNoise(mean=0, std=(0, 5), p=p),
        RescaleIntensity((0, 255))
    ]
    transform = tio.Compose(transforms)
    return transform
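A minimal usage sketch (an assumption, not part of the original snippet: it presumes `torchio` is imported as `tio` and builds a synthetic single-channel subject) showing how the composed augmentation would be applied:

# Hypothetical usage of initialize_transforms_simple on a synthetic subject.
import torch
import torchio as tio

augment = initialize_transforms_simple(p=0.8)
subject = tio.Subject(
    mri=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)),  # (channels, x, y, z)
)
augmented = augment(subject)
print(augmented.mri.data.shape, float(augmented.mri.data.max()))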
Example #2
 def test_transforms(self):
     landmarks_dict = dict(
         t1=np.linspace(0, 100, 13),
         t2=np.linspace(0, 100, 13),
     )
     transforms = (
         CropOrPad((9, 21, 30)),
         ToCanonical(),
         Resample((1, 1.1, 1.25)),
         RandomFlip(axes=(0, 1, 2), flip_probability=1),
         RandomMotion(proportion_to_augment=1),
         RandomGhosting(proportion_to_augment=1, axes=(0, 1, 2)),
         RandomSpike(),
         RandomNoise(),
         RandomBlur(),
         RandomSwap(patch_size=2, num_iterations=5),
         Lambda(lambda x: 1.5 * x, types_to_apply=INTENSITY),
         RandomBiasField(),
         RescaleIntensity((0, 1)),
         ZNormalization(masking_method='label'),
         HistogramStandardization(landmarks_dict=landmarks_dict),
         RandomElasticDeformation(proportion_to_augment=1),
         RandomAffine(),
         Pad((1, 2, 3, 0, 5, 6), padding_mode='constant', fill=3),
         Crop((3, 2, 8, 0, 1, 4)),
     )
     transformed = self.sample
     for transform in transforms:
         transformed = transform(transformed)
Example #3
    def create_transforms(self):
        transforms = []

        # clipping to remove outliers (if any)
        # clip_intensity = Lambda(VolumeDataset.clip_image, types_to_apply=[torchio.INTENSITY])
        # transforms.append(clip_intensity)

        rescale = RescaleIntensity((-1, 1), percentiles=(0.5, 99.5))
        # alternative: normalize with mu = 0 and sigma = 1/3 so that the data
        # lies almost entirely within [-1, 1]:
        # ZNormalization()

        transforms.append(rescale)

        # transforms = [rescale]
        # # As RandomAffine is faster than RandomElasticDeformation, we choose to
        # # apply RandomAffine 80% of the time and RandomElasticDeformation the rest.
        # # Also, there is a 25% chance that neither of them will be applied.
        # if self.opt.isTrain:
        #     spatial = OneOf(
        #         {RandomAffine(translation=5): 0.8, RandomElasticDeformation(): 0.2},
        #         p=0.75,
        #     )
        #     transforms += [RandomFlip(axes=(0, 2), p=0.8), spatial]

        self.ratio = self.min_size / np.max(self.input_size)
        transforms.append(Resample(self.ratio))
        transforms.append(CropOrPad(self.input_size))
        transform = Compose(transforms)
        return transform
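The commented-out block above reasons about mixing RandomAffine and RandomElasticDeformation; a standalone sketch of that idea (assuming the torchio transforms are imported as bare names, as elsewhere in this snippet) could look like this:

# Sketch of the spatial augmentation described in the comments above:
# RandomAffine 80% of the time, RandomElasticDeformation 20%,
# and a 25% chance that neither is applied (p=0.75 on the OneOf).
spatial = OneOf(
    {RandomAffine(translation=5): 0.8, RandomElasticDeformation(): 0.2},
    p=0.75,
)
train_transform = Compose([RandomFlip(axes=(0, 2), p=0.8), spatial])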
Example #4
    def build(self):
        SEED = 42
        data = pd.read_csv(self.data)
        ab = data.label

        ############################################
        transforms = [
            RescaleIntensity((0, 1)),
            RandomAffine(),
            transformss.ToTensor(),  # note: assumes 'transformss' (e.g. torchvision.transforms) is imported elsewhere
        ]
        transform = Compose(transforms)
        #############################################

        dataset_dir = self.dataset_dir
        dataset_dir = Path(dataset_dir)

        images_dir = dataset_dir
        labels_dir = dataset_dir
        image_paths = sorted(images_dir.glob('**/*.nii'))
        label_paths = sorted(labels_dir.glob('**/*.nii'))
        assert len(image_paths) == len(label_paths)

        # These two names are arbitrary
        MRI = 'features'
        BRAIN = 'targets'

        # split the dataset into training and validation sets
        from catalyst.utils import split_dataframe_train_test

        train_image_paths, valid_image_paths = split_dataframe_train_test(
            image_paths, test_size=0.2, random_state=SEED)

        # training data
        # NOTE: labels are taken from the CSV column by a running index `i`,
        # which assumes the CSV rows follow the same order as the split paths.
        subjects = []
        i = 0
        for image_path in train_image_paths:
            subject_dict = {
                MRI: torchio.Image(image_path, torchio.INTENSITY),
                BRAIN: ab[i],
            }
            i += 1
            subject = torchio.Subject(subject_dict)
            subjects.append(subject)
        train_data = torchio.ImagesDataset(subjects)

        # validation data
        subjects = []
        for image_path in valid_image_paths:
            subject_dict = {
                MRI: torchio.Image(image_path, torchio.INTENSITY),
                BRAIN: ab[i],
            }
            i += 1
            subject = torchio.Subject(subject_dict)
            subjects.append(subject)
        test_data = torchio.ImagesDataset(subjects)
        return train_data, test_data
Example #5
 def test_rescale_to_same_intensity(self):
     min_t1 = float(self.sample_subject.t1.data.min())
     max_t1 = float(self.sample_subject.t1.data.max())
     transform = RescaleIntensity(out_min_max=(min_t1, max_t1))
     transformed = transform(self.sample_subject)
     assert np.allclose(transformed.t1.data,
                        self.sample_subject.t1.data,
                        rtol=0,
                        atol=1e-06)
Example #6
def load_pretrain_datasets(data_shape, batch=3, workers=4, transform=None):

    data_path = '/home/mitch/Data/MSD/'
    directories = sorted(glob.glob(data_path + '*/'))

    loaders = []   # one DataLoader per task
    datasets = []  # dataset objects before they are turned into loaders

    if transform is None:
        transform = tio.RandomFlip(p=0.)  # identity transform: no extra augmentation
    # preprocess everything
    clippy = Lambda(lambda x: torch.clip(x, -80, 300),
                    types_to_apply=[tio.INTENSITY])
    normal = RescaleIntensity((0., 1.))
    resize = Lambda(lambda x: torch.squeeze(
        interpolate(torch.unsqueeze(x, dim=0), data_shape), dim=0))
    rounding = Lambda(lambda x: torch.round(x), types_to_apply=[tio.LABEL])
    transform = tio.Compose([clippy, normal, resize, rounding, transform])

    # deal with unusual shapes: keep a single component along the last axis
    # for the brain and prostate tasks
    braintransform = Lambda(lambda x: torch.unsqueeze(x[:, :, :, 2], dim=0),
                            types_to_apply=[tio.INTENSITY])
    braintransform = tio.Compose([braintransform, transform])
    prostatetransform = Lambda(lambda x: torch.unsqueeze(x[:, :, :, 1], dim=0),
                               types_to_apply=[tio.INTENSITY])
    prostatetransform = tio.Compose([prostatetransform, transform])

    for i, directory in enumerate(directories):
        images = sorted(glob.glob(directory + 'imagesTr/*'))
        segs = sorted(glob.glob(directory + 'labelsTr/*'))

        subject_list = []

        for image, seg in zip(images, segs):

            subject_list.append(
                tio.Subject(img=tio.ScalarImage(image),
                            label=tio.LabelMap(seg)))

        #handle special cases
        if i == 0:
            datasets.append(
                tio.SubjectsDataset(subject_list, transform=braintransform))
        elif i == 4:
            datasets.append(
                tio.SubjectsDataset(subject_list, transform=prostatetransform))
        else:
            datasets.append(
                tio.SubjectsDataset(subject_list, transform=transform))

        loaders.append(
            DataLoader(datasets[-1],
                       num_workers=workers,
                       batch_size=batch,
                       pin_memory=True))

    return loaders
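A hedged usage sketch for the loader factory above (the target shape and batch size are assumptions, and the hard-coded MSD path inside the function must exist; the `img`/`label` keys come from the Subjects built in the function):

# Hypothetical usage: build the per-task loaders and inspect one batch each.
loaders = load_pretrain_datasets(data_shape=(128, 128, 128), batch=2, workers=0)
for task_id, loader in enumerate(loaders):
    batch = next(iter(loader))
    images = batch['img'][tio.DATA]    # (batch, channels, x, y, z)
    labels = batch['label'][tio.DATA]
    print(task_id, images.shape, labels.unique())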
Example #7
 def test_percentiles(self):
     low_quantile = np.percentile(self.sample_subject.t1.data, 5)
     high_quantile = np.percentile(self.sample_subject.t1.data, 95)
     low_indices = (self.sample_subject.t1.data < low_quantile).nonzero(
         as_tuple=True)
     high_indices = (self.sample_subject.t1.data > high_quantile).nonzero(
         as_tuple=True)
     transform = RescaleIntensity(out_min_max=(0., 1.), percentiles=(5, 95))
     transformed = transform(self.sample_subject)
     assert (transformed.t1.data[low_indices] == 0.).all()
     assert (transformed.t1.data[high_indices] == 1.).all()
Example #8
 def _get_default_transforms(self):
     io_transforms = Compose([
         RandomMotion(),
         RandomFlip(axes=(1, )),
         RandomAffine(scales=(0.9, 1.2),
                      degrees=(10),
                      isotropic=False,
                      default_pad_value='otsu',
                      image_interpolation='bspline'),
         RescaleIntensity((0, 1))
     ])
     return io_transforms
Example #9
 def test_masking_using_label(self):
     transform = RescaleIntensity(out_min_max=(0., 1.),
                                  percentiles=(5, 95),
                                  masking_method='label')
     transformed = transform(self.sample_subject)
     mask = self.sample_subject.label.data > 0
     low_quantile = np.percentile(self.sample_subject.t1.data[mask], 5)
     high_quantile = np.percentile(self.sample_subject.t1.data[mask], 95)
     low_indices = (self.sample_subject.t1.data < low_quantile).nonzero(
         as_tuple=True)
     high_indices = (self.sample_subject.t1.data > high_quantile).nonzero(
         as_tuple=True)
     self.assertEqual(transformed.t1.data.min(), 0.)
     self.assertEqual(transformed.t1.data.max(), 1.)
     assert (transformed.t1.data[low_indices] == 0.).all()
     assert (transformed.t1.data[high_indices] == 1.).all()
Example #10
 def test_too_many_values_for_percentiles(self):
     with self.assertRaises(ValueError):
         RescaleIntensity(out_min_max=(0., 1.), percentiles=(1., 2., 3.))
Example #11
 def test_min_percentile_higher_than_max_percentile(self):
     with self.assertRaises(ValueError):
         RescaleIntensity(out_min_max=(0., 1.), percentiles=(1., 0.))
Example #12
 def test_wrong_out_min_max_type(self):
     with self.assertRaises(ValueError):
         RescaleIntensity(out_min_max='wrong')
Example #13
 def test_too_many_values_for_out_min_max(self):
     with self.assertRaises(ValueError):
         RescaleIntensity(out_min_max=(1., 2., 3.))
Example #14
 def test_out_min_higher_than_out_max(self):
     with self.assertRaises(ValueError):
         RescaleIntensity(out_min_max=(1., 0.))
Example #15
def get_dataset_from_option(options):

    fin = options.image_in
    dir_sample = options.sample_dir
    add_affine_zoom, add_affine_rot = options.add_affine_zoom, options.add_affine_rot

    batch_size, num_workers = options.batch_size, options.num_workers

    doit = do_training('/tmp/', 'not_use', verbose=True)
    # adding transformation
    tc = []
    name_suffix = ''
    # Careful: do not put '_' inside the name_suffix pieces
    # mask_key is also used further down, so define it unconditionally
    target_shape, mask_key = (182, 218, 182), 'brain'
    if options.add_cut_mask > 0:
        tc = [
            CropOrPad(target_shape=target_shape, mask_name=mask_key),
        ]
        name_suffix += '_tCropBrain'

    if add_affine_rot > 0 or add_affine_zoom > 0:
        if add_affine_zoom == 0:
            add_affine_zoom = 1  # a zoom of 0 means no scaling, so fall back to the identity scale
        tc.append(
            RandomAffine(scales=(add_affine_zoom, add_affine_zoom),
                         degrees=(add_affine_rot, add_affine_rot),
                         image_interpolation=Interpolation.NEAREST))
        name_suffix += '_tAffineS{}R{}'.format(add_affine_zoom, add_affine_rot)

    # for hcp data, this should be applied before RescaleIntensity
    mask_brain = False
    if options.add_mask_brain:
        tc.append(ApplyMask(masking_method='brain'))
        name_suffix += '_tMaskBrain'
        mask_brain = True

    if options.add_rescal_Imax:
        tc.append(RescaleIntensity(percentiles=(0, 99)))
        name_suffix += '_tRescale-0-99'

    if options.add_elastic1:
        tc.append(get_motion_transform(type='elastic1'))
        name_suffix += '_tElastic1'

    if options.add_bias:
        tc.append(RandomBiasField())
        name_suffix += '_tBias'

    if len(name_suffix) == 0:
        name_suffix = '_Raw'

    target = None
    if len(tc) == 0: tc = None

    add_to_load, add_to_load_regexp = None, None

    if len(dir_sample) > 0:
        print('loading from {}'.format(dir_sample))
        if options.add_orig:
            add_to_load, add_to_load_regexp = 'original', 'notused'

        data_name = get_parent_path(dir_sample)[1]
        if mask_brain and 'hcp' in data_name:
            add_to_load_regexp = 'brain_T'
            if add_to_load is None:
                add_to_load = 'brain'
            else:
                add_to_load += 'brain'

        doit.set_data_loader(batch_size=batch_size,
                             num_workers=num_workers,
                             load_from_dir=dir_sample,
                             transforms=tc,
                             add_to_load=add_to_load,
                             add_to_load_regexp=add_to_load_regexp)

        name_suffix = 'On_' + data_name + name_suffix
        target = options.target  # e.g. 'ssim': data loaded from a sample dir is assumed to be simulated, so a target is set
    else:
        print('working on ')
        for ff in fin:
            print(ff)

        doit.set_data_loader_from_file_list(fin,
                                            transforms=tc,
                                            batch_size=batch_size,
                                            num_workers=num_workers,
                                            mask_key=mask_key,
                                            mask_regex='^mask')

    return doit, name_suffix, target
Example #16
 def test_wrong_percentiles_type(self):
     with self.assertRaises(ValueError):
         RescaleIntensity(out_min_max=(0., 1.), percentiles='wrong')
Example #17
training_batch_size = 12
validation_batch_size = 6
patch_size = 32
samples_per_volume = 20
max_queue_length = 80

training_name = "denseNet3D_torchIO_patch_{}_samples_{}_ADAMOptim_{}Epochs_BS{}_GlorotWeights_SSIM_1511".format(
    patch_size, samples_per_volume, Epochs, training_batch_size)
train_writer = SummaryWriter(
    os.path.join("runs", "Densenets", training_name + "_training"))
validation_writer = SummaryWriter(
    os.path.join("runs", "Densenets", training_name + "_validation"))

training_subjects, test_subjects, validation_subjects = train_test_val_split()

training_transform = Compose([RescaleIntensity((0, 1)), RandomNoise(p=0.05)])
validation_transform = Compose([RescaleIntensity((0, 1))])
test_transform = Compose([RescaleIntensity((0, 1))])

training_dataset = tio.SubjectsDataset(training_subjects,
                                       transform=training_transform)
validation_dataset = tio.SubjectsDataset(validation_subjects,
                                         transform=validation_transform)
test_dataset = tio.SubjectsDataset(test_subjects, transform=test_transform)
'''Patching'''

patches_training_set = tio.Queue(
    subjects_dataset=training_dataset,
    max_length=max_queue_length,
    samples_per_volume=samples_per_volume,
    sampler=tio.sampler.UniformSampler(patch_size),
Example #18
    target_shape, mask_key = (182, 218, 182), 'brain'
    add_log += 'adding a CropOrPad {} with mask key {}'.format(
        target_shape, mask_key)
    print(add_log)
    # append the transform itself (not a list), so that Compose receives Transform objects
    tc.append(
        CropOrPad(target_shape=target_shape, mask_name=mask_key),
    )

# applied before RescaleIntensity for hcp (07/04/2020), but not for cati
if mask_brain:
    tc.append(ApplyMask(masking_method='brain'))
    add_log += 'adding a ApplyMask brain '
    base_name += '_Mask'

if 'T1' in data_name_train:
    tc.append(RescaleIntensity(percentiles=(0, 99)))
    #tc.append(RandomAffine())
    add_log += 'adding a RESCALE Intensity 0 99 '
    base_name += '_rescale'
    print(add_log)

if len(tc) == 0: tc = None
if len(add_log) == 0: add_log = None

dir_cache = get_cache_dir(root_fs=root_fs)
load_from_dir = [
    '{}/{}/'.format(dir_cache, data_name_train),
    '{}/{}/'.format(dir_cache, data_name_val)
]
res_name = '{}_{}'.format(base_name, data_name_train)
Example #19
state_dict = torch.load(
    "Models/DenseNet_3x3Conv_no Scale Aug/denseNet3D_torchIO_patch_32_samples_20_ADAMOptim_50Epochs_BS6_GlorotWeights_SSIM_3X3.pth"
)
model = DenseNetModel.DenseNet(num_init_features=4,
                               growth_rate=6,
                               block_config=(6, 6, 6)).to("cuda")
model.load_state_dict(state_dict["model_state_dict"])
ground_truths = Path("IXI-T1/Actual_Images")
ground_paths = sorted(ground_truths.glob('*.nii.gz'))
compressed_dirs = [
    sorted(Path((os.path.join("IXI-T1", comp))).glob('*.nii.gz'))
    for comp in os.listdir("IXI-T1") if "Compressed" in comp
]
validation_batch_size = 12
test_transform = Compose([RescaleIntensity((0, 1))])


def test_network(sample):
    patch_size = 48, 48, 48
    patch_overlap = 4, 4, 4
    model.eval()
    grid_sampler = tio.inference.GridSampler(sample, patch_size, patch_overlap)
    patch_loader = torch.utils.data.DataLoader(grid_sampler,
                                               int(validation_batch_size / 4))
    aggregator = tio.inference.GridAggregator(grid_sampler,
                                              overlap_mode="average")
    with torch.no_grad():
        for batch in patch_loader:
            inputs = batch["compressed"][DATA].to("cuda")
            logits = model(inputs)
Example #20
def generate_dataset(data_path,
                     data_root='',
                     ref_path=None,
                     nb_subjects=5,
                     resampling='mni',
                     masking_method='label'):
    """
    Generate a torchio dataset from a csv file defining paths to subjects.

    :param data_path: path to a csv file listing the subject directories
    :param data_root: root directory prepended to each path read from the csv
    :param ref_path: path to a reference image, used when resampling == 'mm'
    :param nb_subjects: number of subjects randomly sampled from the csv
    :param resampling: 'mni' (resample using the stored coregistration) or 'mm' (resample to ref_path)
    :param masking_method: masking method forwarded to RescaleIntensity
    :return: a torchio.ImagesDataset of the selected subjects
    """
    ds = pd.read_csv(data_path)
    ds = ds.dropna(subset=['suj'])
    np.random.seed(0)
    subject_idx = np.random.choice(range(len(ds)), nb_subjects, replace=False)
    directories = ds.iloc[subject_idx, 1]
    dir_list = directories.tolist()
    dir_list = map(lambda partial_dir: data_root + partial_dir, dir_list)

    subject_list = []
    for directory in dir_list:
        img_path = glob.glob(os.path.join(directory, 's*.nii.gz'))[0]

        mask_path = glob.glob(os.path.join(directory, 'niw_Mean*'))[0]
        coregistration_path = glob.glob(os.path.join(directory, 'aff*.txt'))[0]

        coregistration = np.loadtxt(coregistration_path, delimiter=' ')
        coregistration = np.linalg.inv(coregistration)

        subject = torchio.Subject(
            t1=torchio.Image(img_path,
                             torchio.INTENSITY,
                             coregistration=coregistration),
            label=torchio.Image(mask_path, torchio.LABEL),
            #ref=torchio.Image(ref_path, torchio.INTENSITY)
            # coregistration=coregistration,
        )
        print('adding img {} \n mask {}\n'.format(img_path, mask_path))
        subject_list.append(subject)

    transforms = [
        # Resample(1),
        RescaleIntensity((0, 1), (0, 99), masking_method=masking_method),
    ]

    if resampling == 'mni':
        # resampling_transform = ResampleWithFoV(
        #     target=nib.load(ref_path), image_interpolation=Interpolation.BSPLINE, coregistration_key='coregistration'
        # )
        resampling_transform = Resample(
            target='ref',
            image_interpolation=Interpolation.BSPLINE,
            coregistration='coregistration')
        transforms.insert(0, resampling_transform)
    elif resampling == 'mm':
        # resampling_transform = ResampleWithFoV(target=nib.load(ref_path), image_interpolation=Interpolation.BSPLINE)
        resampling_transform = Resample(
            target=ref_path, image_interpolation=Interpolation.BSPLINE)
        transforms.insert(0, resampling_transform)

    transform = Compose(transforms)

    return torchio.ImagesDataset(subject_list, transform=transform)
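A hedged usage sketch for `generate_dataset` (the csv path and data_root are placeholders; `resampling=None` skips both resampling branches so only the rescaling is applied; subject access follows the legacy torchio.ImagesDataset API the function already uses):

# Hypothetical usage with placeholder paths.
dataset = generate_dataset('subjects.csv', data_root='/data/', nb_subjects=2,
                           resampling=None, masking_method='label')
sample = dataset[0]  # one transformed subject
print(len(dataset), sample['t1'][torchio.DATA].shape)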
Example #21
 def test_min_max(self):
     transform = RescaleIntensity(out_min_max=(0., 1.))
     transformed = transform(self.sample_subject)
     self.assertEqual(transformed.t1.data.min(), 0.)
     self.assertEqual(transformed.t1.data.max(), 1.)
Example #22
from torchio.transforms import (
    RandomMotion,
    RandomBiasField,
    RescaleIntensity,
    Resample,
    ToCanonical,
    ZNormalization,
    CropOrPad,
    HistogramStandardization,
    OneOf,
    Compose,
)

landmarks = np.load('landmarks.npy')

transform = Compose([
    RescaleIntensity((0, 1)),
    HistogramStandardization({'mri': landmarks}),
    ZNormalization(masking_method=ZNormalization.mean),
    ToCanonical(),
    Resample((1, 1, 1)),
    CropOrPad((224, 224, 224)),
])

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def create_paths(datapath):
    # create paths to all nested images
    imagepaths = []
    for root, dirs, files in os.walk(datapath, topdown=False):
        for name in files: