コード例 #1
0
ファイル: test_crop_pad.py プロジェクト: moondaiy/torchio
 def test_different_shape(self):
     """Every image's spatial shape changes when the target shape differs."""
     original_shape = self.sample['t1'][DATA].shape[1:]
     transform = CropOrPad((9, 21, 30))
     transformed = transform(self.sample)
     for name in transformed:
         new_shape = transformed[name][DATA].shape[1:]
         self.assertNotEqual(original_shape, new_shape)
コード例 #2
0
 def test_center_mask(self):
     """A mask centered on the image gives the same result as plain cropping."""
     target_shape = 8, 22, 30
     plain_transform = CropOrPad(target_shape)
     masked_transform = CropOrPad(target_shape, mask_name='label')
     label_data = self.sample['label'][DATA]
     label_data *= 0
     # Mask voxels symmetric about the volume center.
     label_data[0, 4:6, 9:11, 14:16] = 1
     plain_result = plain_transform(self.sample)
     masked_result = masked_transform(self.sample)
     for plain_image, masked_image in zip(
             plain_result.values(), masked_result.values()):
         assert_array_equal(
             plain_image[DATA], masked_image[DATA],
             'Data is different after cropping',
         )
         assert_array_equal(
             plain_image[AFFINE], masked_image[AFFINE],
             'Physical position is different after cropping',
         )
コード例 #3
0
ファイル: data.py プロジェクト: JIiminIT/Torch
def training_network(landmarks, dataset, subjects, training_split_ratio=0.9):
    """Build training and validation ``SubjectsDataset`` objects.

    Args:
        landmarks: Histogram-standardization landmarks for the ``'mri'`` image.
        dataset: Sized collection used only to compute the split point.
        subjects: Sequence of subjects to split into training/validation.
        training_split_ratio: Fraction of subjects used for training.
            Defaults to 0.9, the previously hard-coded value, so existing
            callers are unaffected.

    Returns:
        tuple: ``(training_set, validation_set)``.
    """
    # Random augmentations are training-only; validation keeps just the
    # deterministic preprocessing so evaluation stays reproducible.
    training_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        RandomMotion(),
        HistogramStandardization({'mri': landmarks}),
        RandomBiasField(),
        ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
    ])

    validation_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        HistogramStandardization({'mri': landmarks}),
        ZNormalization(masking_method=ZNormalization.mean),
    ])

    # NOTE(review): the split point comes from len(dataset) but is applied to
    # `subjects` — assumes both have the same length; confirm at call sites.
    num_subjects = len(dataset)
    num_training_subjects = int(training_split_ratio * num_subjects)

    training_subjects = subjects[:num_training_subjects]
    validation_subjects = subjects[num_training_subjects:]

    training_set = tio.SubjectsDataset(training_subjects,
                                       transform=training_transform)

    validation_set = tio.SubjectsDataset(validation_subjects,
                                         transform=validation_transform)

    print('Training set:', len(training_set), 'subjects')
    print('Validation set:', len(validation_set), 'subjects')
    return training_set, validation_set
コード例 #4
0
ファイル: test_crop_pad.py プロジェクト: mingjie0508/torchio
 def test_no_changes_mask(self):
     """An all-zero mask warns and leaves data and affines untouched."""
     t1_shape = self.sample_subject['t1'].spatial_shape
     label_data = self.sample_subject['label'].data
     label_data *= 0
     transform = CropOrPad(t1_shape, mask_name='label')
     with self.assertWarns(RuntimeWarning):
         transformed = transform(self.sample_subject)
     for name in transformed:
         original = self.sample_subject[name]
         self.assertTensorEqual(original.data, transformed[name].data)
         self.assertTensorEqual(original.affine, transformed[name].affine)
コード例 #5
0
 def test_no_changes_mask(self):
     """Warn and keep every image intact when the mask has no foreground."""
     t1_shape = self.sample['t1'].spatial_shape
     mask_data = self.sample['label'][DATA]
     mask_data *= 0
     transform = CropOrPad(t1_shape, mask_name='label')
     with self.assertWarns(UserWarning):
         transformed = transform(self.sample)
     for name in transformed:
         original = self.sample[name]
         assert_array_equal(original[DATA], transformed[name][DATA])
         assert_array_equal(original[AFFINE], transformed[name][AFFINE])
コード例 #6
0
ファイル: test_crop_pad.py プロジェクト: mingjie0508/torchio
 def test_center_mask(self):
     """A centered mask bounding box reproduces the plain center crop."""
     target_shape = 8, 22, 30
     plain_transform = CropOrPad(target_shape)
     masked_transform = CropOrPad(target_shape, mask_name='label')
     label_data = self.sample_subject['label'].data
     label_data *= 0
     # Foreground voxels symmetric about the volume center.
     label_data[0, 4:6, 9:11, 14:16] = 1
     plain_result = plain_transform(self.sample_subject)
     masked_result = masked_transform(self.sample_subject)
     for plain_image, masked_image in zip(
             plain_result.values(), masked_result.values()):
         self.assertTensorEqual(
             plain_image.data,
             masked_image.data,
             'Data is different after cropping',
         )
         self.assertTensorEqual(
             plain_image.affine,
             masked_image.affine,
             'Physical position is different after cropping',
         )
コード例 #7
0
ファイル: test_crop_pad.py プロジェクト: jwitos/torchio
 def test_no_changes_mask(self):
     """An empty mask warns and produces output identical to the input."""
     t1_shape = self.sample_subject['t1'].spatial_shape
     mask_data = self.sample_subject['label'][DATA]
     mask_data *= 0
     transform = CropOrPad(t1_shape, mask_name='label')
     with self.assertWarns(RuntimeWarning):
         transformed = transform(self.sample_subject)
     for name in transformed:
         original = self.sample_subject[name]
         self.assertTensorEqual(original[DATA], transformed[name][DATA])
         self.assertTensorEqual(original[AFFINE],
                                transformed[name][AFFINE])
コード例 #8
0
 def test_mask_corners(self):
     """Opposite-corner mask voxels center the crop like the plain transform."""
     target_shape = 8, 22, 30
     plain_transform = CropOrPad(target_shape)
     masked_transform = CropOrPad(target_shape, mask_name='label')
     label_data = self.sample_subject['label'][DATA]
     label_data *= 0
     # Two foreground voxels at diagonally opposite corners.
     label_data[0, 0, 0, 0] = 1
     label_data[0, -1, -1, -1] = 1
     plain_result = plain_transform(self.sample_subject)
     masked_result = masked_transform(self.sample_subject)
     for plain_image, masked_image in zip(
             plain_result.values(), masked_result.values()):
         self.assertTensorEqual(
             plain_image[DATA],
             masked_image[DATA],
             'Data is different after cropping',
         )
         self.assertTensorEqual(
             plain_image[AFFINE],
             masked_image[AFFINE],
             'Physical position is different after cropping',
         )
コード例 #9
0
    def transform(self):
        """Build the training augmentation pipeline for hp.mode ('3d' or '2d').

        Only the CropOrPad target differs between the modes; the rest of the
        pipeline is shared, so the target is resolved first.
        """
        if hp.mode == '3d':
            crop_size = (hp.crop_or_pad_size,
                         hp.crop_or_pad_size,
                         hp.crop_or_pad_size)
        elif hp.mode == '2d':
            # Third dimension collapsed to a single slice.
            crop_size = (hp.crop_or_pad_size, hp.crop_or_pad_size, 1)
        else:
            raise Exception('no such kind of mode!')

        return Compose([
            CropOrPad(crop_size, padding_mode='reflect'),
            RandomMotion(),
            RandomBiasField(),
            ZNormalization(),
            RandomNoise(),
            RandomFlip(axes=(0, )),
            OneOf({
                RandomAffine(): 0.8,
                RandomElasticDeformation(): 0.2,
            }),
        ])
コード例 #10
0
 def test_mask_only_crop(self):
     """Mask-guided cropping yields one common shape equal to the target."""
     target_shape = 9, 18, 30
     transform = CropOrPad(target_shape, mask_name='label')
     mask_data = self.sample['label'][DATA]
     mask_data *= 0
     mask_data[0, 4:6, 5:8, 3:7] = 1
     transformed = transform(self.sample)
     # All images must end up with exactly one (shared) spatial shape.
     shapes = {transformed[name].spatial_shape for name in transformed}
     assert len(shapes) == 1, f'Images have different shapes: {shapes}'
     for name in transformed:
         self.assertEqual(
             target_shape, transformed[name].spatial_shape,
             f'Wrong shape for image: {name}',
         )
コード例 #11
0
ファイル: test_crop_pad.py プロジェクト: mingjie0508/torchio
 def mask_only(self, target_shape):
     """Crop/pad via the mask only and check all images share target_shape."""
     transform = CropOrPad(target_shape, mask_name='label')
     mask_data = self.sample_subject['label'].data
     mask_data *= 0
     mask_data[0, 4:6, 5:8, 3:7] = 1
     transformed = transform(self.sample_subject)
     # All images must come out with a single common spatial shape.
     shapes = {transformed[name].spatial_shape for name in transformed}
     assert len(shapes) == 1, f'Images have different shapes: {shapes}'
     for name in transformed:
         self.assertEqual(
             target_shape,
             transformed[name].spatial_shape,
             f'Wrong shape for image: {name}',
         )
コード例 #12
0
ファイル: __init__.py プロジェクト: sarthakpati/GaNDLF
def centercrop_transform(patch_size):
    """Return a CropOrPad transform for a 3-D-validated patch size."""
    validated_shape = generic_3d_check(patch_size)
    return CropOrPad(target_shape=validated_shape)
コード例 #13
0
 def test_wrong_mask_name(self):
     """A mask name absent from the sample triggers a warning at call time."""
     transform = CropOrPad(1, mask_name='wrong')
     with self.assertWarns(UserWarning):
         transform(self.sample)
コード例 #14
0
def define_transform(transform,
                     p,
                     blur_std=4,
                     motion_trans=10,
                     motion_deg=10,
                     motion_num=2,
                     biascoeff=0.5,
                     noise_std=0.25,
                     affine_trans=10,
                     affine_deg=10,
                     elastic_disp=7.5,
                     resample_size=1,
                     target_shape=0):
    """Build a ``Compose`` pipeline applying one named degradation.

    Args:
        transform: One of 'blur', 'motion', 'biasfield', 'noise', 'affine',
            'elastic' or 'resample'.
        p: Probability of applying the random transform.
        blur_std: Std of the Gaussian blur kernel ('blur').
        motion_trans: Translation magnitude ('motion').
        motion_deg: Rotation magnitude in degrees ('motion').
        motion_num: Number of simulated movements ('motion').
        biascoeff: Polynomial coefficient magnitude ('biasfield').
        noise_std: Std of the additive Gaussian noise ('noise').
        affine_trans: Unused; kept for backward compatibility of the signature.
        affine_deg: Rotation magnitude in degrees ('affine').
        elastic_disp: Number of control points ('elastic').
        resample_size: Target spacing ('resample').
        target_shape: CropOrPad target shape ('resample').

    Returns:
        A ``Compose`` wrapping the selected transform(s).

    Raises:
        ValueError: For an unknown ``transform`` name. Previously an unknown
            name fell through every ``if`` and crashed with
            ``UnboundLocalError`` at the return statement.
    """
    ### (1) different blur strengths
    if transform == 'blur':
        transforms = [RandomBlur(std=(blur_std, blur_std), p=p, seed=None)]
    ### (2) different motion artifacts
    elif transform == 'motion':
        transforms = [
            RandomMotion(degrees=motion_deg,
                         translation=motion_trans,
                         num_transforms=motion_num,
                         image_interpolation=Interpolation.LINEAR,
                         p=p,
                         seed=None),
        ]
    ### (3) random bias fields
    elif transform == 'biasfield':
        transforms = [
            RandomBiasField(coefficients=biascoeff, order=3, p=p, seed=None)
        ]
    ### (4) different noise artifacts
    elif transform == 'noise':
        transforms = [
            RandomNoise(mean=0, std=(noise_std, noise_std), p=p, seed=None)
        ]
    ### (5) affine transformations
    elif transform == 'affine':
        transforms = [
            RandomAffine(scales=(1, 1),
                         degrees=(affine_deg),
                         isotropic=False,
                         default_pad_value='otsu',
                         image_interpolation=Interpolation.LINEAR,
                         p=p,
                         seed=None)
        ]
    ### (6) elastic deformations
    elif transform == 'elastic':
        transforms = [
            RandomElasticDeformation(num_control_points=elastic_disp,
                                     max_displacement=20,
                                     locked_borders=2,
                                     image_interpolation=Interpolation.LINEAR,
                                     p=p,
                                     seed=None),
        ]
    ### (7) resample, then crop/pad back to a fixed shape
    elif transform == 'resample':
        transforms = [
            Resample(target=resample_size,
                     image_interpolation=Interpolation.LINEAR,
                     p=p),
            CropOrPad(target_shape=target_shape, p=1)
        ]
    else:
        raise ValueError('Unknown transform: {!r}'.format(transform))

    transforms = Compose(transforms)
    return transforms
コード例 #15
0
# Save one transformed sample, then reload it and write the motion-corrupted
# volume back out as NIfTI for visual inspection.
dataset.save_sample(s, dict(T1='/home/romain/QCcnn//mask_mvt_val_cati_T1/mot_float.nii'))
sample = torch.load('/home/romain/QCcnn/mask_mvt_val_cati_T1/sample00220_sample.pt')
tensor = sample['image']['data'][0]  # remove channels dim
affine = sample['image']['affine']
write_image(tensor, affine, '/home/romain/QCcnn//mask_mvt_val_cati_T1/mot_li.nii')

# Interpolated motion-fit parameters of the transform `t` applied above.
ff1 = t.fitpars_interp

# Single-subject list: T1 image plus its brain mask under the key 'maskk'.
suj = [Subject(image=Image('/data/romain/HCPdata/suj_150423/mT1w_1mm.nii', INTENSITY),
         maskk=Image('/data/romain/HCPdata/suj_150423/mask_brain.nii',  LABEL))]

# NOTE(review): `tc` and `t` are reassigned several times below; only the
# final assignment of each is effective — earlier ones look like leftover
# experiments kept for reference.
tc = ApplyMask(masking_method='maskk')
t = RandomAffine(scales=(0.8,0.8), degrees=(10,10) )

tc = CenterCropOrPad(target_shape=(182, 218,212))
tc = CropOrPad(target_shape=(182, 218,182), mask_name='maskk')
#dico_elast = {'num_control_points': 6, 'deformation_std': (30, 30, 30), 'max_displacement': (4, 4, 4),
#              'proportion_to_augment': 1, 'image_interpolation': Interpolation.LINEAR}
#tc = RandomElasticDeformation(**dico_elast)

# Elastic-deformation settings; the second dict overrides the first.
dico_p = {'num_control_points': 8, 'deformation_std': (20, 20, 20), 'max_displacement': (4, 4, 4),
              'p': 1, 'image_interpolation': Interpolation.LINEAR}
dico_p = { 'num_control_points': 6,
           #'max_displacement': (20, 20, 20),
           'max_displacement': (30, 30, 30),
           'p': 1, 'image_interpolation': Interpolation.LINEAR }

t = Compose([ RandomElasticDeformation(**dico_p), tc])
t = Compose([RandomNoise(std=(0.020,0.2)),  RandomElasticDeformation(**dico_p) ])
t = Compose([RandomNoise(),  RandomElasticDeformation() ])
コード例 #16
0
    'cuda': cuda,
    'max_epochs': max_epochs
}
#'conv_block':[8, 16, 32, 64, 128]

# Resolve the cache directory for preprocessed samples on this filesystem.
dir_cache = get_cache_dir(root_fs=root_fs)
#load_from_dir = ['{}/{}/'.format(dir_cache, data_name_train), '{}/{}/'.format(dir_cache, data_name_val)]
res_name = '{}_{}'.format(base_name, data_name_train)
load_from_dir = [None]

# CATI data gets a brain-mask-guided CropOrPad to a fixed shape; other
# datasets get no transform at all (tc = None, not an empty list).
if 'cati' in data_name_train:
    target_shape, mask_key = (182, 218, 182), 'brain'
    print('adding a CropOrPad {} with mask key {}'.format(
        target_shape, mask_key))
    tc = [
        CropOrPad(target_shape=target_shape, mode='mask', mask_key=mask_key),
    ]
else:
    tc = None

doit = do_training(res_dir, res_name, verbose)

transforms = get_motion_transform('random_noise_1')

# In evaluation mode the training CSV doubles as the validation split.
if do_eval:
    val_csv_file = train_csv_file
doit.set_data_loader(train_csv_file=train_csv_file,
                     val_csv_file=val_csv_file,
                     transforms=transforms,
                     batch_size=batch_size,
コード例 #17
0
ファイル: test_predic_cnn.py プロジェクト: GFabien/torchQC
# Reload a saved sample and export its (motion-corrupted) volume as NIfTI.
tensor = sample['image']['data'][0]  # remove channels dim
affine = sample['image']['affine']
write_image(tensor, affine,
            '/home/romain/QCcnn//mask_mvt_val_cati_T1/mot_li.nii')

# Interpolated motion-fit parameters of the transform `t` applied earlier.
ff1 = t.fitpars_interp

# Single-subject list: T1 image plus its brain mask under the key 'maskk'.
suj = [
    Subject(image=Image('/data/romain/HCPdata/suj_150423/mT1w_1mm.nii',
                        INTENSITY),
            maskk=Image('/data/romain/HCPdata/suj_150423/mask_brain.nii',
                        LABEL))
]

# NOTE(review): `tc` is immediately reassigned — only the CropOrPad survives.
tc = CenterCropOrPad(target_shape=(182, 218, 212))
tc = CropOrPad(target_shape=(182, 218, 182), mode='mask', mask_key='maskk')
#dico_elast = {'num_control_points': 6, 'deformation_std': (30, 30, 30), 'max_displacement': (4, 4, 4),
#              'proportion_to_augment': 1, 'image_interpolation': Interpolation.LINEAR}
#tc = RandomElasticDeformation(**dico_elast)

# Elastic-deformation settings (overridden by the dict that follows it).
dico_p = {
    'num_control_points': 8,
    'deformation_std': (20, 20, 20),
    'max_displacement': (4, 4, 4),
    'proportion_to_augment': 1,
    'image_interpolation': Interpolation.LINEAR
}
dico_p = {
    'num_control_points': 6,
    #'max_displacement': (20, 20, 20),
    'max_displacement': (30, 30, 30),
コード例 #18
0
ファイル: 3dunet.py プロジェクト: obhutara/CardiacDeep
# Z-normalize intensities using the mean-intensity foreground mask.
znorm_transform = tio.ZNormalization(masking_method=tio.ZNormalization.mean)

# Apply histogram standardization + z-normalization to one sample and plot
# its intensity histogram as a sanity check.
sample = dataset[0]
transform = tio.Compose([histogram_transform, znorm_transform])
znormed = transform(sample)

fig, ax = plt.subplots(dpi=100)
plot_histogram(ax, znormed.mri.data, label='Z-normed', alpha=1)
ax.set_title('Intensity values of one sample after z-normalization')
ax.set_xlabel('Intensity')
ax.grid()

# Training pipeline: spatial canonicalization and fixed-size crop/pad first,
# then intensity augmentations. Several steps are left commented out from
# earlier experiments.
training_transform = Compose([
    ToCanonical(),
    #  Resample(4),
    CropOrPad((112, 112, 48), padding_mode=0),  #reflect , original 112,112,48
    RandomMotion(num_transforms=6, image_interpolation='nearest', p=0.2),
    HistogramStandardization({'mri': landmarks}),
    RandomBiasField(p=0.2),
    RandomBlur(p=0.2),
    ZNormalization(masking_method=ZNormalization.mean),
    RandomFlip(axes=['inferior-superior'], flip_probability=0.2),
    #  RandomNoise(std=0.5, p=0.2),
    RandomGhosting(intensity=1.8, p=0.2),
    #  RandomNoise(),
    #  RandomFlip(axes=(0,)),
    #  OneOf({
    #      RandomAffine(): 0.8,
    #      RandomElasticDeformation(): 0.2,
    #  }),
])
コード例 #19
0
 def test_shape_string(self):
     """A non-numeric shape argument must be rejected with ValueError."""
     self.assertRaises(ValueError, CropOrPad, '')
コード例 #20
0
 def test_shape_negative(self):
     """A negative target shape must be rejected with ValueError."""
     self.assertRaises(ValueError, CropOrPad, -1)
コード例 #21
0
             'cuda': cuda, 'max_epochs': max_epochs}
#'conv_block':[8, 16, 32, 64, 128]

# Build the optional transform list; downstream code expects a FLAT list of
# transforms, or None when nothing applies.
tc = []
add_log = ''

# HCP data ships a precomputed brain mask — load it alongside the images.
if mask_brain and 'hcp' in data_name_train:
    add_to_load, add_to_load_regexp = 'brain', 'brain_T'
else:
    add_to_load, add_to_load_regexp = None, None

if 'cati' in data_name_train:
    target_shape, mask_key = (182, 218, 182), 'brain'
    add_log += 'adding a CropOrPad {} with mask key {}'.format(target_shape, mask_key)
    print(add_log)
    # BUG FIX: previously a one-element LIST was appended
    # (tc.append([CropOrPad(...), ])), nesting a list inside the transform
    # list; sibling code builds tc = [CropOrPad(...)] flat. Append the
    # transform itself.
    tc.append(CropOrPad(target_shape=target_shape, mask_name=mask_key))

# before RescaleIntensity for hcp on 07/04/2020, but not for cati
if mask_brain:
    tc.append(ApplyMask(masking_method='brain'))
    add_log += 'adding a ApplyMask brain '
    base_name += '_Mask'

if 'T1' in data_name_train:
    tc.append(RescaleIntensity(percentiles=(0, 99)))
    #tc.append(RandomAffine())
    add_log += 'adding a RESCALE Intensity 0 99 '
    base_name += '_rescale'
    print(add_log)

# Normalize the empty list to None so "no transform" is explicit downstream.
if len(tc) == 0: tc = None
コード例 #22
0
 def reverse_resample(self, min_value=-1):
     """Resample back by the inverse ratio, then crop/pad to the original shape.

     `min_value` is used as the padding value for the CropOrPad step.
     """
     restore_shape = CropOrPad(self.opt.origshape, padding_mode=min_value)
     return Compose([Resample(1 / self.ratio), restore_shape])
コード例 #23
0
 def test_shape_float(self):
     """A non-integer target shape must be rejected with ValueError."""
     self.assertRaises(ValueError, CropOrPad, 2.5)
コード例 #24
0
    ZNormalization,
    CropOrPad,
    HistogramStandardization,
    OneOf,
    Compose,
)

# Precomputed histogram-standardization landmarks for the 'mri' channel.
landmarks = np.load('landmarks.npy')

# Deterministic preprocessing: intensity rescale/standardize/normalize, then
# reorient to canonical, resample to 1 mm isotropic, and pad/crop to a fixed
# (224, 224, 224) shape.
transform = Compose([
    RescaleIntensity((0, 1)),
    HistogramStandardization({'mri': landmarks}),
    ZNormalization(masking_method=ZNormalization.mean),
    ToCanonical(),
    Resample((1, 1, 1)),
    CropOrPad((224, 224, 224)),
])

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")


def create_paths(datapath):
    """Collect the full paths of every file nested under *datapath*."""
    return [
        os.path.join(parent, filename)
        for parent, _dirs, filenames in os.walk(datapath, topdown=False)
        for filename in filenames
    ]


def get_label(imagepath, csvpath):
コード例 #25
0
 def test_shape_one(self):
     """A scalar target shape of 1 resizes every image to (1, 1, 1)."""
     transform = CropOrPad(1)
     transformed = transform(self.sample)
     for name in transformed:
         self.assertEqual((1, 1, 1), transformed[name].spatial_shape)
コード例 #26
0
ファイル: utils_cmd.py プロジェクト: SayJMC/torchQC
def get_dataset_from_option(options):
    """Configure a ``do_training`` helper and its data loader from CLI options.

    Args:
        options: Parsed argument namespace (image_in, sample_dir, transform
            flags, batch/worker sizes, ...).

    Returns:
        tuple: ``(doit, name_suffix, target)`` — the configured helper, a
        suffix string encoding the applied transforms, and the regression
        target name (None when loading from raw file lists).
    """
    fin = options.image_in
    dir_sample = options.sample_dir
    add_affine_zoom, add_affine_rot = options.add_affine_zoom, options.add_affine_rot

    batch_size, num_workers = options.batch_size, options.num_workers

    doit = do_training('/tmp/', 'not_use', verbose=True)
    # adding transformation
    tc = []
    name_suffix = ''
    # BUG FIX: mask_key is read unconditionally in the file-list branch at the
    # bottom, but was only assigned when add_cut_mask > 0, which raised
    # UnboundLocalError otherwise. Default it to None.
    mask_key = None
    # Careful: no '_' inside the name_suffix fragments.
    if options.add_cut_mask > 0:
        target_shape, mask_key = (182, 218, 182), 'brain'
        tc = [
            CropOrPad(target_shape=target_shape, mask_name=mask_key),
        ]
        name_suffix += '_tCropBrain'

    if add_affine_rot > 0 or add_affine_zoom > 0:
        if add_affine_zoom == 0: add_affine_zoom = 1  # 0 -> no zoom, so identity scale 1
        tc.append(
            RandomAffine(scales=(add_affine_zoom, add_affine_zoom),
                         degrees=(add_affine_rot, add_affine_rot),
                         image_interpolation=Interpolation.NEAREST))
        name_suffix += '_tAffineS{}R{}'.format(add_affine_zoom, add_affine_rot)

    # for hcp this should run before RescaleIntensity
    mask_brain = False
    if options.add_mask_brain:
        tc.append(ApplyMask(masking_method='brain'))
        name_suffix += '_tMaskBrain'
        mask_brain = True

    if options.add_rescal_Imax:
        tc.append(RescaleIntensity(percentiles=(0, 99)))
        name_suffix += '_tRescale-0-99'

    if options.add_elastic1:
        tc.append(get_motion_transform(type='elastic1'))
        name_suffix += '_tElastic1'

    if options.add_bias:
        tc.append(RandomBiasField())
        name_suffix += '_tBias'

    if len(name_suffix) == 0:
        name_suffix = '_Raw'

    target = None
    # Normalize the empty transform list to None for the loaders below.
    if len(tc) == 0: tc = None

    add_to_load, add_to_load_regexp = None, None

    if len(dir_sample) > 0:
        print('loading from {}'.format(dir_sample))
        if options.add_orig:
            add_to_load, add_to_load_regexp = 'original', 'notused'

        data_name = get_parent_path(dir_sample)[1]
        # HCP samples ship a precomputed brain mask; load it alongside.
        if mask_brain and 'hcp' in data_name:
            add_to_load_regexp = 'brain_T'
            if add_to_load is None:
                add_to_load = 'brain'
            else:
                add_to_load += 'brain'

        doit.set_data_loader(batch_size=batch_size,
                             num_workers=num_workers,
                             load_from_dir=dir_sample,
                             transforms=tc,
                             add_to_load=add_to_load,
                             add_to_load_regexp=add_to_load_regexp)

        name_suffix = 'On_' + data_name + name_suffix
        # Samples loaded from a directory imply simulated data, so a
        # regression target (e.g. 'ssim') is expected.
        target = options.target
    else:
        print('working on ')
        for ff in fin:
            print(ff)

        doit.set_data_loader_from_file_list(fin,
                                            transforms=tc,
                                            batch_size=batch_size,
                                            num_workers=num_workers,
                                            mask_key=mask_key,
                                            mask_regex='^mask')

    return doit, name_suffix, target
コード例 #27
0
                         load_from_dir=load_from_dir,
                         shuffel_train=False)
    # Build the model, then stream the training data once, logging the max
    # intensity of every batch (a sanity check on data scaling).
    doit.set_model(par_model)
    td = doit.train_dataloader
    llog = doit.log
    for i, data in enumerate(td):
        dd = data['image']['data'].reshape(-1).numpy()
        llog.info('{} max is {}'.format(i, np.max(dd)))

# NOTE(review): this `else` pairs with an `if` above this excerpt.
else:
    # CATI data gets a brain-mask-guided CropOrPad to a fixed shape;
    # other datasets are loaded with no transform (tc = None).
    if 'cati' in data_name_train:
        target_shape, mask_key = (182, 218, 182), 'brain'
        print('adding a CropOrPad {} with mask key {}'.format(
            target_shape, mask_key))
        tc = [
            CropOrPad(target_shape=target_shape, mask_name=mask_key),
        ]
    else:
        tc = None
    if make_uniform:
        # Rebalance the dataset according to a condition column in the CSV.
        doit.set_data_loader(batch_size=batch_size,
                             num_workers=num_workers,
                             load_from_dir=load_from_dir,
                             transforms=tc,
                             get_condition_csv='res_motion.csv',
                             get_condition_field='ssim_brain')
    else:
        doit.set_data_loader(batch_size=batch_size,
                             num_workers=num_workers,
                             load_from_dir=load_from_dir,
                             transforms=tc)
コード例 #28
0
ファイル: test_crop_pad.py プロジェクト: mingjie0508/torchio
 def test_wrong_mask_name(self):
     """A mask key absent from the subject triggers a warning at call time."""
     transform = CropOrPad(1, mask_name='wrong')
     with self.assertWarns(RuntimeWarning):
         transform(self.sample_subject)