def transform(self):
        """Build the torchio training transform for the configured mode.

        Driven by the module-level hyper-parameter object ``hp``:
        ``hp.mode`` ('3d' or '2d'), ``hp.aug`` (enable augmentation) and
        ``hp.crop_or_pad_size``.

        Returns:
            A ``Compose`` pipeline of preprocessing/augmentation transforms.

        Raises:
            Exception: if ``hp.mode`` is neither '3d' nor '2d'.
        """

        if hp.mode == '3d':
            if hp.aug:
                training_transform = Compose([
                    # ToCanonical(),
                    # NOTE(review): (hp.crop_or_pad_size) is a plain int (the
                    # parentheses do not make a tuple), unlike the 3-tuple in
                    # the non-augmented branch below -- confirm intended.
                    CropOrPad((hp.crop_or_pad_size), padding_mode='reflect'),
                    # RandomMotion(),
                    RandomBiasField(),
                    ZNormalization(),
                    RandomNoise(),
                    RandomFlip(axes=(0, )),
                    # Exactly one spatial deformation, affine favoured 4:1.
                    OneOf({
                        RandomAffine(): 0.8,
                        RandomElasticDeformation(): 0.2,
                    }),
                ])
            else:
                training_transform = Compose([
                    CropOrPad((hp.crop_or_pad_size, hp.crop_or_pad_size,
                               hp.crop_or_pad_size),
                              padding_mode='reflect'),
                    ZNormalization(),
                ])
        elif hp.mode == '2d':
            # NOTE(review): this branch is currently identical to the '3d'
            # branch above (same transforms, same 3-element crop sizes) --
            # confirm that no 2d-specific handling was intended.
            if hp.aug:
                training_transform = Compose([
                    CropOrPad((hp.crop_or_pad_size), padding_mode='reflect'),
                    # RandomMotion(),
                    RandomBiasField(),
                    ZNormalization(),
                    RandomNoise(),
                    RandomFlip(axes=(0, )),
                    OneOf({
                        RandomAffine(): 0.8,
                        RandomElasticDeformation(): 0.2,
                    }),
                ])
            else:
                training_transform = Compose([
                    CropOrPad((hp.crop_or_pad_size, hp.crop_or_pad_size,
                               hp.crop_or_pad_size),
                              padding_mode='reflect'),
                    ZNormalization(),
                ])

        else:
            raise Exception('no such kind of mode!')

        return training_transform
Exemple #2
0
 def test_reproducibility_oneof(self):
     """A pipeline containing OneOf, replayed from history, matches the original."""
     first, second = self.get_subjects()
     pipeline = Compose([
         OneOf([RandomNoise(p=1.0),
                RandomSpike(num_spikes=3, p=1.0)]),
         RandomNoise(p=.5)
     ])
     out1 = pipeline(first)
     # Rebuild the exact transform/seed sequence from the recorded history.
     replay_transforms, replay_seeds = compose_from_history(history=out1.history)
     out2 = self.apply_transforms(second,
                                  trsfm_list=replay_transforms,
                                  seeds_list=replay_seeds)
     self.assertTensorEqual(out1.img.data, out2.img.data)
 def get_torchio_transformer(mask=False):
     """Build a RandomNoise transform; label masks get zero probability."""
     # Masks must never be corrupted by noise, so disable the transform.
     proba = 0 if mask else p
     return RandomNoise(std, proba, seed, is_tensor)
Exemple #4
0
    def __init__(self, transform1, m1, p1, transform2, m2, p2):
        ranges = {
            'flip': np.zeros(10),
            'affine': np.linspace(0, 180, 10),
            'noise': np.linspace(0, 0.5, 10),
            'blur': np.arange(10),
            'elasticD': np.zeros(10)
        }

        transforms = {
            'flip': lambda magnitude, p: RandomFlip(p=p),
            'affine':
            lambda magnitude, p: RandomAffine(degrees=(magnitude), p=p),
            'noise': lambda magnitude, p: RandomNoise(std=magnitude, p=p),
            'blur': lambda magnitude, p: RandomBlur(std=magnitude, p=p),
            'elasticD': lambda magnitude, p: RandomElasticDeformation(p=p)
        }

        self.transform1 = transforms[transform1]
        self.t1_input = transform1
        self.m1 = ranges[transform1][m1]
        self.m1_input = m1
        self.p1 = p1

        self.transform2 = transforms[transform2]
        self.t2_input = transform2
        self.m2 = ranges[transform2][m2]
        self.m2_input = m2
        self.p2 = p2

        self.kappa = 0.0
def initialize_transforms_simple(p=0.8):
    """Build a simple torchio augmentation pipeline.

    Each transform fires with probability ``p`` (flips, once applied, always
    flip). RandomAffine and RandomElasticDeformation are intentionally left
    out: elastic deformation noticeably slows down the dataloader.
    """
    augmentations = [
        RandomFlip(axes=(0, 1, 2), flip_probability=1, p=p),
        RandomMotion(degrees=10,
                     translation=10,
                     num_transforms=2,
                     image_interpolation='linear',
                     p=p),
        RandomAnisotropy(axes=(0, 1, 2), downsampling=2),
        RandomBiasField(coefficients=0.5, order=3, p=p),
        RandomBlur(std=(0, 2), p=p),
        RandomNoise(mean=0, std=(0, 5), p=p),
        RescaleIntensity((0, 255)),
    ]
    return tio.Compose(augmentations)
Exemple #6
0
    def test_transforms(self):
        """Smoke-test each transform on its own freshly fetched sample."""
        landmarks_dict = dict(
            t1=np.linspace(0, 100, 13),
            t2=np.linspace(0, 100, 13),
        )
        random_transforms = (
            RandomFlip(axes=(0, 1, 2), flip_probability=1),
            RandomNoise(),
            RandomBiasField(),
            RandomElasticDeformation(proportion_to_augment=1),
            RandomAffine(),
            RandomMotion(proportion_to_augment=1),
        )
        intensity_transforms = (
            Rescale(),
            ZNormalization(),
            HistogramStandardization(landmarks_dict=landmarks_dict),
        )
        # Random transforms first, then intensity transforms -- every
        # transform receives an untouched sample.
        for transform in random_transforms + intensity_transforms:
            transform(self.get_sample())
Exemple #7
0
def get_motion_transform(type='motion1'):
    """Return a Compose pipeline for the requested motion/elastic/noise setup.

    Args:
        type: one of 'motion1', 'elastic1_and_motion1', 'random_noise_1'.
            (The name shadows the builtin ``type``; kept so keyword callers
            keep working.)

    Returns:
        A torchio ``Compose`` pipeline.

    Raises:
        ValueError: if ``type`` matches none of the known configurations.
    """
    if 'motion1' in type:
        # The original code built a wider (1, 6)-range dict here and then
        # unconditionally overwrote it; only the effective (1, 4)-range
        # version is kept. The duplicated "keep_original" key is also gone.
        dico_params_mot = {
            "maxDisp": (1, 4),
            "maxRot": (1, 4),
            "noiseBasePars": (5, 20, 0.8),
            "swallowFrequency": (2, 6, 0.5),
            "swallowMagnitude": (3, 4),
            "suddenFrequency": (2, 6, 0.5),
            "suddenMagnitude": (3, 4),
            "verbose": False,
            "keep_original": True,
            "proba_to_augment": 1,
            "preserve_center_pct": 0.1,
            "compare_to_original": True,
            "oversampling_pct": 0,
            "correct_motion": False
        }

    if 'elastic1' in type:
        dico_elast = {
            'num_control_points': 6,
            'max_displacement': (30, 30, 30),
            'proportion_to_augment': 1,
            'image_interpolation': Interpolation.LINEAR
        }

    if type == 'motion1':
        transforms = Compose([
            RandomMotionFromTimeCourse(**dico_params_mot),
        ])
    elif type == 'elastic1_and_motion1':
        transforms = Compose([
            RandomElasticDeformation(**dico_elast),
            RandomMotionFromTimeCourse(**dico_params_mot)
        ])
    elif type == 'random_noise_1':
        transforms = Compose([RandomNoise(std=(0.020, 0.2))])
    else:
        # Previously an unknown type fell through to an UnboundLocalError
        # at ``return transforms``; fail loudly and clearly instead.
        raise ValueError('unknown transform type: {}'.format(type))

    return transforms
Exemple #8
0
 def test_transforms(self):
     """Chain every supported transform once over a single sample."""
     landmarks_dict = dict(
         t1=np.linspace(0, 100, 13),
         t2=np.linspace(0, 100, 13),
     )
     all_transforms = (
         CenterCropOrPad((9, 21, 30)),
         ToCanonical(),
         Resample((1, 1.1, 1.25)),
         RandomFlip(axes=(0, 1, 2), flip_probability=1),
         RandomMotion(proportion_to_augment=1),
         RandomGhosting(proportion_to_augment=1, axes=(0, 1, 2)),
         RandomSpike(),
         RandomNoise(),
         RandomBlur(),
         RandomSwap(patch_size=2, num_iterations=5),
         Lambda(lambda x: 1.5 * x, types_to_apply=INTENSITY),
         RandomBiasField(),
         Rescale((0, 1)),
         ZNormalization(masking_method='label'),
         HistogramStandardization(landmarks_dict=landmarks_dict),
         RandomElasticDeformation(proportion_to_augment=1),
         RandomAffine(),
         Pad((1, 2, 3, 0, 5, 6)),
         Crop((3, 2, 8, 0, 1, 4)),
     )
     sample = self.get_sample()
     # Feed the output of each transform into the next one.
     for transform in all_transforms:
         sample = transform(sample)
 def get_torchio_transformer(mask=False):
     """Return a RandomNoise transform; masks get probability zero."""
     # Never corrupt label masks with noise.
     return RandomNoise(mean=mean, std=std, p=0 if mask else p, seed=seed)
Exemple #10
0
 def test_reproducibility_no_seed(self):
     """Two unseeded applications must record different seeds and outputs."""
     noise = RandomNoise()
     first, second = self.get_subjects()
     out1, out2 = noise(first), noise(second)
     seed1 = out1.history[0][1]['seed']
     seed2 = out2.history[0][1]['seed']
     self.assertNotEqual(seed1, seed2)
     self.assertTensorNotEqual(out1.img.data, out2.img.data)
Exemple #11
0
 def test_reproducibility_from_history(self):
     """Replaying a transform from recorded history reproduces the output."""
     noise = RandomNoise()
     first, second = self.get_subjects()
     out1 = noise(first)
     replay_transforms, replay_seeds = compose_from_history(history=out1.history)
     out2 = self.apply_transforms(second,
                                  trsfm_list=replay_transforms,
                                  seeds_list=replay_seeds)
     self.assertTensorEqual(out1.img.data, out2.img.data)
Exemple #12
0
 def test_rng_state(self):
     """Re-seeding a transform must not freeze the global RNG streams."""
     noise = RandomNoise()
     first, second = self.get_subjects()
     out1 = noise(first)
     replay_seed = out1.history[0][1]['seed']
     before_torch, before_np = torch.rand(1).item(), np.random.rand()
     out2 = noise(second, seed=replay_seed)
     after_torch, after_np = torch.rand(1).item(), np.random.rand()
     # The global generators advanced between the two probes...
     self.assertNotEqual(before_torch, after_torch)
     self.assertNotEqual(before_np, after_np)
     # ...yet the transform itself was reproduced exactly.
     self.assertTensorEqual(out1.img.data, out2.img.data)
Exemple #13
0
def get_brats(
        data_root='/scratch/weina/dld_data/brats2019/MICCAI_BraTS_2019_Data_Training/',
        fold=1,
        seed=None,
        **kwargs):
    """Build training / validation BratsIter loaders for one CV fold.

    Args:
        data_root: root of the BraTS 2019 training data.
        fold: cross-validation fold index.
        seed: RNG seed; ``None`` (default) means "use the distributed rank,
            or 0 when distributed is not initialized". The original code
            evaluated that expression once at import time -- a default-
            argument pitfall that froze whatever rank/state existed then.
        **kwargs: accepted for call-site compatibility; unused here.

    Returns:
        ``(train, val)`` pair of BratsIter datasets.
    """
    if seed is None:
        # Resolved at call time so each worker sees its actual current rank.
        seed = (torch.distributed.get_rank()
                if torch.distributed.is_initialized() else 0)
    logging.debug("BratsIter:: fold = {}, seed = {}".format(fold, seed))
    # args for transforms
    d_size, h_size, w_size = 155, 240, 240
    input_size = [7, 223, 223]
    spacing = (d_size / input_size[0], h_size / input_size[1],
               w_size / input_size[2])
    Mean, Std, Max = read_brats_mean(fold, data_root)
    normalize = transforms.Normalize(mean=Mean, std=Std)
    training_transform = Compose([
        RandomBiasField(),
        RandomNoise(),
        ToCanonical(),
        Resample(spacing),
        RandomFlip(axes=(0, )),
        # Exactly one spatial deformation, affine favoured 4:1.
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
        normalize
    ])
    val_transform = Compose([Resample(spacing), normalize])

    train = BratsIter(csv_file=os.path.join(data_root, 'IDH_label',
                                            'train_fold_{}.csv'.format(fold)),
                      brats_path=os.path.join(data_root, 'all'),
                      brats_transform=training_transform,
                      shuffle=True)

    val = BratsIter(csv_file=os.path.join(data_root, 'IDH_label',
                                          'val_fold_{}.csv'.format(fold)),
                    brats_path=os.path.join(data_root, 'all'),
                    brats_transform=val_transform,
                    shuffle=False)
    return train, val
Exemple #14
0
def random_augment(x):
    '''Apply one randomly chosen augmentation to the input.

    With probability 0.95 a single transform (flip, elastic deformation,
    affine, noise or blur -- all equally weighted) is applied; otherwise the
    data passes through unchanged.

    Returns: Randomly augmented input
    '''
    candidates = {
        RandomFlip(): 1,
        RandomElasticDeformation(): 1,
        RandomAffine(): 1,
        RandomNoise(): 1,
        RandomBlur(): 1,
    }
    return augment(x, OneOf(candidates, p=0.95))
Exemple #15
0
def predict_majority(model, x, y):
    '''Augments all samples of the original data, and chooses majority predictions predicted by the model.

    Usage: predict_majority(model, x_original, y_original)
    '''
    # Reshape arrays; labels are 1-based, to_categorical wants 0-based.
    x = np.reshape(x, (len(x), 40, 40, 4, 1))
    y = to_categorical([label - 1 for label in y], 5)

    # One copy of the data per augmentation type (first entry: unmodified).
    batches = [
        x.copy(),
        augment(x.copy(), RandomFlip()),
        augment(x.copy(), RandomElasticDeformation()),
        augment(x.copy(), RandomAffine()),
        augment(x.copy(), RandomNoise()),
        augment(x.copy(), RandomBlur()),
    ]

    y_true = pred_list(y)
    predictions = [pred_list(model.predict(batch.copy())) for batch in batches]

    y_most = []
    correct = 0
    print(
        '\nEntry Number | Prediction (None, Flip, Elastic Deformation, Affine, Noise, Blur) | Actual'
    )
    for i in range(len(y_true)):
        preds = [column[i] for column in predictions]
        # Majority vote across the six prediction columns.
        most = max(set(preds), key=preds.count)
        y_most.append(most)
        print('Entry', i, '| Predictions:', preds, '| Most Occuring:', most,
              '| Correct:', y_true[i])
        if most == y_true[i]:
            correct += 1
    print('\nTest Accuracy: ', correct / len(y_true))
    print('Quadratic Weighted Kappa: ',
          cohen_kappa_score(y_true, y_most, weights='quadratic'))
Exemple #16
0
def training_network(landmarks, dataset, subjects):
    """Build training/validation SubjectsDatasets with a 90/10 subject split.

    ``landmarks`` feeds HistogramStandardization for the 'mri' image;
    ``dataset`` only contributes the total subject count used for the split.
    """
    training_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        RandomMotion(),
        HistogramStandardization({'mri': landmarks}),
        RandomBiasField(),
        ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        RandomFlip(axes=(0, )),
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
    ])

    # Validation shares the deterministic preprocessing, no augmentation.
    validation_transform = Compose([
        ToCanonical(),
        Resample(4),
        CropOrPad((48, 60, 48), padding_mode='reflect'),
        HistogramStandardization({'mri': landmarks}),
        ZNormalization(masking_method=ZNormalization.mean),
    ])

    num_training_subjects = int(0.9 * len(dataset))
    training_set = tio.SubjectsDataset(subjects[:num_training_subjects],
                                       transform=training_transform)
    validation_set = tio.SubjectsDataset(subjects[num_training_subjects:],
                                         transform=validation_transform)

    print('Training set:', len(training_set), 'subjects')
    print('Validation set:', len(validation_set), 'subjects')
    return training_set, validation_set
Exemple #17
0
# Script-level setup: training driver, transform pipeline and (optionally)
# the evaluation-time transform list ``tc``.
load_from_dir = [None]

doit = do_training(res_dir, res_name, verbose)

#transforms = get_motion_transform('random_noise_1')
transforms = get_motion_transform('AffFFT_random_noise')

if do_eval:

    from torchio.transforms import CropOrPad, RandomAffine, RescaleIntensity, ApplyMask, RandomBiasField, RandomNoise, \
        Interpolation, RandomAffineFFT

    from utils_file import get_parent_path, gfile, gdir
    from utils import get_ep_iter_from_res_name

    # Always evaluate with mild additive noise.
    tc = [RandomNoise(std=(0.020, 0.2))]

    # Optionally add an FFT-based affine (zoom and/or rotation); the suffix
    # encodes the chosen parameters in the result name.
    if add_affine_rot > 0 or add_affine_zoom > 0:
        if add_affine_zoom == 0: add_affine_zoom = 1  # 0 -> no affine so 1
        # tc.append(RandomAffine(scales=(add_affine_zoom, add_affine_zoom), degrees=(add_affine_rot, add_affine_rot),
        #                        image_interpolation = Interpolation.NEAREST ))
        # name_suffix = '_tAff_nearest_S{}R{}'.format(add_affine_zoom, add_affine_rot)
        tc.append(
            RandomAffineFFT(scales=(add_affine_zoom, add_affine_zoom),
                            degrees=(add_affine_rot, add_affine_rot),
                            oversampling_pct=0.2))

        name_suffix = '_tAff_fft_S{}R{}'.format(add_affine_zoom,
                                                add_affine_rot)
    else:
        name_suffix = '_raw'
Exemple #18
0
def define_transform(transform,
                     p,
                     blur_std=4,
                     motion_trans=10,
                     motion_deg=10,
                     motion_num=2,
                     biascoeff=0.5,
                     noise_std=0.25,
                     affine_trans=10,
                     affine_deg=10,
                     elastic_disp=7.5,
                     resample_size=1,
                     target_shape=0):
    """Build a torchio Compose containing the single named transform.

    Args:
        transform: one of 'blur', 'motion', 'biasfield', 'noise', 'affine',
            'elastic', 'resample'.
        p: probability of applying the transform.
        The remaining keyword arguments parameterize the individual
        transforms (std values, degrees, translations, shapes, ...).

    Returns:
        A ``Compose`` wrapping the selected transform(s).

    Raises:
        ValueError: for an unrecognized ``transform`` name (the original
            code fell through to an UnboundLocalError at the return).
    """
    ### (1) blur with a fixed std
    if transform == 'blur':
        transforms = [RandomBlur(std=(blur_std, blur_std), p=p, seed=None)]
    ### (2) motion artifacts
    elif transform == 'motion':
        transforms = [
            RandomMotion(degrees=motion_deg,
                         translation=motion_trans,
                         num_transforms=motion_num,
                         image_interpolation=Interpolation.LINEAR,
                         p=p,
                         seed=None),
        ]
    ### (3) random bias fields
    elif transform == 'biasfield':
        transforms = [
            RandomBiasField(coefficients=biascoeff, order=3, p=p, seed=None)
        ]
    ### (4) noise artifacts
    elif transform == 'noise':
        transforms = [
            RandomNoise(mean=0, std=(noise_std, noise_std), p=p, seed=None)
        ]
    ### (5) affine warps
    elif transform == 'affine':
        transforms = [
            RandomAffine(scales=(1, 1),
                         degrees=(affine_deg),
                         isotropic=False,
                         default_pad_value='otsu',
                         image_interpolation=Interpolation.LINEAR,
                         p=p,
                         seed=None)
        ]
    ### (6) elastic warps
    elif transform == 'elastic':
        # NOTE(review): elastic_disp is passed as num_control_points while
        # max_displacement is hard-coded to 20 -- these look swapped given
        # the parameter name; behavior kept as-is pending confirmation.
        transforms = [
            RandomElasticDeformation(num_control_points=elastic_disp,
                                     max_displacement=20,
                                     locked_borders=2,
                                     image_interpolation=Interpolation.LINEAR,
                                     p=p,
                                     seed=None),
        ]
    ### (7) resample to a new voxel size, then crop/pad to target_shape
    elif transform == 'resample':
        transforms = [
            Resample(target=resample_size,
                     image_interpolation=Interpolation.LINEAR,
                     p=p),
            CropOrPad(target_shape=target_shape, p=1)
        ]
    else:
        raise ValueError('unknown transform: {}'.format(transform))

    return Compose(transforms)
Exemple #19
0
def noise(mean, std, p=1):
    """Convenience wrapper: a RandomNoise transform with the given stats."""
    return RandomNoise(mean=mean, std=std, p=p)
Exemple #20
0
def get_data_loader(cfg: DictConfig, _) -> dict:
    """Build train / validation / test patch-based data loaders.

    Args:
        cfg: config with dataset name, paths, patch/batch/queue sizes.
        _: unused; kept for the loader-factory call signature.

    Returns:
        dict with 'data_loader_train', 'data_loader_val', 'data_loader_test'.
    """
    log = logging.getLogger(__name__)

    # Light augmentation applied to every subject.
    transform = Compose([
        RandomMotion(),
        RandomBiasField(),
        RandomNoise(),
        RandomFlip(axes=(0, )),
    ])

    log.info(f"Data loader selected: {cfg['dataset']}")
    try:
        log.info("Attempting to use defined data loader")
        dataset = getattr(datasets, cfg["dataset"])(cfg, transform)
    except AttributeError:
        # BUG FIX: a missing name on ``datasets`` raises AttributeError, not
        # ImportError, so the torchio fallback below was unreachable before.
        log.info(
            "Not a defined data loader... Attempting to use torchio loader")
        dataset = getattr(torchio.datasets,
                          cfg["dataset"])(root=cfg["base_path"],
                                          transform=transform,
                                          download=True)

    # Plot a few random subjects as a quick sanity check.
    for subject in random.sample(dataset._subjects, cfg["plot_number"]):
        plot_subject(
            subject,
            os.path.join(os.environ["OUTPUT_PATH"], cfg["save_plot_dir"],
                         subject["subject_id"]),
        )

    sampler = GridSampler(patch_size=cfg["patch_size"])
    samples_per_volume = len(sampler._compute_locations(
        dataset[0]))  # type: ignore

    with open_dict(cfg):
        cfg["size"] = dataset[0].spatial_shape

    # Fixed 21 test subjects; ~20% (at least 1) of the data for validation.
    val_size = max(1, int(0.2 * len(dataset)))
    test_set, train_set, val_set = split_dataset(
        dataset, [21, len(dataset) - val_size - 21, val_size])

    train_loader = __create_data_loader(
        train_set,
        queue_max_length=samples_per_volume * cfg["queue_length"],
        queue_samples_per_volume=samples_per_volume,
        sampler=sampler,
        verbose=log.level > 0,
        batch_size=cfg["batch"],
    )

    val_loader = __create_data_loader(
        val_set,
        queue_max_length=samples_per_volume * cfg["queue_length"],
        queue_samples_per_volume=samples_per_volume,
        sampler=sampler,
        verbose=log.level > 0,
        batch_size=cfg["batch"],
    )

    test_loader = __create_data_loader(
        test_set,
        queue_max_length=samples_per_volume * cfg["queue_length"],
        queue_samples_per_volume=samples_per_volume,
        sampler=sampler,
        verbose=log.level > 0,
        batch_size=cfg["batch"],
    )

    return {
        "data_loader_train": train_loader,
        "data_loader_val": val_loader,
        "data_loader_test": test_loader,
    }
Exemple #21
0
    num_images=100,
    size_range=(193, 229),
    force=False,
)

# Each element of subjects_list is an instance of torchio.Subject:
# subject = Subject(
#     one_image=torchio.Image(path_to_one_image, torchio.INTENSITY),
#     another_image=torchio.Image(path_to_another_image, torchio.INTENSITY),
#     a_label=torchio.Image(path_to_a_label, torchio.LABEL),
# )

# Define transforms for data normalization and augmentation
transforms = (
    ZNormalization(),
    RandomNoise(std=(0, 0.25)),
    RandomAffine(scales=(0.9, 1.1), degrees=10),
    RandomFlip(axes=(0, )),
)
transform = Compose(transforms)
subjects_dataset = ImagesDataset(subjects_list, transform)

# Run a benchmark for different numbers of workers
workers = range(mp.cpu_count() + 1)
for num_workers in workers:
    print('Number of workers:', num_workers)

    # Define the dataset as a queue of patches
    queue_dataset = Queue(
        subjects_dataset,
        queue_length,
Exemple #22
0
def get_motion_transform(type='motion1'):
    """Return a Compose pipeline for the requested motion/elastic/noise setup.

    Args:
        type: 'motion1', 'elastic1', 'elastic1_and_motion1',
            'random_noise_1' or 'AffFFT_random_noise'. (The name shadows the
            builtin ``type``; kept so keyword callers keep working.)

    Returns:
        A torchio ``Compose`` pipeline.

    Raises:
        ValueError: if ``type`` matches none of the known configurations.
    """
    if 'motion1' in type:
        from torchio.metrics import SSIM3D, MetricWrapper, MapMetricWrapper
        from torchio.metrics.ssim import functional_ssim
        from torchio.metrics.old_metrics import th_pearsonr, NCC
        from torch.nn import MSELoss, L1Loss

        # Metrics computed between original and motion-corrupted images;
        # most are restricted to the 'brain' mask.
        metrics = {
            "L1_map":
            MapMetricWrapper("L1_map",
                             lambda x, y: torch.abs(x - y),
                             average_method="mean",
                             mask_keys=['brain']),
            "SSIM_mask":
            SSIM3D(average_method="mean", mask_keys=["brain"]),
            "NCC":
            MetricWrapper("NCC_th_brain",
                          lambda x, y: th_pearsonr(x, y),
                          use_mask=True,
                          mask_key='brain'),
            "NCC2":
            MetricWrapper("NCC_th",
                          lambda x, y: th_pearsonr(x, y),
                          use_mask=False),
            "ssim_base":
            MapMetricWrapper('SSIM_base',
                             lambda x, y: ssim3D(x, y, size_average=True),
                             average_method="mean",
                             mask_keys=['brain'])
        }

        # The original built a wider (1, 6)-range dict (without "metrics")
        # first and then unconditionally overwrote it; only the effective
        # version is kept.
        dico_params_mot = {
            "maxDisp": (1, 4),
            "maxRot": (1, 4),
            "noiseBasePars": (5, 20, 0.8),
            "swallowFrequency": (2, 6, 0.5),
            "swallowMagnitude": (3, 4),
            "suddenFrequency": (2, 6, 0.5),
            "suddenMagnitude": (3, 4),
            "verbose": False,
            "proba_to_augment": 1,
            "preserve_center_pct": 0.1,
            "compare_to_original": True,
            "metrics": metrics,
            "oversampling_pct": 0,
            "correct_motion": False
        }

    if 'elastic1' in type:
        dico_elast = {
            'num_control_points': 6,
            'max_displacement': (30, 30, 30),
            'p': 1,
            'image_interpolation': Interpolation.LINEAR
        }

    if type == 'motion1':
        transforms = Compose([
            RandomMotionFromTimeCourse(**dico_params_mot),
        ])
    elif type == 'elastic1':
        transforms = Compose([
            RandomElasticDeformation(**dico_elast),
        ])
    elif type == 'elastic1_and_motion1':
        transforms = Compose([
            RandomElasticDeformation(**dico_elast),
            RandomMotionFromTimeCourse(**dico_params_mot)
        ])
    elif type == 'random_noise_1':
        transforms = Compose([RandomNoise(std=(0.020, 0.2))])
    elif type == 'AffFFT_random_noise':
        # The original had two branches for this same type; the second
        # (RandomAffine-based) always overwrote the first (RandomAffineFFT-
        # based), so only the effective one is kept. The discarded variant
        # was: RandomAffineFFT(scales=(0.8, 1.2), degrees=10,
        # oversampling_pct=0.2, p=0.75) followed by the same RandomNoise.
        transforms = Compose([
            RandomAffine(scales=(0.8, 1.2),
                         degrees=10,
                         p=0.75,
                         image_interpolation=Interpolation.NEAREST),
            RandomNoise(std=(0.020, 0.2))
        ])
    else:
        # Previously an unknown type surfaced as an UnboundLocalError at
        # ``return transforms``; fail loudly and clearly instead.
        raise ValueError('unknown transform type: {}'.format(type))

    return transforms
# Training configuration: patch-based sampling with a torchio Queue.
training_batch_size = 12
validation_batch_size = 6
patch_size = 32
samples_per_volume = 20
max_queue_length = 80

# Run name encodes the main hyper-parameters for TensorBoard bookkeeping.
training_name = "denseNet3D_torchIO_patch_{}_samples_{}_ADAMOptim_{}Epochs_BS{}_GlorotWeights_SSIM_1511".format(
    patch_size, samples_per_volume, Epochs, training_batch_size)
train_writer = SummaryWriter(
    os.path.join("runs", "Densenets", training_name + "_training"))
validation_writer = SummaryWriter(
    os.path.join("runs", "Densenets", training_name + "_validation"))

training_subjects, test_subjects, validation_subjects = train_test_val_split()

# Intensity rescaling everywhere; light random noise only on training data.
training_transform = Compose([RescaleIntensity((0, 1)), RandomNoise(p=0.05)])
validation_transform = Compose([RescaleIntensity((0, 1))])
test_transform = Compose([RescaleIntensity((0, 1))])

training_dataset = tio.SubjectsDataset(training_subjects,
                                       transform=training_transform)
validation_dataset = tio.SubjectsDataset(validation_subjects,
                                         transform=validation_transform)
test_dataset = tio.SubjectsDataset(test_subjects, transform=test_transform)
'''Patching'''

patches_training_set = tio.Queue(
    subjects_dataset=training_dataset,
    max_length=max_queue_length,
    samples_per_volume=samples_per_volume,
    sampler=tio.sampler.UniformSampler(patch_size),
Exemple #24
0
def main():
    """End-to-end driver: read config and splits, build torchio transforms,
    construct a Generic_UNet and launch training.

    Side effects: creates model/output folders, redirects stdout to a log
    file inside the model directory for the duration of the run.
    """
    opt = parsing_data()

    print("[INFO]Reading data")
    # Dictionary with data parameters for NiftyNet Reader
    if torch.cuda.is_available():
        print('[INFO] GPU available.')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        raise Exception(
            "[INFO] No GPU found or Wrong gpu id, please run without --cuda")

    # FOLDERS
    fold_dir = opt.model_dir
    fold_dir_model = os.path.join(fold_dir, 'models')
    if not os.path.exists(fold_dir_model):
        os.makedirs(fold_dir_model)
    # '{}' placeholders are filled in later (checkpoint id / subject id).
    save_path = os.path.join(fold_dir_model, './CP_{}.pth')

    output_path = os.path.join(fold_dir, 'output')
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    output_path = os.path.join(output_path, 'output_{}.nii.gz')

    # LOGGING
    # Redirect stdout to out.txt, or out_<n>.txt if earlier runs exist.
    orig_stdout = sys.stdout
    if os.path.exists(os.path.join(fold_dir, 'out.txt')):
        compt = 0
        while os.path.exists(
                os.path.join(fold_dir, 'out_' + str(compt) + '.txt')):
            compt += 1
        f = open(os.path.join(fold_dir, 'out_' + str(compt) + '.txt'), 'w')
    else:
        f = open(os.path.join(fold_dir, 'out.txt'), 'w')
    sys.stdout = f

    # SPLITS
    split_path_source = opt.dataset_split_source
    assert os.path.isfile(split_path_source), 'source file not found'

    split_path_target = opt.dataset_split_target
    assert os.path.isfile(split_path_target), 'target file not found'

    split_path = dict()
    split_path['source'] = split_path_source
    split_path['target'] = split_path_target

    path_file = dict()
    path_file['source'] = opt.path_source
    path_file['target'] = opt.path_target

    list_split = [
        'training',
        'validation',
    ]
    paths_dict = dict()

    # Build, per domain, one torchio Subject per id listed in the split CSV
    # (column 0 = subject id, column 1 = split name).
    for domain in ['source', 'target']:
        df_split = pd.read_csv(split_path[domain], header=None)
        list_file = dict()
        for split in list_split:
            list_file[split] = df_split[df_split[1].isin([split])][0].tolist()

        paths_dict_domain = {split: [] for split in list_split}
        for split in list_split:
            for subject in list_file[split]:
                subject_data = []
                for modality in MODALITIES[domain]:
                    subject_data.append(
                        Image(
                            modality,
                            path_file[domain] + subject + modality + '.nii.gz',
                            torchio.INTENSITY))
                # Labels only exist for train/validation subjects.
                if split in ['training', 'validation']:
                    subject_data.append(
                        Image('label',
                              path_file[domain] + subject + 'Label.nii.gz',
                              torchio.LABEL))

                    #subject_data[] =
                paths_dict_domain[split].append(Subject(*subject_data))
            print(domain, split, len(paths_dict_domain[split]))
        paths_dict[domain] = paths_dict_domain

    # PREPROCESSING
    # Same deterministic preprocessing everywhere; augmentation (affine,
    # noise, flip) only on the training pipelines.
    transform_training = dict()
    transform_validation = dict()
    for domain in ['source', 'target']:
        transform_training[domain] = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((144, 192, 48)),
            RandomAffine(scales=(0.9, 1.1), degrees=10),
            RandomNoise(std_range=(0, 0.10)),
            RandomFlip(axes=(0, )),
        )

        transform_training[domain] = Compose(transform_training[domain])

        transform_validation[domain] = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((144, 192, 48)),
        )
        transform_validation[domain] = Compose(transform_validation[domain])

    transform = {
        'training': transform_training,
        'validation': transform_validation
    }

    # MODEL
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

    print("[INFO] Building model")
    model = Generic_UNet(input_modalities=MODALITIES_TARGET,
                         base_num_features=32,
                         num_classes=nb_classes,
                         num_pool=4,
                         num_conv_per_stage=2,
                         feat_map_mul_on_downscale=2,
                         conv_op=torch.nn.Conv3d,
                         norm_op=torch.nn.InstanceNorm3d,
                         norm_op_kwargs=norm_op_kwargs,
                         nonlin=net_nonlin,
                         nonlin_kwargs=net_nonlin_kwargs,
                         convolutional_pooling=False,
                         convolutional_upsampling=False,
                         final_nonlin=torch.nn.Softmax(1))

    print("[INFO] Training")
    train(paths_dict, model, transform, device, save_path, opt)

    # Restore stdout and close the log file.
    sys.stdout = orig_stdout
    f.close()
Exemple #25
0
def compose_transforms() -> Compose:
    """Build the torchio preprocessing + augmentation pipeline.

    Returns
    -------
    Compose
        Preprocessing (ToCanonical, ZNormalization) followed by spatial
        and intensity augmentations, composed into a single transform.

    Notes
    -----
    Preprocessing options available in TorchIO:

    * Intensity: NormalizationTransform, RescaleIntensity,
      ZNormalization, HistogramStandardization
    * Spatial: CropOrPad, Crop, Pad, Resample, ToCanonical

    Augmentation options available in TorchIO:

    * Spatial: RandomFlip, RandomAffine, RandomElasticDeformation
    * Intensity: RandomMotion, RandomGhosting, RandomSpike,
      RandomBiasField, RandomBlur, RandomNoise, RandomSwap

    We should read and experiment with these, but for now mostly use
    the default values.
    """
    # The triple-quoted commentary used to sit *after* statements inside the
    # body, where it was a no-op string expression rather than a docstring;
    # it now lives in the real docstring above.
    print(f"{ctime()}:  Setting up transformations...")

    preprocessors = [
        ToCanonical(p=1),
        ZNormalization(masking_method=None,
                       p=1),  # alternately, use RescaleIntensity
    ]
    augments = [
        RandomFlip(axes=(0, 1, 2), flip_probability=0.5),
        RandomAffine(image_interpolation="linear",
                     p=0.8),  # default, compromise on speed + quality
        # this will be most processing intensive, leave out for now, see results
        # RandomElasticDeformation(p=1),
        RandomMotion(),
        RandomSpike(),
        RandomBiasField(),
        RandomBlur(),
        RandomNoise(),
    ]
    transform = Compose(preprocessors + augments)
    print(f"{ctime()}:  Transformations registered.")
    return transform
Exemple #26
0
 def random_stuff(self, seed=42):
     """Apply seeded RandomNoise to ``self.sample_subject``.

     Returns a tuple ``(value, seed)`` where ``value`` is the sum of the
     transformed image data and ``seed`` is the seed recorded in the
     transform history of the result.
     """
     noise = RandomNoise(std=(100, 100))
     noised = noise(self.sample_subject, seed=seed)
     total = noised.img.data.sum().item()
     recorded_seed = noised.history[0][1]['seed']
     return total, recorded_seed
Exemple #27
0
# Mock PyTorch model
# (identity callable standing in for a real network in this example)
model = lambda x: x

# Define training and patches sampling parameters
num_epochs = 4
patch_size = 128
queue_length = 100        # max patches held by the torchio Queue
samples_per_volume = 1    # patches extracted per subject volume
batch_size = 2

# Define transforms for data normalization and augmentation
transforms = (
    ZNormalization(),
    RandomAffine(scales=(0.9, 1.1), degrees=10),
    RandomNoise(std_range=(0, 0.25)),
    RandomFlip(axes=(0, )),
)
transform = Compose(transforms)

# Populate a list with dictionaries of paths
one_subject_dict = {
    'T1':
    dict(path='../BRATS2018_crop_renamed/LGG75_T1.nii.gz',
         type=torchio.INTENSITY),
    'T2':
    dict(path='../BRATS2018_crop_renamed/LGG75_T2.nii.gz',
         type=torchio.INTENSITY),
    'label':
    dict(path='../BRATS2018_crop_renamed/LGG75_Label.nii.gz',
         type=torchio.LABEL),
        HistogramStandardization,
        OneOf,
        Compose,
    )

    # BraTS volume dimensions; spacing rescales them to input_size after
    # Resample (d_size/7, h_size/223, w_size/223 voxels per output voxel).
    d_size, h_size, w_size = 155, 240, 240
    input_size = [7, 223, 223]
    spacing = (d_size / input_size[0], h_size / input_size[1],
               w_size / input_size[2])
    # Training augmentation pipeline; commented entries were tried and
    # left disabled.
    training_transform = Compose([
        # RescaleIntensity((0, 1)),  # so that there are no negative values for RandomMotion
        # RandomMotion(),
        # HistogramStandardization({MRI: landmarks}),
        RandomBiasField(),
        # ZNormalization(masking_method=ZNormalization.mean),
        RandomNoise(),
        ToCanonical(),
        Resample(spacing),
        # CropOrPad((48, 60, 48)),
        RandomFlip(axes=(0, )),
        # Exactly one of affine / elastic deformation, weighted 80/20.
        OneOf({
            RandomAffine(): 0.8,
            RandomElasticDeformation(): 0.2,
        }),
    ])

    fold = 1
    data_root = '../../dld_data/brats2019/MICCAI_BraTS_2019_Data_Training/'

    # Fix seeds for reproducibility (CPU and CUDA).
    torch.manual_seed(0)
    torch.cuda.manual_seed(0)
Exemple #29
0
# Experimentation script: successive reassignments below keep only the
# last value of tc / dico_p / t — earlier ones are dead code retained
# from prior trials.
tc = CenterCropOrPad(target_shape=(182, 218,212))
# NOTE(review): mask_name='maskk' looks like a typo for 'mask' — confirm.
tc = CropOrPad(target_shape=(182, 218,182), mask_name='maskk')
#dico_elast = {'num_control_points': 6, 'deformation_std': (30, 30, 30), 'max_displacement': (4, 4, 4),
#              'proportion_to_augment': 1, 'image_interpolation': Interpolation.LINEAR}
#tc = RandomElasticDeformation(**dico_elast)

dico_p = {'num_control_points': 8, 'deformation_std': (20, 20, 20), 'max_displacement': (4, 4, 4),
              'p': 1, 'image_interpolation': Interpolation.LINEAR}
dico_p = { 'num_control_points': 6,
           #'max_displacement': (20, 20, 20),
           'max_displacement': (30, 30, 30),
           'p': 1, 'image_interpolation': Interpolation.LINEAR }

# Only the final t (noise + elastic, default params) is used below.
t = Compose([ RandomElasticDeformation(**dico_p), tc])
t = Compose([RandomNoise(std=(0.020,0.2)),  RandomElasticDeformation(**dico_p) ])
t = Compose([RandomNoise(),  RandomElasticDeformation() ])

dataset = ImagesDataset(suj, transform=t); dataset0 = ImagesDataset(suj);
from torch.utils.data import DataLoader
dl = DataLoader(dataset, batch_size=2,
                collate_fn=lambda x: x,  # this creates a list of Subjects
                )
samples = next(iter(dl))

# Compare one transformed sample against its untransformed counterpart.
s = dataset[0]; s0=dataset0[0]
ov(s['image']['data'][0])

# Save 49 random draws, encoding the sampled noise level in the filename.
for i in range(1,50):
    s=dataset[0]
    dataset.save_sample(s, dict(image='/home/romain/QCcnn/random_motion/random{:.2}.nii'.format(100*s['random_noise'])))
Exemple #30
0
    'num_control_points': 8,
    'deformation_std': (20, 20, 20),
    'max_displacement': (4, 4, 4),
    'proportion_to_augment': 1,
    'image_interpolation': Interpolation.LINEAR
}
# Elastic-deformation parameters; this dict is only used by the first
# (immediately overwritten) Compose below.
dico_p = {
    'num_control_points': 6,
    #'max_displacement': (20, 20, 20),
    'max_displacement': (30, 30, 30),
    'proportion_to_augment': 1,
    'image_interpolation': Interpolation.LINEAR
}

# Second assignment wins: only RandomNoise is actually applied.
t = Compose([RandomElasticDeformation(**dico_p), tc])
t = Compose([RandomNoise(std=(0.020, 0.2))])

dataset = ImagesDataset(suj, transform=t)
s = dataset[0]
ov(s['image']['data'][0])

# Save 49 random draws, encoding the sampled noise level in the filename.
for i in range(1, 50):
    s = dataset[0]
    dataset.save_sample(
        s,
        dict(image='/home/romain/QCcnn/random_motion/random{:.2}.nii'.format(
            100 * s['random_noise'])))

# Sanity-check that the dataset's stored transform is the Compose we set.
t = dataset.get_transform()
type(t)
isinstance(t, Compose)