def initialize_transforms_simple(p=0.8):
    transforms = [
        RandomFlip(axes=(0, 1, 2), flip_probability=1, p=p),

        #RandomAffine(scales=(0.9, 1.1), degrees=(10), isotropic=False,
        #             default_pad_value='otsu', image_interpolation=Interpolation.LINEAR,
        #             p = p, seed=None),

        # *** SLOWS DOWN DATALOADER ***
        #RandomElasticDeformation(num_control_points = 7, max_displacement = 7.5,
        #                         locked_borders = 2, image_interpolation = Interpolation.LINEAR,
        #                         p = 0.5, seed = None),
        RandomMotion(degrees=10,
                     translation=10,
                     num_transforms=2,
                     image_interpolation='linear',
                     p=p),
        RandomAnisotropy(axes=(0, 1, 2), downsampling=2),
        RandomBiasField(coefficients=0.5, order=3, p=p),
        RandomBlur(std=(0, 2), p=p),
        RandomNoise(mean=0, std=(0, 5), p=p),
        RescaleIntensity((0, 255))
    ]
    transform = tio.Compose(transforms)
    return transform
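A minimal usage sketch for the function above, assuming TorchIO is imported as tio and the transforms used inside it are importable; the Subject wraps a synthetic tensor as a stand-in for a real scan.

import torch
import torchio as tio

# Synthetic single-channel volume: shape is (channels, x, y, z)
subject = tio.Subject(image=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)))

transform = initialize_transforms_simple(p=0.8)
augmented = transform(subject)        # apply the composed augmentations
print(augmented.image.data.shape)     # inspect the augmented tensor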
Example #2
    def test_transforms(self):
        landmarks_dict = dict(
            t1=np.linspace(0, 100, 13),
            t2=np.linspace(0, 100, 13),
        )
        transforms = (
            CenterCropOrPad((9, 21, 30)),
            ToCanonical(),
            Resample((1, 1.1, 1.25)),
            RandomFlip(axes=(0, 1, 2), flip_probability=1),
            RandomMotion(proportion_to_augment=1),
            RandomGhosting(proportion_to_augment=1, axes=(0, 1, 2)),
            RandomSpike(),
            RandomNoise(),
            RandomBlur(),
            RandomSwap(patch_size=2, num_iterations=5),
            Lambda(lambda x: 1.5 * x, types_to_apply=INTENSITY),
            RandomBiasField(),
            Rescale((0, 1)),
            ZNormalization(masking_method='label'),
            HistogramStandardization(landmarks_dict=landmarks_dict),
            RandomElasticDeformation(proportion_to_augment=1),
            RandomAffine(),
            Pad((1, 2, 3, 0, 5, 6)),
            Crop((3, 2, 8, 0, 1, 4)),
        )
        transformed = self.get_sample()
        for transform in transforms:
            transformed = transform(transformed)
Example #3
    def __init__(self, transform1, m1, p1, transform2, m2, p2):
        ranges = {
            'flip': np.zeros(10),
            'affine': np.linspace(0, 180, 10),
            'noise': np.linspace(0, 0.5, 10),
            'blur': np.arange(10),
            'elasticD': np.zeros(10)
        }

        transforms = {
            'flip': lambda magnitude, p: RandomFlip(p=p),
            'affine':
            lambda magnitude, p: RandomAffine(degrees=(magnitude), p=p),
            'noise': lambda magnitude, p: RandomNoise(std=magnitude, p=p),
            'blur': lambda magnitude, p: RandomBlur(std=magnitude, p=p),
            'elasticD': lambda magnitude, p: RandomElasticDeformation(p=p)
        }

        self.transform1 = transforms[transform1]
        self.t1_input = transform1
        self.m1 = ranges[transform1][m1]
        self.m1_input = m1
        self.p1 = p1

        self.transform2 = transforms[transform2]
        self.t2_input = transform2
        self.m2 = ranges[transform2][m2]
        self.m2_input = m2
        self.p2 = p2

        self.kappa = 0.0
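A stand-alone sketch of the same pattern, to make explicit that the stored dictionary values are factories that build a transform from a (magnitude, probability) pair; the names factories and magnitudes are illustrative and not part of the original class.

import numpy as np
from torchio.transforms import Compose, RandomAffine, RandomNoise

# Each entry is a factory: call it with (magnitude, p) to obtain a configured transform
factories = {
    'affine': lambda magnitude, p: RandomAffine(degrees=magnitude, p=p),
    'noise': lambda magnitude, p: RandomNoise(std=magnitude, p=p),
}
magnitudes = {'affine': np.linspace(0, 180, 10), 'noise': np.linspace(0, 0.5, 10)}

t1 = factories['affine'](magnitudes['affine'][3], p=0.5)  # rotations up to 60 degrees
t2 = factories['noise'](magnitudes['noise'][7], p=0.5)    # noise std up to ~0.39
policy = Compose([t1, t2])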
Example #4
def random_augment(x):
    '''Randomly augment input data.

    Returns: Randomly augmented input
    '''

    # Data augmentations to be used
    transforms_dict = {
        RandomFlip(): 1,
        RandomElasticDeformation(): 1,
        RandomAffine(): 1,
        RandomNoise(): 1,
        RandomBlur(): 1
    }

    # Create random transform, with a p chance to apply augmentation
    transform = OneOf(transforms_dict, p=0.95)
    return augment(x, transform)
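The augment helper is not shown here; as a hedged sketch, the same OneOf choice can also be applied directly to a TorchIO Subject, where each call picks exactly one of the weighted transforms and, with probability 1 - p, applies none at all.

import torch
import torchio as tio

one_of = tio.OneOf(
    {
        tio.RandomFlip(): 1,
        tio.RandomAffine(): 1,
        tio.RandomNoise(): 1,
        tio.RandomBlur(): 1,
    },
    p=0.95,
)

# Placeholder subject; real data would come from image files
subject = tio.Subject(image=tio.ScalarImage(tensor=torch.rand(1, 32, 32, 32)))
augmented = one_of(subject)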
Example #5
def predict_majority(model, x, y):
    '''Augments every sample of the original data and takes the majority vote over the model's predictions.

    Usage: predict_majority(model, x_original, y_original)
    '''

    # Reshape arrays
    x = np.reshape(x, (len(x), 40, 40, 4, 1))
    y = [label - 1 for label in y]  # shift labels to zero-based indices for to_categorical
    y = to_categorical(y, 5)

    # Predict majority
    x_flip = augment(x.copy(), RandomFlip())
    x_ed = augment(x.copy(), RandomElasticDeformation())
    x_affine = augment(x.copy(), RandomAffine())
    x_noise = augment(x.copy(), RandomNoise())
    x_blur = augment(x.copy(), RandomBlur())

    y_true = pred_list(y)
    y_pred = pred_list(model.predict(x.copy()))
    y_flip = pred_list(model.predict(x_flip.copy()))
    y_ed = pred_list(model.predict(x_ed.copy()))
    y_affine = pred_list(model.predict(x_affine.copy()))
    y_noise = pred_list(model.predict(x_noise.copy()))
    y_blur = pred_list(model.predict(x_blur.copy()))

    y_most = []
    correct = 0
    print(
        '\nEntry Number | Prediction (None, Flip, Elastic Deformation, Affine, Noise, Blur) | Actual'
    )
    for i in range(len(y_true)):
        preds = [
            y_pred[i], y_flip[i], y_ed[i], y_affine[i], y_noise[i], y_blur[i]
        ]
        most = max(set(preds), key=preds.count)
        y_most.append(most)
        print('Entry', i, '| Predictions:', preds, '| Most Occurring:', most,
              '| Correct:', y_true[i])
        if most == y_true[i]:
            correct += 1
    print('\nTest Accuracy: ', correct / len(y_true))
    print('Quadratic Weighted Kappa: ',
          cohen_kappa_score(y_true, y_most, weights='quadratic'))
Example #6
def blur(std, p=1):
    return RandomBlur(std=std, p=p)
Example #7
def compose_transforms() -> Compose:
    print(f"{ctime()}:  Setting up transformations...")
    """
    # Our Preprocessing Options available in TorchIO are:

    * Intensity
        - NormalizationTransform
        - RescaleIntensity
        - ZNormalization
        - HistogramStandardization
    * Spatial
        - CropOrPad
        - Crop
        - Pad
        - Resample
        - ToCanonical

    We should read up on and experiment with these, but for now we will just use
    a handful of them with their default values.

    """

    preprocessors = [
        ToCanonical(p=1),
        ZNormalization(masking_method=None,
                       p=1),  # alternatively, use RescaleIntensity
    ]
    """
    # Our Augmentation Options available in TorchIO are:

    * Spatial
        - RandomFlip
        - RandomAffine
        - RandomElasticDeformation

    * Intensity
        - RandomMotion
        - RandomGhosting
        - RandomSpike
        - RandomBiasField
        - RandomBlur
        - RandomNoise
        - RandomSwap



    We should read up on and experiment with these, but for now we will just use
    a handful of them with their default values.

    """
    augments = [
        RandomFlip(axes=(0, 1, 2), flip_probability=0.5),
        RandomAffine(image_interpolation="linear",
                     p=0.8),  # default, compromise on speed + quality
        # this is the most processing-intensive transform; leave it out for now and compare results
        # RandomElasticDeformation(p=1),
        RandomMotion(),
        RandomSpike(),
        RandomBiasField(),
        RandomBlur(),
        RandomNoise(),
    ]
    transform = Compose(preprocessors + augments)
    print(f"{ctime()}:  Transformations registered.")
    return transform
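A usage sketch under the assumption that the returned Compose feeds a standard TorchIO SubjectsDataset; the subjects below wrap synthetic tensors as stand-ins for real scans, and the dataset can then be wrapped in a regular PyTorch DataLoader.

import torch
import torchio as tio

subjects = [
    tio.Subject(img=tio.ScalarImage(tensor=torch.rand(1, 64, 64, 64)))
    for _ in range(4)
]

transform = compose_transforms()
dataset = tio.SubjectsDataset(subjects, transform=transform)
sample = dataset[0]               # the transform is applied per subject on access
print(sample.img.data.shape)      # torch.Size([1, 64, 64, 64])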
Example #8
def define_transform(transform,
                     p,
                     blur_std=4,
                     motion_trans=10,
                     motion_deg=10,
                     motion_num=2,
                     biascoeff=0.5,
                     noise_std=0.25,
                     affine_trans=10,
                     affine_deg=10,
                     elastic_disp=7.5,
                     resample_size=1,
                     target_shape=0):
    ### (1) try with different blur
    if transform == 'blur':
        transforms = [RandomBlur(std=(blur_std, blur_std), p=p, seed=None)]
        transforms = Compose(transforms)

    ### (2) try with different motion artifacts
    if transform == 'motion':
        transforms = [
            RandomMotion(degrees=motion_deg,
                         translation=motion_trans,
                         num_transforms=motion_num,
                         image_interpolation=Interpolation.LINEAR,
                         p=p,
                         seed=None),
        ]
        transforms = Compose(transforms)
    ### (3) with random bias fields
    if transform == 'biasfield':
        transforms = [
            RandomBiasField(coefficients=biascoeff, order=3, p=p, seed=None)
        ]
        transforms = Compose(transforms)

    ### (4) try with different noise artifacts
    if transform == 'noise':
        transforms = [
            RandomNoise(mean=0, std=(noise_std, noise_std), p=p, seed=None)
        ]
        transforms = Compose(transforms)

    ### (5) try with different warp (affine transformations)
    if transform == 'affine':
        transforms = [
            RandomAffine(scales=(1, 1),
                         degrees=(affine_deg),
                         isotropic=False,
                         default_pad_value='otsu',
                         image_interpolation=Interpolation.LINEAR,
                         p=p,
                         seed=None)
        ]
        transforms = Compose(transforms)

    ### (6) try with different warp (elastic transformations)
    if transform == 'elastic':
        transforms = [
            # elastic_disp drives the maximum displacement; num_control_points
            # must be an integer, so TorchIO's default of 7 is used here
            RandomElasticDeformation(num_control_points=7,
                                     max_displacement=elastic_disp,
                                     locked_borders=2,
                                     image_interpolation=Interpolation.LINEAR,
                                     p=p,
                                     seed=None),
        ]
        transforms = Compose(transforms)

    if transform == 'resample':
        transforms = [
            Resample(target=resample_size,
                     image_interpolation=Interpolation.LINEAR,
                     p=p),
            CropOrPad(target_shape=target_shape, p=1)
        ]

        transforms = Compose(transforms)

    return transforms
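A brief usage sketch for the factory above; the argument values are arbitrary, and the Interpolation enum and seed keyword follow the older TorchIO API used in this example.

# Build single-purpose pipelines for specific artifact types
blur_tf = define_transform('blur', p=1, blur_std=2)
motion_tf = define_transform('motion', p=0.5, motion_deg=5, motion_trans=5, motion_num=3)
noise_tf = define_transform('noise', p=0.75, noise_std=0.1)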
Example #9
znormed = transform(sample)

fig, ax = plt.subplots(dpi=100)
plot_histogram(ax, znormed.mri.data, label='Z-normed', alpha=1)
ax.set_title('Intensity values of one sample after z-normalization')
ax.set_xlabel('Intensity')
ax.grid()

training_transform = Compose([
    ToCanonical(),
    #  Resample(4),
    CropOrPad((112, 112, 48), padding_mode=0),  #reflect , original 112,112,48
    RandomMotion(num_transforms=6, image_interpolation='nearest', p=0.2),
    HistogramStandardization({'mri': landmarks}),
    RandomBiasField(p=0.2),
    RandomBlur(p=0.2),
    ZNormalization(masking_method=ZNormalization.mean),
    RandomFlip(axes=['inferior-superior'], flip_probability=0.2),
    #  RandomNoise(std=0.5, p=0.2),
    RandomGhosting(intensity=1.8, p=0.2),
    #  RandomNoise(),
    #  RandomFlip(axes=(0,)),
    #  OneOf({
    #      RandomAffine(): 0.8,
    #      RandomElasticDeformation(): 0.2,
    #  }),
])

validation_transform = Compose([
    ToCanonical(),
    #  Resample(4),
Example #10
def main():
    opt = parsing_data()

    print("[INFO]Reading data")
    # Dictionary with data parameters for NiftyNet Reader
    if torch.cuda.is_available():
        print('[INFO] GPU available.')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        raise Exception(
            "[INFO] No GPU found or wrong GPU id; please run without --cuda")

    # FOLDERS
    fold_dir = opt.model_dir
    fold_dir_model = os.path.join(fold_dir, 'models')
    if not os.path.exists(fold_dir_model):
        os.makedirs(fold_dir_model)
    save_path = os.path.join(fold_dir_model, './CP_{}.pth')

    output_path = os.path.join(fold_dir, 'output')
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    output_path = os.path.join(output_path, 'output_{}.nii.gz')

    # LOGGING
    orig_stdout = sys.stdout
    if os.path.exists(os.path.join(fold_dir, 'out.txt')):
        compt = 0
        while os.path.exists(
                os.path.join(fold_dir, 'out_' + str(compt) + '.txt')):
            compt += 1
        f = open(os.path.join(fold_dir, 'out_' + str(compt) + '.txt'), 'w')
    else:
        f = open(os.path.join(fold_dir, 'out.txt'), 'w')
    sys.stdout = f

    # SPLITS
    split_path = dict()
    split_path['control'] = opt.split_control
    split_path['augm_control'] = opt.split_control
    split_path['lesion'] = opt.split_lesion

    for dataset in DATASETS:
        assert os.path.isfile(
            split_path[dataset]), f'{dataset}: split not found'

    path_file = dict()
    path_file['control'] = opt.path_control
    path_file['augm_control'] = opt.path_control
    path_file['lesion'] = opt.path_lesion

    list_split = ['training', 'validation']
    paths_dict = dict()

    for dataset in DATASETS:
        df_split = pd.read_csv(split_path[dataset], header=None)
        list_file = dict()
        for split in list_split:
            list_file[split] = df_split[df_split[1].isin([split])][0].tolist()

        paths_dict_dataset = {split: [] for split in list_split}
        for split in list_split:
            for subject in list_file[split]:
                subject_data = []
                for modality in MODALITIES[dataset]:
                    subject_data.append(
                        Image(
                            modality, path_file[dataset] + subject + modality +
                            '.nii.gz', torchio.INTENSITY))
                if split in ['training', 'validation']:
                    subject_data.append(
                        Image('label',
                              path_file[dataset] + subject + 'Label.nii.gz',
                              torchio.LABEL))
                paths_dict_dataset[split].append(Subject(*subject_data))
            print(dataset, split, len(paths_dict_dataset[split]))
        paths_dict[dataset] = paths_dict_dataset

    # PREPROCESSING
    transform_training = dict()
    transform_validation = dict()
    for dataset in DATASETS:
        if dataset == 'augm_control':
            transform_training[dataset] = (
                Rescale((0, 1)),
                ToCanonical(),
                RandomMotion(),
                RandomGhosting(),
                RandomBiasField(),
                RandomBlur((0, 2)),
                ZNormalization(),
                CenterCropOrPad((144, 192, 144)),
                RandomAffine(scales=(0.9, 1.1), degrees=10),
                RandomNoise(std_range=(0, 0.10)),
                RandomFlip(axes=(0, )),
            )
            transform_training[dataset] = Compose(transform_training[dataset])

        else:
            transform_training[dataset] = (
                ToCanonical(),
                ZNormalization(),
                CenterCropOrPad((144, 192, 144)),
                RandomAffine(scales=(0.9, 1.1), degrees=10),
                RandomNoise(std_range=(0, 0.10)),
                RandomFlip(axes=(0, )),
            )
            transform_training[dataset] = Compose(transform_training[dataset])

        transform_validation[dataset] = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((144, 192, 144)),
        )
        transform_validation[dataset] = Compose(transform_validation[dataset])

    transform = {
        'training': transform_training,
        'validation': transform_validation
    }

    # MODEL
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

    print("[INFO] Building model")
    model = Generic_UNet(input_modalities=['T1', 'all'],
                         base_num_features=32,
                         num_classes=nb_classes,
                         num_pool=4,
                         num_conv_per_stage=2,
                         feat_map_mul_on_downscale=2,
                         conv_op=torch.nn.Conv3d,
                         norm_op=torch.nn.InstanceNorm3d,
                         norm_op_kwargs=norm_op_kwargs,
                         nonlin=net_nonlin,
                         nonlin_kwargs=net_nonlin_kwargs,
                         convolutional_pooling=False,
                         convolutional_upsampling=False,
                         final_nonlin=torch.nn.Softmax(1),
                         input_features={
                             'T1': 1,
                             'all': 4
                         })

    print("[INFO] Training")
    train(paths_dict, model, transform, device, save_path, opt)

    sys.stdout = orig_stdout
    f.close()
Example #11
def blur(parameters):
    return RandomBlur(std=parameters["std"], p=parameters["probability"])
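A minimal usage sketch, assuming parameters is a configuration dictionary with exactly the two keys the function reads.

parameters = {"std": (0, 2), "probability": 0.5}  # illustrative values
blur_transform = blur(parameters)                 # equivalent to RandomBlur(std=(0, 2), p=0.5)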