Code example #1
File: test_subject.py Project: soumickmj/torchio
 def test_no_sample(self):
     with tempfile.NamedTemporaryFile() as f:
         input_dict = {'image': ScalarImage(f.name)}
         subject = Subject(input_dict)
         with self.assertRaises(RuntimeError):
             RandomFlip()(subject)
Code example #2
File: test_subject.py Project: soumickmj/torchio
 def test_positional_args(self):
     with self.assertRaises(ValueError):
         with tempfile.NamedTemporaryFile() as f:
             Subject(ScalarImage(f.name))
Code example #3
File: test_subject.py Project: soumickmj/torchio
 def test_input_dict(self):
     with tempfile.NamedTemporaryFile() as f:
         input_dict = {'image': ScalarImage(f.name)}
         Subject(input_dict)
         Subject(**input_dict)
Code example #4
 def test_input_dict(self):
     with tempfile.NamedTemporaryFile() as f:
         input_dict = {'image': Image(f.name, INTENSITY)}
         Subject(input_dict)
         Subject(**input_dict)
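Examples #3 and #4 are the same test against the newer and older torchio APIs: Image(path, INTENSITY) became ScalarImage(path). As a minimal sketch against a recent torchio release (the path below is hypothetical), a plain dict and keyword arguments build the same Subject:

import torchio as tio

# Equivalent constructions; recent torchio assumed, 'image.nii.gz' is a hypothetical path.
subject = tio.Subject({'image': tio.ScalarImage('image.nii.gz')})
subject = tio.Subject(image=tio.ScalarImage('image.nii.gz'))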
Code example #5
File: example_lambda.py Project: zhangjh705/torchio
from torchio.transforms import Lambda
from torchio import Image, ImagesDataset, INTENSITY, LABEL, Subject

subject = Subject(
    Image('label', '~/Dropbox/MRI/t1_brain_seg.nii.gz', LABEL),
    Image('t1', '~/Dropbox/MRI/t1.nii.gz', INTENSITY),
)
subjects_list = [subject]

dataset = ImagesDataset(subjects_list)
sample = dataset[0]
transform = Lambda(lambda x: -1.5 * x, types_to_apply=INTENSITY)
transformed = transform(sample)
dataset.save_sample(transformed, {'t1': '/tmp/t1_lambda.nii'})
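Example #5 uses the old torchio API (positional Image names, ImagesDataset, save_sample). A rough sketch of the same Lambda workflow against a recent torchio release, where images are saved directly; paths are hypothetical:

import torchio as tio

# Recent-torchio sketch of example #5; paths are hypothetical.
subject = tio.Subject(
    t1=tio.ScalarImage('t1.nii.gz'),
    label=tio.LabelMap('t1_brain_seg.nii.gz'),
)
transform = tio.Lambda(lambda x: -1.5 * x, types_to_apply=[tio.INTENSITY])
transformed = transform(subject)  # only intensity images are modified; the label is untouched
transformed.t1.save('/tmp/t1_lambda.nii')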
Code example #6
    def __call__(self, subject: tio.Subject):
        # Sampler random parameters
        resection_params = self.get_params(
            self.volumes,
            self.volumes_range,
            self.sigmas_range,
            self.radii_ratio_range,
            self.angles_range,
            self.wm_lesion_p,
            self.clot_p,
        )
        # Convert images to SimpleITK
        with timer('Convert to SITK', self.verbose):
            t1_pre = subject[self.image_name].as_sitk()
            hemisphere = resection_params['hemisphere']
            gm_name = f'resection_gray_matter_{hemisphere}'
            gray_matter_image = subject[gm_name]
            gray_matter_mask = gray_matter_image.as_sitk()
            resectable_name = f'resection_resectable_{hemisphere}'
            resectable_tissue_image = subject[resectable_name]
            resectable_tissue_mask = resectable_tissue_image.as_sitk()

            add_wm = resection_params['add_wm_lesion']
            add_clot = resection_params['add_clot']
            use_csf_image = self.texture == 'csf' or add_wm or add_clot
            if use_csf_image:
                noise_image = subject['resection_noise'].as_sitk()
            else:
                noise_image = None

        # Simulate resection
        with timer('Resection', self.verbose):
            results = resect(
                t1_pre,
                gray_matter_mask,
                resectable_tissue_mask,
                resection_params['sigmas'],
                resection_params['radii'],
                noise_image=noise_image,
                shape=self.shape,
                texture=self.texture,
                angles=resection_params['angles'],
                noise_offset=resection_params['noise_offset'],
                sphere_poly_data=self.sphere_poly_data,
                wm_lesion=add_wm,
                clot=add_clot,
                simplex_path=self.simplex_path,
                center_ras=self.center_ras,
                verbose=self.verbose,
            )
        resected_brain, resection_mask, resection_center, clot_center = results

        # Store centers for visualization purposes
        resection_params['resection_center'] = resection_center
        resection_params['clot_center'] = clot_center

        # Convert from SITK
        with timer('Convert from SITK', self.verbose):
            resected_brain_array = self.sitk_to_array(resected_brain)
            resected_mask_array = self.sitk_to_array(resection_mask)
            image_resected = self.add_channels_axis(resected_brain_array)
            resection_label = self.add_channels_axis(resected_mask_array)
        assert image_resected.ndim == 4
        assert resection_label.ndim == 4

        # Update subject
        if self.delete_resection_keys:
            subject.remove_image('resection_gray_matter_left')
            subject.remove_image('resection_gray_matter_right')
            subject.remove_image('resection_resectable_left')
            subject.remove_image('resection_resectable_right')
            if use_csf_image:
                subject.remove_image('resection_noise')

        # Add resected image and label to subject
        if self.add_params:
            subject['random_resection'] = resection_params
        if self.keep_original:
            subject['image_original'] = copy.deepcopy(subject[self.image_name])
        subject[self.image_name].data = torch.from_numpy(image_resected)
        label = tio.LabelMap(
            tensor=resection_label,
            affine=subject[self.image_name].affine,
        )
        subject.add_image(label, 'label')

        if self.add_resected_structures:
            subject['resected_structures'] = self.get_resected_structures(
                subject, resection_mask)

        return subject
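Example #6 is a custom transform from a resection-simulation project; the only contract it relies on is that a transform is a callable taking and returning a tio.Subject. A minimal sketch of that pattern (all names below are illustrative, not part of the project above):

import torch
import torchio as tio

class NegateIntensity:
    # Any callable with this signature can be applied like a torchio transform.
    def __call__(self, subject: tio.Subject) -> tio.Subject:
        image = subject['t1']
        image.set_data(-image.data)            # replace the image tensor
        subject['params'] = {'negated': True}  # attach metadata, as with resection_params
        return subject

subject = tio.Subject(t1=tio.ScalarImage(tensor=torch.rand(1, 8, 8, 8)))
subject = NegateIntensity()(subject)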
Code example #7
def test_transform(unet,
                   transforms,
                   crop_im,
                   np_labels,
                   crop_cur_seg,
                   crop_next_input,
                   mean_arr,
                   std_arr,
                   device,
                   sav_dir,
                   transform_type,
                   val,
                   transform_next=0,
                   resample=0):
    ### transforms to apply to crop_im

    if not transform_next:
        inputs = crop_im

    else:
        inputs = crop_next_input

    inputs = torch.tensor(inputs, dtype=torch.float, requires_grad=False)

    crop_cur_seg_tensor = torch.tensor(crop_cur_seg,
                                       dtype=torch.float,
                                       requires_grad=False)
    crop_next_tensor = torch.tensor(crop_next_input,
                                    dtype=torch.float,
                                    requires_grad=False)
    crop_im_tensor = torch.tensor(crop_im,
                                  dtype=torch.float,
                                  requires_grad=False)

    #labels = torch.tensor(labels, dtype = torch.long, requires_grad=False)
    labels = np_labels
    labels = torch.tensor(labels, dtype=torch.float, requires_grad=False)

    subject_a = Subject(
        one_image=Image(None, torchio.INTENSITY,
                        inputs),  # *** must be tensors!!!
        a_segmentation=Image(None, torchio.LABEL, labels),
        a_cur_seg=Image(None, torchio.LABEL, crop_cur_seg_tensor),
        a_next_im=Image(None, torchio.INTENSITY, crop_next_tensor),
        a_cur_im=Image(None, torchio.INTENSITY, crop_im_tensor),
    )

    subjects_list = [subject_a]

    subjects_dataset = ImagesDataset(subjects_list, transform=transforms)
    subject_sample = subjects_dataset[0]
    """ MUST ALSO TRANSFORM THE SEED IF IS ELASTIC, rotational transformation!!!"""

    X = subject_sample['one_image']['data'].numpy()
    Y = subject_sample['a_segmentation']['data'].numpy()

    if resample:
        np_labels = Y[0]

        crop_cur_seg = subject_sample['a_cur_seg']['data'].numpy()[0]
        if not transform_next:
            crop_next_input = subject_sample['a_next_im']['data'].numpy()[0]
        else:
            crop_im = subject_sample['a_cur_im']['data'].numpy()[0]

    # if next_bool:
    #     batch_x = np.zeros((4, ) + np.shape(crop_im))
    #     batch_x[0,...] = X
    #     batch_x[1,...] = crop_cur_seg
    #     batch_x[2,...] = crop_next_input
    #     batch_x[3,...] = crop_next_seg
    #     batch_x = np.moveaxis(batch_x, -1, 1)
    #     batch_x = np.expand_dims(batch_x, axis=0)

    # else:
    if not transform_next:
        batch_x = np.zeros((3, ) + np.shape(crop_im))
        batch_x[0, ...] = X
        batch_x[1, ...] = crop_cur_seg
        batch_x[2, ...] = crop_next_input
        #batch_x[3,...] = crop_next_seg
        #batch_x = np.moveaxis(batch_x, -1, 1)
        batch_x = np.expand_dims(batch_x, axis=0)

    else:
        batch_x = np.zeros((3, ) + np.shape(crop_im))
        batch_x[0, ...] = crop_im
        batch_x[1, ...] = crop_cur_seg
        batch_x[2, ...] = X
        #batch_x[3,...] = crop_next_seg
        #batch_x = np.moveaxis(batch_x, -1, 1)
        batch_x = np.expand_dims(batch_x, axis=0)

    ### NORMALIZE
    batch_x = normalize(batch_x, mean_arr, std_arr)

    ### Convert to Tensor
    inputs_val = torch.tensor(batch_x,
                              dtype=torch.float,
                              device=device,
                              requires_grad=False)

    np_labels = np.expand_dims(np_labels, axis=0)
    np_labels[np_labels > 0] = 1
    labels = torch.tensor(np_labels,
                          dtype=torch.float,
                          device=device,
                          requires_grad=False)

    # forward pass to check validation
    output_train = unet(inputs_val)
    """ Convert back to cpu """
    output_val = np.moveaxis(output_train.cpu().data.numpy(), 1, -1)
    seg_train = np.moveaxis(np.argmax(output_val[0], axis=-1), 0, -1)

    normalized = (X - np.min(X)) / (np.max(X) - np.min(X))

    m_in = plot_max(normalized[0], ax=0, plot=0)
    m_cur_s = plot_max(crop_cur_seg, ax=0, plot=0)
    m_next = plot_max(crop_next_input, ax=0, plot=0)
    m_labels = plot_max(np_labels[0], ax=0, plot=0)
    m_OUT = plot_max(seg_train, ax=-1, plot=0)

    imsave(sav_dir + transform_type + '_val_' + str(val) + '_1_input.tif',
           np.uint8(m_in * 255))
    m_cur_s[m_cur_s == 50] = 255
    m_cur_s[m_cur_s == 10] = 50
    imsave(sav_dir + transform_type + '_val_' + str(val) + '_2_cur_seg.tif',
           np.uint8(m_cur_s))
    imsave(sav_dir + transform_type + '_val_' + str(val) + '_3_next_input.tif',
           np.uint8(m_next))
    imsave(sav_dir + transform_type + '_val_' + str(val) + '_4_labels.tif',
           np.uint8(m_labels) * 255)
    imsave(sav_dir + transform_type + '_val_' + str(val) + '_5_OUTPUT.tif',
           np.uint8(m_OUT) * 255)
    """ Training loss """
    """ ********************* figure out how to do spatial weighting??? """
    """ Training loss """
    #tracker.train_loss_per_batch.append(loss.cpu().data.numpy());  # Training loss
    #loss_train += loss.cpu().data.numpy()
    """ Calculate Jaccard on GPU """

    jacc = jacc_eval_GPU_torch(output_train, labels)
    jacc = jacc.cpu().data.numpy()
    print(jacc)

    return jacc, X[0]
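Example #7 builds tensor-backed images with the old Image(None, type, tensor) signature. Recent torchio versions take a tensor keyword instead; a minimal sketch (shapes illustrative, tensors must be 4D channels-first):

import torch
import torchio as tio

# Tensor-backed images in recent torchio; a 4D (C, W, H, D) tensor is required.
inputs = torch.rand(1, 16, 16, 16)
subject = tio.Subject(
    one_image=tio.ScalarImage(tensor=inputs),
    a_segmentation=tio.LabelMap(tensor=(inputs > 0.5).float()),
)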
Code example #8
def ImagesFromDataFrame(dataframe, 
                        psize, 
                        headers, 
                        q_max_length = 10, 
                        q_samples_per_volume = 1, 
                        q_num_workers = 2, 
                        q_verbose = False, 
                        sampler = 'label', 
                        train = True, 
                        augmentations = None, 
                        preprocessing = None, 
                        in_memory = False):
    # Finding the dimension of the dataframe for computational purposes later
    num_row, num_col = dataframe.shape
    # num_channels = num_col - 1 # for non-segmentation tasks, this might be different
    # changing the column indices to make it easier
    dataframe.columns = range(0,num_col)
    dataframe.index = range(0,num_row)
    # This list will later contain the list of subjects
    subjects_list = []

    channelHeaders = headers['channelHeaders']
    labelHeader = headers['labelHeader']
    predictionHeaders = headers['predictionHeaders']
    subjectIDHeader = headers['subjectIDHeader']
    
    sampler = sampler.lower() # for easier parsing

    # define the control points and swap axes for augmentation
    augmentation_patchAxesPoints = copy.deepcopy(psize)
    for i in range(len(augmentation_patchAxesPoints)):
        augmentation_patchAxesPoints[i] = max(round(augmentation_patchAxesPoints[i] / 10), 1) # always at least have 1

    # iterating through the dataframe
    resizeCheck = False
    for patient in range(num_row):
        # We need this dict for storing the meta data for each subject
        # such as different image modalities, labels, any other data
        subject_dict = {}
        subject_dict['subject_id'] = dataframe[subjectIDHeader][patient]
        # iterating through the channels/modalities/timepoints of the subject
        for channel in channelHeaders:
            # assigning the dict key to the channel
            if not in_memory:
                subject_dict[str(channel)] = Image(str(dataframe[channel][patient]), type=torchio.INTENSITY)
            else:
                img = sitk.ReadImage(str(dataframe[channel][patient]))
                array = np.expand_dims(sitk.GetArrayFromImage(img), axis=0)
                subject_dict[str(channel)] = Image(tensor=array, type=torchio.INTENSITY, path=dataframe[channel][patient])

            # if resize has been defined but resample is not (or is none)
            if not resizeCheck:
                if not(preprocessing is None) and ('resize' in preprocessing):
                    if (preprocessing['resize'] is not None):
                        resizeCheck = True
                        if not('resample' in preprocessing):
                            preprocessing['resample'] = {}
                            if not('resolution' in preprocessing['resample']):
                                preprocessing['resample']['resolution'] = resize_image_resolution(subject_dict[str(channel)].as_sitk(), preprocessing['resize'])
                        else:
                            print('WARNING: \'resize\' is ignored as \'resample\' is defined under \'data_processing\', this will be skipped', file = sys.stderr)
                else:
                    resizeCheck = True
        
        # # for regression
        # if predictionHeaders:
        #     # get the mask
        #     if (subject_dict['label'] is None) and (class_list is not None):
        #         sys.exit('The \'class_list\' parameter has been defined but a label file is not present for patient: ', patient)

        if labelHeader is not None:
            if not in_memory:
                subject_dict['label'] = Image(str(dataframe[labelHeader][patient]), type=torchio.LABEL)
            else:
                img = sitk.ReadImage(str(dataframe[labelHeader][patient]))
                array = np.expand_dims(sitk.GetArrayFromImage(img), axis=0)
                subject_dict['label'] = Image(tensor=array, type=torchio.LABEL, path=dataframe[labelHeader][patient])

            
            subject_dict['path_to_metadata'] = str(dataframe[labelHeader][patient])
        else:
            subject_dict['label'] = "NA"
            subject_dict['path_to_metadata'] = str(dataframe[channel][patient])
        
        # iterating through the values to predict of the subject
        valueCounter = 0
        for values in predictionHeaders:
            # assigning the dict key to the channel
            subject_dict['value_' + str(valueCounter)] = np.array(dataframe[values][patient])
            valueCounter = valueCounter + 1
        
        # Initializing the subject object using the dict
        subject = Subject(subject_dict)

        # padding image, but only for label sampler, because we don't want to pad for uniform
        if 'label' in sampler or 'weight' in sampler:
            psize_pad = list(np.asarray(np.round(np.divide(psize,2)), dtype=int))
            padder = Pad(psize_pad, padding_mode = 'symmetric') # for modes: https://numpy.org/doc/stable/reference/generated/numpy.pad.html
            subject = padder(subject)

        # Appending this subject to the list of subjects
        subjects_list.append(subject)

    augmentation_list = []

    # first, we want to do thresholding, followed by clipping, if it is present - required for inference as well
    if not(preprocessing is None):
        if train: # we want the crop to only happen during training
            if 'crop_external_zero_planes' in preprocessing:
                augmentation_list.append(global_preprocessing_dict['crop_external_zero_planes'](psize))
        for key in ['threshold','clip']:
            if key in preprocessing:
                augmentation_list.append(global_preprocessing_dict[key](min=preprocessing[key]['min'], max=preprocessing[key]['max']))
        
        # first, we want to do the resampling, if it is present - required for inference as well
        if 'resample' in preprocessing:
            if 'resolution' in preprocessing['resample']:
                # resample_split = str(aug).split(':')
                resample_values = tuple(np.array(preprocessing['resample']['resolution']).astype(np.float))
                if len(resample_values) == 2:
                    resample_values = tuple(np.append(resample_values,1))
                augmentation_list.append(Resample(resample_values))

        # next, we want to do the intensity normalize - required for inference as well
        if 'normalize' in preprocessing:
            augmentation_list.append(global_preprocessing_dict['normalize'])
        elif 'normalize_nonZero' in preprocessing:
            augmentation_list.append(global_preprocessing_dict['normalize_nonZero'])
        elif 'normalize_nonZero_masked' in preprocessing:
            augmentation_list.append(global_preprocessing_dict['normalize_nonZero_masked'])

    # other augmentations should only happen for training - and also setting the probabilities
    # for the augmentations
    if train and not(augmentations == None):
        for aug in augmentations:
            if aug != 'default_probability':
                actual_function = None

                if aug == 'flip':
                    if ('axes_to_flip' in augmentations[aug]):
                        print('WARNING: \'flip\' augmentation needs the key \'axis\' instead of \'axes_to_flip\'', file = sys.stderr)
                        augmentations[aug]['axis'] = augmentations[aug]['axes_to_flip']
                    actual_function = global_augs_dict[aug](axes = augmentations[aug]['axis'], p=augmentations[aug]['probability'])
                elif aug in ['rotate_90', 'rotate_180']:
                    for axis in augmentations[aug]['axis']:
                        augmentation_list.append(global_augs_dict[aug](axis=axis, p=augmentations[aug]['probability']))
                elif aug in ['swap', 'elastic']:
                    actual_function = global_augs_dict[aug](patch_size=augmentation_patchAxesPoints, p=augmentations[aug]['probability'])
                elif aug == 'blur':
                    actual_function = global_augs_dict[aug](std=augmentations[aug]['std'], p=augmentations[aug]['probability'])
                elif aug == 'noise':
                    actual_function = global_augs_dict[aug](mean=augmentations[aug]['mean'], std=augmentations[aug]['std'], p=augmentations[aug]['probability'])
                elif aug == 'anisotropic':
                    actual_function = global_augs_dict[aug](axes=augmentations[aug]['axis'], downsampling=augmentations[aug]['downsampling'], p=augmentations[aug]['probability'])
                else:
                    actual_function = global_augs_dict[aug](p=augmentations[aug]['probability'])
                if actual_function is not None:
                    augmentation_list.append(actual_function)
    
    if augmentation_list:
        transform = Compose(augmentation_list)
    else:
        transform = None
    subjects_dataset = torchio.SubjectsDataset(subjects_list, transform=transform)
    if not train:
        return subjects_dataset
    if sampler in ('weighted', 'weightedsampler', 'weightedsample'):
        sampler = global_sampler_dict[sampler](psize, probability_map = 'label')
    else:
        sampler = global_sampler_dict[sampler](psize)
    # all of these need to be read from model.yaml
    patches_queue = torchio.Queue(subjects_dataset, max_length=q_max_length,
                                  samples_per_volume=q_samples_per_volume,
                                  sampler=sampler, num_workers=q_num_workers,
                                  shuffle_subjects=True, shuffle_patches=True, verbose=q_verbose)
    return patches_queue
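The queue wiring at the end of example #8 maps onto the current torchio API roughly as below (paths and sizes are hypothetical; a DataLoader wrapping a Queue must itself use num_workers=0):

import torchio as tio
from torch.utils.data import DataLoader

# Bare-bones patch pipeline in the spirit of example #8; recent torchio assumed.
subject = tio.Subject(
    image=tio.ScalarImage('t1.nii.gz'),
    label=tio.LabelMap('seg.nii.gz'),
)
dataset = tio.SubjectsDataset([subject], transform=tio.ZNormalization())
sampler = tio.data.LabelSampler(patch_size=32, label_name='label')
queue = tio.Queue(
    dataset,
    max_length=10,          # q_max_length
    samples_per_volume=1,   # q_samples_per_volume
    sampler=sampler,
    num_workers=2,          # q_num_workers
    shuffle_subjects=True,
    shuffle_patches=True,
)
loader = DataLoader(queue, batch_size=2, num_workers=0)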
Code example #9
    def get_volume_torchio(self, idx, return_orig=False):
        subject_row = self.get_row(idx)
        dict_suj = dict()
        if not pd.isna(subject_row["image_filename"]):
            path_imgs = self.read_path(subject_row["image_filename"])
            if isinstance(path_imgs, list):
                imgs = ScalarImage(tensor=np.asarray(
                    [nb.load(p).get_fdata() for p in path_imgs]))
            else:
                imgs = ScalarImage(path_imgs)
            dict_suj["volume"] = imgs

        if "label_filename" in subject_row.keys() and not pd.isna(
                subject_row["label_filename"]):
            path_imgs = self.read_path(subject_row["label_filename"])
            if isinstance(path_imgs, list):
                imgs = LabelMap(tensor=np.asarray(
                    [nb.load(p).get_fdata() for p in path_imgs]))
            else:
                imgs = LabelMap(path_imgs)
            dict_suj["label"] = imgs
        sub = Subject(dict_suj)
        if return_orig or "transfo_order" not in self.df_data.columns:
            return sub
        else:
            trsfms, seeds = self.get_transformations(idx)
            for tr in trsfms.transform.transforms:
                if isinstance(tr, torchio.transforms.RandomLabelsToImage):
                    tr.label_key = "label"
                if isinstance(tr,
                              torchio.transforms.RandomMotionFromTimeCourse):
                    output_path = opj(self.out_tmp, "{}.png".format(idx))
                    if "fitpars" in self.df_data.columns:
                        fitpars = np.loadtxt(self.df_data["fitpars"][idx])
                        tr.fitpars = fitpars
                        tr.simulate_displacement = False
                    else:

                        res = sub
                        for trsfm, seed in zip(trsfms.transform.transforms,
                                               seeds):
                            if seed:
                                res = trsfm(res, seed)
                            else:
                                res = trsfm(res)
                        del res
                        fitpars = tr.fitpars
                    plt.figure()
                    plt.plot(fitpars.T)
                    plt.legend([
                        "trans_x", "trans_y", "trans_z", "rot_x", "rot_y",
                        "rot_z"
                    ])
                    plt.xlabel("Timesteps")
                    plt.ylabel("Magnitude")
                    plt.title("Motion parameters")
                    plt.savefig(output_path)
                    plt.close()
                    self.written_files.append(output_path)
            res = sub
            for trsfm, seed in zip(trsfms.transform.transforms, seeds):
                if seed:
                    res = trsfm(res, seed)
                else:
                    res = trsfm(res)
            #res = trsfms(sub, seeds)
            return res
Code example #10
"""
Another way of getting this result is by running the command-line tool:

$ torchio-transform ~/Dropbox/MRI/t1.nii.gz RandomMotion /tmp/t1_motion.nii.gz --seed 42 --kwargs "degrees=10 translation=10 num_transforms=3"

"""

from pprint import pprint
from torchio import Image, ImagesDataset, transforms, INTENSITY, LABEL, Subject

subject = Subject(
    label=Image('~/Dropbox/MRI/t1_brain_seg.nii.gz', LABEL),
    t1=Image('~/Dropbox/MRI/t1.nii.gz', INTENSITY),
)
subjects_list = [subject]

dataset = ImagesDataset(subjects_list)
sample = dataset[0]
transform = transforms.RandomMotion(
    seed=42,
    degrees=10,
    translation=10,
    num_transforms=3,
)
transformed = transform(sample)

pprint(transformed['t1']['random_motion_times'])
pprint(transformed['t1']['random_motion_degrees'])
pprint(transformed['t1']['random_motion_translation'])

dataset.save_sample(transformed, dict(t1='/tmp/t1_motion.nii.gz'))
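Recent torchio releases dropped both the per-transform seed argument and ImagesDataset.save_sample; seeding is global and images are saved directly. A rough modern equivalent of example #10 (paths hypothetical):

import torch
import torchio as tio

torch.manual_seed(42)  # replaces the per-transform seed argument
subject = tio.Subject(
    t1=tio.ScalarImage('t1.nii.gz'),
    label=tio.LabelMap('t1_brain_seg.nii.gz'),
)
transform = tio.RandomMotion(degrees=10, translation=10, num_transforms=3)
transformed = transform(subject)
print(transformed.history)  # applied transforms and their parameters
transformed.t1.save('/tmp/t1_motion.nii.gz')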
Code example #11
def main():
    opt = parsing_data()

    print("[INFO]Reading data")
    # Dictionary with data parameters for NiftyNet Reader
    if torch.cuda.is_available():
        print('[INFO] GPU available.')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        raise Exception(
            "[INFO] No GPU found or Wrong gpu id, please run without --cuda")

    # FOLDERS
    fold_dir = opt.model_dir
    fold_dir_model = os.path.join(fold_dir, 'models')
    if not os.path.exists(fold_dir_model):
        os.makedirs(fold_dir_model)
    save_path = os.path.join(fold_dir_model, './CP_{}.pth')

    output_path = os.path.join(fold_dir, 'output')
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    output_path = os.path.join(output_path, 'output_{}.nii.gz')

    # LOGGING
    orig_stdout = sys.stdout
    if os.path.exists(os.path.join(fold_dir, 'out.txt')):
        compt = 0
        while os.path.exists(
                os.path.join(fold_dir, 'out_' + str(compt) + '.txt')):
            compt += 1
        f = open(os.path.join(fold_dir, 'out_' + str(compt) + '.txt'), 'w')
    else:
        f = open(os.path.join(fold_dir, 'out.txt'), 'w')
    sys.stdout = f

    # SPLITS
    split_path_source = opt.dataset_split_source
    assert os.path.isfile(split_path_source), 'source file not found'

    split_path_target = opt.dataset_split_target
    assert os.path.isfile(split_path_target), 'target file not found'

    split_path = dict()
    split_path['source'] = split_path_source
    split_path['target'] = split_path_target

    path_file = dict()
    path_file['source'] = opt.path_source
    path_file['target'] = opt.path_target

    list_split = [
        'training',
        'validation',
    ]
    paths_dict = dict()

    for domain in ['source', 'target']:
        df_split = pd.read_csv(split_path[domain], header=None)
        list_file = dict()
        for split in list_split:
            list_file[split] = df_split[df_split[1].isin([split])][0].tolist()

        paths_dict_domain = {split: [] for split in list_split}
        for split in list_split:
            for subject in list_file[split]:
                subject_data = []
                for modality in MODALITIES[domain]:
                    subject_data.append(
                        Image(
                            modality,
                            path_file[domain] + subject + modality + '.nii.gz',
                            torchio.INTENSITY))
                if split in ['training', 'validation']:
                    subject_data.append(
                        Image('label',
                              path_file[domain] + subject + 'Label.nii.gz',
                              torchio.LABEL))

                    #subject_data[] =
                paths_dict_domain[split].append(Subject(*subject_data))
            print(domain, split, len(paths_dict_domain[split]))
        paths_dict[domain] = paths_dict_domain

    # PREPROCESSING
    transform_training = dict()
    transform_validation = dict()
    for domain in ['source', 'target']:
        transform_training[domain] = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((144, 192, 48)),
            RandomAffine(scales=(0.9, 1.1), degrees=10),
            RandomNoise(std_range=(0, 0.10)),
            RandomFlip(axes=(0, )),
        )

        transform_training[domain] = Compose(transform_training[domain])

        transform_validation[domain] = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((144, 192, 48)),
        )
        transform_validation[domain] = Compose(transform_validation[domain])

    transform = {
        'training': transform_training,
        'validation': transform_validation
    }

    # MODEL
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

    print("[INFO] Building model")
    model = Generic_UNet(input_modalities=MODALITIES_TARGET,
                         base_num_features=32,
                         num_classes=nb_classes,
                         num_pool=4,
                         num_conv_per_stage=2,
                         feat_map_mul_on_downscale=2,
                         conv_op=torch.nn.Conv3d,
                         norm_op=torch.nn.InstanceNorm3d,
                         norm_op_kwargs=norm_op_kwargs,
                         nonlin=net_nonlin,
                         nonlin_kwargs=net_nonlin_kwargs,
                         convolutional_pooling=False,
                         convolutional_upsampling=False,
                         final_nonlin=torch.nn.Softmax(1))

    print("[INFO] Training")
    train(paths_dict, model, transform, device, save_path, opt)

    sys.stdout = orig_stdout
    f.close()
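The training transforms in example #11 use older argument names. A sketch of the same chain for a recent torchio, where CenterCropOrPad became CropOrPad and RandomNoise's std_range became std (assumed API):

import torchio as tio

# Recent-torchio version of the augmentation chain in example #11.
transform_training = tio.Compose([
    tio.ToCanonical(),
    tio.ZNormalization(),
    tio.CropOrPad((144, 192, 48)),
    tio.RandomAffine(scales=(0.9, 1.1), degrees=10),
    tio.RandomNoise(std=(0, 0.10)),
    tio.RandomFlip(axes=(0,)),
])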
Code example #12
        #plt.plot(x,y)

    for xx in mvt_axes:
        fp[xx,:] = y
    return fp

def corrupt_data_both(x0, sigma=5, amplitude=20, method='gauss'):
    fp1 = corrupt_data(x0, sigma, amplitude=amplitude, method='gauss')
    fp2 = corrupt_data(30, 2, amplitude=-amplitude, method='step')
    fp = fp1 + fp2
    return fp


suj_type = 'brain'  # alternatives: 'synth', 'suj'
if suj_type=='suj':
    suj = [ Subject(image=Image('/data/romain/data_exemple/suj_150423/mT1w_1mm.nii', INTENSITY)), ]
    #suj = [ Subject(image=Image('/data/romain/data_exemple/s_S02_t1_mpr_sag_1iso_p2.nii.gz', INTENSITY)), ]
elif suj_type=='brain':
    suj = [ Subject(image=Image('/data/romain/data_exemple/suj_150423/mask_brain.nii', INTENSITY)), ]
elif suj_type=='synth':
    dr = '/data/romain/data_exemple/suj_274542/ROI_PVE_1mm/'
    label_list = [ "GM", "WM", "CSF", "both_R_Accu", "both_R_Amyg", "both_R_Caud", "both_R_Hipp", "both_R_Pall", "both_R_Puta", "both_R_Thal",
                   "cereb_GM", "cereb_WM", "skin", "skull", "background" ]

    suj = [Subject (label=Image(type=LABEL, path=[dr + ll + '.nii.gz' for ll in label_list]))]
    tlab = torchio.transforms.RandomLabelsToImage(
        label_key='label', image_key='image',
        mean=[0.6, 1, 0.2, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 0.6, 1, 1, 0.1, 0],
        default_std=0.001)

dico_params = { "fitpars": None, "oversampling_pct":0,
Code example #13
def main():
    opt = parsing_data()

    print("[INFO] Reading data.")
    # Dictionary with data parameters for NiftyNet Reader
    if torch.cuda.is_available():
        print('[INFO] GPU available.')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        raise Exception(
            "[INFO] No GPU found or Wrong gpu id, please run without --cuda")

    # FOLDERS
    fold_dir = opt.model_dir
    checkpoint_path = os.path.join(fold_dir, 'models', './CP_{}.pth')
    checkpoint_path = checkpoint_path.format(opt.epoch_infe)
    assert os.path.isfile(checkpoint_path), 'no checkpoint found'

    output_path = opt.output_dir
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    output_path = os.path.join(output_path, 'output_{}.nii.gz')

    # SPLITS
    split_path = opt.dataset_split
    assert os.path.isfile(split_path), 'split file not found'
    print('Split file found: {}'.format(split_path))

    # Reading csv file
    df_split = pd.read_csv(split_path, header=None)
    list_file = dict()
    list_split = ['inference', 'validation']
    for split in list_split:
        list_file[split] = df_split[df_split[1].isin([split.lower()
                                                      ])][0].tolist()

    # filing paths
    paths_dict = {split: [] for split in list_split}
    for split in list_split:
        for subject in list_file[split]:
            subject_data = []
            for modality in MODALITIES:
                subject_modality = opt.path_file + subject + modality + '.nii.gz'
                if os.path.isfile(subject_modality):
                    subject_data.append(
                        Image(modality, subject_modality, torchio.INTENSITY))
            if len(subject_data) > 0:
                paths_dict[split].append(Subject(*subject_data))

    transform_inference = (
        ToCanonical(),
        ZNormalization(),
    )
    transform_inference = Compose(transform_inference)

    # MODEL
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    dropout_op_kwargs = {'p': 0, 'inplace': True}
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

    print("[INFO] Building model.")
    model = Generic_UNet(input_modalities=['T1', 'FLAIR'],
                         base_num_features=32,
                         num_classes=opt.nb_classes,
                         num_pool=4,
                         num_conv_per_stage=2,
                         feat_map_mul_on_downscale=2,
                         conv_op=torch.nn.Conv3d,
                         norm_op=torch.nn.InstanceNorm3d,
                         norm_op_kwargs=norm_op_kwargs,
                         nonlin=net_nonlin,
                         nonlin_kwargs=net_nonlin_kwargs,
                         convolutional_pooling=False,
                         convolutional_upsampling=False,
                         final_nonlin=lambda x: x)

    paths_inf = paths_dict['inference'] + paths_dict['validation']
    inference_padding(paths_inf, model, transform_inference, device,
                      output_path, checkpoint_path, opt)
Code example #14
from torchio.transforms import (
    ZNormalization,
    RandomNoise,
    RandomFlip,
    RandomAffine,
)

# Define training and patches sampling parameters
num_epochs = 4
patch_size = 128
queue_length = 100
samples_per_volume = 1
batch_size = 2

# Populate a list with images
one_subject = Subject(
    T1=Image('../BRATS2018_crop_renamed/LGG75_T1.nii.gz', torchio.INTENSITY),
    T2=Image('../BRATS2018_crop_renamed/LGG75_T2.nii.gz', torchio.INTENSITY),
    label=Image('../BRATS2018_crop_renamed/LGG75_Label.nii.gz', torchio.LABEL),
)

# This subject doesn't have a T2 MRI!
another_subject = Subject(
    T1=Image('../BRATS2018_crop_renamed/LGG74_T1.nii.gz', torchio.INTENSITY),
    label=Image('../BRATS2018_crop_renamed/LGG74_Label.nii.gz', torchio.LABEL),
)

subjects = [
    one_subject,
    another_subject,
]
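As the comment in example #14 notes, subjects in one list need not share modalities; torchio only requires that each Subject be internally consistent. A current-API sketch with hypothetical paths:

import torchio as tio

one_subject = tio.Subject(
    T1=tio.ScalarImage('LGG75_T1.nii.gz'),
    T2=tio.ScalarImage('LGG75_T2.nii.gz'),
    label=tio.LabelMap('LGG75_Label.nii.gz'),
)
another_subject = tio.Subject(  # no T2 for this subject
    T1=tio.ScalarImage('LGG74_T1.nii.gz'),
    label=tio.LabelMap('LGG74_Label.nii.gz'),
)
dataset = tio.SubjectsDataset([one_subject, another_subject])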
Code example #15
File: test_subject.py Project: soumickmj/torchio
 def test_plot_one_image(self):
     subject = Subject(t1=ScalarImage(self.get_image_path('t1_plot')))
     subject.plot(show=False)
Code example #16
sell_col = ['L1', 'MSE', 'corr', 'ssim', 'ssim_all']

for rr in res:
    rr = rr.loc[:, sell_col]
    sns.pairplot(rr)


#test MOTION CATI

ss = [ Image('T1', '/home/romain/QCcnn/mask_mvt_val_cati_T1/s_S07_3DT1.nii.gz', INTENSITY),
         Image('T3', '/home/romain/QCcnn/mask_mvt_val_cati_T1/s_S07_3DT1.nii.gz', INTENSITY),]

suj = [[ Image('T1', '/home/romain/QCcnn/mask_mvt_val_cati_T1/s_S07_3DT1_float.nii.gz', INTENSITY), ]]
suj = [[Image('T1', '/data/romain/HCPdata/suj_150423/T1w_1mm.nii.gz', INTENSITY), ]]

ss = [Subject(Image('T1', '/home/romain/QCcnn/mask_mvt_val_cati_T1/s_S07_3DT1.nii.gz', INTENSITY) ) ]

suj = [Subject(ss) for ss in suj]

dico_params = {"maxDisp": (1, 4), "maxRot": (1, 4), "noiseBasePars": (5, 20, 0.8),
               "swallowFrequency": (2, 6, 0.5), "swallowMagnitude": (3, 4),
               "suddenFrequency": (2, 6, 0.5), "suddenMagnitude": (3, 4),
               "verbose": False, "keep_original": True, "proba_to_augment": 1,
               "preserve_center_pct": 0.1, "keep_original": True, "compare_to_original": True,
               "oversampling_pct": 0, "correct_motion": True}

fipar = pd.read_csv('/home/romain/QCcnn/mask_mvt_val_cati_T1/ssim_0.6956839561462402_sample00220_suj_cat12_s_S07_3DT1_mvt.csv', header=None)
dico_params['fitpars'] = fipar.values
t = RandomMotionFromTimeCourse(**dico_params)

dataset = ImagesDataset(suj, transform=Compose((CenterCropOrPad(target_shape=(182, 218,182)),t)))
Code example #17
    fp1 = gfile(dir_img, '^p1', {"items": 1})
    fp2 = gfile(dir_img, '^p2', {"items": 1})
    if len(fm) == 0:  #may be in cat12 subdir (like for HCP)
        fm = gfile(dir_img, '^brain_T1', {"items": 1})
        #dir_cat = gdir(dir_img,'cat12')
        #fm = gfile(dir_cat, '^mask_brain', {"items": 1})
        #fp1 = gfile(dir_cat, '^p1', {"items": 1})
        #fp2 = gfile(dir_cat, '^p2', {"items": 1})

    one_suj = {'image': Image(fin, INTENSITY), 'brain': Image(fm[0], LABEL)}
    if len(fp1) == 1:
        one_suj['p1'] = Image(fp1[0], LABEL)
    if len(fp2) == 1:
        one_suj['p2'] = Image(fp2[0], LABEL)

    subject = [Subject(one_suj) for i in range(0, nb_sample)]
    #subject = [ one_suj for i in range(0,nb_sample) ]
    print('input list is duplicated {} '.format(len(subject)))
    #subject = Subject(subject)
    dataset = ImagesDataset(subject, transform=transfo)

    for i in range(0, nb_sample):

        sample = dataset[i]  # if sampled n times, sample[0] is cumulative

        image_dict = sample['image']
        volume_path = image_dict['path']
        dd = volume_path.split('/')
        volume_name = dd[len(dd) - 2] + '_' + image_dict['stem']
        #nb_saved = image_dict['index'] #
Code example #18
def main():
    opt = parsing_data()

    print("[INFO] Reading data")
    # Dictionary with data parameters for NiftyNet Reader
    if torch.cuda.is_available():
        print('[INFO] GPU available.')
        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    else:
        raise Exception(
            "[INFO] No GPU found or Wrong gpu id, please run without --cuda")

    # FOLDERS
    fold_dir = opt.model_dir
    fold_dir_model = os.path.join(fold_dir, 'models')
    if not os.path.exists(fold_dir_model):
        os.makedirs(fold_dir_model)
    save_path = os.path.join(fold_dir_model, './CP_{}.pth')

    output_path = os.path.join(fold_dir, 'output')
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    output_path = os.path.join(output_path, 'output_{}.nii.gz')

    # LOGGING
    orig_stdout = sys.stdout
    if os.path.exists(os.path.join(fold_dir, 'out.txt')):
        compt = 0
        while os.path.exists(
                os.path.join(fold_dir, 'out_' + str(compt) + '.txt')):
            compt += 1
        f = open(os.path.join(fold_dir, 'out_' + str(compt) + '.txt'), 'w')
    else:
        f = open(os.path.join(fold_dir, 'out.txt'), 'w')
    #sys.stdout = f

    print("[INFO] Hyperparameters")
    print('Alpha: {}'.format(opt.alpha))
    print('Beta: {}'.format(opt.beta))
    print('Beta_DA: {}'.format(opt.beta_da))
    print('Weight Reg: {}'.format(opt.weight_crf))

    # SPLITS
    split_path_source = opt.dataset_split_source
    assert os.path.isfile(split_path_source), 'source file not found'

    split_path_target = opt.dataset_split_target
    assert os.path.isfile(split_path_target), 'target file not found'

    split_path = dict()
    split_path['source'] = split_path_source
    split_path['target'] = split_path_target

    path_file = dict()
    path_file['source'] = opt.path_source
    path_file['target'] = opt.path_target

    list_split = ['training', 'validation', 'inference']
    paths_dict = dict()

    for domain in ['source', 'target']:
        df_split = pd.read_csv(split_path[domain], header=None)
        list_file = dict()
        for split in list_split:
            list_file[split] = df_split[df_split[1].isin([split])][0].tolist()

        list_file['inference'] += list_file['validation']

        paths_dict_domain = {split: [] for split in list_split}
        for split in list_split:
            for subject in list_file[split]:
                subject_data = []
                for modality in MODALITIES[domain]:
                    subject_data.append(
                        Image(
                            modality,
                            path_file[domain] + subject + modality + '.nii.gz',
                            torchio.INTENSITY))
                if split in ['training', 'validation']:
                    if domain == 'source':
                        subject_data.append(
                            Image(
                                'label',
                                path_file[domain] + subject + 't1_seg.nii.gz',
                                torchio.LABEL))
                    else:
                        subject_data.append(
                            Image(
                                'scribble', path_file[domain] + subject +
                                't2scribble_cor.nii.gz', torchio.LABEL))
                    #subject_data[] =
                paths_dict_domain[split].append(Subject(*subject_data))
            print(domain, split, len(paths_dict_domain[split]))
        paths_dict[domain] = paths_dict_domain

    # PREPROCESSING
    transform_training = dict()
    transform_validation = dict()

    for domain in ['source', 'target']:
        transformations = (
            ToCanonical(),
            ZNormalization(),
            CenterCropOrPad((288, 128, 48)),
            RandomAffine(scales=(0.9, 1.1), degrees=10),
            RandomNoise(std_range=(0, 0.10)),
            RandomFlip(axes=(0, )),
        )
        transform_training[domain] = Compose(transformations)

    for domain in ['source', 'target']:
        transformations = (ToCanonical(), ZNormalization(),
                           CenterCropOrPad((288, 128, 48)))
        transform_validation[domain] = Compose(transformations)

    transform = {
        'training': transform_training,
        'validation': transform_validation
    }

    # MODEL
    norm_op_kwargs = {'eps': 1e-5, 'affine': True}
    net_nonlin = nn.LeakyReLU
    net_nonlin_kwargs = {'negative_slope': 1e-2, 'inplace': True}

    print("[INFO] Building model")
    model = UNet2D5(input_channels=1,
                    base_num_features=16,
                    num_classes=NB_CLASSES,
                    num_pool=4,
                    conv_op=nn.Conv3d,
                    norm_op=nn.InstanceNorm3d,
                    norm_op_kwargs=norm_op_kwargs,
                    nonlin=net_nonlin,
                    nonlin_kwargs=net_nonlin_kwargs)

    print("[INFO] Training")
    #criterion = DC_and_CE_loss({}, {})
    criterion = DC_CE(NB_CLASSES)

    train(paths_dict, model, transform, criterion, device, save_path, opt)