def run(self, args):
    """Mask every matched input file in place with the given brain mask.

    Args:
        args: parsed CLI arguments; ``args.mask`` is the mask path and
            ``args.input_files`` holds glob patterns for the files to mask.
    """
    brain_mask = mdt.load_brain_mask(os.path.realpath(args.mask))

    # Expand the glob patterns into the concrete list of files to process.
    matched_files = [path
                     for pattern in args.input_files
                     for path in glob.glob(pattern)]

    for input_file in matched_files:
        mdt.apply_mask_to_file(input_file, brain_mask)
def run(self, args, extra_args):
    """Apply the given brain mask to the matched input files.

    With ``args.overwrite`` set, each file is masked in place; otherwise the
    masked result is written next to the source file with a ``_masked``
    suffix before the extension.

    Args:
        args: parsed CLI arguments (``mask``, ``input_files``, ``overwrite``).
        extra_args: unused pass-through arguments.
    """
    resolved_mask = nifti_filepath_resolution(os.path.realpath(args.mask))
    brain_mask = mdt.load_brain_mask(resolved_mask)

    # Expand the glob patterns into the concrete list of files to process.
    matched_files = []
    for pattern in args.input_files:
        matched_files.extend(glob.glob(pattern))

    for input_file in matched_files:
        if args.overwrite:
            mdt.apply_mask_to_file(input_file, brain_mask)
        else:
            resolved = nifti_filepath_resolution(os.path.realpath(input_file))
            folder, basename, ext = split_image_path(resolved)
            output_fname = os.path.join(folder, basename + '_masked' + ext)
            mdt.apply_mask_to_file(input_file, brain_mask,
                                   output_fname=output_fname)
def view_mask(self):
    """Open the map viewer showing the DWI, the mask and the masked DWI.

    Reads the mask path from ``selectedOutputText`` and the image path from
    ``selectedImageText``, then launches the (non-blocking) plotting GUI on
    the middle slice of the third dimension.
    """
    # Add a trailing singleton axis so the 3d mask broadcasts over the
    # (presumably 4d) image volume — TODO confirm image dimensionality.
    mask = np.expand_dims(load_brain_mask(self.selectedOutputText.text()), axis=3)
    image_data = load_nifti(self.selectedImageText.text()).get_data()

    data = SimpleDataInfo({
        'Masked': image_data * mask,
        'DWI': image_data,
        'Mask': mask})

    config = MapPlotConfig()
    config.dimension = 2
    config.slice_index = image_data.shape[2] // 2  # middle slice
    config.maps_to_show = ['DWI', 'Masked', 'Mask']

    # app_exec=False: do not start a new Qt event loop, reuse the current one.
    start_gui(data=data, config=config, app_exec=False)
def get_mask(dataset_name):
    """Load the brain mask belonging to the given dataset.

    Args:
        dataset_name (str): ``'mgh'`` selects the MGH slice mask; any other
            value falls back to the Rheinland slice mask.

    Returns:
        the loaded brain mask (see ``mdt.load_brain_mask``).
    """
    if dataset_name == 'mgh':
        mask_fname = 'mgh_1003_slice_44_mask'
    else:
        mask_fname = 'rheinland_v3a_1_ms20_slice_36_mask'

    return mdt.load_brain_mask(input_pjoin(dataset_name, mask_fname))
from scipy.ndimage import binary_erosion
import mdt

__author__ = 'Robbert Harms'
__date__ = '2018-12-20'
__maintainer__ = 'Robbert Harms'
__email__ = '*****@*****.**'
__licence__ = 'LGPL v3'

# Path joiner rooted at the registration output folder of the uncertainty paper.
output_pjoin = mdt.make_path_joiner(
    '/home/robbert/phd-data/papers/uncertainty_paper/registration/')

# MNI-space FSL FA template mask; erode by one voxel to trim the brain edge.
mask = mdt.load_brain_mask(
    '/usr/share/data/fsl-mni152-templates/FMRIB58_FA_1mm.nii.gz')
mask = binary_erosion(mask, iterations=1)

# Gather the warped BinghamNODDI 'w_in0.w' point estimates and their std.
# maps per subject into one dict keyed '<subject>' / '<subject>.std'.
maps = {}
subjects_to_load = ['mgh_1005', 'mgh_1016', 'mgh_1017']
for subject in subjects_to_load:
    point_map = mdt.load_nifti(
        output_pjoin(subject, 'warped_BinghamNODDI_r1_w_in0.w')).get_data()
    std_map = mdt.load_nifti(
        output_pjoin(subject, 'warped_BinghamNODDI_r1_w_in0.w.std')).get_data()
    maps[subject + '.std'] = std_map
    maps[subject] = point_map

# Mask all loaded maps with the eroded template mask — presumably zeroing
# voxels outside the mask; verify against mdt.apply_mask's documentation.
mdt.apply_mask(maps, mask)
def pearson_correlation_coefficient(x, y, dataset, model_name, data_type): coef_mat = np.corrcoef(np.squeeze(x), np.squeeze(y)) print(dataset, model_name, data_type, coef_mat[0, 1]) percentages = [] for dataset in datasets: for model_name in model_names: print(dataset, model_name) mle, mle_std = get_mle_results(dataset, model_name) mcmc, mcmc_std = get_mcmc_results(dataset, model_name) wm_mask = mdt.load_brain_mask(output_base_pjoin(dataset, 'wm_mask')) # wm_mask = mdt.load_brain_mask(masks[dataset]) # wm_mask *= (np.abs(mle - mcmc) < 0.01)[..., 0] # wm_mask *= (mle >= 0.1)[..., 0] # wm_mask *= (mle <= 0.9)[..., 0] # wm_mask *= (mcmc >= 0.1)[..., 0] # wm_mask *= (mcmc <= 0.9)[..., 0] # wm_mask = mle >= 0.1 # wm_mask *= (np.abs(mle_std - mcmc_std) < 0.01)[..., 0] # items = {'mle': mle, 'mle_std': mle_std, 'mcmc': mcmc, 'mcmc_std': mcmc_std, # 'p_diff': np.abs(mle - mcmc), 'std_diff': np.abs(mle_std - mcmc_std), # 'std_threshold_map': np.abs(mle_std - mcmc_std) > 0.01