def _core_run(self, stopping_path, stopping_thr, seeding_path, seed_density, use_sh, pam, out_tract): stop, affine = load_nifti(stopping_path) classifier = ThresholdTissueClassifier(stop, stopping_thr) logging.info('classifier done') seed_mask, _ = load_nifti(seeding_path) seeds = \ utils.seeds_from_mask( seed_mask, density=[seed_density, seed_density, seed_density], affine=affine) logging.info('seeds done') direction_getter = pam if use_sh: direction_getter = \ DeterministicMaximumDirectionGetter.from_shcoeff( pam.shm_coeff, max_angle=30., sphere=pam.sphere) streamlines = LocalTracking(direction_getter, classifier, seeds, affine, step_size=.5) logging.info('LocalTracking initiated') tractogram = Tractogram(streamlines, affine_to_rasmm=np.eye(4)) save(tractogram, out_tract) logging.info('Saved {0}'.format(out_tract))
def run(self, static_image_file, moving_image_files, affine_matrix_file, out_dir='', out_file='transformed.nii.gz'): """ Parameters ---------- static_image_file : string Path of the static image file. moving_image_files : string Path of the moving image(s). It can be a single image or a folder containing multiple images. affine_matrix_file : string The text file containing the affine matrix for transformation. out_dir : string, optional Directory to save the transformed files (default ''). out_file : string, optional Name of the transformed file (default 'transformed.nii.gz'). It is recommended to use the flag --mix-names to prevent the output files from being overwritten. """ io = self.get_io_iterator() for static_image_file, moving_image_file, affine_matrix_file, \ out_file in io: # Loading the image data from the input files into object. static_image, static_grid2world = load_nifti(static_image_file) moving_image, moving_grid2world = load_nifti(moving_image_file) # Doing a sanity check for validating the dimensions of the input # images. ImageRegistrationFlow.check_dimensions(static_image, moving_image) # Loading the affine matrix. affine_matrix = np.loadtxt(affine_matrix_file) # Setting up the affine transformation object. img_transformation = AffineMap( affine=affine_matrix, domain_grid_shape=static_image.shape, domain_grid2world=static_grid2world, codomain_grid_shape=moving_image.shape, codomain_grid2world=moving_grid2world) # Transforming the image/ transformed = img_transformation.transform(moving_image) save_nifti(out_file, transformed, affine=static_grid2world)
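The same resampling can be reproduced outside the workflow with the AffineMap call used above. A minimal sketch, assuming hypothetical file names (static.nii.gz, moving.nii.gz, affine.txt):

import numpy as np
from dipy.align.imaffine import AffineMap
from dipy.io.image import load_nifti, save_nifti

# Hypothetical input files; substitute your own paths.
static, static_grid2world = load_nifti('static.nii.gz')
moving, moving_grid2world = load_nifti('moving.nii.gz')
affine_matrix = np.loadtxt('affine.txt')

# Resample the moving image onto the static grid using the saved affine.
affine_map = AffineMap(affine=affine_matrix,
                       domain_grid_shape=static.shape,
                       domain_grid2world=static_grid2world,
                       codomain_grid_shape=moving.shape,
                       codomain_grid2world=moving_grid2world)
transformed = affine_map.transform(moving)
save_nifti('transformed.nii.gz', transformed, affine=static_grid2world)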
def run(self, input_files, lb, ub=np.inf, out_dir='', out_mask='mask.nii.gz'):
    """ Workflow for creating a binary mask

    Parameters
    ----------
    input_files : string
        Path to image to be masked.
    lb : float
        Lower bound value.
    ub : float, optional
        Upper bound value (default Inf)
    out_dir : string, optional
        Output directory (default input file directory)
    out_mask : string, optional
        Name of the masked file (default 'mask.nii.gz')
    """
    if lb >= ub:
        logging.error('The lower bound (lb) must be smaller than the'
                      ' upper bound (ub).')
        return

    io_it = self.get_io_iterator()

    for input_path, out_mask_path in io_it:
        logging.info('Creating mask of {0}'.format(input_path))
        data, affine = load_nifti(input_path)
        mask = np.bitwise_and(data > lb, data < ub)
        save_nifti(out_mask_path, mask.astype(np.ubyte), affine)
        logging.info('Mask saved at {0}'.format(out_mask_path))
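For a single volume the same thresholding can be done directly with NumPy. A minimal sketch, assuming a hypothetical input b0.nii.gz and example bounds chosen for that image:

import numpy as np
from dipy.io.image import load_nifti, save_nifti

data, affine = load_nifti('b0.nii.gz')   # hypothetical input
lb, ub = 50, 2000                        # example bounds; pick values for your data

# Voxels strictly inside (lb, ub) become 1, everything else 0.
mask = np.bitwise_and(data > lb, data < ub)
save_nifti('mask.nii.gz', mask.astype(np.ubyte), affine)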
def _core_run(self, stopping_path, stopping_thr, seeding_path, seed_density, direction_getter, out_tract): stop, affine = load_nifti(stopping_path) classifier = ThresholdTissueClassifier(stop, stopping_thr) logging.info('classifier done') seed_mask, _ = load_nifti(seeding_path) seeds = \ utils.seeds_from_mask( seed_mask, density=[seed_density, seed_density, seed_density], affine=affine) logging.info('seeds done') streamlines = LocalTracking(direction_getter, classifier, seeds, affine, step_size=.5) logging.info('LocalTracking initiated') tractogram = Tractogram(streamlines, affine_to_rasmm=np.eye(4)) save(tractogram, out_tract) logging.info('Saved {0}'.format(out_tract))
def run(self, input_files, new_vox_size, order=1, mode='constant', cval=0, num_processes=1, out_dir='', out_resliced='resliced.nii.gz'): """Reslice data with new voxel resolution defined by ``new_vox_sz`` Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. new_vox_size : variable float new voxel size order : int, optional order of interpolation, from 0 to 5, for resampling/reslicing, 0 nearest interpolation, 1 trilinear etc.. if you don't want any smoothing 0 is the option you need (default 1) mode : string, optional Points outside the boundaries of the input are filled according to the given mode 'constant', 'nearest', 'reflect' or 'wrap' (default 'constant') cval : float, optional Value used for points outside the boundaries of the input if mode='constant' (default 0) num_processes : int, optional Split the calculation to a pool of children processes. This only applies to 4D `data` arrays. If a positive integer then it defines the size of the multiprocessing pool that will be used. If 0, then the size of the pool will equal the number of cores available. (default 1) out_dir : string, optional Output directory (default input file directory) out_resliced : string, optional Name of the resliced dataset to be saved (default 'resliced.nii.gz') """ io_it = self.get_io_iterator() for inputfile, outpfile in io_it: data, affine, vox_sz = load_nifti(inputfile, return_voxsize=True) logging.info('Processing {0}'.format(inputfile)) new_data, new_affine = reslice(data, affine, vox_sz, new_vox_size, order, mode=mode, cval=cval, num_processes=num_processes) save_nifti(outpfile, new_data, new_affine) logging.info('Resliced file save in {0}'.format(outpfile))
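Outside the workflow, reslice can be called directly on a loaded volume. A short sketch, assuming a hypothetical dwi.nii.gz and an example target voxel size:

from dipy.align.reslice import reslice
from dipy.io.image import load_nifti, save_nifti

data, affine, vox_sz = load_nifti('dwi.nii.gz', return_voxsize=True)
new_vox_sz = (2., 2., 2.)   # target voxel size in mm (example value)

# Trilinear interpolation (order=1), constant padding outside the volume.
new_data, new_affine = reslice(data, affine, vox_sz, new_vox_sz,
                               order=1, mode='constant', cval=0)
save_nifti('resliced.nii.gz', new_data, new_affine)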
def read_mapping(disp, domain_img, codomain_img, prealign=None): """ Read a syn registration mapping from a nifti file Parameters ---------- disp : str or Nifti1Image A file of image containing the mapping displacement field in each voxel Shape (x, y, z, 3, 2) domain_img : str or Nifti1Image codomain_img : str or Nifti1Image Returns ------- A :class:`DiffeomorphicMap` object. Notes ----- See :func:`write_mapping` for the data format expected. """ if isinstance(disp, str): disp_data, disp_affine = load_nifti(disp) if isinstance(domain_img, str): domain_img = nib.load(domain_img) if isinstance(codomain_img, str): codomain_img = nib.load(codomain_img) mapping = DiffeomorphicMap(3, disp_data.shape[:3], disp_grid2world=np.linalg.inv(disp_affine), domain_shape=domain_img.shape[:3], domain_grid2world=domain_img.affine, codomain_shape=codomain_img.shape, codomain_grid2world=codomain_img.affine, prealign=prealign) mapping.forward = disp_data[..., 0] mapping.backward = disp_data[..., 1] mapping.is_inverse = True return mapping
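The docstring refers to write_mapping for the on-disk layout; that function is not shown here, but a writer consistent with what read_mapping expects (forward field in [..., 0], backward field in [..., 1], overall shape (x, y, z, 3, 2)) could look like the sketch below. The stacking order is inferred from the reader above, not taken from the actual write_mapping implementation, and the helper name is hypothetical.

import numpy as np
from dipy.io.image import save_nifti

def write_mapping_sketch(mapping, fname, grid2world):
    """Save a DiffeomorphicMap's displacement fields as an (x, y, z, 3, 2) volume.

    Hypothetical helper mirroring read_mapping above; ``mapping.forward`` and
    ``mapping.backward`` are each (x, y, z, 3) displacement fields.
    """
    disp = np.stack([mapping.forward, mapping.backward], axis=-1)
    save_nifti(fname, disp, grid2world)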
def get_labels(self):
    """Labels are the image aparc-reduced.nii.gz, which is a modified
    version of the FreeSurfer label map aparc+aseg.mgz. The corpus callosum
    region is a combination of FreeSurfer labels 251-255. Remaining
    FreeSurfer labels were re-mapped and reduced so that they lie between
    0 and 88. To see the FreeSurfer region, label and name represented by
    each value, see label_info.txt in ~/.dipy/stanford_hardi.
    """
    current_location = os.path.dirname(os.path.abspath(__file__))
    # "atlases" directory sitting next to the parent directory of this file.
    path_atlas_dir = os.path.join(
        os.sep.join(current_location.split(os.sep)[:-1]), "atlases")
    if self.atlas == "stanford":
        label_fname = get_fnames('stanford_labels')
        label_file = 'label_info.txt'
    elif self.atlas == "desikan":
        # Downloaded Desikan atlas from : https://neurovault.org/images/23262/
        label_fname = os.path.join(path_atlas_dir, "aparcaseg.nii.gz")
        label_file = os.path.join(path_atlas_dir, "FreeSurferColorLUT.txt")
    elif self.atlas == "destrieux":
        # Downloaded Destrieux atlas from : https://neurovault.org/images/23264/
        label_fname = os.path.join(path_atlas_dir, "aparc.a2009saseg.nii.gz")
        label_file = os.path.join(path_atlas_dir, "FreeSurferColorLUT.txt")

    # Creating dictionary from label file
    self.labels_all = dict()
    with open(label_file, "r") as f:
        if self.atlas == "stanford":
            for line in f:
                split_line = line.split(",")
                if split_line[0].isnumeric():
                    self.labels_all[int(split_line[0])] = split_line[2][2:-2]
        elif self.atlas == "desikan":
            for line in f:
                split_line = line.split()
                if len(split_line) > 0:
                    if split_line[0].isnumeric():
                        self.labels_all[int(split_line[0])] = split_line[1]
    self.labels_all.pop(0, None)
    self.labels, self.labels_affine, self.labels_voxel_size = load_nifti(
        label_fname, return_voxsize=True)
def test_stats(): with TemporaryDirectory() as out_dir: data_path, bval_path, bvec_path = get_fnames('small_101D') volume, affine = load_nifti(data_path) mask = np.ones_like(volume[:, :, :, 0], dtype=np.uint8) mask_path = join(out_dir, 'tmp_mask.nii.gz') save_nifti(mask_path, mask, affine) snr_flow = SNRinCCFlow(force=True) args = [data_path, bval_path, bvec_path, mask_path] snr_flow.run(*args, out_dir=out_dir) assert_true(os.path.exists(os.path.join(out_dir, 'product.json'))) assert_true( os.stat(os.path.join(out_dir, 'product.json')).st_size != 0) assert_true(os.path.exists(os.path.join(out_dir, 'cc.nii.gz'))) assert_true(os.stat(os.path.join(out_dir, 'cc.nii.gz')).st_size != 0) assert_true(os.path.exists(os.path.join(out_dir, 'mask_noise.nii.gz'))) assert_true( os.stat(os.path.join(out_dir, 'mask_noise.nii.gz')).st_size != 0) snr_flow._force_overwrite = True snr_flow.run(*args, out_dir=out_dir) assert_true(os.path.exists(os.path.join(out_dir, 'product.json'))) assert_true( os.stat(os.path.join(out_dir, 'product.json')).st_size != 0) assert_true(os.path.exists(os.path.join(out_dir, 'cc.nii.gz'))) assert_true(os.stat(os.path.join(out_dir, 'cc.nii.gz')).st_size != 0) assert_true(os.path.exists(os.path.join(out_dir, 'mask_noise.nii.gz'))) assert_true( os.stat(os.path.join(out_dir, 'mask_noise.nii.gz')).st_size != 0) snr_flow._force_overwrite = True snr_flow.run(*args, bbox_threshold=(0.5, 1, 0, 0.15, 0, 0.2), out_dir=out_dir) assert_true(os.path.exists(os.path.join(out_dir, 'product.json'))) assert_true( os.stat(os.path.join(out_dir, 'product.json')).st_size != 0) assert_true(os.path.exists(os.path.join(out_dir, 'cc.nii.gz'))) assert_true(os.stat(os.path.join(out_dir, 'cc.nii.gz')).st_size != 0) assert_true(os.path.exists(os.path.join(out_dir, 'mask_noise.nii.gz'))) assert_true( os.stat(os.path.join(out_dir, 'mask_noise.nii.gz')).st_size != 0)
def check_for_fa(outpath, subject, getdata=False):
    """Check for fa files ('bmfa') in the specified outpath folder.

    Returns the fa path whether it exists or not, a flag saying whether it
    was found, and the fa nifti if requested.
    """
    if os.path.isdir(outpath):
        outpathglobfa = os.path.join(outpath, subject + '_*fa.nii.gz')
    elif os.path.isfile(outpath):
        outpathglobfa = os.path.join(os.path.dirname(outpath),
                                     subject + '_*fa.nii.gz')
    outpathfa = glob.glob(outpathglobfa)
    if np.size(outpathfa) == 1:
        outpathfa = outpathfa[0]
        if getdata is True:
            fa = load_nifti(outpathfa)
            return outpathfa, True, fa
        else:
            return outpathfa, True, None
    elif np.size(outpathfa) == 0:
        # No match found: return the default expected path ('bmfa') so the
        # caller knows where the file would be written.
        outpathfa = outpathglobfa.replace("*fa", "bmfa")
        return outpathfa, False, None
def run(self, input_files, new_vox_size, order=1, mode='constant', cval=0, num_processes=1, out_dir='', out_resliced='resliced.nii.gz'): """Reslice data with new voxel resolution defined by ``new_vox_sz`` Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. new_vox_size : variable float new voxel size. order : int, optional order of interpolation, from 0 to 5, for resampling/reslicing, 0 nearest interpolation, 1 trilinear etc.. if you don't want any smoothing 0 is the option you need. mode : string, optional Points outside the boundaries of the input are filled according to the given mode 'constant', 'nearest', 'reflect' or 'wrap'. cval : float, optional Value used for points outside the boundaries of the input if mode='constant'. num_processes : int, optional Split the calculation to a pool of children processes. This only applies to 4D `data` arrays. Default is 1. If < 0 the maximal number of cores minus |num_processes + 1| is used (enter -1 to use as many cores as possible). 0 raises an error. out_dir : string, optional Output directory. (default current directory) out_resliced : string, optional Name of the resliced dataset to be saved. """ io_it = self.get_io_iterator() for inputfile, outpfile in io_it: data, affine, vox_sz = load_nifti(inputfile, return_voxsize=True) logging.info('Processing {0}'.format(inputfile)) new_data, new_affine = reslice(data, affine, vox_sz, new_vox_size, order, mode=mode, cval=cval, num_processes=num_processes) save_nifti(outpfile, new_data, new_affine) logging.info('Resliced file save in {0}'.format(outpfile))
def run(self, input_files, sigma=0, out_dir='', out_denoised='dwi_nlmeans.nii.gz'): """ Workflow wrapping the nlmeans denoising method. It applies nlmeans denoise on each file found by 'globing' ``input_files`` and saves the results in a directory specified by ``out_dir``. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. sigma : float, optional Sigma parameter to pass to the nlmeans algorithm (default: auto estimation). out_dir : string, optional Output directory (default input file directory) out_denoised : string, optional Name of the resulting denoised volume (default: dwi_nlmeans.nii.gz) """ io_it = self.get_io_iterator() for fpath, odenoised in io_it: if self._skip: shutil.copy(fpath, odenoised) logging.warning('Denoising skipped for now.') else: logging.info('Denoising {0}'.format(fpath)) data, affine, image = load_nifti(fpath, return_img=True) if sigma == 0: logging.info('Estimating sigma') sigma = estimate_sigma(data) logging.debug('Found sigma {0}'.format(sigma)) denoised_data = nlmeans(data, sigma) save_nifti(odenoised, denoised_data, affine, image.header) logging.info('Denoised volume saved as {0}'.format(odenoised))
def run(self, input_files, slice_axis=2, n_points=3, out_dir='',
        out_unring='dwi_unrig.nii.gz'):
    r"""Workflow for applying Gibbs Ringing method.

    Parameters
    ----------
    input_files : string
        Path to the input volumes. This path may contain wildcards to
        process multiple inputs at once.
    slice_axis : int, optional
        Data axis corresponding to the number of acquired slices.
        Default is set to the third axis (2). Could be (0, 1, or 2).
    n_points : int, optional
        Number of neighbour points to access local TV (see note).
        Default is set to 3.
    out_dir : string, optional
        Output directory (default input file directory)
    out_unring : string, optional
        Name of the resulting denoised volume (default: dwi_unrig.nii.gz)

    References
    ----------
    .. [1] Neto Henriques, R., 2018. Advanced Methods for Diffusion MRI
       Data Analysis and their Application to the Healthy Ageing Brain
       (Doctoral thesis). https://doi.org/10.17863/CAM.29356
    .. [2] Kellner E, Dhital B, Kiselev VG, Reisert M. Gibbs-ringing
       artifact removal based on local subvoxel-shifts. Magn Reson Med.
       2016 doi: 10.1002/mrm.26054.
    """
    io_it = self.get_io_iterator()
    for dwi, ounring in io_it:
        logging.info('Unringing %s', dwi)
        data, affine, image = load_nifti(dwi, return_img=True)
        unring_data = gibbs_removal(data, slice_axis=slice_axis,
                                    n_points=n_points)
        save_nifti(ounring, unring_data, affine, image.header)
        logging.info('Denoised volume saved as %s', ounring)
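The underlying call is gibbs_removal; a minimal sketch on a single loaded series, with hypothetical file names:

from dipy.denoise.gibbs import gibbs_removal
from dipy.io.image import load_nifti, save_nifti

data, affine = load_nifti('dwi.nii.gz')          # hypothetical input

# slice_axis is the axis along which slices were acquired (0, 1 or 2);
# n_points controls the neighbourhood used for the local TV term.
unrung = gibbs_removal(data, slice_axis=2, n_points=3)
save_nifti('dwi_unring.nii.gz', unrung, affine)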
def make_tensorfit(data, mask, gtab, affine, subject, outpath, overwrite=False, forcestart=False, verbose=None): #Given dwi data, a mask, and other relevant information, creates the fa and saves it to outpath, unless #if it already exists, in which case it simply returns the fa from dipy.reconst.dti import TensorModel outpathbmfa, exists, _ = check_for_fa(outpath, subject, getdata=False) if exists and not forcestart: fa = load_nifti(outpathbmfa) fa_array = fa[0] if verbose: txt = "FA already computed at " + outpathbmfa print(txt) return outpathbmfa, fa_array else: if verbose: print('Calculating the tensor model from bval/bvec values of ', subject) tensor_model = TensorModel(gtab) t1 = time() if len(mask.shape) == 4: mask = mask[..., 0] tensor_fit = tensor_model.fit(data, mask) duration1 = time() - t1 if verbose: print(subject + ' DTI duration %.3f' % (duration1, )) save_nifti(outpathbmfa, tensor_fit.fa, affine) if verbose: print('Saving subject' + subject + ' at ' + outpathbmfa) return outpathbmfa, tensor_fit.fa
def getlabelmask(mypath, subject, verbose=None): labelsoption = glob.glob(mypath + '/' + subject + '/' + subject + '*labels.nii.gz') print(mypath + '/' + subject + '/' + subject + '*labels.nii.gz') if np.size(labelsoption) > 0: labelspath = labelsoption[0] labelsoption = glob.glob(mypath + '/*' + subject + '*labels.nii.gz') print((mypath + '/' + subject + '_labels_RAS.nii.gz')) if np.size(labelsoption) > 0: labelspath = labelsoption[0] elif os.path.exists(mypath + '/' + subject + '_labels_RAS.nii.gz'): labelspath = mypath + '/' + subject + '_labels_RAS.nii.gz' elif os.path.exists(mypath + '/Reg_' + subject + '_nii4D_brain_mask.nii.gz'): labelspath = mypath + '/Reg_' + subject + '_nii4D_brain_mask.nii.gz' elif os.path.exists(mypath + '/' + subject + '_chass_symmetric3_labels_RAS.nii.gz'): labelspath = mypath + '/' + subject + '_chass_symmetric3_labels_RAS.nii.gz' elif os.path.exists(mypath + '/' + subject + '_chass_symmetric3_labels_RAS_combined.nii.gz'): labelspath = mypath + '/' + subject + '_chass_symmetric3_labels_RAS_combined.nii.gz' elif os.path.exists(mypath + '/fa_labels_warp_' + subject + '_RAS.nii.gz'): labelspath = mypath + '/fa_labels_warp_' + subject + '_RAS.nii.gz' elif os.path.exists(mypath + '/labels/fa_labels_warp_' + subject + '_RAS.nii.gz'): labelspath = mypath + '/labels/fa_labels_warp_' + subject + '_RAS.nii.gz' elif os.path.exists(mypath + '/mask.nii.gz'): labelspath = mypath + '/mask.nii.gz' elif os.path.exists(mypath + '/mask.nii'): labelspath = mypath + '/mask.nii' if 'labelspath' in locals(): labels, affine_labels = load_nifti(labelspath) if verbose: print("Label mask taken from " + labelspath) else: txt = f"Mask for subject {subject} not found" raise Exception(txt) return labels, affine_labels, labelspath
def load_slice(fname, dname="data.nii.gz", xy_size=18, gtab=False, load=False): data, affine = load_nifti(os.path.join(fname, dname)) bvals, bvecs = read_bvals_bvecs(os.path.join(fname, 'bvals'), os.path.join(fname, 'bvecs')) # generate q-vectors qvecs = [] for bval, bvec in zip(bvals, bvecs): qvecs.append(bval * bvec) qvecs = np.stack(qvecs) if gtab: gt = gradient_table(bvals, bvecs) return data[data.shape[0] // 2 + 20 - xy_size:data.shape[0] // 2 + 20 + xy_size, data.shape[1] // 2 - xy_size:data.shape[1] // 2 + xy_size, data.shape[2] // 2], qvecs, gt return data[data.shape[0] // 2 - xy_size:data.shape[0] // 2 + xy_size, data.shape[1] // 2 - xy_size:data.shape[1] // 2 + xy_size, data.shape[2] // 2], qvecs
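The per-volume loop above just scales each b-vector by its b-value; the same q-vectors can be obtained in one broadcasted step, e.g. with example shell values:

import numpy as np

bvals = np.array([0., 1000., 1000., 2000.])          # example b-values
bvecs = np.array([[0., 0., 0.],
                  [1., 0., 0.],
                  [0., 1., 0.],
                  [0., 0., 1.]])

# Equivalent to stacking bval * bvec for every volume.
qvecs = bvals[:, None] * bvecs
print(qvecs.shape)   # (4, 3)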
def load_predict_dataset(savedir_preproc=savedir_preproc_val1, savedir_results_step1=savedir_results_val1): preprocs = load_brats.load_normalized_data(dir=savedir_preproc) # Load results from step1 list_results = os.listdir(savedir_results_step1) list_results.sort() for idx, dataset in enumerate(preprocs): name = dataset['name'] f_result_step1 = list_results[idx] if name != f_result_step1[:-12]: raise FileNotFoundError("Wrong File") prediction, _ = load_nifti(opj(savedir_results_step1, f_result_step1)) #%% Keep only the biggest found region in the brain cc = dataset['bbox_brain'] pred_in_shape = prediction[cc[0]:cc[1],cc[2]:cc[3],cc[4]:cc[5]] biggest_size = -1 biggest_region = 0 for region in regionprops(label(pred_in_shape)): if region.area > biggest_size: biggest_size = region.area biggest_region = region if biggest_region == 0: print("ATTENTION, No Tumor found in image: " + name) continue z1, x1, y1, z2, x2, y2 = biggest_region.bbox bbox_tumor = [z1,z2,x1,x2,y1,y2] dataset['bbox_tumor'] = bbox_tumor dataset['data'][5,:,:,:] = np.zeros(dataset['data'].shape[1:]) # if train set, delete original segmentation for p in biggest_region.coords: dataset['data'][5,p[0],p[1],p[2]] = 1 return preprocs
def convert_labelmask(atlas, converter, atlas_outpath=None, affine_labels=None):
    if isinstance(atlas, str):
        labels, affine_labels = load_nifti(atlas)
    else:
        if affine_labels is None:
            raise TypeError('Need to add the affine labels if directly '
                            'including label array')
        else:
            labels = atlas
    labels_new = np.copy(labels)
    for i in range(np.shape(labels)[0]):
        for j in range(np.shape(labels)[1]):
            for k in range(np.shape(labels)[2]):
                try:
                    labels_new[i, j, k] = converter[labels[i, j, k]]
                except KeyError:
                    # Label value missing from the converter: keep the
                    # original value and report it.
                    print('No conversion found for label '
                          '{0}'.format(labels[i, j, k]))
    save_nifti(atlas_outpath, labels_new, affine_labels)
    return labels_new
def run(self, input_files, sigma=0, out_dir='', out_denoised='dwi_nlmeans.nii.gz'): """ Workflow wrapping the nlmeans denoising method. It applies nlmeans denoise on each file found by 'globing' ``input_files`` and saves the results in a directory specified by ``out_dir``. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. sigma : float, optional Sigma parameter to pass to the nlmeans algorithm (default: auto estimation). out_dir : string, optional Output directory (default input file directory) out_denoised : string, optional Name of the resuting denoised volume (default: dwi_nlmeans.nii.gz) """ io_it = self.get_io_iterator() for fpath, odenoised in io_it: if self._skip: shutil.copy(fpath, odenoised) logging.warning('Denoising skipped for now.') else: logging.info('Denoising {0}'.format(fpath)) data, affine, image = load_nifti(fpath, return_img=True) if sigma == 0: logging.info('Estimating sigma') sigma = estimate_sigma(data) logging.debug('Found sigma {0}'.format(sigma)) denoised_data = nlmeans(data, sigma) save_nifti(odenoised, denoised_data, affine, image.header) logging.info('Denoised volume saved as {0}'.format(odenoised))
def show_bundles(bundles, colors=None, show=True, fname=None, fa=False, str_tube=False, size=(900, 900)): """ Displays bundles Parameters --------- bundles: bundles object """ ren = window.Renderer() ren.SetBackground(1., 1, 1) if str_tube: bundle_actor = actor.streamtube(bundles, colors, linewidth=0.5) ren.add(bundle_actor) else: for (i, bundle) in enumerate(bundles): color = colors[i] # lines_actor = actor.streamtube(bundle, color, linewidth=0.05 lines_actor = actor.line(bundle, color, linewidth=2.5) #lines_actor.RotateX(-90) #lines_actor.RotateZ(90) ren.add(lines_actor) if fa: fa, affine_fa = load_nifti( '/Users/alex/code/Wenlin/data/wenlin_results/bmfaN54900.nii.gz') fa_actor = actor.slicer(fa, affine_fa) ren.add(fa_actor) if show: window.show(ren) if fname is not None: sleep(1) window.record(ren, n_frames=1, out_path=fname, size=size)
def track_gen_net_work(atlas_path=None, atlas=None, data_path=None, affine=None, track_path=None, streamlines=None, return_matrix=False, save_matrix=True, output_path=None): import scipy.io as scio if atlas_path != None: img_atlas = nib.load(atlas_path) labels = img_atlas.get_data() labels = labels.astype(int) else: labels = atlas if data_path != None: data, affine, hardi_img = load_nifti(data_path, return_img=True) if track_path == None: tracks = streamlines else: tracks = dwi.load_streamlines_from_trk(track_path) tracks = reduct(tracks) M, grouping = utils.connectivity_matrix(tracks, affine=affine, label_volume=labels, return_mapping=True, mapping_as_streamlines=True) if save_matrix: scio.savemat(output_path, {'matrix': M}) if return_matrix: return M
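Given a label volume and streamlines already in the same space, the central call is connectivity_matrix with the same keyword arguments as above. A toy, self-contained sketch (the output .mat name is hypothetical):

import numpy as np
import scipy.io as scio
from dipy.tracking import utils

# Toy example: a 10x10x10 label volume with two regions and one streamline
# running between them (identity affine, i.e. voxel coordinates).
labels = np.zeros((10, 10, 10), dtype=int)
labels[1:4, 1:4, 1:4] = 1
labels[6:9, 6:9, 6:9] = 2
streamline = np.array([[2., 2., 2.], [4., 4., 4.], [7., 7., 7.]])

M, grouping = utils.connectivity_matrix([streamline], affine=np.eye(4),
                                        label_volume=labels,
                                        return_mapping=True,
                                        mapping_as_streamlines=True)
scio.savemat('connectivity.mat', {'matrix': M})   # hypothetical output name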
def run_onall(): atlas_legends = "/Users/alex/jacques/connectomes_testing/atlases/CHASSSYMM3AtlasLegends.xlsx" df = pd.read_excel(atlas_legends, sheet_name='Sheet1') df['Structure'] = df['Structure'].str.lower() index1=df['index'] index2=df['index2'] l = ['N57442', 'N57446', 'N57447','N57449','N57451','N57496','N57498','N57500','N57502','N57504','N57513','N57515','N57518', 'N57520','N57522','N57546','N57447','N57449','N57451','N57496','N57498','N57500','N57502','N57504','N57513','N57515','N57518','N57520','N57522','N57546','N57548', 'N57550', 'N57552', 'N57554', 'N57559', 'N57580', 'N57582', 'N57584', 'N57587', 'N57590', 'N57692', 'N57694', 'N57700', 'N57702', 'N57709'] converter_lr, converter_comb = chassym3_converter(atlas_legends) atlas_folder = "/Volumes/Data/Badea/Lab/mouse/C57_JS/DWI_RAS_40subj/" for subject in l: labels, affine_labels = load_nifti(atlas_folder + subject + "_chass_symmetric3_labels_RAS.nii.gz") rslt_whitem = df.loc[df['Subdivisions_7'] == "7_whitematter"] labels_leftright = np.copy(labels) labels_combinedlr = np.copy(labels) # for i in range(int(np.shape(labels)[0]/2-2), int(np.shape(labels)[0])): for i in range(np.shape(labels)[0]): for j in range(np.shape(labels)[1]): for k in range(np.shape(labels)[2]): if labels[i, j, k] > 1000: labels_leftright[i, j, k] = converter_lr[labels[i,j,k]] labels_combinedlr[i, j, k] = converter_comb[labels[i,j,k]] save_nifti(atlas_folder + subject + "_chass_symmetric3_labels_RAS_combined.nii.gz", labels_combinedlr, affine_labels) save_nifti(atlas_folder + subject + "_chass_symmetric3_labels_RAS_lrordered.nii.gz", labels_leftright, affine_labels) print("done") """
def run(self, input_files, bvalues_files, bvectors_files, sigma=0, b0_threshold=50, bvecs_tol=0.01, patch_radius=2, pca_method='eig', tau_factor=2.3, out_dir='', out_denoised='dwi_lpca.nii.gz'): r"""Workflow wrapping LPCA denoising method. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. bvalues_files : string Path to the bvalues files. This path may contain wildcards to use multiple bvalues files at once. bvectors_files : string Path to the bvectors files. This path may contain wildcards to use multiple bvectors files at once. sigma : float, optional Standard deviation of the noise estimated from the data. Default 0: it means sigma value estimation with the Manjon2013 algorithm [3]_. b0_threshold : float, optional Threshold used to find b0 volumes. bvecs_tol : float, optional Threshold used to check that norm(bvec) = 1 +/- bvecs_tol b-vectors are unit vectors. patch_radius : int, optional The radius of the local patch to be taken around each voxel (in voxels) For example, for a patch radius with value 2, and assuming the input image is a 3D image, the denoising will take place in blocks of 5x5x5 voxels. pca_method : string, optional Use either eigenvalue decomposition ('eig') or singular value decomposition ('svd') for principal component analysis. The default method is 'eig' which is faster. However, occasionally 'svd' might be more accurate. tau_factor : float, optional Thresholding of PCA eigenvalues is done by nulling out eigenvalues that are smaller than: .. math :: \tau = (\tau_{factor} \sigma)^2 \tau_{factor} can be change to adjust the relationship between the noise standard deviation and the threshold \tau. If \tau_{factor} is set to None, it will be automatically calculated using the Marcenko-Pastur distribution [2]_. out_dir : string, optional Output directory. (default current directory) out_denoised : string, optional Name of the resulting denoised volume. References ---------- .. [1] Veraart J, Novikov DS, Christiaens D, Ades-aron B, Sijbers, Fieremans E, 2016. Denoising of Diffusion MRI using random matrix theory. Neuroimage 142:394-406. doi: 10.1016/j.neuroimage.2016.08.016 .. [2] Veraart J, Fieremans E, Novikov DS. 2016. Diffusion MRI noise mapping using random matrix theory. Magnetic Resonance in Medicine. doi: 10.1002/mrm.26059. .. [3] Manjon JV, Coupe P, Concha L, Buades A, Collins DL (2013) Diffusion Weighted Image Denoising Using Overcomplete Local PCA. PLoS ONE 8(9): e73021. https://doi.org/10.1371/journal.pone.0073021 """ io_it = self.get_io_iterator() for dwi, bval, bvec, odenoised in io_it: logging.info('Denoising %s', dwi) data, affine, image = load_nifti(dwi, return_img=True) if not sigma: logging.info('Estimating sigma') bvals, bvecs = read_bvals_bvecs(bval, bvec) gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold, atol=bvecs_tol) sigma = pca_noise_estimate(data, gtab, correct_bias=True, smooth=3) logging.debug('Found sigma %s', sigma) denoised_data = localpca(data, sigma=sigma, patch_radius=patch_radius, pca_method=pca_method, tau_factor=tau_factor) save_nifti(odenoised, denoised_data, affine, image.header) logging.info('Denoised volume saved as %s', odenoised)
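The eigenvalue threshold described in the docstring is simply tau = (tau_factor * sigma)**2, applied voxel-wise. A minimal sketch of the same call chain outside the workflow, assuming hypothetical file names:

from dipy.core.gradients import gradient_table
from dipy.denoise.localpca import localpca
from dipy.denoise.pca_noise_estimate import pca_noise_estimate
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti, save_nifti

data, affine = load_nifti('dwi.nii.gz')                  # hypothetical inputs
bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')
gtab = gradient_table(bvals, bvecs)

# Voxel-wise noise estimate, then PCA denoising; eigenvalues below
# (tau_factor * sigma)**2 are nulled out.
sigma = pca_noise_estimate(data, gtab, correct_bias=True, smooth=3)
denoised = localpca(data, sigma=sigma, patch_radius=2,
                    pca_method='eig', tau_factor=2.3)
save_nifti('dwi_lpca.nii.gz', denoised, affine)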
def bundle_analysis(model_bundle_folder, bundle_folder, orig_bundle_folder, metric_folder, group, subject, no_disks=100, out_dir=''): """ Applies statistical analysis on bundles and saves the results in a directory specified by ``out_dir``. Parameters ---------- model_bundle_folder : string Path to the input model bundle files. This path may contain wildcards to process multiple inputs at once. bundle_folder : string Path to the input bundle files in common space. This path may contain wildcards to process multiple inputs at once. orig_folder : string Path to the input bundle files in native space. This path may contain wildcards to process multiple inputs at once. metric_folder : string Path to the input dti metric or/and peak files. It will be used as metric for statistical analysis of bundles. group : string what group subject belongs to e.g. control or patient subject : string subject id e.g. 10001 no_disks : integer, optional Number of disks used for dividing bundle into disks. (Default 100) out_dir : string, optional Output directory (default input file directory) References ---------- .. [Chandio19] Chandio, B.Q., S. Koudoro, D. Reagan, J. Harezlak, E. Garyfallidis, Bundle Analytics: a computational and statistical analyses framework for tractometric studies, Proceedings of: International Society of Magnetic Resonance in Medicine (ISMRM), Montreal, Canada, 2019. """ dt = dict() mb = os.listdir(model_bundle_folder) mb.sort() bd = os.listdir(bundle_folder) bd.sort() org_bd = os.listdir(orig_bundle_folder) org_bd.sort() n = len(org_bd) for io in range(n): mbundles, _ = load_trk(os.path.join(model_bundle_folder, mb[io])) bundles, _ = load_trk(os.path.join(bundle_folder, bd[io])) orig_bundles, _ = load_trk(os.path.join(orig_bundle_folder, org_bd[io])) mbundle_streamlines = set_number_of_points(mbundles, nb_points=no_disks) metric = AveragePointwiseEuclideanMetric() qb = QuickBundles(threshold=25., metric=metric) clusters = qb.cluster(mbundle_streamlines) centroids = Streamlines(clusters.centroids) print('Number of centroids ', len(centroids.data)) print('Model bundle ', mb[io]) print('Number of streamlines in bundle in common space ', len(bundles)) print('Number of streamlines in bundle in original space ', len(orig_bundles)) _, indx = cKDTree(centroids.data, 1, copy_data=True).query(bundles.data, k=1) metric_files_names = os.listdir(metric_folder) _, affine = load_nifti(os.path.join(metric_folder, "fa.nii.gz")) affine_r = np.linalg.inv(affine) transformed_orig_bundles = transform_streamlines( orig_bundles, affine_r) for mn in range(0, len(metric_files_names)): ind = np.array(indx) fm = metric_files_names[mn][:2] bm = mb[io][:-4] dt = dict() metric_name = os.path.join(metric_folder, metric_files_names[mn]) if metric_files_names[mn][2:] == '.nii.gz': metric, _ = load_nifti(metric_name) dti_measures(transformed_orig_bundles, metric, dt, fm, bm, subject, group, ind, out_dir) else: fm = metric_files_names[mn][:3] metric = load_peaks(metric_name) peak_values(bundles, metric, dt, fm, bm, subject, group, ind, out_dir)
def run(self, input_files, bvalues_files, bvectors_files, mask_files, b0_threshold=50, bvecs_tol=0.01, save_metrics=[], out_dir='', out_tensor='tensors.nii.gz', out_fa='fa.nii.gz', out_ga='ga.nii.gz', out_rgb='rgb.nii.gz', out_md='md.nii.gz', out_ad='ad.nii.gz', out_rd='rd.nii.gz', out_mode='mode.nii.gz', out_evec='evecs.nii.gz', out_eval='evals.nii.gz'): """ Workflow for tensor reconstruction and for computing DTI metrics. using Weighted Least-Squares. Performs a tensor reconstruction on the files by 'globing' ``input_files`` and saves the DTI metrics in a directory specified by ``out_dir``. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. bvalues_files : string Path to the bvalues files. This path may contain wildcards to use multiple bvalues files at once. bvectors_files : string Path to the bvectors files. This path may contain wildcards to use multiple bvectors files at once. mask_files : string Path to the input masks. This path may contain wildcards to use multiple masks at once. (default: No mask used) b0_threshold : float, optional Threshold used to find b=0 directions (default 0.0) bvecs_tol : float, optional Threshold used to check that norm(bvec) = 1 +/- bvecs_tol b-vectors are unit vectors (default 0.01) save_metrics : variable string, optional List of metrics to save. Possible values: fa, ga, rgb, md, ad, rd, mode, tensor, evec, eval (default [] (all)) out_dir : string, optional Output directory (default input file directory) out_tensor : string, optional Name of the tensors volume to be saved (default 'tensors.nii.gz') out_fa : string, optional Name of the fractional anisotropy volume to be saved (default 'fa.nii.gz') out_ga : string, optional Name of the geodesic anisotropy volume to be saved (default 'ga.nii.gz') out_rgb : string, optional Name of the color fa volume to be saved (default 'rgb.nii.gz') out_md : string, optional Name of the mean diffusivity volume to be saved (default 'md.nii.gz') out_ad : string, optional Name of the axial diffusivity volume to be saved (default 'ad.nii.gz') out_rd : string, optional Name of the radial diffusivity volume to be saved (default 'rd.nii.gz') out_mode : string, optional Name of the mode volume to be saved (default 'mode.nii.gz') out_evec : string, optional Name of the eigenvectors volume to be saved (default 'evecs.nii.gz') out_eval : string, optional Name of the eigenvalues to be saved (default 'evals.nii.gz') References ---------- .. [1] Basser, P.J., Mattiello, J., LeBihan, D., 1994. Estimation of the effective self-diffusion tensor from the NMR spin echo. J Magn Reson B 103, 247-254. .. [2] Basser, P., Pierpaoli, C., 1996. Microstructural and physiological features of tissues elucidated by quantitative diffusion-tensor MRI. Journal of Magnetic Resonance 111, 209-219. .. [3] Lin-Ching C., Jones D.K., Pierpaoli, C. 2005. RESTORE: Robust estimation of tensors by outlier rejection. MRM 53: 1088-1095 .. [4] hung, SW., Lu, Y., Henry, R.G., 2006. Comparison of bootstrap approaches for estimation of uncertainties of DTI parameters. NeuroImage 33, 531-541. 
""" io_it = self.get_io_iterator() for dwi, bval, bvec, mask, otensor, ofa, oga, orgb, omd, oad, orad, \ omode, oevecs, oevals in io_it: logging.info('Computing DTI metrics for {0}'.format(dwi)) data, affine = load_nifti(dwi) if mask is not None: mask = nib.load(mask).get_data().astype(np.bool) tenfit, _ = self.get_fitted_tensor(data, mask, bval, bvec, b0_threshold, bvecs_tol) if not save_metrics: save_metrics = ['fa', 'md', 'rd', 'ad', 'ga', 'rgb', 'mode', 'evec', 'eval', 'tensor'] FA = fractional_anisotropy(tenfit.evals) FA[np.isnan(FA)] = 0 FA = np.clip(FA, 0, 1) if 'tensor' in save_metrics: tensor_vals = lower_triangular(tenfit.quadratic_form) correct_order = [0, 1, 3, 2, 4, 5] tensor_vals_reordered = tensor_vals[..., correct_order] save_nifti(otensor, tensor_vals_reordered.astype(np.float32), affine) if 'fa' in save_metrics: save_nifti(ofa, FA.astype(np.float32), affine) if 'ga' in save_metrics: GA = geodesic_anisotropy(tenfit.evals) save_nifti(oga, GA.astype(np.float32), affine) if 'rgb' in save_metrics: RGB = color_fa(FA, tenfit.evecs) save_nifti(orgb, np.array(255 * RGB, 'uint8'), affine) if 'md' in save_metrics: MD = mean_diffusivity(tenfit.evals) save_nifti(omd, MD.astype(np.float32), affine) if 'ad' in save_metrics: AD = axial_diffusivity(tenfit.evals) save_nifti(oad, AD.astype(np.float32), affine) if 'rd' in save_metrics: RD = radial_diffusivity(tenfit.evals) save_nifti(orad, RD.astype(np.float32), affine) if 'mode' in save_metrics: MODE = get_mode(tenfit.quadratic_form) save_nifti(omode, MODE.astype(np.float32), affine) if 'evec' in save_metrics: save_nifti(oevecs, tenfit.evecs.astype(np.float32), affine) if 'eval' in save_metrics: save_nifti(oevals, tenfit.evals.astype(np.float32), affine) dname_ = os.path.dirname(oevals) if dname_ == '': logging.info('DTI metrics saved in current directory') else: logging.info( 'DTI metrics saved in {0}'.format(dname_))
def run(self, input_files, bvalues_files, bvectors_files, mask_files, sh_order=6, odf_to_sh_order=8, b0_threshold=50.0, bvecs_tol=0.01, extract_pam_values=False, parallel=False, nbr_processes=None, out_dir='', out_pam='peaks.pam5', out_shm='shm.nii.gz', out_peaks_dir='peaks_dirs.nii.gz', out_peaks_values='peaks_values.nii.gz', out_peaks_indices='peaks_indices.nii.gz', out_gfa='gfa.nii.gz'): """ Constant Solid Angle. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. bvalues_files : string Path to the bvalues files. This path may contain wildcards to use multiple bvalues files at once. bvectors_files : string Path to the bvectors files. This path may contain wildcards to use multiple bvectors files at once. mask_files : string Path to the input masks. This path may contain wildcards to use multiple masks at once. (default: No mask used) sh_order : int, optional Spherical harmonics order (default 6) used in the CSA fit. odf_to_sh_order : int, optional Spherical harmonics order used for peak_from_model to compress the ODF to spherical harmonics coefficients (default 8) b0_threshold : float, optional Threshold used to find b=0 directions bvecs_tol : float, optional Threshold used so that norm(bvec)=1 (default 0.01) extract_pam_values : bool, optional Wheter or not to save pam volumes as single nifti files. parallel : bool, optional Whether to use parallelization in peak-finding during the calibration procedure. Default: False nbr_processes : int, optional If `parallel` is True, the number of subprocesses to use (default multiprocessing.cpu_count()). out_dir : string, optional Output directory (default input file directory) out_pam : string, optional Name of the peaks volume to be saved (default 'peaks.pam5') out_shm : string, optional Name of the shperical harmonics volume to be saved (default 'shm.nii.gz') out_peaks_dir : string, optional Name of the peaks directions volume to be saved (default 'peaks_dirs.nii.gz') out_peaks_values : string, optional Name of the peaks values volume to be saved (default 'peaks_values.nii.gz') out_peaks_indices : string, optional Name of the peaks indices volume to be saved (default 'peaks_indices.nii.gz') out_gfa : string, optional Name of the generalise fa volume to be saved (default 'gfa.nii.gz') References ---------- .. [1] Aganj, I., et al. 2009. ODF Reconstruction in Q-Ball Imaging with Solid Angle Consideration. """ io_it = self.get_io_iterator() for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir, opeaks_values, opeaks_indices, ogfa) in io_it: logging.info('Loading {0}'.format(dwi)) data, affine = load_nifti(dwi) bvals, bvecs = read_bvals_bvecs(bval, bvec) if b0_threshold < bvals.min(): warn("b0_threshold (value: {0}) is too low, increase your " "b0_threshold. 
It should be higher than the first b0 value " "({1}).".format(b0_threshold, bvals.min())) gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold, atol=bvecs_tol) mask_vol = nib.load(maskfile).get_data().astype(np.bool) peaks_sphere = get_sphere('repulsion724') logging.info('Starting CSA computations {0}'.format(dwi)) csa_model = CsaOdfModel(gtab, sh_order) peaks_csa = peaks_from_model(model=csa_model, data=data, sphere=peaks_sphere, relative_peak_threshold=.5, min_separation_angle=25, mask=mask_vol, return_sh=True, sh_order=odf_to_sh_order, normalize_peaks=True, parallel=parallel, nbr_processes=nbr_processes) peaks_csa.affine = affine save_peaks(opam, peaks_csa) logging.info('Finished CSA {0}'.format(dwi)) if extract_pam_values: peaks_to_niftis(peaks_csa, oshm, opeaks_dir, opeaks_values, opeaks_indices, ogfa, reshape_dirs=True) dname_ = os.path.dirname(opam) if dname_ == '': logging.info('Pam5 file saved in current directory') else: logging.info( 'Pam5 file saved in {0}'.format(dname_)) return io_it
# load other dipy's functions that will be used for auxiliar analysis from dipy.core.gradients import gradient_table from dipy.io.image import load_nifti from dipy.io.gradients import read_bvals_bvecs from dipy.segment.mask import median_otsu import dipy.reconst.dki as dki """ For this example, we use fetch to download a multi-shell dataset which was kindly provided by Hansen and Jespersen (more details about the data are provided in their paper [Hansen2016]_). The total size of the downloaded data is 192 MBytes, however you only need to fetch it once. """ dwi_fname, dwi_bval_fname, dwi_bvec_fname, _ = get_fnames('cfin_multib') data, affine = load_nifti(dwi_fname) bvals, bvecs = read_bvals_bvecs(dwi_bval_fname, dwi_bvec_fname) gtab = gradient_table(bvals, bvecs) """ For the sake of simplicity, we only select two non-zero b-values for this example. """ bvals = gtab.bvals bvecs = gtab.bvecs sel_b = np.logical_or(np.logical_or(bvals == 0, bvals == 1000), bvals == 2000) data = data[..., sel_b]
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel, auto_response) from dipy.tracking.local_tracking import (LocalTracking, ParticleFilteringTracking) from dipy.tracking.streamline import Streamlines from dipy.tracking import utils from dipy.viz import window, actor, colormap, has_fury # Enables/disables interactive visualization interactive = False hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi') label_fname = get_fnames('stanford_labels') f_pve_csf, f_pve_gm, f_pve_wm = get_fnames('stanford_pve_maps') data, affine, hardi_img = load_nifti(hardi_fname, return_img=True) labels = load_nifti_data(label_fname) bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname) gtab = gradient_table(bvals, bvecs) pve_csf_data = load_nifti_data(f_pve_csf) pve_gm_data = load_nifti_data(f_pve_gm) pve_wm_data, _, voxel_size = load_nifti(f_pve_wm, return_voxsize=True) shape = labels.shape response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7) csd_model = ConstrainedSphericalDeconvModel(gtab, response) csd_fit = csd_model.fit(data, mask=pve_wm_data) dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
def run(self, input_files, sigma=0, patch_radius=1, block_radius=5, rician=True, out_dir='', out_denoised='dwi_nlmeans.nii.gz'): """Workflow wrapping the nlmeans denoising method. It applies nlmeans denoise on each file found by 'globing' ``input_files`` and saves the results in a directory specified by ``out_dir``. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. sigma : float, optional Sigma parameter to pass to the nlmeans algorithm. patch_radius : int, optional patch size is ``2 x patch_radius + 1``. block_radius : int, optional block size is ``2 x block_radius + 1``. rician : bool, optional If True the noise is estimated as Rician, otherwise Gaussian noise is assumed. out_dir : string, optional Output directory. (default current directory) out_denoised : string, optional Name of the resulting denoised volume. References ---------- .. [Descoteaux08] Descoteaux, Maxime and Wiest-Daesslé, Nicolas and Prima, Sylvain and Barillot, Christian and Deriche, Rachid. Impact of Rician Adapted Non-Local Means Filtering on HARDI, MICCAI 2008 """ io_it = self.get_io_iterator() for fpath, odenoised in io_it: if self._skip: shutil.copy(fpath, odenoised) logging.warning('Denoising skipped for now.') else: logging.info('Denoising %s', fpath) data, affine, image = load_nifti(fpath, return_img=True) if sigma == 0: logging.info('Estimating sigma') sigma = estimate_sigma(data) logging.debug('Found sigma {0}'.format(sigma)) denoised_data = nlmeans(data, sigma=sigma, patch_radius=patch_radius, block_radius=block_radius, rician=rician) save_nifti(odenoised, denoised_data, affine, image.header) logging.info('Denoised volume saved as %s', odenoised)
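patch_radius and block_radius translate into window sizes of 2*r + 1 voxels. A minimal direct call, assuming a hypothetical dwi.nii.gz:

from dipy.denoise.nlmeans import nlmeans
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.io.image import load_nifti, save_nifti

data, affine = load_nifti('dwi.nii.gz')   # hypothetical input

sigma = estimate_sigma(data)              # per-volume noise estimate

# patch_radius=1 -> 3x3x3 patches, block_radius=5 -> 11x11x11 search blocks.
denoised = nlmeans(data, sigma=sigma, patch_radius=1, block_radius=5,
                   rician=True)
save_nifti('dwi_nlmeans.nii.gz', denoised, affine)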
def run( self, input_files, save_masked=False, median_radius=2, numpass=5, autocrop=False, vol_idx=None, dilate=None, out_dir="", out_mask="brain_mask.nii.gz", out_masked="dwi_masked.nii.gz", ): """Workflow wrapping the median_otsu segmentation method. Applies median_otsu segmentation on each file found by 'globing' ``input_files`` and saves the results in a directory specified by ``out_dir``. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. save_masked : bool Save mask median_radius : int, optional Radius (in voxels) of the applied median filter (default 2) numpass : int, optional Number of pass of the median filter (default 5) autocrop : bool, optional If True, the masked input_volumes will also be cropped using the bounding box defined by the masked data. For example, if diffusion images are of 1x1x1 (mm^3) or higher resolution auto-cropping could reduce their size in memory and speed up some of the analysis. (default False) vol_idx : string, optional 1D array representing indices of ``axis=3`` of a 4D `input_volume` 'None' (the default) corresponds to ``(0,)`` (assumes first volume in 4D array) dilate : string, optional number of iterations for binary dilation (default 'None') out_dir : string, optional Output directory (default input file directory) out_mask : string, optional Name of the mask volume to be saved (default 'brain_mask.nii.gz') out_masked : string, optional Name of the masked volume to be saved (default 'dwi_masked.nii.gz') """ io_it = self.get_io_iterator() for fpath, mask_out_path, masked_out_path in io_it: logging.info("Applying median_otsu segmentation on {0}".format(fpath)) data, affine, img = load_nifti(fpath, return_img=True) masked_volume, mask_volume = median_otsu(data, median_radius, numpass, autocrop, vol_idx, dilate) save_nifti(mask_out_path, mask_volume.astype(np.float32), affine) logging.info("Mask saved as {0}".format(mask_out_path)) if save_masked: save_nifti(masked_out_path, masked_volume, affine, img.header) logging.info("Masked volume saved as {0}".format(masked_out_path)) return io_it
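A direct call to median_otsu with the same options; the positional order has shifted between DIPY releases, so keyword arguments are used here. File names and vol_idx are hypothetical examples.

import numpy as np
from dipy.io.image import load_nifti, save_nifti
from dipy.segment.mask import median_otsu

data, affine = load_nifti('dwi.nii.gz')   # hypothetical 4D input

# vol_idx selects which 3D volumes feed the Otsu threshold (e.g. the b0s).
masked, mask = median_otsu(data, vol_idx=[0], median_radius=2, numpass=5,
                           autocrop=False, dilate=None)

save_nifti('brain_mask.nii.gz', mask.astype(np.float32), affine)
save_nifti('dwi_masked.nii.gz', masked, affine)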
def run(self, static_image_files, moving_image_files, prealign_file='', inv_static=False, level_iters=[10, 10, 5], metric="cc", mopt_sigma_diff=2.0, mopt_radius=4, mopt_smooth=0.0, mopt_inner_iter=0.0, mopt_q_levels=256, mopt_double_gradient=True, mopt_step_type='', step_length=0.25, ss_sigma_factor=0.2, opt_tol=1e-5, inv_iter=20, inv_tol=1e-3, out_dir='', out_warped='warped_moved.nii.gz', out_inv_static='inc_static.nii.gz', out_field='displacement_field.nii.gz'): """ Parameters ---------- static_image_files : string Path of the static image file. moving_image_files : string Path to the moving image file. prealign_file : string, optional The text file containing pre alignment information via an affine matrix. inv_static : boolean, optional Apply the inverse mapping to the static image (default 'False'). level_iters : variable int, optional The number of iterations at each level of the gaussian pyramid. By default, a 3-level scale space with iterations sequence equal to [10, 10, 5] will be used. The 0-th level corresponds to the finest resolution. metric : string, optional The metric to be used (Default cc, 'Cross Correlation metric'). metric available: cc (Cross Correlation), ssd (Sum Squared Difference), em (Expectation-Maximization). mopt_sigma_diff : float, optional Metric option applied on Cross correlation (CC). The standard deviation of the Gaussian smoothing kernel to be applied to the update field at each iteration (default 2.0) mopt_radius : int, optional Metric option applied on Cross correlation (CC). the radius of the squared (cubic) neighborhood at each voxel to be considered to compute the cross correlation. (default 4) mopt_smooth : float, optional Metric option applied on Sum Squared Difference (SSD) and Expectation Maximization (EM). Smoothness parameter, the larger the value the smoother the deformation field. (default 1.0 for EM, 4.0 for SSD) mopt_inner_iter : int, optional Metric option applied on Sum Squared Difference (SSD) and Expectation Maximization (EM). This is number of iterations to be performed at each level of the multi-resolution Gauss-Seidel optimization algorithm (this is not the number of steps per Gaussian Pyramid level, that parameter must be set for the optimizer, not the metric). Default 5 for EM, 10 for SSD. mopt_q_levels : int, optional Metric option applied on Expectation Maximization (EM). Number of quantization levels (Default: 256 for EM) mopt_double_gradient : bool, optional Metric option applied on Expectation Maximization (EM). if True, the gradient of the expected static image under the moving modality will be added to the gradient of the moving image, similarly, the gradient of the expected moving image under the static modality will be added to the gradient of the static image. mopt_step_type : string, optional Metric option applied on Sum Squared Difference (SSD) and Expectation Maximization (EM). The optimization schedule to be used in the multi-resolution Gauss-Seidel optimization algorithm (not used if Demons Step is selected). Possible value: ('gauss_newton', 'demons'). default: 'gauss_newton' for EM, 'demons' for SSD. step_length : float, optional the length of the maximum displacement vector of the update displacement field at each iteration. ss_sigma_factor : float, optional parameter of the scale-space smoothing kernel. For example, the std. dev. of the kernel will be factor*(2^i) in the isotropic case where i = 0, 1, ..., n_scales is the scale. 
opt_tol : float, optional the optimization will stop when the estimated derivative of the energy profile w.r.t. time falls below this threshold. inv_iter : int, optional the number of iterations to be performed by the displacement field inversion algorithm. inv_tol : float, optional the displacement field inversion algorithm will stop iterating when the inversion error falls below this threshold. out_dir : string, optional Directory to save the transformed files (default ''). out_warped : string, optional Name of the warped file. (default 'warped_moved.nii.gz'). out_inv_static : string, optional Name of the file to save the static image after applying the inverse mapping (default 'inv_static.nii.gz'). out_field : string, optional Name of the file to save the diffeomorphic map. (default 'displacement_field.nii.gz') """ io_it = self.get_io_iterator() metric = metric.lower() if metric not in ['ssd', 'cc', 'em']: raise ValueError("Invalid similarity metric: Please" " provide a valid metric like 'ssd', 'cc', 'em'") logging.info("Starting Diffeormorphic Registration") logging.info('Using {0} Metric'.format(metric.upper())) # Init parameter if they are not setup init_param = {'ssd': {'mopt_smooth': 4.0, 'mopt_inner_iter': 10, 'mopt_step_type': 'demons' }, 'em': {'mopt_smooth': 1.0, 'mopt_inner_iter': 5, 'mopt_step_type': 'gauss_newton' } } mopt_smooth = mopt_smooth or init_param[metric]['mopt_smooth'] mopt_inner_iter = mopt_inner_iter or \ init_param[metric]['mopt_inner_iter'] mopt_step_type = mopt_step_type or \ init_param[metric]['mopt_step_type'] for (static_file, moving_file, owarped_file, oinv_static_file, omap_file) in io_it: logging.info('Loading static file {0}'.format(static_file)) logging.info('Loading moving file {0}'.format(moving_file)) # Loading the image data from the input files into object. static_image, static_grid2world = load_nifti(static_file) moving_image, moving_grid2world = load_nifti(moving_file) # Sanity check for the input image dimensions. check_dimensions(static_image, moving_image) # Loading the affine matrix. prealign = np.loadtxt(prealign_file) if prealign_file else None l_metric = {"ssd": SSDMetric(static_image.ndim, smooth=mopt_smooth, inner_iter=mopt_inner_iter, step_type=mopt_step_type ), "cc": CCMetric(static_image.ndim, sigma_diff=mopt_sigma_diff, radius=mopt_radius), "em": EMMetric(static_image.ndim, smooth=mopt_smooth, inner_iter=mopt_inner_iter, step_type=mopt_step_type, q_levels=mopt_q_levels, double_gradient=mopt_double_gradient) } current_metric = l_metric.get(metric.lower()) sdr = SymmetricDiffeomorphicRegistration( metric=current_metric, level_iters=level_iters, step_length=step_length, ss_sigma_factor=ss_sigma_factor, opt_tol=opt_tol, inv_iter=inv_iter, inv_tol=inv_tol ) mapping = sdr.optimize(static_image, moving_image, static_grid2world, moving_grid2world, prealign) mapping_data = np.array([mapping.forward.T, mapping.backward.T]).T warped_moving = mapping.transform(moving_image) # Saving logging.info('Saving warped {0}'.format(owarped_file)) save_nifti(owarped_file, warped_moving, static_grid2world) logging.info('Saving Diffeormorphic map {0}'.format(omap_file)) save_nifti(omap_file, mapping_data, mapping.codomain_world2grid)
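For the CC branch, the same registration can be run directly with the objects used above. A compact sketch with hypothetical file names and the default 3-level schedule:

from dipy.align.imwarp import SymmetricDiffeomorphicRegistration
from dipy.align.metrics import CCMetric
from dipy.io.image import load_nifti, save_nifti

static, static_grid2world = load_nifti('static.nii.gz')   # hypothetical inputs
moving, moving_grid2world = load_nifti('moving.nii.gz')

metric = CCMetric(static.ndim, sigma_diff=2.0, radius=4)
sdr = SymmetricDiffeomorphicRegistration(metric, level_iters=[10, 10, 5],
                                         step_length=0.25)

mapping = sdr.optimize(static, moving,
                       static_grid2world, moving_grid2world)
warped = mapping.transform(moving)
save_nifti('warped_moved.nii.gz', warped, static_grid2world)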
def run(self, data_files, bvals_files, bvecs_files, small_delta, big_delta, b0_threshold=50.0, laplacian=True, positivity=True, bval_threshold=2000, save_metrics=[], laplacian_weighting=0.05, radial_order=6, out_dir='', out_rtop='rtop.nii.gz', out_lapnorm='lapnorm.nii.gz', out_msd='msd.nii.gz', out_qiv='qiv.nii.gz', out_rtap='rtap.nii.gz', out_rtpp='rtpp.nii.gz', out_ng='ng.nii.gz', out_perng='perng.nii.gz', out_parng='parng.nii.gz'): """ Workflow for fitting the MAPMRI model (with optional Laplacian regularization). Generates rtop, lapnorm, msd, qiv, rtap, rtpp, non-gaussian (ng), parallel ng, perpendicular ng saved in a nifti format in input files provided by `data_files` and saves the nifti files to an output directory specified by `out_dir`. In order for the MAPMRI workflow to work in the way intended either the laplacian or positivity or both must be set to True. Parameters ---------- data_files : string Path to the input volume. bvals_files : string Path to the bval files. bvecs_files : string Path to the bvec files. small_delta : float Small delta value used in generation of gradient table of provided bval and bvec. big_delta : float Big delta value used in generation of gradient table of provided bval and bvec. b0_threshold : float, optional Threshold used to find b=0 directions (default 0.0) laplacian : bool, optional Regularize using the Laplacian of the MAP-MRI basis (default True) positivity : bool, optional Constrain the propagator to be positive. (default True) bval_threshold : float, optional Sets the b-value threshold to be used in the scale factor estimation. In order for the estimated non-Gaussianity to have meaning this value should set to a lower value (b<2000 s/mm^2) such that the scale factors are estimated on signal points that reasonably represent the spins at Gaussian diffusion. (default: 2000) save_metrics : variable string, optional List of metrics to save. Possible values: rtop, laplacian_signal, msd, qiv, rtap, rtpp, ng, perng, parng (default: [] (all)) laplacian_weighting : float, optional Weighting value used in fitting the MAPMRI model in the laplacian and both model types. (default: 0.05) radial_order : unsigned int, optional Even value used to set the order of the basis (default: 6) out_dir : string, optional Output directory (default: input file directory) out_rtop : string, optional Name of the rtop to be saved out_lapnorm : string, optional Name of the norm of laplacian signal to be saved out_msd : string, optional Name of the msd to be saved out_qiv : string, optional Name of the qiv to be saved out_rtap : string, optional Name of the rtap to be saved out_rtpp : string, optional Name of the rtpp to be saved out_ng : string, optional Name of the Non-Gaussianity to be saved out_perng : string, optional Name of the Non-Gaussianity perpendicular to be saved out_parng : string, optional Name of the Non-Gaussianity parallel to be saved """ io_it = self.get_io_iterator() for (dwi, bval, bvec, out_rtop, out_lapnorm, out_msd, out_qiv, out_rtap, out_rtpp, out_ng, out_perng, out_parng) in io_it: logging.info('Computing MAPMRI metrics for {0}'.format(dwi)) data, affine = load_nifti(dwi) bvals, bvecs = read_bvals_bvecs(bval, bvec) if b0_threshold < bvals.min(): warn("b0_threshold (value: {0}) is too low, increase your " "b0_threshold. 
It should higher than the first b0 value " "({1}).".format(b0_threshold, bvals.min())) gtab = gradient_table(bvals=bvals, bvecs=bvecs, small_delta=small_delta, big_delta=big_delta, b0_threshold=b0_threshold) if not save_metrics: save_metrics = ['rtop', 'laplacian_signal', 'msd', 'qiv', 'rtap', 'rtpp', 'ng', 'perng', 'parng'] if laplacian and positivity: map_model_aniso = mapmri.MapmriModel( gtab, radial_order=radial_order, laplacian_regularization=True, laplacian_weighting=laplacian_weighting, positivity_constraint=True, bval_threshold=bval_threshold) mapfit_aniso = map_model_aniso.fit(data) elif positivity: map_model_aniso = mapmri.MapmriModel( gtab, radial_order=radial_order, laplacian_regularization=False, positivity_constraint=True, bval_threshold=bval_threshold) mapfit_aniso = map_model_aniso.fit(data) elif laplacian: map_model_aniso = mapmri.MapmriModel( gtab, radial_order=radial_order, laplacian_regularization=True, laplacian_weighting=laplacian_weighting, bval_threshold=bval_threshold) mapfit_aniso = map_model_aniso.fit(data) else: map_model_aniso = mapmri.MapmriModel( gtab, radial_order=radial_order, laplacian_regularization=False, positivity_constraint=False, bval_threshold=bval_threshold) mapfit_aniso = map_model_aniso.fit(data) if 'rtop' in save_metrics: r = mapfit_aniso.rtop() save_nifti(out_rtop, r.astype(np.float32), affine) if 'laplacian_signal' in save_metrics: ll = mapfit_aniso.norm_of_laplacian_signal() save_nifti(out_lapnorm, ll.astype(np.float32), affine) if 'msd' in save_metrics: m = mapfit_aniso.msd() save_nifti(out_msd, m.astype(np.float32), affine) if 'qiv' in save_metrics: q = mapfit_aniso.qiv() save_nifti(out_qiv, q.astype(np.float32), affine) if 'rtap' in save_metrics: r = mapfit_aniso.rtap() save_nifti(out_rtap, r.astype(np.float32), affine) if 'rtpp' in save_metrics: r = mapfit_aniso.rtpp() save_nifti(out_rtpp, r.astype(np.float32), affine) if 'ng' in save_metrics: n = mapfit_aniso.ng() save_nifti(out_ng, n.astype(np.float32), affine) if 'perng' in save_metrics: n = mapfit_aniso.ng_perpendicular() save_nifti(out_perng, n.astype(np.float32), affine) if 'parng' in save_metrics: n = mapfit_aniso.ng_parallel() save_nifti(out_parng, n.astype(np.float32), affine) logging.info('MAPMRI saved in {0}'. format(os.path.dirname(out_dir)))
def run(self, input_files, b0_threshold=50, bvecs_tol=0.01, bshell_thr=100): """ Provides useful information about different files used in medical imaging. Any number of input files can be provided. The program identifies the type of file by its extension. Parameters ---------- input_files : variable string Any number of Nifti1, bvals or bvecs files. b0_threshold : float, optional (default 50) bvecs_tol : float, optional Threshold used to check that norm(bvec) = 1 +/- bvecs_tol b-vectors are unit vectors (default 0.01) bshell_thr : float, optional Threshold for distinguishing b-values in different shells (default 100) """ np.set_printoptions(3, suppress=True) io_it = self.get_io_iterator() for input_path in io_it: logging.info('------------------------------------------') logging.info('Looking at {0}'.format(input_path)) logging.info('------------------------------------------') ipath_lower = input_path.lower() if ipath_lower.endswith('.nii') or ipath_lower.endswith('.nii.gz'): data, affine, img, vox_sz, affcodes = load_nifti( input_path, return_img=True, return_voxsize=True, return_coords=True) logging.info('Data size {0}'.format(data.shape)) logging.info('Data type {0}'.format(data.dtype)) logging.info('Data min {0} max {1} avg {2}' .format(data.min(), data.max(), data.mean())) logging.info('2nd percentile {0} 98th percentile {1}' .format(np.percentile(data, 2), np.percentile(data, 98))) logging.info('Native coordinate system {0}' .format(''.join(affcodes))) logging.info('Affine to RAS1mm \n{0}'.format(affine)) logging.info('Voxel size {0}'.format(np.array(vox_sz))) if np.sum(np.abs(np.diff(vox_sz))) > 0.1: msg = \ 'Voxel size is not isotropic. Please reslice.\n' logging.warning(msg) if os.path.basename(input_path).lower().find('bval') > -1: bvals = np.loadtxt(input_path) logging.info('Bvalues \n{0}'.format(bvals)) logging.info('Total number of bvalues {}'.format(len(bvals))) shells = np.sum(np.diff(np.sort(bvals)) > bshell_thr) logging.info('Number of gradient shells {0}'.format(shells)) logging.info('Number of b0s {0} (b0_thr {1})\n' .format(np.sum(bvals <= b0_threshold), b0_threshold)) if os.path.basename(input_path).lower().find('bvec') > -1: bvecs = np.loadtxt(input_path) logging.info('Bvectors shape on disk is {0}' .format(bvecs.shape)) rows, cols = bvecs.shape if rows < cols: bvecs = bvecs.T logging.info('Bvectors are \n{0}'.format(bvecs)) norms = np.array([np.linalg.norm(bvec) for bvec in bvecs]) res = np.where( (norms <= 1 + bvecs_tol) & (norms >= 1 - bvecs_tol)) ncl1 = np.sum(norms < 1 - bvecs_tol) logging.info('Total number of unit bvectors {0}' .format(len(res[0]))) logging.info('Total number of non-unit bvectors {0}\n' .format(ncl1)) np.set_printoptions()
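# A standalone recap of the bval/bvec checks performed above, using numpy only;
# the 'dwi.bval' / 'dwi.bvec' paths are hypothetical and the thresholds mirror
# the workflow defaults.
import numpy as np

bvals = np.loadtxt('dwi.bval')        # hypothetical path
bvecs = np.loadtxt('dwi.bvec')        # hypothetical path
if bvecs.shape[0] < bvecs.shape[1]:
    bvecs = bvecs.T                   # one gradient direction per row

b0_threshold, bshell_thr, bvecs_tol = 50, 100, 0.01
n_shells = np.sum(np.diff(np.sort(bvals)) > bshell_thr)
n_b0s = np.sum(bvals <= b0_threshold)
norms = np.linalg.norm(bvecs, axis=1)
n_unit = np.sum(np.abs(norms - 1) <= bvecs_tol)
print(n_shells, n_b0s, n_unit)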
def run(self, input_files, bvalues_files, bvectors_files, mask_files, split_b_D=400, split_b_S0=200, b0_threshold=0, save_metrics=[], out_dir='', out_S0_predicted='S0_predicted.nii.gz', out_perfusion_fraction='perfusion_fraction.nii.gz', out_D_star='D_star.nii.gz', out_D='D.nii.gz'): """ Workflow for Intra-voxel Incoherent Motion reconstruction and for computing IVIM metrics. Performs a IVIM reconstruction on the files by 'globing' ``input_files`` and saves the IVIM metrics in a directory specified by ``out_dir``. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. bvalues_files : string Path to the bvalues files. This path may contain wildcards to use multiple bvalues files at once. bvectors_files : string Path to the bvalues files. This path may contain wildcards to use multiple bvalues files at once. mask_files : string Path to the input masks. This path may contain wildcards to use multiple masks at once. (default: No mask used) split_b_D : int, optional Value to split the bvals to estimate D for the two-stage process of fitting (default 400) split_b_S0 : int, optional Value to split the bvals to estimate S0 for the two-stage process of fitting. (default 200) b0_threshold : int, optional Threshold value for the b0 bval. (default 0) save_metrics : variable string, optional List of metrics to save. Possible values: S0_predicted, perfusion_fraction, D_star, D (default [] (all)) out_dir : string, optional Output directory (default input file directory) out_S0_predicted : string, optional Name of the S0 signal estimated to be saved (default: 'S0_predicted.nii.gz') out_perfusion_fraction : string, optional Name of the estimated volume fractions to be saved (default 'perfusion_fraction.nii.gz') out_D_star : string, optional Name of the estimated pseudo-diffusion parameter to be saved (default 'D_star.nii.gz') out_D : string, optional Name of the estimated diffusion parameter to be saved (default 'D.nii.gz') References ---------- .. [Stejskal65] Stejskal, E. O.; Tanner, J. E. (1 January 1965). "Spin Diffusion Measurements: Spin Echoes in the Presence of a Time-Dependent Field Gradient". The Journal of Chemical Physics 42 (1): 288. Bibcode: 1965JChPh..42..288S. doi:10.1063/1.1695690. .. [LeBihan84] Le Bihan, Denis, et al. "Separation of diffusion and perfusion in intravoxel incoherent motion MR imaging." Radiology 168.2 (1988): 497-505. """ io_it = self.get_io_iterator() for (dwi, bval, bvec, mask, oS0_predicted, operfusion_fraction, oD_star, oD) in io_it: logging.info('Computing IVIM metrics for {0}'.format(dwi)) data, affine = load_nifti(dwi) if mask is not None: mask = nib.load(mask).get_data().astype(np.bool) ivimfit, _ = self.get_fitted_ivim(data, mask, bval, bvec, b0_threshold) if not save_metrics: save_metrics = ['S0_predicted', 'perfusion_fraction', 'D_star', 'D'] if 'S0_predicted' in save_metrics: save_nifti(oS0_predicted, ivimfit.S0_predicted.astype(np.float32), affine) if 'perfusion_fraction' in save_metrics: save_nifti(operfusion_fraction, ivimfit.perfusion_fraction.astype(np.float32), affine) if 'D_star' in save_metrics: save_nifti(oD_star, ivimfit.D_star.astype(np.float32), affine) if 'D' in save_metrics: save_nifti(oD, ivimfit.D.astype(np.float32), affine) logging.info('IVIM metrics saved in {0}'. format(os.path.dirname(oD)))
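# A minimal sketch of the two-stage IVIM fit wrapped by the workflow above;
# file names are hypothetical and the split_b values mirror the defaults.
from dipy.core.gradients import gradient_table
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti
from dipy.reconst.ivim import IvimModel

data, affine = load_nifti('dwi.nii.gz')                    # hypothetical path
bvals, bvecs = read_bvals_bvecs('dwi.bval', 'dwi.bvec')    # hypothetical paths
gtab = gradient_table(bvals, bvecs, b0_threshold=0)

ivim_model = IvimModel(gtab, split_b_D=400, split_b_S0=200)
ivim_fit = ivim_model.fit(data)
f = ivim_fit.perfusion_fraction    # perfusion fraction map
D_star = ivim_fit.D_star           # pseudo-diffusion coefficient map
D = ivim_fit.D                     # tissue diffusion coefficient map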
def run(self, input_files, bvalues_files, bvectors_files, mask_files, b0_threshold=50.0, save_metrics=[], out_dir='', out_dt_tensor='dti_tensors.nii.gz', out_fa='fa.nii.gz', out_ga='ga.nii.gz', out_rgb='rgb.nii.gz', out_md='md.nii.gz', out_ad='ad.nii.gz', out_rd='rd.nii.gz', out_mode='mode.nii.gz', out_evec='evecs.nii.gz', out_eval='evals.nii.gz', out_dk_tensor="dki_tensors.nii.gz", out_mk="mk.nii.gz", out_ak="ak.nii.gz", out_rk="rk.nii.gz"): """ Workflow for Diffusion Kurtosis reconstruction and for computing DKI metrics. Performs a DKI reconstruction on the files by 'globing' ``input_files`` and saves the DKI metrics in a directory specified by ``out_dir``. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. bvalues_files : string Path to the bvalues files. This path may contain wildcards to use multiple bvalues files at once. bvectors_files : string Path to the bvalues files. This path may contain wildcards to use multiple bvalues files at once. mask_files : string Path to the input masks. This path may contain wildcards to use multiple masks at once. (default: No mask used) b0_threshold : float, optional Threshold used to find b=0 directions (default 0.0) save_metrics : variable string, optional List of metrics to save. Possible values: fa, ga, rgb, md, ad, rd, mode, tensor, evec, eval (default [] (all)) out_dir : string, optional Output directory (default input file directory) out_dt_tensor : string, optional Name of the tensors volume to be saved (default: 'dti_tensors.nii.gz') out_dk_tensor : string, optional Name of the tensors volume to be saved (default 'dki_tensors.nii.gz') out_fa : string, optional Name of the fractional anisotropy volume to be saved (default 'fa.nii.gz') out_ga : string, optional Name of the geodesic anisotropy volume to be saved (default 'ga.nii.gz') out_rgb : string, optional Name of the color fa volume to be saved (default 'rgb.nii.gz') out_md : string, optional Name of the mean diffusivity volume to be saved (default 'md.nii.gz') out_ad : string, optional Name of the axial diffusivity volume to be saved (default 'ad.nii.gz') out_rd : string, optional Name of the radial diffusivity volume to be saved (default 'rd.nii.gz') out_mode : string, optional Name of the mode volume to be saved (default 'mode.nii.gz') out_evec : string, optional Name of the eigenvectors volume to be saved (default 'evecs.nii.gz') out_eval : string, optional Name of the eigenvalues to be saved (default 'evals.nii.gz') out_mk : string, optional Name of the mean kurtosis to be saved (default: 'mk.nii.gz') out_ak : string, optional Name of the axial kurtosis to be saved (default: 'ak.nii.gz') out_rk : string, optional Name of the radial kurtosis to be saved (default: 'rk.nii.gz') References ---------- .. [1] Tabesh, A., Jensen, J.H., Ardekani, B.A., Helpern, J.A., 2011. Estimation of tensors and tensor-derived measures in diffusional kurtosis imaging. Magn Reson Med. 65(3), 823-836 .. [2] Jensen, Jens H., Joseph A. Helpern, Anita Ramani, Hanzhang Lu, and Kyle Kaczynski. 2005. Diffusional Kurtosis Imaging: The Quantification of Non-Gaussian Water Diffusion by Means of Magnetic Resonance Imaging. MRM 53 (6):1432-40. 
""" io_it = self.get_io_iterator() for (dwi, bval, bvec, mask, otensor, ofa, oga, orgb, omd, oad, orad, omode, oevecs, oevals, odk_tensor, omk, oak, ork) in io_it: logging.info('Computing DKI metrics for {0}'.format(dwi)) data, affine = load_nifti(dwi) if mask is not None: mask = nib.load(mask).get_data().astype(np.bool) dkfit, _ = self.get_fitted_tensor(data, mask, bval, bvec, b0_threshold) if not save_metrics: save_metrics = ['mk', 'rk', 'ak', 'fa', 'md', 'rd', 'ad', 'ga', 'rgb', 'mode', 'evec', 'eval', 'dt_tensor', 'dk_tensor'] evals, evecs, kt = split_dki_param(dkfit.model_params) FA = fractional_anisotropy(evals) FA[np.isnan(FA)] = 0 FA = np.clip(FA, 0, 1) if 'dt_tensor' in save_metrics: tensor_vals = lower_triangular(dkfit.quadratic_form) correct_order = [0, 1, 3, 2, 4, 5] tensor_vals_reordered = tensor_vals[..., correct_order] save_nifti(otensor, tensor_vals_reordered.astype(np.float32), affine) if 'dk_tensor' in save_metrics: save_nifti(odk_tensor, dkfit.kt.astype(np.float32), affine) if 'fa' in save_metrics: save_nifti(ofa, FA.astype(np.float32), affine) if 'ga' in save_metrics: GA = geodesic_anisotropy(dkfit.evals) save_nifti(oga, GA.astype(np.float32), affine) if 'rgb' in save_metrics: RGB = color_fa(FA, dkfit.evecs) save_nifti(orgb, np.array(255 * RGB, 'uint8'), affine) if 'md' in save_metrics: MD = mean_diffusivity(dkfit.evals) save_nifti(omd, MD.astype(np.float32), affine) if 'ad' in save_metrics: AD = axial_diffusivity(dkfit.evals) save_nifti(oad, AD.astype(np.float32), affine) if 'rd' in save_metrics: RD = radial_diffusivity(dkfit.evals) save_nifti(orad, RD.astype(np.float32), affine) if 'mode' in save_metrics: MODE = get_mode(dkfit.quadratic_form) save_nifti(omode, MODE.astype(np.float32), affine) if 'evec' in save_metrics: save_nifti(oevecs, dkfit.evecs.astype(np.float32), affine) if 'eval' in save_metrics: save_nifti(oevals, dkfit.evals.astype(np.float32), affine) if 'mk' in save_metrics: save_nifti(omk, dkfit.mk().astype(np.float32), affine) if 'ak' in save_metrics: save_nifti(oak, dkfit.ak().astype(np.float32), affine) if 'rk' in save_metrics: save_nifti(ork, dkfit.rk().astype(np.float32), affine) logging.info('DKI metrics saved in {0}'. format(os.path.dirname(oevals)))
def run(self, input_files, bval_files, model='ols', verbose=False, out_dir='', out_denoised='dwi_patch2self.nii.gz'): """Workflow for Patch2Self denoising method. It applies patch2self denoising on each file found by 'globing' ``input_file`` and ``bval_file``. It saves the results in a directory specified by ``out_dir``. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. bval_files : string bval file associated with the diffusion data. model : string, or initialized linear model object. This will determine the algorithm used to solve the set of linear equations underlying this model. If it is a string it needs to be one of the following: {'ols', 'ridge', 'lasso'}. Otherwise, it can be an object that inherits from `dipy.optimize.SKLearnLinearSolver` or an object with a similar interface from Scikit-Learn: `sklearn.linear_model.LinearRegression`, `sklearn.linear_model.Lasso` or `sklearn.linear_model.Ridge` and other objects that inherit from `sklearn.base.RegressorMixin`. Default: 'ols'. verbose : bool, optional Show progress of Patch2Self and time taken. out_dir : string, optional Output directory (default current directory) out_denoised : string, optional Name of the resulting denoised volume (default: dwi_patch2self.nii.gz) References ---------- .. [Fadnavis20] S. Fadnavis, J. Batson, E. Garyfallidis, Patch2Self: Denoising Diffusion MRI with Self-supervised Learning, Advances in Neural Information Processing Systems 33 (2020) """ io_it = self.get_io_iterator() for fpath, bvalpath, odenoised in io_it: if self._skip: shutil.copy(fpath, odenoised) logging.warning('Denoising skipped for now.') else: logging.info('Denoising %s', fpath) data, affine, image = load_nifti(fpath, return_img=True) bvals = np.loadtxt(bvalpath) denoised_data = patch2self(data, bvals, model=model, verbose=verbose) save_nifti(odenoised, denoised_data, affine, image.header) logging.info('Denoised volumes saved as %s', odenoised)
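# A minimal sketch of calling patch2self directly, as the workflow above does;
# input paths are hypothetical and 'ols' is the default regressor.
import numpy as np
from dipy.denoise.patch2self import patch2self
from dipy.io.image import load_nifti, save_nifti

data, affine = load_nifti('dwi.nii.gz')      # hypothetical path
bvals = np.loadtxt('dwi.bval')               # hypothetical path
denoised = patch2self(data, bvals, model='ols', verbose=True)
save_nifti('dwi_patch2self.nii.gz', denoised, affine)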
def run(self, static_image_files, moving_image_files, transform_map_file, transform_type='affine', out_dir='', out_file='transformed.nii.gz'): """ Parameters ---------- static_image_files : string Path of the static image file. moving_image_files : string Path of the moving image(s). It can be a single image or a folder containing multiple images. transform_map_file : string For the affine case, it should be a text(*.txt) file containing the affine matrix. For the diffeomorphic case, it should be a nifti file containing the mapping displacement field in each voxel with this shape (x, y, z, 3, 2) transform_type : string, optional Select the transformation type to apply between 'affine' or 'diffeomorphic'. (default affine) out_dir : string, optional Directory to save the transformed files (default ''). out_file : string, optional Name of the transformed file (default 'transformed.nii.gz'). It is recommended to use the flag --mix-names to prevent the output files from being overwritten. """ if transform_type.lower() not in ['affine', 'diffeomorphic']: raise ValueError("Invalid transformation type: Please" " provide a valid transform like 'affine'" " or 'diffeomorphic'") io = self.get_io_iterator() for static_image_file, moving_image_file, transform_file, \ out_file in io: # Loading the image data from the input files into object. static_image, static_grid2world = load_nifti(static_image_file) moving_image, moving_grid2world = load_nifti(moving_image_file) # Doing a sanity check for validating the dimensions of the input # images. check_dimensions(static_image, moving_image) if transform_type.lower() == 'affine': # Loading the affine matrix. affine_matrix = np.loadtxt(transform_file) # Setting up the affine transformation object. mapping = AffineMap( affine=affine_matrix, domain_grid_shape=static_image.shape, domain_grid2world=static_grid2world, codomain_grid_shape=moving_image.shape, codomain_grid2world=moving_grid2world) elif transform_type.lower() == 'diffeomorphic': # Loading the diffeomorphic map. disp = nib.load(transform_file) mapping = DiffeomorphicMap( 3, disp.shape[:3], disp_grid2world=np.linalg.inv(disp.affine), domain_shape=static_image.shape, domain_grid2world=static_grid2world, codomain_shape=moving_image.shape, codomain_grid2world=moving_grid2world) disp_data = disp.get_data() mapping.forward = disp_data[..., 0] mapping.backward = disp_data[..., 1] mapping.is_inverse = True # Transforming the image/ transformed = mapping.transform(moving_image) save_nifti(out_file, transformed, affine=static_grid2world)
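# A minimal sketch of the affine branch above: resample a moving image onto a
# static grid with a precomputed matrix. All file names are hypothetical.
import numpy as np
from dipy.align.imaffine import AffineMap
from dipy.io.image import load_nifti, save_nifti

static, static_grid2world = load_nifti('static.nii.gz')    # hypothetical path
moving, moving_grid2world = load_nifti('moving.nii.gz')    # hypothetical path
affine_matrix = np.loadtxt('affine.txt')                   # hypothetical path

mapping = AffineMap(affine=affine_matrix,
                    domain_grid_shape=static.shape,
                    domain_grid2world=static_grid2world,
                    codomain_grid_shape=moving.shape,
                    codomain_grid2world=moving_grid2world)
save_nifti('transformed.nii.gz', mapping.transform(moving), static_grid2world)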
def run(self, input_files, patch_radius=2, pca_method='eig', return_sigma=False, out_dir='', out_denoised='dwi_mppca.nii.gz', out_sigma='dwi_sigma.nii.gz'): r"""Workflow wrapping Marcenko-Pastur PCA denoising method. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. patch_radius : variable int, optional The radius of the local patch to be taken around each voxel (in voxels) For example, for a patch radius with value 2, and assuming the input image is a 3D image, the denoising will take place in blocks of 5x5x5 voxels. pca_method : string, optional Use either eigenvalue decomposition ('eig') or singular value decomposition ('svd') for principal component analysis. The default method is 'eig' which is faster. However, occasionally 'svd' might be more accurate. return_sigma : bool, optional If true, a noise standard deviation estimate based on the Marcenko-Pastur distribution is returned [2]_. out_dir : string, optional Output directory. (default current directory) out_denoised : string, optional Name of the resulting denoised volume. out_sigma : string, optional Name of the resulting sigma volume. References ---------- .. [1] Veraart J, Novikov DS, Christiaens D, Ades-aron B, Sijbers, Fieremans E, 2016. Denoising of Diffusion MRI using random matrix theory. Neuroimage 142:394-406. doi: 10.1016/j.neuroimage.2016.08.016 .. [2] Veraart J, Fieremans E, Novikov DS. 2016. Diffusion MRI noise mapping using random matrix theory. Magnetic Resonance in Medicine. doi: 10.1002/mrm.26059. """ io_it = self.get_io_iterator() for dwi, odenoised, osigma in io_it: logging.info('Denoising %s', dwi) data, affine, image = load_nifti(dwi, return_img=True) denoised_data, sigma = mppca(data, patch_radius=patch_radius, pca_method=pca_method, return_sigma=True) save_nifti(odenoised, denoised_data, affine, image.header) logging.info('Denoised volume saved as %s', odenoised) if return_sigma: save_nifti(osigma, sigma, affine, image.header) logging.info('Sigma volume saved as %s', osigma)
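# A minimal sketch of the Marcenko-Pastur PCA call wrapped above, with a
# hypothetical input file and the default patch radius.
from dipy.denoise.localpca import mppca
from dipy.io.image import load_nifti, save_nifti

data, affine = load_nifti('dwi.nii.gz')      # hypothetical path
denoised, sigma = mppca(data, patch_radius=2, pca_method='eig',
                        return_sigma=True)
save_nifti('dwi_mppca.nii.gz', denoised, affine)
save_nifti('dwi_sigma.nii.gz', sigma, affine)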
def run(self, input_files, cluster=False, cluster_thr=15., random_colors=False, length_gt=0, length_lt=1000, clusters_gt=0, clusters_lt=10**8, native_coords=False, stealth=False, emergency_header='icbm_2009a', bg_color=(0, 0, 0), disable_order_transparency=False, buan=False, buan_thr=0.5, buan_highlight=(1, 0, 0), out_dir='', out_stealth_png='tmp.png'): """ Interactive medical visualization - Invert the Horizon! Interact with any number of .trk, .tck or .dpy tractograms and anatomy files .nii or .nii.gz. Cluster streamlines on loading. Parameters ---------- input_files : variable string cluster : bool, optional Enable QuickBundlesX clustering. cluster_thr : float, optional Distance threshold used for clustering. Default value 15.0 for small animal brains you may need to use something smaller such as 2.0. The distance is in mm. For this parameter to be active ``cluster`` should be enabled. random_colors : bool, optional Given multiple tractograms have been included then each tractogram will be shown with different color. length_gt : float, optional Clusters with average length greater than ``length_gt`` amount in mm will be shown. length_lt : float, optional Clusters with average length less than ``length_lt`` amount in mm will be shown. clusters_gt : int, optional Clusters with size greater than ``clusters_gt`` will be shown. clusters_lt : int, optional Clusters with size less than ``clusters_gt`` will be shown. native_coords : bool, optional Show results in native coordinates. stealth : bool, optional Do not use interactive mode just save figure. emergency_header : str, optional If no anatomy reference is provided an emergency header is provided. Current options 'icbm_2009a' and 'icbm_2009c'. bg_color : variable float, optional Define the background color of the scene. Colors can be defined with 1 or 3 values and should be between [0-1]. disable_order_transparency : bool, optional Use depth peeling to sort transparent objects. If True also enables anti-aliasing. buan : bool, optional Enables BUAN framework visualization. buan_thr : float, optional Uses the threshold value to highlight segments on the bundle which have pvalues less than this threshold. buan_highlight : variable float, optional Define the bundle highlight area color. Colors can be defined with 1 or 3 values and should be between [0-1]. For example, a value of (1, 0, 0) would mean the red color. out_dir : str, optional Output directory. (default current directory) out_stealth_png : str, optional Filename of saved picture. References ---------- .. [Horizon_ISMRM19] Garyfallidis E., M-A. Cote, B.Q. Chandio, S. Fadnavis, J. Guaje, R. Aggarwal, E. St-Onge, K.S. Juneja, S. Koudoro, D. Reagan, DIPY Horizon: fast, modular, unified and adaptive visualization, Proceedings of: International Society of Magnetic Resonance in Medicine (ISMRM), Montreal, Canada, 2019. """ verbose = True tractograms = [] images = [] pams = [] numpy_files = [] interactive = not stealth world_coords = not native_coords bundle_colors = None mni_2009a = {} mni_2009a['affine'] = np.array([[1., 0., 0., -98.], [0., 1., 0., -134.], [0., 0., 1., -72.], [0., 0., 0., 1.]]) mni_2009a['dims'] = (197, 233, 189) mni_2009a['vox_size'] = (1., 1., 1.) mni_2009a['vox_space'] = 'RAS' mni_2009c = {} mni_2009c['affine'] = np.array([[1., 0., 0., -96.], [0., 1., 0., -132.], [0., 0., 1., -78.], [0., 0., 0., 1.]]) mni_2009c['dims'] = (193, 229, 193) mni_2009c['vox_size'] = (1., 1., 1.) 
mni_2009c['vox_space'] = 'RAS' if emergency_header == 'icbm_2009a': hdr = mni_2009c else: hdr = mni_2009c emergency_ref = create_nifti_header(hdr['affine'], hdr['dims'], hdr['vox_size']) io_it = self.get_io_iterator() for input_output in io_it: fname = input_output[0] if verbose: print('Loading file ...') print(fname) print('\n') fl = fname.lower() ends = fl.endswith if ends('.trk'): sft = load_tractogram(fname, 'same', bbox_valid_check=False) tractograms.append(sft) if ends('.dpy') or ends('.tck'): sft = load_tractogram(fname, emergency_ref) tractograms.append(sft) if ends('.nii.gz') or ends('.nii'): data, affine = load_nifti(fname) images.append((data, affine)) if verbose: print('Affine to RAS') np.set_printoptions(3, suppress=True) print(affine) np.set_printoptions() if ends(".pam5"): pam = load_peaks(fname) pams.append(pam) if verbose: print('Peak_dirs shape') print(pam.peak_dirs.shape) if ends(".npy"): data = np.load(fname) numpy_files.append(data) if verbose: print('numpy array length') print(len(data)) if buan: bundle_colors = [] for i in range(len(numpy_files)): n = len(numpy_files[i]) pvalues = numpy_files[i] bundle = tractograms[i].streamlines indx = assignment_map(bundle, bundle, n) ind = np.array(indx) nb_lines = len(bundle) lines_range = range(nb_lines) points_per_line = [len(bundle[i]) for i in lines_range] points_per_line = np.array(points_per_line, np.intp) cols_arr = line_colors(bundle) colors_mapper = np.repeat(lines_range, points_per_line, axis=0) vtk_colors = numpy_to_vtk_colors(255 * cols_arr[colors_mapper]) colors = numpy_support.vtk_to_numpy(vtk_colors) colors = (colors - np.min(colors)) / np.ptp(colors) for i in range(n): if pvalues[i] < buan_thr: colors[ind == i] = buan_highlight bundle_colors.append(colors) if len(bg_color) == 1: bg_color *= 3 elif len(bg_color) != 3: raise ValueError('You need 3 values to set up backgound color. ' 'e.g --bg_color 0.5 0.5 0.5') order_transparent = not disable_order_transparency horizon(tractograms=tractograms, images=images, pams=pams, cluster=cluster, cluster_thr=cluster_thr, random_colors=random_colors, bg_color=bg_color, order_transparent=order_transparent, length_gt=length_gt, length_lt=length_lt, clusters_gt=clusters_gt, clusters_lt=clusters_lt, world_coords=world_coords, interactive=interactive, buan=buan, buan_colors=bundle_colors, out_png=pjoin(out_dir, out_stealth_png))
def test_reconst_dki(): with TemporaryDirectory() as out_dir: data_path, bval_path, bvec_path = get_fnames('small_101D') volume, affine = load_nifti(data_path) mask = np.ones_like(volume[:, :, :, 0]) mask_path = pjoin(out_dir, 'tmp_mask.nii.gz') save_nifti(mask_path, mask.astype(np.uint8), affine) dki_flow = ReconstDkiFlow() args = [data_path, bval_path, bvec_path, mask_path] dki_flow.run(*args, out_dir=out_dir) fa_path = dki_flow.last_generated_outputs['out_fa'] fa_data = load_nifti_data(fa_path) assert_equal(fa_data.shape, volume.shape[:-1]) tensor_path = dki_flow.last_generated_outputs['out_dt_tensor'] tensor_data = load_nifti_data(tensor_path) assert_equal(tensor_data.shape[-1], 6) assert_equal(tensor_data.shape[:-1], volume.shape[:-1]) ga_path = dki_flow.last_generated_outputs['out_ga'] ga_data = load_nifti_data(ga_path) assert_equal(ga_data.shape, volume.shape[:-1]) rgb_path = dki_flow.last_generated_outputs['out_rgb'] rgb_data = load_nifti_data(rgb_path) assert_equal(rgb_data.shape[-1], 3) assert_equal(rgb_data.shape[:-1], volume.shape[:-1]) md_path = dki_flow.last_generated_outputs['out_md'] md_data = load_nifti_data(md_path) assert_equal(md_data.shape, volume.shape[:-1]) ad_path = dki_flow.last_generated_outputs['out_ad'] ad_data = load_nifti_data(ad_path) assert_equal(ad_data.shape, volume.shape[:-1]) rd_path = dki_flow.last_generated_outputs['out_rd'] rd_data = load_nifti_data(rd_path) assert_equal(rd_data.shape, volume.shape[:-1]) mk_path = dki_flow.last_generated_outputs['out_mk'] mk_data = load_nifti_data(mk_path) assert_equal(mk_data.shape, volume.shape[:-1]) ak_path = dki_flow.last_generated_outputs['out_ak'] ak_data = load_nifti_data(ak_path) assert_equal(ak_data.shape, volume.shape[:-1]) rk_path = dki_flow.last_generated_outputs['out_rk'] rk_data = load_nifti_data(rk_path) assert_equal(rk_data.shape, volume.shape[:-1]) kt_path = dki_flow.last_generated_outputs['out_dk_tensor'] kt_data = load_nifti_data(kt_path) assert_equal(kt_data.shape[-1], 15) assert_equal(kt_data.shape[:-1], volume.shape[:-1]) mode_path = dki_flow.last_generated_outputs['out_mode'] mode_data = load_nifti_data(mode_path) assert_equal(mode_data.shape, volume.shape[:-1]) evecs_path = dki_flow.last_generated_outputs['out_evec'] evecs_data = load_nifti_data(evecs_path) assert_equal(evecs_data.shape[-2:], tuple((3, 3))) assert_equal(evecs_data.shape[:-2], volume.shape[:-1]) evals_path = dki_flow.last_generated_outputs['out_eval'] evals_data = load_nifti_data(evals_path) assert_equal(evals_data.shape[-1], 3) assert_equal(evals_data.shape[:-1], volume.shape[:-1]) bvals, bvecs = read_bvals_bvecs(bval_path, bvec_path) bvals[0] = 5. bvecs = generate_bvecs(len(bvals)) tmp_bval_path = pjoin(out_dir, "tmp.bval") tmp_bvec_path = pjoin(out_dir, "tmp.bvec") np.savetxt(tmp_bval_path, bvals) np.savetxt(tmp_bvec_path, bvecs.T) dki_flow._force_overwrite = True npt.assert_warns(UserWarning, dki_flow.run, data_path, tmp_bval_path, tmp_bvec_path, mask_path, out_dir=out_dir, b0_threshold=0)
def ROI_registration(datapath, template, t1, b0, roi): t1_path = datapath + '/' + t1 b0_path = datapath + '/' + b0 roi_path = datapath + '/' + roi template_path = datapath + '/' + template template_img, template_affine = load_nifti(template_path) t1_img, t1_affine = load_nifti(t1_path) b0_img, b0_affine = load_nifti(b0_path) roi_img, roi_affine = load_nifti(roi_path) #diff2struct affine registartion moving = b0_img moving_grid2world = b0_affine static = t1_img static_grid2world = t1_affine affine_path = datapath + '/' + 'diff2struct_affine.mat' nbins = 32 sampling_prop = None metric = MutualInformationMetric(nbins, sampling_prop) sigmas = [3.0, 1.0, 0.0] level_iters = [10000, 1000, 100] factors = [4, 2, 1] affreg_diff2struct = AffineRegistration(metric=metric, level_iters=level_iters, sigmas=sigmas, factors=factors) transform = AffineTransform3D() params0 = None affine_diff2struct = affreg_diff2struct.optimize(static, moving, transform, params0, static_grid2world, moving_grid2world, starting_affine=None) saveAffineMat(affine_diff2struct, affine_path) # struct2standard affine registartion moving = t1_img moving_grid2world = t1_affine static = template_img static_grid2world = template_affine nbins = 32 sampling_prop = None metric = MutualInformationMetric(nbins, sampling_prop) sigmas = [3.0, 1.0, 0.0] level_iters = [10000, 1000, 100] factors = [4, 2, 1] affreg_struct2standard = AffineRegistration(metric=metric, level_iters=level_iters, sigmas=sigmas, factors=factors) transform = AffineTransform3D() params0 = None affine_struct2standard = affreg_struct2standard.optimize( static, moving, transform, params0, static_grid2world, moving_grid2world, starting_affine=None) # struct2standard SyN registartion pre_align = affine_struct2standard.get_affine() metric = CCMetric(3) level_iters = [10, 10, 5] sdr = SymmetricDiffeomorphicRegistration(metric, level_iters) mapping = sdr.optimize(static, moving, static_grid2world, moving_grid2world, pre_align) warped = mapping.transform_inverse(template_img) warped = affine_diff2struct.transform_inverse(warped) template_diff_path = datapath + '/' + 'MNI152_diff' save_nifti(template_diff_path, warped, b0_affine) warped_roi = mapping.transform_inverse(roi_img) warped_roi = affine_diff2struct.transform_inverse(warped_roi) roi_diff_path = datapath + '/' + roi + '_diff.nii.gz' save_nifti(roi_diff_path, warped_roi, b0_affine) print(" Done! ")
def read_bundles_2_subjects(subj_id='subj_1', metrics=['fa'], bundles=['af.left', 'cst.right', 'cc_1']): r"""Read images and streamlines from 2 subjects of the SNAIL dataset. Parameters ---------- subj_id : string Either ``subj_1`` or ``subj_2``. metrics : list Either ['fa'] or ['t1'] or ['fa', 't1'] bundles : list E.g., ['af.left', 'cst.right', 'cc_1']. See all the available bundles in the ``exp_bundles_maps/bundles_2_subjects`` directory of your ``$HOME/.dipy`` folder. Returns ------- dix : dict Dictionary with data of the metrics and the bundles as keys. Notes ----- If you are using these datasets please cite the following publications. References ---------- .. [1] Renauld, E., M. Descoteaux, M. Bernier, E. Garyfallidis, K. Whittingstall, "Morphology of thalamus, LGN and optic radiation do not influence EEG alpha waves", Plos One (under submission), 2015. .. [2] Garyfallidis, E., O. Ocegueda, D. Wassermann, M. Descoteaux. Robust and efficient linear registration of fascicles in the space of streamlines , Neuroimage, 117:124-140, 2015. """ dname = pjoin(dipy_home, 'exp_bundles_and_maps', 'bundles_2_subjects') from dipy.io.streamline import load_tractogram from dipy.tracking.streamline import Streamlines res = {} if 't1' in metrics: data, affine = load_nifti(pjoin(dname, subj_id, 't1_warped.nii.gz')) res['t1'] = data if 'fa' in metrics: fa, affine = load_nifti(pjoin(dname, subj_id, 'fa_1x1x1.nii.gz')) res['fa'] = fa res['affine'] = affine for bun in bundles: streams = load_tractogram(pjoin(dname, subj_id, 'bundles', 'bundles_' + bun + '.trk'), 'same', bbox_valid_check=False).streamlines streamlines = Streamlines(streams) res[bun] = streamlines return res
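# A short usage sketch for read_bundles_2_subjects: fetch FA plus one bundle
# for the first subject and inspect what comes back (keys follow the docstring
# above).
dix = read_bundles_2_subjects(subj_id='subj_1', metrics=['fa'],
                              bundles=['af.left'])
fa = dix['fa']               # FA volume
affine = dix['affine']       # voxel-to-world affine
af_left = dix['af.left']     # Streamlines object for the bundle
print(fa.shape, len(af_left))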
def bundle_analysis(model_bundle_folder, bundle_folder, orig_bundle_folder, metric_folder, group, subject, no_disks=100, out_dir=''): """ Applies statistical analysis on bundles and saves the results in a directory specified by ``out_dir``. Parameters ---------- model_bundle_folder : string Path to the input model bundle files. This path may contain wildcards to process multiple inputs at once. bundle_folder : string Path to the input bundle files in common space. This path may contain wildcards to process multiple inputs at once. orig_folder : string Path to the input bundle files in native space. This path may contain wildcards to process multiple inputs at once. metric_folder : string Path to the input dti metric or/and peak files. It will be used as metric for statistical analysis of bundles. group : string what group subject belongs to e.g. control or patient subject : string subject id e.g. 10001 no_disks : integer, optional Number of disks used for dividing bundle into disks. (Default 100) out_dir : string, optional Output directory (default input file directory) References ---------- .. [Chandio19] Chandio, B.Q., S. Koudoro, D. Reagan, J. Harezlak, E. Garyfallidis, Bundle Analytics: a computational and statistical analyses framework for tractometric studies, Proceedings of: International Society of Magnetic Resonance in Medicine (ISMRM), Montreal, Canada, 2019. """ dt = dict() mb = os.listdir(model_bundle_folder) mb.sort() bd = os.listdir(bundle_folder) bd.sort() org_bd = os.listdir(orig_bundle_folder) org_bd.sort() n = len(org_bd) for io in range(n): mbundles, _ = load_trk(os.path.join(model_bundle_folder, mb[io])) bundles, _ = load_trk(os.path.join(bundle_folder, bd[io])) orig_bundles, _ = load_trk(os.path.join(orig_bundle_folder, org_bd[io])) mbundle_streamlines = set_number_of_points(mbundles, nb_points=no_disks) metric = AveragePointwiseEuclideanMetric() qb = QuickBundles(threshold=25., metric=metric) clusters = qb.cluster(mbundle_streamlines) centroids = Streamlines(clusters.centroids) print('Number of centroids ', len(centroids.data)) print('Model bundle ', mb[io]) print('Number of streamlines in bundle in common space ', len(bundles)) print('Number of streamlines in bundle in original space ', len(orig_bundles)) _, indx = cKDTree(centroids.data, 1, copy_data=True).query(bundles.data, k=1) metric_files_names = os.listdir(metric_folder) _, affine = load_nifti(os.path.join(metric_folder, "fa.nii.gz")) affine_r = np.linalg.inv(affine) transformed_orig_bundles = transform_streamlines(orig_bundles, affine_r) for mn in range(0, len(metric_files_names)): ind = np.array(indx) fm = metric_files_names[mn][:2] bm = mb[io][:-4] dt = dict() metric_name = os.path.join(metric_folder, metric_files_names[mn]) if metric_files_names[mn][2:] == '.nii.gz': metric, _ = load_nifti(metric_name) dti_measures(transformed_orig_bundles, metric, dt, fm, bm, subject, group, ind, out_dir) else: fm = metric_files_names[mn][:3] metric = load_peaks(metric_name) peak_values(bundles, metric, dt, fm, bm, subject, group, ind, out_dir)
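# A short usage sketch for bundle_analysis; all folder names are hypothetical
# and each folder must contain matching, identically sorted bundle files.
bundle_analysis(model_bundle_folder='model_bundles/',
                bundle_folder='moved_bundles/',
                orig_bundle_folder='orig_bundles/',
                metric_folder='anatomical_measures/',
                group='patient', subject='10001',
                no_disks=100, out_dir='out/')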
def run(self, input_files, save_masked=False, median_radius=2, numpass=5, autocrop=False, vol_idx=None, dilate=None, out_dir='', out_mask='brain_mask.nii.gz', out_masked='dwi_masked.nii.gz'): """Workflow wrapping the median_otsu segmentation method. Applies median_otsu segmentation on each file found by 'globing' ``input_files`` and saves the results in a directory specified by ``out_dir``. Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. save_masked : bool, optional Save mask. median_radius : int, optional Radius (in voxels) of the applied median filter. numpass : int, optional Number of pass of the median filter. autocrop : bool, optional If True, the masked input_volumes will also be cropped using the bounding box defined by the masked data. For example, if diffusion images are of 1x1x1 (mm^3) or higher resolution auto-cropping could reduce their size in memory and speed up some of the analysis. vol_idx : variable int, optional 1D array representing indices of ``axis=-1`` of a 4D `input_volume`. From the command line use something like `3 4 5 6`. From script use something like `[3, 4, 5, 6]`. This input is required for 4D volumes. dilate : int, optional number of iterations for binary dilation. out_dir : string, optional Output directory. (default current directory) out_mask : string, optional Name of the mask volume to be saved. out_masked : string, optional Name of the masked volume to be saved. """ io_it = self.get_io_iterator() if vol_idx is not None: vol_idx = map(int, vol_idx) for fpath, mask_out_path, masked_out_path in io_it: logging.info( 'Applying median_otsu segmentation on {0}'.format(fpath)) data, affine, img = load_nifti(fpath, return_img=True) masked_volume, mask_volume = median_otsu( data, vol_idx=vol_idx, median_radius=median_radius, numpass=numpass, autocrop=autocrop, dilate=dilate) save_nifti(mask_out_path, mask_volume.astype(np.float64), affine) logging.info('Mask saved as {0}'.format(mask_out_path)) if save_masked: save_nifti(masked_out_path, masked_volume, affine, img.header) logging.info( 'Masked volume saved as {0}'.format(masked_out_path)) return io_it
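# A minimal sketch of the median_otsu call wrapped above, assuming a 4D DWI
# with a hypothetical name and using the first two volumes to build the mask.
import numpy as np
from dipy.io.image import load_nifti, save_nifti
from dipy.segment.mask import median_otsu

data, affine = load_nifti('dwi.nii.gz')      # hypothetical path
masked, mask = median_otsu(data, vol_idx=[0, 1], median_radius=2,
                           numpass=5, dilate=1)
save_nifti('brain_mask.nii.gz', mask.astype(np.uint8), affine)
save_nifti('dwi_masked.nii.gz', masked, affine)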
def run(self, input_files, bvalues_files, bvectors_files, mask_files, b0_threshold=50.0, bvecs_tol=0.01, roi_center=None, roi_radius=10, fa_thr=0.7, frf=None, extract_pam_values=False, sh_order=8, odf_to_sh_order=8, parallel=False, nbr_processes=None, out_dir='', out_pam='peaks.pam5', out_shm='shm.nii.gz', out_peaks_dir='peaks_dirs.nii.gz', out_peaks_values='peaks_values.nii.gz', out_peaks_indices='peaks_indices.nii.gz', out_gfa='gfa.nii.gz'): """ Constrained spherical deconvolution Parameters ---------- input_files : string Path to the input volumes. This path may contain wildcards to process multiple inputs at once. bvalues_files : string Path to the bvalues files. This path may contain wildcards to use multiple bvalues files at once. bvectors_files : string Path to the bvectors files. This path may contain wildcards to use multiple bvectors files at once. mask_files : string Path to the input masks. This path may contain wildcards to use multiple masks at once. (default: No mask used) b0_threshold : float, optional Threshold used to find b=0 directions bvecs_tol : float, optional Bvecs should be unit vectors. (default:0.01) roi_center : variable int, optional Center of ROI in data. If center is None, it is assumed that it is the center of the volume with shape `data.shape[:3]` (default None) roi_radius : int, optional radius of cubic ROI in voxels (default 10) fa_thr : float, optional FA threshold for calculating the response function (default 0.7) frf : variable float, optional Fiber response function can be for example inputed as 15 4 4 (from the command line) or [15, 4, 4] from a Python script to be converted to float and mutiplied by 10**-4 . If None the fiber response function will be computed automatically (default: None). extract_pam_values : bool, optional Save or not to save pam volumes as single nifti files. sh_order : int, optional Spherical harmonics order (default 6) used in the CSA fit. odf_to_sh_order : int, optional Spherical harmonics order used for peak_from_model to compress the ODF to spherical harmonics coefficients (default 8) parallel : bool, optional Whether to use parallelization in peak-finding during the calibration procedure. Default: False nbr_processes : int, optional If `parallel` is True, the number of subprocesses to use (default multiprocessing.cpu_count()). out_dir : string, optional Output directory (default input file directory) out_pam : string, optional Name of the peaks volume to be saved (default 'peaks.pam5') out_shm : string, optional Name of the shperical harmonics volume to be saved (default 'shm.nii.gz') out_peaks_dir : string, optional Name of the peaks directions volume to be saved (default 'peaks_dirs.nii.gz') out_peaks_values : string, optional Name of the peaks values volume to be saved (default 'peaks_values.nii.gz') out_peaks_indices : string, optional Name of the peaks indices volume to be saved (default 'peaks_indices.nii.gz') out_gfa : string, optional Name of the generalise fa volume to be saved (default 'gfa.nii.gz') References ---------- .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of the fibre orientation distribution in diffusion MRI: Non-negativity constrained super-resolved spherical deconvolution. 
""" io_it = self.get_io_iterator() for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir, opeaks_values, opeaks_indices, ogfa) in io_it: logging.info('Loading {0}'.format(dwi)) data, affine = load_nifti(dwi) bvals, bvecs = read_bvals_bvecs(bval, bvec) print(b0_threshold, bvals.min()) if b0_threshold < bvals.min(): warn("b0_threshold (value: {0}) is too low, increase your " "b0_threshold. It should higher than the first b0 value " "({1}).".format(b0_threshold, bvals.min())) gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold, atol=bvecs_tol) mask_vol = nib.load(maskfile).get_data().astype(np.bool) n_params = ((sh_order + 1) * (sh_order + 2)) / 2 if data.shape[-1] < n_params: raise ValueError( 'You need at least {0} unique DWI volumes to ' 'compute fiber odfs. You currently have: {1}' ' DWI volumes.'.format(n_params, data.shape[-1])) if frf is None: logging.info('Computing response function') if roi_center is not None: logging.info('Response ROI center:\n{0}' .format(roi_center)) logging.info('Response ROI radius:\n{0}' .format(roi_radius)) response, ratio, nvox = auto_response( gtab, data, roi_center=roi_center, roi_radius=roi_radius, fa_thr=fa_thr, return_number_of_voxels=True) response = list(response) else: logging.info('Using response function') if isinstance(frf, str): l01 = np.array(literal_eval(frf), dtype=np.float64) else: l01 = np.array(frf, dtype=np.float64) l01 *= 10 ** -4 response = np.array([l01[0], l01[1], l01[1]]) ratio = l01[1] / l01[0] response = (response, ratio) logging.info("Eigenvalues for the frf of the input" " data are :{0}".format(response[0])) logging.info('Ratio for smallest to largest eigen value is {0}' .format(ratio)) peaks_sphere = get_sphere('repulsion724') logging.info('CSD computation started.') csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=sh_order) peaks_csd = peaks_from_model(model=csd_model, data=data, sphere=peaks_sphere, relative_peak_threshold=.5, min_separation_angle=25, mask=mask_vol, return_sh=True, sh_order=sh_order, normalize_peaks=True, parallel=parallel, nbr_processes=nbr_processes) peaks_csd.affine = affine save_peaks(opam, peaks_csd) logging.info('CSD computation completed.') if extract_pam_values: peaks_to_niftis(peaks_csd, oshm, opeaks_dir, opeaks_values, opeaks_indices, ogfa, reshape_dirs=True) dname_ = os.path.dirname(opam) if dname_ == '': logging.info('Pam5 file saved in current directory') else: logging.info( 'Pam5 file saved in {0}'.format(dname_)) return io_it
def main(): # reads the tractography data in trk format # extracts streamlines and the file header. Streamlines should be in the same coordinate system as the FA map (used later). # input example: '/home/Example_data/tracts.trk' tractography_file = input( "Please, specify the file with tracts that you would like to analyse. File should be in the trk format. " ) streams, hdr = load_trk(tractography_file) # for old DIPY version # sft = load_trk(tractography_file, tractography_file) # streams = sft.streamlines streams_array = np.asarray(streams) print('imported tractography data:' + tractography_file) # load T1fs_conform image that operates in the same coordinates as simnibs except for the fact the center of mesh # is located at the image center # T1fs_conform image should be generated in advance during the head meshing procedure # input example: fname_T1='/home/Example_data/T1fs_conform.nii.gz' fname_T1 = input( "Please, specify the T1fs_conform image that has been generated during head meshing procedure. " ) data_T1, affine_T1 = load_nifti(fname_T1) # load FA image in the same coordinates as tracts # input example:fname_FA='/home/Example_data/DTI_FA.nii' fname_FA = input("Please, specify the FA image. ") data_FA, affine_FA = load_nifti(fname_FA) print('loaded T1fs_conform.nii and FA images') # specify the head mesh file that is used later in simnibs to simulate induced electric field # input example:'/home/Example_data/SUBJECT_MESH.msh' global mesh_path mesh_path = input("Please, specify the head mesh file. ") last_slach = max([i for i, ltr in enumerate(mesh_path) if ltr == '/']) + 1 global subject_name subject_name = mesh_path[last_slach:-4] # specify the directory where you would like to save your simulation results # input example:'/home/Example_data/Output' global out_dir out_dir = input( "Please, specify the directory where you would like to save your simulation results. " ) out_dir = out_dir + '/simulation_at_pos_' # Co-registration of T1fs_conform and FA images. Performed in 4 steps. # Step 1. Calculation of the center of mass transform. Used later as starting transform. c_of_mass = transform_centers_of_mass(data_T1, affine_T1, data_FA, affine_FA) print('calculated c_of_mass transformation') # Step 2. Calculation of a 3D translation transform. Used in the next step as starting transform. nbins = 32 sampling_prop = None metric = MutualInformationMetric(nbins, sampling_prop) level_iters = [10000, 1000, 100] sigmas = [3.0, 1.0, 0.0] factors = [4, 2, 1] affreg = AffineRegistration(metric=metric, level_iters=level_iters, sigmas=sigmas, factors=factors) transform = TranslationTransform3D() params0 = None starting_affine = c_of_mass.affine translation = affreg.optimize(data_T1, data_FA, transform, params0, affine_T1, affine_FA, starting_affine=starting_affine) print('calculated 3D translation transform') # Step 3. Calculation of a Rigid 3D transform. Used in the next step as starting transform transform = RigidTransform3D() params0 = None starting_affine = translation.affine rigid = affreg.optimize(data_T1, data_FA, transform, params0, affine_T1, affine_FA, starting_affine=starting_affine) print('calculated Rigid 3D transform') # Step 4. Calculation of an affine transform. Used for co-registration of T1 and FA images. 
transform = AffineTransform3D() params0 = None starting_affine = rigid.affine affine = affreg.optimize(data_T1, data_FA, transform, params0, affine_T1, affine_FA, starting_affine=starting_affine) print('calculated Affine 3D transform') identity = np.eye(4) inv_affine_FA = np.linalg.inv(affine_FA) inv_affine_T1 = np.linalg.inv(affine_T1) inv_affine = np.linalg.inv(affine.affine) # transforming streamlines to FA space new_streams_FA = streamline.transform_streamlines(streams, inv_affine_FA) new_streams_FA_array = np.asarray(new_streams_FA) T1_to_FA = np.dot(inv_affine_FA, np.dot(affine.affine, affine_T1)) FA_to_T1 = np.linalg.inv(T1_to_FA) # transforming streamlines from FA to T1 space new_streams_T1 = streamline.transform_streamlines(new_streams_FA, FA_to_T1) global new_streams_T1_array new_streams_T1_array = np.asarray(new_streams_T1) # calculating amline derivatives along the streamlines to get the local orientation of the streamlines global streams_array_derivative streams_array_derivative = copy.deepcopy(new_streams_T1_array) print('calculating amline derivatives') for stream in range(len(new_streams_T1_array)): my_steam = new_streams_T1_array[stream] for t in range(len(my_steam[:, 0])): streams_array_derivative[stream][t, 0] = my_deriv(t, my_steam[:, 0]) streams_array_derivative[stream][t, 1] = my_deriv(t, my_steam[:, 1]) streams_array_derivative[stream][t, 2] = my_deriv(t, my_steam[:, 2]) deriv_norm = np.linalg.norm(streams_array_derivative[stream][t, :]) streams_array_derivative[stream][ t, :] = streams_array_derivative[stream][t, :] / deriv_norm # to create a torus representing a coil in an interactive window torus = vtk.vtkParametricTorus() torus.SetRingRadius(5) torus.SetCrossSectionRadius(2) torusSource = vtk.vtkParametricFunctionSource() torusSource.SetParametricFunction(torus) torusSource.SetScalarModeToPhase() torusMapper = vtk.vtkPolyDataMapper() torusMapper.SetInputConnection(torusSource.GetOutputPort()) torusMapper.SetScalarRange(0, 360) torusActor = vtk.vtkActor() torusActor.SetMapper(torusMapper) torus_pos_x = 100 torus_pos_y = 129 torus_pos_z = 211 torusActor.SetPosition(torus_pos_x, torus_pos_y, torus_pos_z) list_streams_T1 = list(new_streams_T1) # adding one fictive bundle of length 1 with coordinates [0,0,0] to avoid some bugs with actor.line during visualization list_streams_T1.append(np.array([0, 0, 0])) global bundle_native bundle_native = list_streams_T1 # generating a list of colors to visualize later the stimualtion effects effect_max = 0.100 effect_min = -0.100 global colors colors = [ np.random.rand(*current_streamline.shape) for current_streamline in bundle_native ] for my_streamline in range(len(bundle_native) - 1): my_stream = copy.deepcopy(bundle_native[my_streamline]) for point in range(len(my_stream)): colors[my_streamline][point] = vtkplotter.colors.colorMap( (effect_min + effect_max) / 2, name='jet', vmin=effect_min, vmax=effect_max) colors[my_streamline + 1] = vtkplotter.colors.colorMap(effect_min, name='jet', vmin=effect_min, vmax=effect_max) # Vizualization of fibers over T1 # i_coord = 0 # j_coord = 0 # k_coord = 0 # global number_of_stimulations number_of_stimulations = 0 actor_line_list = [] scene = window.Scene() scene.clear() scene.background((0.5, 0.5, 0.5)) world_coords = False shape = data_T1.shape lut = actor.colormap_lookup_table(scale_range=(effect_min, effect_max), hue_range=(0.4, 1.), saturation_range=(1, 1.)) # # the lines below is for a non-interactive demonstration run only. 
# # they should remain commented unless you set "interactive" to False # lut, colors = change_TMS_effects(torus_pos_x, torus_pos_y, torus_pos_z) # bar = actor.scalar_bar(lut) # bar.SetTitle("TMS effect") # bar.SetHeight(0.3) # bar.SetWidth(0.10) # bar.SetPosition(0.85, 0.3) # scene.add(bar) actor_line_list.append( actor.line(bundle_native, colors, linewidth=5, fake_tube=True, lookup_colormap=lut)) if not world_coords: image_actor_z = actor.slicer(data_T1, identity) else: image_actor_z = actor.slicer(data_T1, identity) slicer_opacity = 0.6 image_actor_z.opacity(slicer_opacity) image_actor_x = image_actor_z.copy() x_midpoint = int(np.round(shape[0] / 2)) image_actor_x.display_extent(x_midpoint, x_midpoint, 0, shape[1] - 1, 0, shape[2] - 1) image_actor_y = image_actor_z.copy() y_midpoint = int(np.round(shape[1] / 2)) image_actor_y.display_extent(0, shape[0] - 1, y_midpoint, y_midpoint, 0, shape[2] - 1) """ Connect the actors with the scene. """ scene.add(actor_line_list[0]) scene.add(image_actor_z) scene.add(image_actor_x) scene.add(image_actor_y) show_m = window.ShowManager(scene, size=(1200, 900)) show_m.initialize() """ Create sliders to move the slices and change their opacity. """ line_slider_z = ui.LineSlider2D(min_value=0, max_value=shape[2] - 1, initial_value=shape[2] / 2, text_template="{value:.0f}", length=140) line_slider_x = ui.LineSlider2D(min_value=0, max_value=shape[0] - 1, initial_value=shape[0] / 2, text_template="{value:.0f}", length=140) line_slider_y = ui.LineSlider2D(min_value=0, max_value=shape[1] - 1, initial_value=shape[1] / 2, text_template="{value:.0f}", length=140) opacity_slider = ui.LineSlider2D(min_value=0.0, max_value=1.0, initial_value=slicer_opacity, length=140) """ Сallbacks for the sliders. """ def change_slice_z(slider): z = int(np.round(slider.value)) image_actor_z.display_extent(0, shape[0] - 1, 0, shape[1] - 1, z, z) def change_slice_x(slider): x = int(np.round(slider.value)) image_actor_x.display_extent(x, x, 0, shape[1] - 1, 0, shape[2] - 1) def change_slice_y(slider): y = int(np.round(slider.value)) image_actor_y.display_extent(0, shape[0] - 1, y, y, 0, shape[2] - 1) def change_opacity(slider): slicer_opacity = slider.value image_actor_z.opacity(slicer_opacity) image_actor_x.opacity(slicer_opacity) image_actor_y.opacity(slicer_opacity) line_slider_z.on_change = change_slice_z line_slider_x.on_change = change_slice_x line_slider_y.on_change = change_slice_y opacity_slider.on_change = change_opacity """ Сreate text labels to identify the sliders. """ def build_label(text): label = ui.TextBlock2D() label.message = text label.font_size = 18 label.font_family = 'Arial' label.justification = 'left' label.bold = False label.italic = False label.shadow = False label.background = (0, 0, 0) label.color = (1, 1, 1) return label line_slider_label_z = build_label(text="Z Slice") line_slider_label_x = build_label(text="X Slice") line_slider_label_y = build_label(text="Y Slice") opacity_slider_label = build_label(text="Opacity") """ Create a ``panel`` to contain the sliders and labels. 
""" panel = ui.Panel2D(size=(300, 200), color=(1, 1, 1), opacity=0.1, align="right") panel.center = (1030, 120) panel.add_element(line_slider_label_x, (0.1, 0.75)) panel.add_element(line_slider_x, (0.38, 0.75)) panel.add_element(line_slider_label_y, (0.1, 0.55)) panel.add_element(line_slider_y, (0.38, 0.55)) panel.add_element(line_slider_label_z, (0.1, 0.35)) panel.add_element(line_slider_z, (0.38, 0.35)) panel.add_element(opacity_slider_label, (0.1, 0.15)) panel.add_element(opacity_slider, (0.38, 0.15)) scene.add(panel) """ Create a ``panel`` to show the value of a picked voxel. """ label_position = ui.TextBlock2D(text='Position:') label_value = ui.TextBlock2D(text='Value:') result_position = ui.TextBlock2D(text='') result_value = ui.TextBlock2D(text='') text2 = ui.TextBlock2D(text='Calculate') panel_picking = ui.Panel2D(size=(250, 125), color=(1, 1, 1), opacity=0.1, align="left") panel_picking.center = (200, 120) panel_picking.add_element(label_position, (0.1, 0.75)) panel_picking.add_element(label_value, (0.1, 0.45)) panel_picking.add_element(result_position, (0.45, 0.75)) panel_picking.add_element(result_value, (0.45, 0.45)) panel_picking.add_element(text2, (0.1, 0.15)) icon_files = [] icon_files.append(('left', read_viz_icons(fname='circle-left.png'))) button_example = ui.Button2D(icon_fnames=icon_files, size=(100, 30)) panel_picking.add_element(button_example, (0.5, 0.1)) def change_text_callback(i_ren, obj, button): text2.message = str(i_coord) + ' ' + str(j_coord) + ' ' + str(k_coord) torusActor.SetPosition(i_coord, j_coord, k_coord) print(i_coord, j_coord, k_coord) lut, colors = change_TMS_effects(i_coord, j_coord, k_coord) scene.rm(actor_line_list[0]) actor_line_list.append( actor.line(bundle_native, colors, linewidth=5, fake_tube=True, lookup_colormap=lut)) scene.add(actor_line_list[1]) nonlocal number_of_stimulations global bar if number_of_stimulations > 0: scene.rm(bar) else: number_of_stimulations = number_of_stimulations + 1 bar = actor.scalar_bar(lut) bar.SetTitle("TMS effect") bar.SetHeight(0.3) bar.SetWidth(0.10) # the width is set first bar.SetPosition(0.85, 0.3) scene.add(bar) actor_line_list.pop(0) i_ren.force_render() button_example.on_left_mouse_button_clicked = change_text_callback scene.add(panel_picking) scene.add(torusActor) def left_click_callback(obj, ev): """Get the value of the clicked voxel and show it in the panel.""" event_pos = show_m.iren.GetEventPosition() obj.picker.Pick(event_pos[0], event_pos[1], 0, scene) global i_coord, j_coord, k_coord i_coord, j_coord, k_coord = obj.picker.GetPointIJK() print(i_coord, j_coord, k_coord) result_position.message = '({}, {}, {})'.format( str(i_coord), str(j_coord), str(k_coord)) result_value.message = '%.8f' % data_T1[i_coord, j_coord, k_coord] torusActor.SetPosition(i_coord, j_coord, k_coord) image_actor_z.AddObserver('LeftButtonPressEvent', left_click_callback, 1.0) global size size = scene.GetSize() def win_callback(obj, event): global size if size != obj.GetSize(): size_old = size size = obj.GetSize() size_change = [size[0] - size_old[0], 0] panel.re_align(size_change) show_m.initialize() """ Set the following variable to ``True`` to interact with the datasets in 3D. 
""" interactive = True scene.zoom(2.0) scene.reset_clipping_range() scene.set_camera(position=(-642.07, 495.40, 148.49), focal_point=(127.50, 127.50, 127.50), view_up=(0.02, -0.01, 1.00)) if interactive: show_m.add_window_callback(win_callback) show_m.render() show_m.start() else: window.record(scene, out_path=out_dir + '/bundles_and_effects.png', size=(1200, 900), reset_camera=True)
def run(self, data_files, bvals_files, bvecs_files, mask_files,
        bbox_threshold=[0.6, 1, 0, 0.1, 0, 0.1], out_dir='',
        out_file='product.json', out_mask_cc='cc.nii.gz',
        out_mask_noise='mask_noise.nii.gz'):
    """Compute the signal-to-noise ratio in the corpus callosum.

    Parameters
    ----------
    data_files : string
        Path to the dwi.nii.gz file. This path may contain wildcards to
        process multiple inputs at once.
    bvals_files : string
        Path of bvals.
    bvecs_files : string
        Path of bvecs.
    mask_files : string
        Path of the brain mask.
    bbox_threshold : variable float, optional
        Threshold for the bounding box, values separated with commas, for
        example [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
    out_dir : string, optional
        Where the resulting file will be saved. (default '')
    out_file : string, optional
        Name of the result file to be saved. (default 'product.json')
    out_mask_cc : string, optional
        Name of the CC mask volume to be saved (default 'cc.nii.gz')
    out_mask_noise : string, optional
        Name of the mask noise volume to be saved
        (default 'mask_noise.nii.gz')
    """
    io_it = self.get_io_iterator()

    for dwi_path, bvals_path, bvecs_path, mask_path, out_path, \
            cc_mask_path, mask_noise_path in io_it:
        data, affine = load_nifti(dwi_path)
        bvals, bvecs = read_bvals_bvecs(bvals_path, bvecs_path)
        gtab = gradient_table(bvals=bvals, bvecs=bvecs)

        logging.info('Computing brain mask...')
        _, calc_mask = median_otsu(data)

        mask, affine = load_nifti(mask_path)
        mask = np.array(calc_mask == mask.astype(bool)).astype(int)

        logging.info('Computing tensors...')
        tenmodel = TensorModel(gtab)
        tensorfit = tenmodel.fit(data, mask=mask)

        logging.info('Computing worst-case/best-case SNR using the CC...')

        if np.ndim(data) == 4:
            CC_box = np.zeros_like(data[..., 0])
        elif np.ndim(data) == 3:
            CC_box = np.zeros_like(data)
        else:
            raise IOError('DWI data has invalid dimensions')

        mins, maxs = bounding_box(mask)
        mins = np.array(mins)
        maxs = np.array(maxs)
        diff = (maxs - mins) // 4
        bounds_min = mins + diff
        bounds_max = maxs - diff

        CC_box[bounds_min[0]:bounds_max[0],
               bounds_min[1]:bounds_max[1],
               bounds_min[2]:bounds_max[2]] = 1

        if len(bbox_threshold) != 6:
            raise IOError('bbox_threshold should have 6 float values')

        mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
                                             bbox_threshold,
                                             return_cfa=True)

        save_nifti(cc_mask_path, mask_cc_part.astype(np.uint8), affine)
        logging.info('CC mask saved as {0}'.format(cc_mask_path))

        mean_signal = np.mean(data[mask_cc_part], axis=0)
        mask_noise = binary_dilation(mask, iterations=10)
        mask_noise[..., :mask_noise.shape[-1] // 2] = 1
        mask_noise = ~mask_noise

        save_nifti(mask_noise_path, mask_noise.astype(np.uint8), affine)
        logging.info('Mask noise saved as {0}'.format(mask_noise_path))

        noise_std = np.std(data[mask_noise, :])
        logging.info('Noise standard deviation sigma = ' + str(noise_std))

        # Exclude null bvecs from the search for the closest axes.
        idx = np.sum(gtab.bvecs, axis=-1) == 0
        gtab.bvecs[idx] = np.inf
        axis_X = np.argmin(
            np.sum((gtab.bvecs - np.array([1, 0, 0])) ** 2, axis=-1))
        axis_Y = np.argmin(
            np.sum((gtab.bvecs - np.array([0, 1, 0])) ** 2, axis=-1))
        axis_Z = np.argmin(
            np.sum((gtab.bvecs - np.array([0, 0, 1])) ** 2, axis=-1))

        SNR_output = []
        SNR_directions = []
        for direction in ['b0', axis_X, axis_Y, axis_Z]:
            if direction == 'b0':
                SNR = mean_signal[0] / noise_std
                logging.info('SNR for the b=0 image is : ' + str(SNR))
            else:
                SNR = mean_signal[direction] / noise_std
                SNR_directions.append(direction)
                logging.info('SNR for direction ' + str(direction) + ' ' +
                             str(gtab.bvecs[direction]) + ' is : ' +
                             str(SNR))
            SNR_output.append(SNR)

        data = []
        data.append({
            'data': str(SNR_output[0]) + ' ' + str(SNR_output[1]) + ' ' +
                    str(SNR_output[2]) + ' ' + str(SNR_output[3]),
            'directions': 'b0' + ' ' + str(SNR_directions[0]) + ' ' +
                          str(SNR_directions[1]) + ' ' +
                          str(SNR_directions[2])})

        with open(os.path.join(out_dir, out_path), 'w') as myfile:
            json.dump(data, myfile)
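# For reference, a minimal usage sketch. The enclosing workflow class is not
# shown in this excerpt, so the class name and module path below are
# assumptions modelled on DIPY's workflow conventions, and the file names are
# placeholders.
from dipy.workflows.stats import SnrInCCFlow

snr_flow = SnrInCCFlow()
snr_flow.run('dwi.nii.gz', 'dwi.bval', 'dwi.bvec', 'brain_mask.nii.gz',
             out_dir='snr_results')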
import numpy as np
from os.path import join as pjoin

from dipy.viz import regtools
from dipy.data import fetch_stanford_hardi
from dipy.data.fetcher import fetch_syn_data
from dipy.io.image import load_nifti
from dipy.align.imaffine import (transform_centers_of_mass,
                                 AffineMap,
                                 MutualInformationMetric,
                                 AffineRegistration)
from dipy.align.transforms import (TranslationTransform3D,
                                   RigidTransform3D,
                                   AffineTransform3D)

"""
Let's fetch two b0 volumes. The static image will be the b0 from the
Stanford HARDI dataset.
"""

files, folder = fetch_stanford_hardi()
static_data, static_affine = load_nifti(pjoin(folder, 'HARDI150.nii.gz'))
static = np.squeeze(static_data)[..., 0]
static_grid2world = static_affine

"""
Now the moving image.
"""

files, folder = fetch_syn_data()
moving_data, moving_affine = load_nifti(pjoin(folder, 'b0.nii.gz'))
moving = moving_data
moving_grid2world = moving_affine

"""
We can see that the images are far from aligned by drawing one on top of the
other. The images don't even have the same number of voxels, so in order to
draw one on top of the other we need to resample the moving image on a grid
of the same dimensions as the static image. We can do this by "transforming"
the moving image onto the static grid, as sketched below.
"""
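# Resampling sketch for the step described above: transform the moving image
# onto the static grid using an identity affine, then overlay the mid-slices.
# The figure labels are illustrative, not taken from this document.
identity = np.eye(4)
affine_map = AffineMap(identity,
                       static.shape, static_grid2world,
                       moving.shape, moving_grid2world)
resampled = affine_map.transform(moving)
regtools.overlay_slices(static, resampled, None, 0,
                        'Static', 'Moving')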
# Imports assumed by this snippet (the original excerpt omits them).
from dipy.core.gradients import gradient_table
from dipy.data import get_sphere
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti
from dipy.reconst.gqi import GeneralizedQSamplingModel

fdwi = '100307_dwi.nii.gz'
fpve = '100307_pve.nii.gz'
ffa = 'fa.nii.gz'
fdet = 'det.trk'
fpft = 'pft.trk'
fpam5 = 'peaks.pam5'

rec_model = 'GQI2'

sphere = get_sphere('repulsion724')

# fbvals, fbvecs and fbmask are assumed to be defined earlier in the script.
bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
gtab = gradient_table(bvals, bvecs)

data, affine, vox_size = load_nifti(fdwi, return_voxsize=True)
mask, _ = load_nifti(fbmask)
pve, _, img, vox_size = load_nifti(fpve, return_img=True,
                                   return_voxsize=True)

shape = data.shape[:3]

if rec_model == 'GQI2':
    # Try with SFM, SHORE, MAPL, MSMT-CSD
    model = GeneralizedQSamplingModel(gtab, sampling_length=1.2)

if rec_model == 'SFM':
    # Ariel please add here your best SFM calls and parameters
    pass
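# Hypothetical sketch for the empty SFM branch above, assuming
# dipy.reconst.sfm.SparseFascicleModel with its default solver; the call and
# its parameters are illustrative, not the author's tuned choices.
from dipy.reconst.sfm import SparseFascicleModel

if rec_model == 'SFM':
    model = SparseFascicleModel(gtab, sphere=sphere)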
from dipy.core.gradients import gradient_table
from dipy.data import get_fnames
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti, load_nifti_data
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                   auto_response)
from dipy.tracking import utils
from dipy.tracking.local_tracking import LocalTracking
from dipy.tracking.streamline import Streamlines
from dipy.tracking.stopping_criterion import ThresholdStoppingCriterion
from dipy.viz import window, actor, colormap, has_fury

# Enables/disables interactive visualization
interactive = False

hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi')
label_fname = get_fnames('stanford_labels')

data, affine, hardi_img = load_nifti(hardi_fname, return_img=True)
labels = load_nifti_data(label_fname)
bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
gtab = gradient_table(bvals, bvecs)

seed_mask = (labels == 2)
white_matter = (labels == 1) | (labels == 2)
seeds = utils.seeds_from_mask(seed_mask, affine, density=1)

response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
csd_fit = csd_model.fit(data, mask=white_matter)

"""
We use the GFA of the CSA model to build a stopping criterion.
"""
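"""
A sketch of that step: fit a CSA model, take its GFA map and threshold it.
The CsaOdfModel import and the 0.25 threshold below are assumptions taken
from common DIPY examples, not values given by this document.
"""

from dipy.reconst.shm import CsaOdfModel

csa_model = CsaOdfModel(gtab, sh_order=6)
gfa = csa_model.fit(data, mask=white_matter).gfa
stopping_criterion = ThresholdStoppingCriterion(gfa, .25)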
def run(self, pam_files, wm_files, gm_files, csf_files, seeding_files,
        step_size=0.2, seed_density=1, pmf_threshold=0.1, max_angle=20.,
        pft_back=2, pft_front=1, pft_count=15, out_dir='',
        out_tractogram='tractogram.trk'):
    """Workflow for Particle Filtering Tracking.

    This workflow uses a saved peaks and metrics (PAM) file as input.

    Parameters
    ----------
    pam_files : string
        Path to the peaks and metrics files. This path may contain
        wildcards to use multiple masks at once.
    wm_files : string
        Path to white matter partial volume estimate for tracking (CMC).
    gm_files : string
        Path to grey matter partial volume estimate for tracking (CMC).
    csf_files : string
        Path to cerebrospinal fluid partial volume estimate for tracking
        (CMC).
    seeding_files : string
        A binary image showing where we need to seed for tracking.
    step_size : float, optional
        Step size used for tracking (default 0.2mm).
    seed_density : int, optional
        Number of seeds per dimension inside the voxel (default 1).
        For example, a seed_density of 2 means 8 regularly distributed
        points in the voxel, while a seed_density of 1 means 1 point at
        the center of the voxel.
    pmf_threshold : float, optional
        Threshold for ODF functions (default 0.1).
    max_angle : float, optional
        Maximum angle between streamline segments
        (range [0, 90], default 20).
    pft_back : float, optional
        Distance in mm to back track before starting the particle
        filtering tractography (default 2mm). The total particle
        filtering tractography distance is equal to
        back_tracking_dist + front_tracking_dist.
    pft_front : float, optional
        Distance in mm to run the particle filtering tractography after
        the back track distance (default 1mm). The total particle
        filtering tractography distance is equal to
        back_tracking_dist + front_tracking_dist.
    pft_count : int, optional
        Number of particles to use in the particle filter (default 15).
    out_dir : string, optional
        Output directory (default input file directory)
    out_tractogram : string, optional
        Name of the tractogram file to be saved
        (default 'tractogram.trk')

    References
    ----------
    Girard, G., Whittingstall, K., Deriche, R., & Descoteaux, M.
    Towards quantitative connectivity analysis: reducing tractography
    biases. NeuroImage, 98, 266-278, 2014.
    """
    io_it = self.get_io_iterator()

    for pams_path, wm_path, gm_path, csf_path, seeding_path, out_tract \
            in io_it:

        logging.info('Particle Filtering tracking on {0}'
                     .format(pams_path))

        pam = load_peaks(pams_path, verbose=False)

        wm, affine, voxel_size = load_nifti(wm_path, return_voxsize=True)
        gm, _ = load_nifti(gm_path)
        csf, _ = load_nifti(csf_path)
        avs = sum(voxel_size) / len(voxel_size)  # average voxel size

        classifier = CmcTissueClassifier.from_pve(
            wm, gm, csf, step_size=step_size, average_voxel_size=avs)
        logging.info('classifier done')

        seed_mask, _ = load_nifti(seeding_path)
        seeds = utils.seeds_from_mask(
            seed_mask,
            density=[seed_density, seed_density, seed_density],
            affine=affine)
        logging.info('seeds done')

        direction_getter = ProbabilisticDirectionGetter.from_shcoeff(
            pam.shm_coeff, max_angle=max_angle, sphere=pam.sphere,
            pmf_threshold=pmf_threshold)

        streamlines_generator = ParticleFilteringTracking(
            direction_getter, classifier, seeds, affine,
            step_size=step_size,
            pft_back_tracking_dist=pft_back,
            pft_front_tracking_dist=pft_front,
            pft_max_trial=20,
            particle_count=pft_count)
        logging.info('ParticleFilteringTracking initiated')

        tractogram = Tractogram(streamlines_generator,
                                affine_to_rasmm=np.eye(4))
        save(tractogram, out_tract)

        logging.info('Saved {0}'.format(out_tract))
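# For reference, a minimal usage sketch. The enclosing workflow class is not
# shown in this excerpt, so the class name and module path below are
# assumptions, and the file names are placeholders.
from dipy.workflows.tracking import PFTrackingPAMFlow

pft_flow = PFTrackingPAMFlow()
pft_flow.run('peaks.pam5', 'wm_pve.nii.gz', 'gm_pve.nii.gz',
             'csf_pve.nii.gz', 'seed_mask.nii.gz', out_dir='pft_results')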