def constrained_spherical_deconvolution(dir_src, dir_out, verbose=False):
    """Fit a CSD model to the tagged DWI volume found in ``dir_src`` and
    save the spherical-harmonic (SH) coefficients as a NIfTI in ``dir_out``.

    Relies on module-level parameters for file naming and model settings:
    ``par_b_tag``, ``par_dim_tag``, ``par_b0_threshold``, ``par_ar_radius``,
    ``par_ar_fa_th`` — assumed defined elsewhere in this module (TODO confirm).

    Parameters
    ----------
    dir_src : str
        Directory containing bvals/bvecs/data/mask files.
    dir_out : str
        Directory where the SH coefficient volume is written.
    verbose : bool, optional
        Forwarded to ``load_nifti`` for progress output.
    """
    # Load data
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi = pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    #fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')
    # White-matter mask is used instead of the whole-brain mask above.
    fmask = pjoin(dir_src, 'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    sphere = get_sphere('symmetric724')  # NOTE(review): sphere is never used in this function

    # Estimate the single-fiber response function from a high-FA ROI.
    response, ratio = auto_response(gtab, data, roi_radius=par_ar_radius,
                                    fa_thr=par_ar_fa_th)
    # print('Response function', response)

    # Model fitting
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd_model.fit(data, mask=mask)

    # Saving Spherical Harmonic Coefficient
    out_peaks = 'sh_' + par_b_tag + '_' + par_dim_tag + '.nii.gz'
    save_nifti(pjoin(dir_out, out_peaks), csd_fit.shm_coeff, affine)
def test_response_from_mask():
    """Check that ``response_from_mask`` reproduces ``auto_response``.

    For a range of FA thresholds, build the same cubic-ROI + FA mask that
    ``auto_response`` uses internally and verify both entry points return
    identical response functions, ratios, and voxel counts.
    """
    fdata, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    data = nib.load(fdata).get_data()
    gtab = gradient_table(bvals, bvecs)
    ten = TensorModel(gtab)
    tenfit = ten.fit(data)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    radius = 3

    for fa_thr in np.arange(0, 1, 0.1):
        response_auto, ratio_auto, nvoxels = auto_response(
            gtab, data, roi_center=None, roi_radius=radius,
            fa_thr=fa_thr, return_number_of_voxels=True)

        # BUGFIX: use floor division — true division (`/ 2`) returns
        # floats under Python 3, which are invalid slice indices below.
        ci, cj, ck = np.array(data.shape[:3]) // 2
        mask = np.zeros(data.shape[:3])
        mask[ci - radius: ci + radius,
             cj - radius: cj + radius,
             ck - radius: ck + radius] = 1
        mask[FA <= fa_thr] = 0

        response_mask, ratio_mask = response_from_mask(gtab, data, mask)

        assert_equal(int(np.sum(mask)), nvoxels)
        assert_array_almost_equal(response_mask[0], response_auto[0])
        assert_almost_equal(response_mask[1], response_auto[1])
        assert_almost_equal(ratio_mask, ratio_auto)
def test_auto_response():
    """Verify ``auto_response`` treats predefined and user-supplied
    FA-selection callables identically across FA thresholds."""
    fdata, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    data = load_nifti_data(fdata)
    gtab = gradient_table(bvals, bvecs)
    radius = 3

    def test_fa_superior(FA, fa_thr):
        return FA > fa_thr

    def test_fa_inferior(FA, fa_thr):
        return FA < fa_thr

    # Pair each library-provided callable with its local re-implementation.
    callable_pairs = list(zip([fa_superior, fa_inferior],
                              [test_fa_superior, test_fa_inferior]))

    for fa_thr in np.arange(0.1, 1, 0.1):
        for predefined, defined in callable_pairs:
            common = dict(roi_center=None, roi_radius=radius,
                          fa_thr=fa_thr, return_number_of_voxels=True)
            resp_pre, ratio_pre, nvox_pre = auto_response(
                gtab, data, fa_callable=predefined, **common)
            resp_def, ratio_def, nvox_def = auto_response(
                gtab, data, fa_callable=defined, **common)

            assert_equal(nvox_pre, nvox_def)
            assert_array_almost_equal(resp_pre[0], resp_def[0])
            assert_almost_equal(resp_pre[1], resp_def[1])
            assert_almost_equal(ratio_pre, ratio_def)
def test_auto_response_deprecated():
    """Calling ``auto_response`` must raise a DeprecationWarning."""
    with warnings.catch_warnings(record=True) as recorded:
        warnings.simplefilter("always", DeprecationWarning)
        gtab, data, _, _, _ = get_test_data()
        _, _ = auto_response(gtab, data, roi_center=None,
                             roi_radius=1, fa_thr=0.7)
        # The first captured warning should be the deprecation notice.
        npt.assert_(issubclass(recorded[0].category, DeprecationWarning))
def test_auto_response():
    """Predefined and locally-defined FA callables must give identical
    responses, ratios, and voxel counts from ``auto_response``."""
    fdata, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    data = nib.load(fdata).get_data()
    gtab = gradient_table(bvals, bvecs)
    radius = 3

    def test_fa_superior(FA, fa_thr):
        return FA > fa_thr

    def test_fa_inferior(FA, fa_thr):
        return FA < fa_thr

    fn_pairs = list(zip([fa_superior, fa_inferior],
                        [test_fa_superior, test_fa_inferior]))

    for fa_thr in np.arange(0.1, 1, 0.1):
        for predefined, defined in fn_pairs:
            shared = dict(roi_center=None, roi_radius=radius,
                          fa_thr=fa_thr, return_number_of_voxels=True)
            r_pre, ratio_pre, n_pre = auto_response(
                gtab, data, fa_callable=predefined, **shared)
            r_def, ratio_def, n_def = auto_response(
                gtab, data, fa_callable=defined, **shared)

            assert_equal(n_pre, n_def)
            assert_array_almost_equal(r_pre[0], r_def[0])
            assert_almost_equal(r_pre[1], r_def[1])
            assert_almost_equal(ratio_pre, ratio_def)
def fit_csd(data_files, bval_files, bvec_files, mask=None, response=None,
            sh_order=8, lambda_=1, tau=0.1, out_dir=None):
    """
    Fit the CSD model and save file with SH coefficients.

    Parameters
    ----------
    data_files : str or list
        Files containing DWI data. If this is a str, that's the full path to a
        single file. If it's a list, each entry is a full path.
    bval_files : str or list
        Equivalent to `data_files`.
    bvec_files : str or list
        Equivalent to `data_files`.
    mask : ndarray, optional
        Binary mask, set to True or 1 in voxels to be processed.
        Default: Process all voxels.
    response : tuple, optional
        Precomputed fiber response function. Default: estimate it with
        ``auto_response`` (roi_radius=10, fa_thr=0.7).
    sh_order : int, optional
        Spherical-harmonic order of the CSD fit. Default: 8.
    lambda_, tau : float, optional
        NOTE(review): currently unused — kept for interface compatibility.
    out_dir : str, optional
        A full path to a directory to store the maps that get computed.
        Default: file with coefficients gets stored in the same directory as
        the first DWI file in `data_files`.

    Returns
    -------
    fname : the full path to the file containing the SH coefficients.
    """
    img, data, gtab, mask = ut.prepare_data(data_files, bval_files,
                                            bvec_files)
    if response is None:
        response, ratio = csd.auto_response(gtab, data, roi_radius=10,
                                            fa_thr=0.7)

    csdmodel = csd.ConstrainedSphericalDeconvModel(gtab, response,
                                                   sh_order=sh_order)
    csdfit = csdmodel.fit(data, mask=mask)

    if out_dir is None:
        # BUGFIX: `data_files` may be a list (see docstring); os.path.split
        # on a list raises. Use the first DWI file in that case.
        first_file = (data_files[0]
                      if isinstance(data_files, (list, tuple))
                      else data_files)
        # NOTE(review): output subdir is named 'dki' although this is a CSD
        # fit — kept as-is for backward compatibility with existing outputs.
        out_dir = op.join(op.split(first_file)[0], 'dki')

    if not op.exists(out_dir):
        os.makedirs(out_dir)

    aff = img.affine
    fname = op.join(out_dir, 'csd_sh_coeff.nii.gz')
    nib.save(nib.Nifti1Image(csdfit.shm_coeff, aff), fname)
    return fname
def create_csd_model(data, gtab, white_matter, sh_order=6):
    """Fit a constrained spherical deconvolution model inside the WM mask.

    The single-fiber response is estimated automatically from a high-FA
    ROI (radius 10, FA threshold 0.7). Returns the fitted model object.
    """
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, auto_response

    response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
    model = ConstrainedSphericalDeconvModel(gtab, response,
                                            sh_order=sh_order)
    return model.fit(data, mask=white_matter)
def tracking_eudx4csd(dir_src, dir_out, verbose=False):
    """Run EuDX tractography on CSD peaks and save a TrackVis .trk file.

    Reads the tagged DWI/mask volumes from ``dir_src``; module-level
    parameters (``par_b_tag``, ``par_dim_tag``, ``par_b0_threshold``,
    ``par_ar_radius``, ``par_ar_fa_th``, ``par_eudx_seeds``,
    ``par_eudx_threshold``, ``par_dim_vox``, ``par_csd_tag``,
    ``par_eudx_tag``) are assumed defined elsewhere in this module.
    """
    # Load data
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi = pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    #fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')
    # A white-matter mask is used rather than the whole-brain mask above.
    fmask = pjoin(dir_src, 'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)
    sphere = get_sphere('symmetric724')

    # Single-fiber response from a high-FA ROI.
    response, ratio = auto_response(gtab, data, roi_radius=par_ar_radius,
                                    fa_thr=par_ar_fa_th)
    # print('Response function', response)

    # Model fitting
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_peaks = peaks_from_model(csd_model,
                                 data,
                                 sphere,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=False)

    # Computation of streamlines (deterministic EuDX on the CSD peaks)
    streamlines = EuDX(csd_peaks.peak_values,
                       csd_peaks.peak_indices,
                       seeds=par_eudx_seeds,
                       odf_vertices=sphere.vertices,
                       a_low=par_eudx_threshold)

    # Saving tractography in the legacy nibabel trackvis format
    voxel_size = (par_dim_vox, ) * 3
    dims = mask.shape[:3]
    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = voxel_size
    hdr['voxel_order'] = 'LAS'
    hdr['dim'] = dims
    hdr['vox_to_ras'] = affine
    # trackvis.write expects (streamline, scalars, properties) triples
    strm = ((sl, None, None) for sl in streamlines)
    trk_name = 'tractogram_' + par_b_tag + '_' + par_dim_tag + '_' + par_csd_tag + '_' + par_eudx_tag + '.trk'
    trk_out = os.path.join(dir_out, trk_name)
    nib.trackvis.write(trk_out, strm, hdr, points_space='voxel')
def quick_fodf(gtab, images, sphere=default_sphere, radius=10,
               fa_threshold=0.7):
    """Compute fiber ODFs for a DWI volume via CSD.

    The fiber response is estimated automatically from a cubic ROI of
    ``radius`` voxels thresholded at ``fa_threshold``; the fitted model's
    ODFs are evaluated on ``sphere`` and returned.
    """
    response, ratio = auto_response(gtab, images, roi_radius=radius,
                                    fa_thr=fa_threshold)
    fit = ConstrainedSphericalDeconvModel(gtab, response).fit(images)
    return fit.odf(sphere)
def _fit(gtab, data, mask, response=None, sh_order=8, lambda_=1, tau=0.1):
    """
    Helper function that does the core of fitting a model to data.
    """
    # Estimate the response function if the caller did not supply one.
    if response is None:
        response, ratio = csd.auto_response(gtab, data, roi_radius=10,
                                            fa_thr=0.7)
    model = csd.ConstrainedSphericalDeconvModel(gtab, response,
                                                sh_order=sh_order)
    return model.fit(data, mask=mask)
def tracking_eudx4csd(dir_src, dir_out, verbose=False):
    """EuDX tractography on CSD peaks, saved as a TrackVis .trk file.

    File naming and tracking parameters come from module-level globals
    (``par_b_tag``, ``par_dim_tag``, ``par_b0_threshold``, ``par_ar_radius``,
    ``par_ar_fa_th``, ``par_eudx_seeds``, ``par_eudx_threshold``,
    ``par_dim_vox``, ``par_csd_tag``, ``par_eudx_tag``) — assumed defined
    elsewhere in this module.
    """
    # Load data
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi = pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    #fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')
    # White-matter mask preferred over the whole-brain mask above.
    fmask = pjoin(dir_src, 'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)
    sphere = get_sphere('symmetric724')

    # Single-fiber response estimated from a high-FA ROI.
    response, ratio = auto_response(gtab, data, roi_radius=par_ar_radius,
                                    fa_thr=par_ar_fa_th)
    # print('Response function', response)

    # Model fitting
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_peaks = peaks_from_model(csd_model,
                                 data,
                                 sphere,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=False)

    # Computation of streamlines (deterministic EuDX over the peak field)
    streamlines = EuDX(csd_peaks.peak_values,
                       csd_peaks.peak_indices,
                       seeds=par_eudx_seeds,
                       odf_vertices= sphere.vertices,
                       a_low=par_eudx_threshold)

    # Saving tractography via the legacy nibabel trackvis interface
    voxel_size = (par_dim_vox,) * 3
    dims = mask.shape[:3]
    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = voxel_size
    hdr['voxel_order'] = 'LAS'
    hdr['dim'] = dims
    hdr['vox_to_ras'] = affine
    # trackvis.write consumes (streamline, scalars, properties) triples
    strm = ((sl, None, None) for sl in streamlines)
    trk_name = 'tractogram_' + par_b_tag + '_' + par_dim_tag + '_' + par_csd_tag + '_' + par_eudx_tag + '.trk'
    trk_out = os.path.join(dir_out, trk_name)
    nib.trackvis.write(trk_out, strm, hdr, points_space='voxel')
def reconstruction(dwi, bval_file, bvec_file, mask=None, type='dti', b0=0.,
                   order=4):
    """
    Uses Dipy to reconstruct an fODF for each voxel.

    Parameters
    ----------
    dwi: numpy array (mandatory)
        Holds the diffusion weighted image in a 4D-array (see nibabel).
    bval_file: string (mandatory)
        Path to the b-value file (FSL format).
    bvec_file: string (mandatory)
        Path to the b-vectors file (FSL format).
    mask: numpy array
        Holds the mask in a 3D array (see nibabel).
    type: string \in {'dti','csd','csa'} (default = 'dti')
        The type of the ODF reconstruction.
        (Parameter name shadows the builtin ``type`` — kept for
        backward compatibility with existing callers.)
    b0: float (default = 0)
        Threshold to use for defining b0 images.
    order: int (default = 4)
        Order to use for constrained spherical deconvolution (csd) or
        constant solid angle (csa).

    Returns
    -------
    model_fit: Dipy Object (depends on the type)
        Represents the fitted model for each voxel.

    Raises
    ------
    ValueError
        If ``type`` is not one of 'dti', 'csd', 'csa'.
    """
    # b-values and b-vectors
    bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file)
    gtab = gradient_table(bvals, bvecs, b0_threshold=b0)

    # reconstruction
    if type == 'dti':
        model = TensorModel(gtab, fit_method='WLS')
    elif type == 'csd':
        response, ratio = auto_response(gtab, dwi, roi_radius=10, fa_thr=0.7)
        model = ConstrainedSphericalDeconvModel(gtab, response,
                                                sh_order=order)
    elif type == 'csa':
        model = CsaOdfModel(gtab, order)
    else:
        # BUGFIX: previously an unknown type fell through and crashed
        # with a NameError on `model`; fail fast with a clear message.
        raise ValueError(
            "Unknown reconstruction type: {!r} "
            "(expected 'dti', 'csd' or 'csa')".format(type))

    if mask is not None:
        model_fit = model.fit(dwi, mask=mask)
    else:
        model_fit = model.fit(dwi)
    return model_fit
def track(dname, fdwi, fbval, fbvec, fmask=None, seed_density = 1, show=False):
    """Full deterministic tracking pipeline: DTI fit, peak extraction,
    FA-thresholded local tracking, and .trk export.

    Parameters
    ----------
    dname : str
        Output directory prefix (file names are appended directly, so it
        should end with a path separator).
    fdwi, fbval, fbvec : str
        Paths to the DWI volume and FSL-format bvals/bvecs.
    fmask : str, optional
        Brain-mask NIfTI; if None a mask is computed with median_otsu.
    seed_density : int, optional
        Seeds per voxel passed to ``seeds_from_mask``.
    show : bool, optional
        If True, render the result with ``show_results``.
    """
    data, affine = load_nifti(fdwi)
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=50)

    if fmask is None:
        from dipy.segment.mask import median_otsu
        b0_mask, mask = median_otsu(data)  # TODO: check parameters to improve the mask
    else:
        mask, mask_affine = load_nifti(fmask)
        mask = np.squeeze(mask)  # fix mask dimensions

    # compute DTI model
    from dipy.reconst.dti import TensorModel
    tenmodel = TensorModel(gtab)  # , fit_method='OLS') #, min_signal=5000)

    # fit the dti model
    tenfit = tenmodel.fit(data, mask=mask)

    # save fa
    ffa = dname + 'tensor_fa.nii.gz'
    fa_img = nib.Nifti1Image(tenfit.fa.astype(np.float32), affine)
    nib.save(fa_img, ffa)

    # SH order for the peak fit; reduced when too few volumes are present.
    sh_order = 8  # TODO: check what that does
    if data.shape[-1] < 15:
        raise ValueError('You need at least 15 unique DWI volumes to '
                         'compute fiber ODFs. You currently have: {0}'
                         ' DWI volumes.'.format(data.shape[-1]))
    elif data.shape[-1] < 30:
        sh_order = 6

    # compute the response equation ?
    from dipy.reconst.csdeconv import auto_response
    response, ratio = auto_response(gtab, data)
    response = list(response)
    # NOTE(review): `response` is computed but never used below —
    # peaks_from_model is called with the tensor model, not a CSD model.
    # Confirm whether a CSD model was intended here.

    peaks_sphere = get_sphere('symmetric362')

    # TODO: check what that does
    peaks_csd = peaks_from_model(model=tenmodel,
                                 data=data,
                                 sphere=peaks_sphere,
                                 relative_peak_threshold=.5,  # .5
                                 min_separation_angle=25,
                                 mask=mask,
                                 return_sh=True,
                                 sh_order=sh_order,
                                 normalize_peaks=True,
                                 parallel=False)
    peaks_csd.affine = affine
    fpeaks = dname + 'peaks.npz'
    save_peaks(fpeaks, peaks_csd)

    from dipy.io.trackvis import save_trk
    from dipy.tracking import utils
    from dipy.tracking.local import (ThresholdTissueClassifier, LocalTracking)

    stopping_thr = 0.25  # 0.25

    pam = load_peaks(fpeaks)

    #ffa = dname + 'tensor_fa_nomask.nii.gz'
    fa, fa_affine = load_nifti(ffa)
    classifier = ThresholdTissueClassifier(fa, stopping_thr)

    # seeds
    seed_mask = fa > 0.4  # 0.4 #TODO: check this parameter
    seeds = utils.seeds_from_mask(
        seed_mask,
        density=seed_density,
        affine=affine)

    # tractography, if affine then in world coordinates
    streamlines = LocalTracking(pam, classifier, seeds, affine=affine,
                                step_size=.5)
    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    ftractogram = dname + 'tractogram.trk'

    #save .trk
    save_trk_old_style(ftractogram, streamlines, affine, fa.shape)

    if show:
        #render
        show_results(data, streamlines, fa, fa_affine)
def compute_tensors(self, dti_vol, atlas_file, gtab):
    # WGR:TODO figure out how to organize tensor options and formats
    # WGR:TODO figure out how to deal with files on disk vs. in workspace
    """
    Takes registered DTI image and produces tensors

    **Positional Arguments:**
            dti_vol:
                - Registered DTI volume, from workspace.
            atlas_file:
                - File containing an atlas (or brain mask).
            gtab:
                - Structure containing dipy formatted bval/bvec information
    """
    # NOTE(review): this is Python 2 code (print statements below) and
    # references `data` and `startTime`, which are not defined in this
    # method — presumably module-level globals, or `data` should be
    # `dti_vol`. Confirm before running.
    labeldata = nib.load(atlas_file)

    label = labeldata.get_data()

    """
    Create a brain mask. Here we just threshold labels.
    """

    mask = (label > 0)

    gtab.info  # NOTE(review): attribute access with no effect — likely leftover debugging
    print data.shape
    """
    For the constrained spherical deconvolution we need to estimate the
    response function (see :ref:`example_reconst_csd`) and create a model.
    """

    response, ratio = auto_response(gtab, dti_vol, roi_radius=10,
                                    fa_thr=0.7)

    csd_model = ConstrainedSphericalDeconvModel(gtab, response)

    """
    Next, we use ``peaks_from_model`` to fit the data and calculated
    the fiber directions in all voxels.
    """

    sphere = get_sphere('symmetric724')

    csd_peaks = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 mask=mask,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=True)

    """
    For the tracking part, we will use ``csd_model`` fiber directions
    but stop tracking where fractional anisotropy (FA) is low (< 0.1).
    To derive the FA, used as a stopping criterion, we need to fit a
    tensor model first. Here, we use weighted least squares (WLS).
    """
    print 'tensors...'

    tensor_model = TensorModel(gtab, fit_method='WLS')
    tensor_fit = tensor_model.fit(data, mask)

    FA = fractional_anisotropy(tensor_fit.evals)

    """
    In order for the stopping values to be used with our tracking
    algorithm we need to have the same dimensions as the
    ``csd_peaks.peak_values``. For this reason, we can assign the
    same FA value to every peak direction in the same voxel in
    the following way.
    """

    stopping_values = np.zeros(csd_peaks.peak_values.shape)
    stopping_values[:] = FA[..., None]
    print datetime.now() - startTime
    pass
def run(self, input_files, bvalues_files, bvectors_files, mask_files,
        b0_threshold=50.0, bvecs_tol=0.01, roi_center=None, roi_radius=10,
        fa_thr=0.7, frf=None, extract_pam_values=False, sh_order=8,
        odf_to_sh_order=8, parallel=False, nbr_processes=None,
        out_dir='', out_pam='peaks.pam5', out_shm='shm.nii.gz',
        out_peaks_dir='peaks_dirs.nii.gz',
        out_peaks_values='peaks_values.nii.gz',
        out_peaks_indices='peaks_indices.nii.gz',
        out_gfa='gfa.nii.gz'):
    """ Constrained spherical deconvolution

    Parameters
    ----------
    input_files : string
        Path to the input volumes. This path may contain wildcards to
        process multiple inputs at once.
    bvalues_files : string
        Path to the bvalues files. This path may contain wildcards to use
        multiple bvalues files at once.
    bvectors_files : string
        Path to the bvectors files. This path may contain wildcards to use
        multiple bvectors files at once.
    mask_files : string
        Path to the input masks. This path may contain wildcards to use
        multiple masks at once. (default: No mask used)
    b0_threshold : float, optional
        Threshold used to find b=0 directions
    bvecs_tol : float, optional
        Bvecs should be unit vectors. (default:0.01)
    roi_center : variable int, optional
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]` (default None)
    roi_radius : int, optional
        radius of cubic ROI in voxels (default 10)
    fa_thr : float, optional
        FA threshold for calculating the response function (default 0.7)
    frf : variable float, optional
        Fiber response function can be for example inputed as 15 4 4
        (from the command line) or [15, 4, 4] from a Python script to be
        converted to float and mutiplied by 10**-4 . If None
        the fiber response function will be computed automatically
        (default: None).
    extract_pam_values : bool, optional
        Save or not to save pam volumes as single nifti files.
    sh_order : int, optional
        Spherical harmonics order (default 6) used in the CSA fit.
    odf_to_sh_order : int, optional
        Spherical harmonics order used for peak_from_model to compress
        the ODF to spherical harmonics coefficients (default 8)
    parallel : bool, optional
        Whether to use parallelization in peak-finding during the
        calibration procedure. Default: False
    nbr_processes : int, optional
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).
    out_dir : string, optional
        Output directory (default input file directory)
    out_pam : string, optional
        Name of the peaks volume to be saved (default 'peaks.pam5')
    out_shm : string, optional
        Name of the shperical harmonics volume to be saved
        (default 'shm.nii.gz')
    out_peaks_dir : string, optional
        Name of the peaks directions volume to be saved
        (default 'peaks_dirs.nii.gz')
    out_peaks_values : string, optional
        Name of the peaks values volume to be saved
        (default 'peaks_values.nii.gz')
    out_peaks_indices : string, optional
        Name of the peaks indices volume to be saved
        (default 'peaks_indices.nii.gz')
    out_gfa : string, optional
        Name of the generalise fa volume to be saved (default 'gfa.nii.gz')

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007.
        Robust determination of the fibre orientation distribution in
        diffusion MRI: Non-negativity constrained super-resolved spherical
        deconvolution.
    """
    io_it = self.get_io_iterator()

    # One iteration per matched (input, bval, bvec, mask, outputs) tuple.
    for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir, opeaks_values,
         opeaks_indices, ogfa) in io_it:

        logging.info('Loading {0}'.format(dwi))
        data, affine = load_nifti(dwi)

        bvals, bvecs = read_bvals_bvecs(bval, bvec)
        print(b0_threshold, bvals.min())  # NOTE(review): debug print left in
        if b0_threshold < bvals.min():
            warn("b0_threshold (value: {0}) is too low, increase your "
                 "b0_threshold. It should higher than the first b0 value "
                 "({1}).".format(b0_threshold, bvals.min()))
        gtab = gradient_table(bvals, bvecs,
                              b0_threshold=b0_threshold, atol=bvecs_tol)
        # NOTE(review): np.bool is deprecated in modern NumPy; get_data()
        # is deprecated in modern nibabel.
        mask_vol = nib.load(maskfile).get_data().astype(np.bool)

        # Number of SH coefficients needed for this sh_order.
        n_params = ((sh_order + 1) * (sh_order + 2)) / 2
        if data.shape[-1] < n_params:
            raise ValueError(
                'You need at least {0} unique DWI volumes to '
                'compute fiber odfs. You currently have: {1}'
                ' DWI volumes.'.format(n_params, data.shape[-1]))

        if frf is None:
            # Estimate the fiber response function from a high-FA ROI.
            logging.info('Computing response function')
            if roi_center is not None:
                logging.info('Response ROI center:\n{0}'
                             .format(roi_center))
                logging.info('Response ROI radius:\n{0}'
                             .format(roi_radius))
            response, ratio, nvox = auto_response(
                    gtab, data,
                    roi_center=roi_center,
                    roi_radius=roi_radius,
                    fa_thr=fa_thr,
                    return_number_of_voxels=True)
            response = list(response)
        else:
            # Use the user-supplied response; values are scaled by 1e-4
            # so they can be given as e.g. "15 4 4" on the command line.
            logging.info('Using response function')
            if isinstance(frf, str):
                l01 = np.array(literal_eval(frf), dtype=np.float64)
            else:
                l01 = np.array(frf, dtype=np.float64)

            l01 *= 10 ** -4
            response = np.array([l01[0], l01[1], l01[1]])
            ratio = l01[1] / l01[0]
            response = (response, ratio)

        logging.info("Eigenvalues for the frf of the input"
                     " data are :{0}".format(response[0]))
        logging.info('Ratio for smallest to largest eigen value is {0}'
                     .format(ratio))

        peaks_sphere = get_sphere('repulsion724')

        logging.info('CSD computation started.')
        csd_model = ConstrainedSphericalDeconvModel(gtab, response,
                                                    sh_order=sh_order)

        peaks_csd = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=peaks_sphere,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     mask=mask_vol,
                                     return_sh=True,
                                     sh_order=sh_order,
                                     normalize_peaks=True,
                                     parallel=parallel,
                                     nbr_processes=nbr_processes)
        peaks_csd.affine = affine

        save_peaks(opam, peaks_csd)

        logging.info('CSD computation completed.')

        if extract_pam_values:
            peaks_to_niftis(peaks_csd, oshm, opeaks_dir, opeaks_values,
                            opeaks_indices, ogfa, reshape_dirs=True)

        dname_ = os.path.dirname(opam)
        if dname_ == '':
            logging.info('Pam5 file saved in current directory')
        else:
            logging.info(
                    'Pam5 file saved in {0}'.format(dname_))

    return io_it
def test_csdeconv():
    """End-to-end CSD test on a simulated two-fiber crossing.

    Checks that the fitted fODF recovers both fiber directions, that
    sh_order warnings behave as documented, and that ``auto_response``
    recovers the ground-truth response from a homogeneous volume.
    """
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)

    # Two identical prolate tensors crossing at 60 degrees.
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]
    S, sticks = multi_tensor(gtab, mevals, S0, angles=angles,
                             fractions=[50, 50], snr=SNR)

    sphere = get_sphere('symmetric362')
    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(S)
    assert_equal(csd_fit.shm_coeff[0] > 0, True)
    fodf = csd_fit.odf(sphere)

    directions, _, _ = peak_directions(odf_gt, sphere)
    directions2, _, _ = peak_directions(fodf, sphere)

    ang_sim = angular_similarity(directions, directions2)

    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions.shape[0], 2)
    assert_equal(directions2.shape[0], 2)

    # sh_order=10 exceeds what 64 directions support -> expect a warning;
    # sh_order=8 should be silent.
    with warnings.catch_warnings(record=True) as w:
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=10)
        assert_equal(len(w) > 0, True)

    with warnings.catch_warnings(record=True) as w:
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        assert_equal(len(w) > 0, False)

    # Homogeneous single-fiber volume for auto_response recovery.
    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    S2 = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    big_S = np.zeros((10, 10, 10, len(S2)))
    big_S[:] = S2

    aresponse, aratio = auto_response(gtab, big_S, roi_center=(5, 5, 4),
                                      roi_radius=3, fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])
    assert_almost_equal(aresponse[1], 100)
    assert_almost_equal(aratio, response[0][1] / response[0][0])

    aresponse2, aratio2 = auto_response(gtab, big_S, roi_radius=3,
                                        fa_thr=0.5)
    # BUGFIX: this assertion previously re-checked `aresponse` (already
    # verified above), leaving `aresponse2` untested.
    assert_array_almost_equal(aresponse2[0], response[0])
# Loading values, vectors, image and mask sphere = get_sphere('symmetric362') print "loading bval/bvec files" bvals, bvecs = read_bvals_bvecs("tp3_data//bvals2000", "tp3_data//bvecs2000") gtab = gradient_table(bvals, bvecs) print "loading nifti files" img = nib.load("tp3_data//dwi2000.nii.gz") affine = img.get_affine() data = img.get_data() mask = nib.load("tp3_data//_binary_mask.nii.gz").get_data() ## Apply mask data_in_wm = applymask(data, mask) response, ratio = auto_response(gtab, data_in_wm) # Computing ODF print "computing fODF... please wait an hour" csd_model = ConstrainedSphericalDeconvModel(gtab, response, reg_sphere=sphere) peaks_csd = peaks_from_model(model=csd_model, data=data, sphere=sphere, relative_peak_threshold=.25, min_separation_angle=25, mask=mask, normalize_peaks=True, parallel=True) # Saving files print "saving files" nib.save(nib.Nifti1Image(peaks_csd.shm_coeff.astype(np.float32), affine), "tp3_data//_fodf.nii.gz")
#Build Brain Mask bm = np.where(labels==0, False, True) print 'masking the brain finished' # white matter mask logic1 = np.logical_and(labels_>117, labels_<148) logic2 = np.logical_and(labels_>283, labels_<314) logic = np.logical_or(logic1, logic2) logic_ = np.logical_or(labels_==150, labels_==316) wm = np.where(logic, True, np.where(logic_, True, False)) mask = wm radius = 15 response, ratio, nvl = auto_response(gtab, data, roi_radius=radius, return_number_of_voxels=True) print 'We use the roi_radius={},\nand the response is {},\nthe ratio is {},\nusing {} of voxels'.format(radius, response, ratio, nvl) print 'fitting CSD model' st2 = time.time() sphere = get_sphere('symmetric724') csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=8) csd_peaks = peaks_from_model(csd_model, data, sphere = sphere, relative_peak_threshold=0.1, min_separation_angle=25,mask=mask, return_sh=True, parallel=True, normalize_peaks=True) et2 = time.time() - st2 print 'fitting CSD model finished, running time is {}'.format(et2) print 'seeding begins, using np.random.seed(123)' st3 = time.time() np.random.seed(123)
# NOTE(review): Python 2 notebook export; `img` and `mypath` are defined
# in an earlier cell (not visible here).
data = img.get_data()
affine = img.affine
print 'loading img data finished'

# load bvals and bvecs data (plain-text FSL-style files)
bvals = np.loadtxt(mypath+'N54917_RAS_ecc_bvals.txt')
bvecs = np.loadtxt(mypath+'N54917_RAS_ecc_bvecs.txt')
gtab = gradient_table(bvals, bvecs)

# auto response function in the whole brain (center ROI, radius 30, FA>0.7)
from dipy.reconst.csdeconv import auto_response
response1, ratio1, nvoxel = auto_response(gtab, data, roi_radius=30,
                                          fa_thr=0.7, roi_center=None,
                                          return_number_of_voxels=True)
print 'ratio1'
print ratio1
print response1


# In[ ]:

# recursive response function (alternative, calibration-free estimator)
from dipy.reconst.csdeconv import recursive_response
#logic1 = np.logical_and(labels>117, labels<148)
#logic2 = np.logical_and(labels>283, labels<314)
#logic = np.logical_or(logic1, logic2)
#logic_ = np.logical_or(labels==150, labels==316)
#wm = np.where(logic, 1, np.where(logic_, 1, 0))
def _run_interface(self, runtime):
    """Nipype interface body: probabilistic CSD tractography.

    Loads DWI/FA/mask from ``self.inputs``, builds a WM mask (FA >= 0.2),
    fits CSA (for stopping GFA) and CSD (for directions), runs
    probabilistic local tracking, and writes ``<base>_CSDprob.trk``.
    """
    import numpy as np
    import nibabel as nib
    from dipy.io import read_bvals_bvecs
    from dipy.core.gradients import gradient_table
    from nipype.utils.filemanip import split_filename

    # Loading the data
    fname = self.inputs.in_file
    img = nib.load(fname)
    data = img.get_data()
    affine = img.get_affine()

    FA_fname = self.inputs.FA_file
    FA_img = nib.load(FA_fname)
    fa = FA_img.get_data()
    # NOTE(review): the DWI affine loaded above is overwritten by the
    # (rounded) FA affine — all later steps use the FA affine.
    affine = FA_img.get_affine()
    affine = np.matrix.round(affine)

    mask_fname = self.inputs.brain_mask
    mask_img = nib.load(mask_fname)
    mask = mask_img.get_data()

    bval_fname = self.inputs.bval
    bvals = np.loadtxt(bval_fname)

    bvec_fname = self.inputs.bvec
    bvecs = np.loadtxt(bvec_fname)
    # Reorient bvecs from (3, N) rows to (N, 3) as gradient_table expects.
    bvecs = np.vstack([bvecs[0,:],bvecs[1,:],bvecs[2,:]]).T
    gtab = gradient_table(bvals, bvecs)

    # Creating a white matter mask (FA thresholded inside the brain mask)
    fa = fa*mask
    white_matter = fa >= 0.2

    # Creating a seed mask: 2x2x2 seeds per WM voxel
    from dipy.tracking import utils
    seeds = utils.seeds_from_mask(white_matter, density=[2, 2, 2],
                                  affine=affine)

    # Fitting the CSA model (its GFA is used as the stopping criterion)
    from dipy.reconst.shm import CsaOdfModel
    from dipy.data import default_sphere
    from dipy.direction import peaks_from_model
    csa_model = CsaOdfModel(gtab, sh_order=8)
    csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

    from dipy.tracking.local import ThresholdTissueClassifier
    classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)

    # CSD model (provides the probabilistic direction distribution)
    from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                       auto_response)
    response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
    csd_fit = csd_model.fit(data, mask=white_matter)

    from dipy.direction import ProbabilisticDirectionGetter
    prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                                        max_angle=45.,
                                                        sphere=default_sphere)

    # Tracking
    from dipy.tracking.local import LocalTracking
    streamlines = LocalTracking(prob_dg, classifier, seeds, affine,
                                step_size=.5, maxlen=200, max_cross=1)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    # Saving the trackfile
    from dipy.io.trackvis import save_trk
    _, base, _ = split_filename(fname)
    save_trk(base + '_CSDprob.trk', streamlines, affine, fa.shape)

    return runtime
def tracking(folder):
    """Whole-brain tractography for one subject directory.

    Expects ``data.nii.gz``, ``nodif_brain_mask.nii.gz``, ``bvals`` and
    ``bvecs`` inside ``folder``; writes peaks and a whole-brain .trk
    tractogram into ``<folder>/dipy_out/``. CSD peaks are cached as
    ``peaks.pam5`` and reused on subsequent runs.
    """
    print('Tracking in ' + folder)
    output_folder = folder + 'dipy_out/'

    # make a folder to save new data into
    try:
        Path(output_folder).mkdir(parents=True, exist_ok=True)
    except OSError:
        print('Could not create output dir. Aborting...')
        return

    # load data
    print('Loading data...')
    img = nib.load(folder + 'data.nii.gz')
    dmri = np.asarray(img.dataobj)
    affine = img.affine
    mask, _ = load_nifti(folder + 'nodif_brain_mask.nii.gz')
    bvals, bvecs = read_bvals_bvecs(folder + 'bvals', folder + 'bvecs')
    gtab = gradient_table(bvals, bvecs)

    # extract peaks (reuse the cached .pam5 file when present)
    if Path(output_folder + 'peaks.pam5').exists():
        peaks = load_peaks(output_folder + 'peaks.pam5')
    else:
        print('Extracting peaks...')
        response, ration = auto_response(gtab, dmri, roi_radius=10, fa_thr=.7)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response)
        peaks = peaks_from_model(model=csd_model,
                                 data=dmri,
                                 sphere=default_sphere,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=True)
        save_peaks(output_folder + 'peaks.pam5', peaks, affine)

        # Scale peak directions by their values and keep the first three
        # peaks flattened to 9 channels for a viewable NIfTI.
        scaled = peaks.peak_dirs * np.repeat(
            np.expand_dims(peaks.peak_values, -1), 3, -1)
        cropped = scaled[:, :, :, :3, :].reshape(dmri.shape[:3] + (9, ))
        save_nifti(output_folder + 'peaks.nii.gz', cropped, affine)
        #save_nifti(output_folder + 'peak_dirs.nii.gz', peaks.peak_dirs, affine)
        #save_nifti(output_folder + 'peak_vals.nii.gz', peaks.peak_values, affine)

    # tracking
    print('Tracking...')
    maskdata, mask = median_otsu(dmri, vol_idx=range(0, dmri.shape[3]),
                                 median_radius=3, numpass=1, autocrop=True,
                                 dilate=2)
    tensor_model = TensorModel(gtab, fit_method='WLS')
    tensor_fit = tensor_model.fit(maskdata)
    fa = fractional_anisotropy(tensor_fit.evals)
    fa[np.isnan(fa)] = 0
    bla = np.average(fa)  # NOTE(review): unused — leftover debugging value
    tissue_classifier = ThresholdStoppingCriterion(fa, .1)
    seeds = random_seeds_from_mask(fa > 1e-5, affine, seeds_count=1)
    streamline_generator = LocalTracking(direction_getter=peaks,
                                         stopping_criterion=tissue_classifier,
                                         seeds=seeds,
                                         affine=affine,
                                         step_size=.5)

    streamlines = Streamlines(streamline_generator)
    save_trk(StatefulTractogram(streamlines, img, Space.RASMM),
             output_folder + 'whole_brain.trk')
def dmri_recon(sid, data_dir, out_dir, resolution, recon='csd', dirs='',
               num_threads=2):
    """Run a full diffusion reconstruction for one subject.

    Fits a DTI model (saving FA/AD/RD/MD/eigenvector maps), fits the ODF
    model selected by `recon` ('csd', 'csa' or 'gqi'), extracts peaks, and
    runs deterministic EuDX tracking, saving each product as a file.

    Parameters
    ----------
    sid : str
        Subject identifier, used as the prefix of every output filename.
    data_dir : str
        Directory holding the DWI data, bvals/bvecs and masks.
    out_dir : str
        Output directory. NOTE(review): never read in the body -- outputs
        are written via os.path.abspath into the current directory.
    resolution : str
        Either '0.2mm' (native files) or another value selecting files from
        the 'resample' subdirectory.
    recon : str
        ODF model to use: 'csd', 'csa' or 'gqi'.
    dirs : str
        Unused here; only referenced in commented-out filename templates.
    num_threads : int
        Thread count exported to MKL/OMP and used by peaks_from_model.

    Returns
    -------
    tuple
        Paths of the saved outputs plus the affine:
        (fa, evec, gfa, streamlines, affine, ad, rd, md, shm_coeff, peaks).
    """
    import tempfile
    #tempfile.tempdir = '/om/scratch/Fri/ksitek/'

    import os
    # Pin MKL/OMP thread counts for the duration of this call, remembering
    # any pre-existing values so they can be restored at the end.
    oldval = None
    if 'MKL_NUM_THREADS' in os.environ:
        oldval = os.environ['MKL_NUM_THREADS']
    os.environ['MKL_NUM_THREADS'] = '%d' % num_threads
    ompoldval = None
    if 'OMP_NUM_THREADS' in os.environ:
        ompoldval = os.environ['OMP_NUM_THREADS']
    os.environ['OMP_NUM_THREADS'] = '%d' % num_threads
    import nibabel as nib
    import numpy as np
    from glob import glob

    # Resolve the DWI image path; non-native resolutions live in 'resample'.
    if resolution == '0.2mm':
        filename = 'Reg_S64550_nii4d.nii'
        #filename = 'angular_resample/dwi_%s.nii.gz'%dirs
        fimg = os.path.abspath(glob(os.path.join(data_dir, filename))[0])
    else:
        filename = 'Reg_S64550_nii4d_resamp-%s.nii.gz' % (resolution)
        fimg = os.path.abspath(
            glob(os.path.join(data_dir, 'resample', filename))[0])
    print("dwi file = %s" % fimg)
    fbval = os.path.abspath(
        glob(os.path.join(data_dir, 'bvecs', 'camino_120_RAS.bvals'))[0])
    print("bval file = %s" % fbval)
    fbvec = os.path.abspath(
        glob(os.path.join(data_dir, 'bvecs',
                          'camino_120_RAS_flipped-xy.bvecs'))[0])
    # 'angular_resample',
    # 'dwi_%s.bvecs'%dirs))[0])
    print("bvec file = %s" % fbvec)
    img = nib.load(fimg)
    data = img.get_fdata()
    # NOTE(review): img.get_affine() is a deprecated nibabel accessor
    # (removed in nibabel 5); img.affine is the modern equivalent.
    affine = img.get_affine()

    prefix = sid

    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    '''
    from dipy.core.gradients import vector_norm
    b0idx = []
    for idx, val in enumerate(bvals):
        if val < 1:
            pass
            #bvecs[idx] = [1, 0, 0]
        else:
            b0idx.append(idx)
            #print "b0idx=%d"%idx
    #print "input bvecs:"
    #print bvecs
    bvecs[b0idx, :] = bvecs[b0idx, :]/vector_norm(bvecs[b0idx])[:, None]
    #print "bvecs after normalization:"
    #print bvecs
    '''
    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)
    # The four statements below are leftover interactive sanity checks;
    # they are bare expressions with no effect on execution.
    gtab.bvecs.shape == bvecs.shape
    gtab.bvecs
    gtab.bvals.shape == bvals.shape
    gtab.bvals

    #from dipy.segment.mask import median_otsu
    #b0_mask, mask = median_otsu(data[:, :, :, b0idx].mean(axis=3).squeeze(), 4, 4)

    # Pick the precomputed brain mask matching the chosen resolution.
    if resolution == '0.2mm':
        mask_name = 'Reg_S64550_nii_b0-slice_mask.nii.gz'
        fmask1 = os.path.join(data_dir, mask_name)
    else:
        mask_name = 'Reg_S64550_nii_b0-slice_mask_resamp-%s.nii.gz' % (
            resolution)
        fmask1 = os.path.join(data_dir, 'resample', mask_name)
    print("fmask file = %s" % fmask1)
    mask = nib.load(fmask1).get_fdata()

    ''' DTI model & save metrics '''
    from dipy.reconst.dti import TensorModel
    print("running tensor model")
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    # Fractional anisotropy map (NaNs zeroed before saving).
    from dipy.reconst.dti import fractional_anisotropy
    print("running FA")
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    tensor_fa_file = os.path.abspath('%s_tensor_fa.nii.gz' % (prefix))
    nib.save(fa_img, tensor_fa_file)

    # Axial diffusivity map.
    from dipy.reconst.dti import axial_diffusivity
    print("running AD")
    AD = axial_diffusivity(tenfit.evals)
    AD[np.isnan(AD)] = 0
    ad_img = nib.Nifti1Image(AD, img.get_affine())
    tensor_ad_file = os.path.abspath('%s_tensor_ad.nii.gz' % (prefix))
    nib.save(ad_img, tensor_ad_file)

    # Radial diffusivity map.
    from dipy.reconst.dti import radial_diffusivity
    print("running RD")
    RD = radial_diffusivity(tenfit.evals)
    RD[np.isnan(RD)] = 0
    rd_img = nib.Nifti1Image(RD, img.get_affine())
    tensor_rd_file = os.path.abspath('%s_tensor_rd.nii.gz' % (prefix))
    nib.save(rd_img, tensor_rd_file)

    # Mean diffusivity map.
    from dipy.reconst.dti import mean_diffusivity
    print("running MD")
    MD = mean_diffusivity(tenfit.evals)
    MD[np.isnan(MD)] = 0
    md_img = nib.Nifti1Image(MD, img.get_affine())
    tensor_md_file = os.path.abspath('%s_tensor_md.nii.gz' % (prefix))
    nib.save(md_img, tensor_md_file)

    # Tensor eigenvectors.
    evecs = tenfit.evecs
    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    tensor_evec_file = os.path.abspath('%s_tensor_evec.nii.gz' % (prefix))
    nib.save(evec_img, tensor_evec_file)

    ''' ODF model '''
    useFA = True
    print("creating %s model" % recon)
    if recon == 'csd':
        from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
        from dipy.reconst.csdeconv import auto_response
        response, ratio = auto_response(gtab, data,
                                        roi_radius=10,
                                        fa_thr=0.5)  # 0.7
        model = ConstrainedSphericalDeconvModel(gtab, response)
        useFA = True
        return_sh = True
    elif recon == 'csa':
        from dipy.reconst.shm import CsaOdfModel, normalize_data
        model = CsaOdfModel(gtab, sh_order=8)
        useFA = True
        return_sh = True
    elif recon == 'gqi':
        from dipy.reconst.gqi import GeneralizedQSamplingModel
        model = GeneralizedQSamplingModel(gtab)
        return_sh = False
    else:
        raise ValueError('only csd, csa supported currently')
        # NOTE(review): the three lines below are unreachable (after raise);
        # they look like a leftover DSI experiment.
        from dipy.reconst.dsi import (DiffusionSpectrumDeconvModel,
                                      DiffusionSpectrumModel)
        model = DiffusionSpectrumDeconvModel(gtab)

    '''reconstruct ODFs'''
    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    #odfs = fit.odf(sphere)

    # with CSD/GQI, uses > 50GB per core; don't get greedy with cores!
    from dipy.reconst.peaks import peaks_from_model
    print("running peaks_from_model")
    peaks = peaks_from_model(model=model,
                             data=data,
                             sphere=sphere,
                             mask=mask,
                             return_sh=return_sh,
                             return_odf=False,
                             normalize_peaks=True,
                             npeaks=5,
                             relative_peak_threshold=.5,
                             min_separation_angle=10,  #25,
                             parallel=num_threads > 1,
                             nbr_processes=num_threads)

    # save the peaks
    from dipy.io.peaks import save_peaks
    peaks_file = os.path.abspath('%s_peaks.pam5' % (prefix))
    save_peaks(peaks_file, peaks)

    # save the spherical harmonics
    shm_coeff_file = os.path.abspath('%s_shm_coeff.nii.gz' % (prefix))
    if return_sh:
        shm_coeff = peaks.shm_coeff
        nib.save(nib.Nifti1Image(shm_coeff, img.get_affine()),
                 shm_coeff_file)
    else:
        # if it's not a spherical model, output it as an essentially null file
        np.savetxt(shm_coeff_file, [0])

    # save the generalized fractional anisotropy image
    gfa_img = nib.Nifti1Image(peaks.gfa, img.get_affine())
    model_gfa_file = os.path.abspath('%s_%s_gfa.nii.gz' % (prefix, recon))
    nib.save(gfa_img, model_gfa_file)

    #from dipy.reconst.dti import quantize_evecs
    #peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)
    #eu = EuDX(FA, peak_indices, odf_vertices = sphere.vertices,
    #a_low=0.2, seeds=10**6, ang_thr=35)

    ''' probabilistic tracking '''
    '''
    from dipy.direction import ProbabilisticDirectionGetter
    from dipy.tracking.local import LocalTracking
    from dipy.tracking.streamline import Streamlines
    from dipy.io.streamline import save_trk

    prob_dg = ProbabilisticDirectionGetter.from_shcoeff(shm_coeff,
                                                        max_angle=45.,
                                                        sphere=sphere)
    streamlines_generator = LocalTracking(prob_dg, affine,
                                          step_size=.5, max_cross=1)

    # Generate streamlines object
    streamlines = Streamlines(streamlines_generator)

    affine = img.get_affine()
    vox_size=fa_img.get_header().get_zooms()[:3]

    fname = os.path.abspath('%s_%s_prob_streamline.trk' % (prefix, recon))
    save_trk(fname, streamlines, affine, vox_size=vox_size)
    '''

    ''' deterministic tracking with EuDX method'''
    from dipy.tracking.eudx import EuDX
    print("reconstructing with EuDX")
    # Seed from the FA map for tensor-like models, from GFA otherwise.
    if useFA:
        eu = EuDX(FA,
                  peaks.peak_indices[..., 0],
                  odf_vertices=sphere.vertices,
                  a_low=0.001,  # default is 0.0239
                  seeds=10**6,
                  ang_thr=75)
    else:
        eu = EuDX(peaks.gfa,
                  peaks.peak_indices[..., 0],
                  odf_vertices=sphere.vertices,
                  #a_low=0.1,
                  seeds=10**6,
                  ang_thr=45)

    sl_fname = os.path.abspath('%s_%s_det_streamline.trk' % (prefix, recon))

    # trying new dipy.io.streamline module, per email to neuroimaging list
    # 2018.04.05
    from nibabel.streamlines import Field
    from nibabel.orientations import aff2axcodes
    affine = img.get_affine()
    vox_size = fa_img.get_header().get_zooms()[:3]
    fov_shape = FA.shape[:3]

    # Build the TRK header from the image geometry.
    # NOTE(review): if vox_size/fov_shape were ever None, `hdr` would be
    # unbound below -- in practice both are always set just above.
    if vox_size is not None and fov_shape is not None:
        hdr = {}
        hdr[Field.VOXEL_TO_RASMM] = affine.copy()
        hdr[Field.VOXEL_SIZES] = vox_size
        hdr[Field.DIMENSIONS] = fov_shape
        hdr[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))
    tractogram = nib.streamlines.Tractogram(eu)
    tractogram.affine_to_rasmm = affine
    trk_file = nib.streamlines.TrkFile(tractogram, header=hdr)
    nib.streamlines.save(trk_file, sl_fname)

    # Restore the thread-count environment variables saved at entry.
    # NOTE(review): `if oldval:` treats an empty-string prior value as
    # "unset" and deletes it instead of restoring -- edge case to confirm.
    if oldval:
        os.environ['MKL_NUM_THREADS'] = oldval
    else:
        del os.environ['MKL_NUM_THREADS']
    if ompoldval:
        os.environ['OMP_NUM_THREADS'] = ompoldval
    else:
        del os.environ['OMP_NUM_THREADS']

    print('all output files created')

    return (tensor_fa_file, tensor_evec_file, model_gfa_file, sl_fname,
            affine, tensor_ad_file, tensor_rd_file, tensor_md_file,
            shm_coeff_file, peaks_file)
def compute_ssst_frf(data, bvals, bvecs, mask=None, mask_wm=None,
                     fa_thresh=0.7, min_fa_thresh=0.5, min_nvox=300,
                     roi_radius=10, roi_center=None,
                     force_b0_threshold=False):
    """Estimate a single-shell, single-tissue fiber response function.

    A DTI fit is performed on the (optionally masked) DWI volume and the
    response is computed from voxels whose FA passes a threshold, starting
    at `fa_thresh` and lowered in 0.05 steps until at least `min_nvox`
    voxels qualify (or `min_fa_thresh` is reached).

    Parameters
    ----------
    data : ndarray
        4D diffusion volume, shape (X, Y, Z, N).
    bvals : ndarray
        1D array of b-values, shape (N,).
    bvecs : ndarray
        2D array of b-vectors, shape (N, 3).
    mask : ndarray, optional
        3D binary mask (X, Y, Z); restricts all computations. Useful when
        no white matter mask is available.
    mask_wm : ndarray, optional
        3D binary white matter mask (X, Y, Z); only voxels inside it (and
        above the FA threshold) contribute to the response function.
    fa_thresh : float, optional
        Initial FA threshold for selecting single-fiber voxels.
    min_fa_thresh : float, optional
        Lowest FA threshold the iterative search will try.
    min_nvox : int, optional
        Minimum number of single-fiber voxels required.
    roi_radius : int, optional
        Half-size of the cubic ROI (centered in the volume) used for the
        estimation.
    roi_center : tuple(3), optional
        Center of the ROI; defaults to the middle of the volume.
    force_b0_threshold : bool, optional
        If set, proceed even when the minimum b-value looks suspiciously
        high.

    Returns
    -------
    ndarray
        The fiber response function, shape (4,): three eigenvalues followed
        by the mean b=0 signal.

    Raises
    ------
    ValueError
        When fewer than `min_nvox` voxels with sufficient FA are found.
    """
    if min_fa_thresh < 0.4:
        logging.warning(
            "Minimal FA threshold ({:.2f}) seems really small. "
            "Make sure it makes sense for this dataset.".format(min_fa_thresh))

    if not is_normalized_bvecs(bvecs):
        logging.warning("Your b-vectors do not seem normalized...")
        bvecs = normalize_bvecs(bvecs)

    check_b0_threshold(force_b0_threshold, bvals.min())
    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    # Apply the generic mask first, then the WM mask when available.
    if mask is not None:
        data = applymask(data, mask)
    if mask_wm is None:
        logging.warning(
            "No white matter mask specified! Only mask will be used "
            "(if it has been supplied). \nBe *VERY* careful about the "
            "estimation of the fiber response function to ensure no invalid "
            "voxel was used.")
    else:
        data = applymask(data, mask_wm)

    # Lower the FA threshold step by step until enough voxels qualify.
    # The small epsilon compensates for numerical imprecision accumulated
    # by the repeated -= 0.05 decrements.
    n_voxels = 0
    cur_thresh = fa_thresh
    while n_voxels < min_nvox and cur_thresh >= min_fa_thresh - 0.00001:
        response, ratio, n_voxels = auto_response(
            gtab, data, roi_center=roi_center, roi_radius=roi_radius,
            fa_thr=cur_thresh, return_number_of_voxels=True)
        logging.debug(
            "Number of indices is {:d} with threshold of {:.2f}".format(
                n_voxels, cur_thresh))
        cur_thresh -= 0.05

    if n_voxels < min_nvox:
        raise ValueError(
            "Could not find at least {:d} voxels with sufficient FA "
            "to estimate the FRF!".format(min_nvox))

    # cur_thresh was decremented once past the successful threshold.
    logging.debug(
        "Found {:d} voxels with FA threshold {:.2f} for "
        "FRF estimation".format(n_voxels, cur_thresh + 0.05))
    logging.debug("FRF eigenvalues: {}".format(str(response[0])))
    logging.debug("Ratio for smallest to largest eigen value "
                  "is {:.3f}".format(ratio))
    logging.debug("Mean of the b=0 signal for voxels used "
                  "for FRF: {}".format(response[1]))

    full_response = np.array([response[0][0], response[0][1],
                              response[0][2], response[1]])

    return full_response
# Reorder tensor components before saving (swap components 2 and 3).
ten_img = ten_img[:, :, :, (0, 1, 3, 2, 4, 5)]
array_img = nib.Nifti1Image(ten_img, affine)
nib.save(array_img, dav_dir + 'TN_img.nii.gz')

# estimating fiber response from corpus callosum
# Bounding box of the voxels labelled as corpus callosum in the parcellation.
cc_label = 48
cc_pixels = np.where(pr_img == cc_label)
cc_x_min, cc_x_max, cc_y_min, cc_y_max, cc_z_min, cc_z_max= \
    cc_pixels[0].min(), cc_pixels[0].max(), cc_pixels[1].min(), cc_pixels[1].max(), cc_pixels[2].min(), cc_pixels[2].max()
# Middle axial slice of the CC bounding box.
slc = (cc_z_min + cc_z_max) // 2
# roi_radius=300 exceeds the crop size, so the whole cropped block is used.
response, ratio = auto_response(gtab,
                                d_img[cc_x_min:cc_x_max,
                                      cc_y_min:cc_y_max,
                                      cc_z_min:cc_z_max, :],
                                roi_radius=300, fa_thr=0.7)

# choose the b=1000 shell
ind_0_1000 = np.logical_or(b_vals == 0, b_vals == 1000)
d_img_0_1000 = d_img[:, :, :, ind_0_1000].copy()
b_vecs_0_1000 = b_vecs[:, ind_0_1000]
b_vals_0_1000 = b_vals[ind_0_1000]
gtab_0_1000 = gradient_table(b_vals_0_1000, b_vecs_0_1000)
# NOTE(review): this call is truncated in this chunk of the file -- the
# trailing arguments (presumably fa_thr=0.7 and the closing paren, as in
# the call above) are missing from the visible source.
response_0_1000, ratio_0_1000 = auto_response(
    gtab_0_1000,
    d_img_0_1000[cc_x_min:cc_x_max, cc_y_min:cc_y_max, cc_z_min:cc_z_max, :],
    roi_radius=300,
def eudx_advanced(self, dti_file, mask_file, gtab,
                  seed_num=100000, stop_val=0.1):
    """
    Tracking with more complex tensors - experimental

    Initializes the graph with nodes corresponding to the number of ROIs

    **Positional Arguments:**

            dti_file:
                - File (registered) to use for tensor/fiber tracking
            mask_file:
                - Brain mask to keep tensors inside the brain
            gtab:
                - dipy formatted bval/bvec Structure

    **Optional Arguments:**

            seed_num:
                - Number of seeds to use for fiber tracking
            stop_val:
                - Value to cutoff fiber track
    """
    img = nb.load(dti_file)
    data = img.get_data()

    img = nb.load(mask_file)
    mask = img.get_data()
    mask = mask > 0  # to ensure binary mask

    # For constrained spherical deconvolution we first estimate the
    # response function, then build the CSD model from it.
    response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)

    # Fit the data and compute fiber directions in all voxels.
    sphere = get_sphere('symmetric724')
    csd_peaks = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 mask=mask,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=True)

    # Tracking stops where FA is low (< stop_val); fit a WLS tensor model
    # to derive the FA map used as the stopping criterion.
    # BUGFIX: was a Python 2 `print` statement (syntax error on Python 3).
    print('tensors...')
    tensor_model = TensorModel(gtab, fit_method='WLS')
    tensor_fit = tensor_model.fit(data, mask)

    FA = fractional_anisotropy(tensor_fit.evals)

    # Broadcast FA to the shape of csd_peaks.peak_values so every peak
    # direction in a voxel shares that voxel's stopping value.
    stopping_values = np.zeros(csd_peaks.peak_values.shape)
    stopping_values[:] = FA[..., None]

    streamline_generator = EuDX(stopping_values,
                                csd_peaks.peak_indices,
                                seeds=seed_num,
                                odf_vertices=sphere.vertices,
                                a_low=stop_val)
    streamlines = [streamline for streamline in streamline_generator]

    return streamlines
def _run_interface(self, runtime):
    """Estimate the single-fiber response function for spherical deconvolution.

    Depending on the interface inputs, the response is computed either
    automatically (``auto_response``), recursively (``recursive_response``)
    or from the mean eigenvalues of high-FA voxels. The response vector is
    written to ``self.inputs.response`` and the derived single-fiber mask
    to ``self.inputs.out_mask``.
    """
    from dipy.core.gradients import GradientTable
    from dipy.reconst.dti import fractional_anisotropy, mean_diffusivity
    from dipy.reconst.csdeconv import recursive_response, auto_response

    img = nb.load(self.inputs.in_file)
    # Reference volume: the first 3D volume of the 4D series.
    imref = nb.four_to_three(img)[0]
    affine = img.affine

    if isdefined(self.inputs.in_mask):
        msk = nb.load(self.inputs.in_mask).get_data()
        # Binarize: positive values -> 1, negative values -> 0.
        msk[msk > 0] = 1
        msk[msk < 0] = 0
    else:
        # No mask supplied: include every voxel.
        msk = np.ones(imref.shape)

    data = img.get_data().astype(np.float32)
    gtab = self._get_gradient_table()

    # Precomputed tensor eigenvalues; NaNs zeroed before use.
    evals = np.nan_to_num(nb.load(self.inputs.in_evals).get_data())
    FA = np.nan_to_num(fractional_anisotropy(evals)) * msk
    # Voxels above the FA threshold are the single-fiber candidates.
    indices = np.where(FA > self.inputs.fa_thresh)
    # Mean b=0 signal over those voxels.
    S0s = data[indices][:, np.nonzero(gtab.b0s_mask)[0]]
    S0 = np.mean(S0s)

    if self.inputs.auto:
        response, ratio = auto_response(gtab, data,
                                        roi_radius=self.inputs.roi_radius,
                                        fa_thr=self.inputs.fa_thresh)
        # Keep the three eigenvalues and append the mean b=0 signal.
        response = response[0].tolist() + [S0]
    elif self.inputs.recursive:
        MD = np.nan_to_num(mean_diffusivity(evals)) * msk
        # WM candidates: high FA, or moderate FA with high mean diffusivity.
        # Note this overwrites `indices`, which is also used for the output
        # mask at the end of the function.
        indices = np.logical_or(
            FA >= 0.4, (np.logical_and(FA >= 0.15, MD >= 0.0011)))

        data = nb.load(self.inputs.in_file).get_data()
        response = recursive_response(gtab, data, mask=indices, sh_order=8,
                                      peak_thr=0.01, init_fa=0.08,
                                      init_trace=0.0021, iter=8,
                                      convergence=0.001,
                                      parallel=True)
        ratio = abs(response[1] / response[0])
    else:
        # Plain estimate: mean of the sorted eigenvalues of high-FA voxels.
        lambdas = evals[indices]
        l01 = np.sort(np.mean(lambdas, axis=0))

        response = np.array([l01[-1], l01[-2], l01[-2], S0])
        ratio = abs(response[1] / response[0])

    # Sanity-check the prolateness of the estimated response.
    if ratio > 0.25:
        IFLOGGER.warn('Estimated response is not prolate enough. '
                      'Ratio=%0.3f.', ratio)
    elif ratio < 1.e-5 or np.any(np.isnan(response)):
        # Degenerate or NaN estimate: fall back to a default response.
        response = np.array([1.8e-3, 3.6e-4, 3.6e-4, S0])
        IFLOGGER.warn('Estimated response is not valid, using a default one')
    else:
        IFLOGGER.info('Estimated response: %s', str(response[:3]))

    np.savetxt(op.abspath(self.inputs.response), response)

    # Persist the single-fiber mask derived from `indices`.
    wm_mask = np.zeros_like(FA)
    wm_mask[indices] = 1
    nb.Nifti1Image(
        wm_mask.astype(np.uint8), affine,
        None).to_filename(op.abspath(self.inputs.out_mask))

    return runtime
Constrained Spherical Deconvolution (CSD) Model. This model represents each voxel in the data set as a collection of small white matter fibers with different orientations. The density of fibers along each orientation is known as the Fiber Orientation Distribution (FOD). In order to perform probabilistic fiber tracking, we pick a fiber from the FOD at random at each new location along the streamline. Note: one could use this model to perform deterministic fiber tracking by always tracking along the directions that have the most fibers. Let's begin probabilistic fiber tracking by fitting the data to the CSD model. """ from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel, auto_response) response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7) csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6) csd_fit = csd_model.fit(data, mask=white_matter) """ Next we'll need to make a ``ProbabilisticDirectionGetter``. Because the CSD model represents the FOD using the spherical harmonic basis, we can use the ``from_shcoeff`` method to create the direction getter. This direction getter will randomly sample directions from the FOD each time the tracking algorithm needs to take another step. """ from dipy.direction import ProbabilisticDirectionGetter prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff, max_angle=30.,
def main():
    """CLI entry point: estimate the FRF, then fit CSD and save fODF/peaks."""
    parser = _build_arg_parser()
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)

    if not args.not_all:
        args.fodf = args.fodf or 'fodf.nii.gz'
        args.peaks = args.peaks or 'peaks.nii.gz'
        args.peak_indices = args.peak_indices or 'peak_indices.nii.gz'

    arglist = [args.fodf, args.peaks, args.peak_indices]
    if args.not_all and not any(arglist):
        parser.error('When using --not_all, you need to specify at least '
                     'one file to output.')

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs])
    assert_outputs_exists(parser, args, arglist)

    nbr_processes = args.nbr_processes
    parallel = True
    if nbr_processes <= 0:
        nbr_processes = None
    elif nbr_processes == 1:
        parallel = False

    # Check for FRF filename
    base_odf_name, _ = split_name_with_nii(args.fodf)
    frf_filename = base_odf_name + '_frf.txt'
    if os.path.isfile(frf_filename) and not args.overwrite:
        parser.error('Cannot save frf file, "{0}" already exists. '
                     'Use -f to overwrite.'.format(frf_filename))

    vol = nib.load(args.input)
    data = vol.get_data()

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if args.mask_wm is not None:
        wm_mask = nib.load(args.mask_wm).get_data().astype('bool')
    else:
        # BUGFIX: np.bool is deprecated/removed in recent NumPy; the
        # builtin bool is the documented replacement.
        wm_mask = np.ones_like(data[..., 0], dtype=bool)
        logging.info(
            'No white matter mask specified! mask_data will be used instead, '
            'if it has been supplied. \nBe *VERY* careful about the '
            'estimation of the fiber response function for the CSD.')

    data_in_wm = applymask(data, wm_mask)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    if bvals.min() != 0:
        if bvals.min() > 20:
            raise ValueError(
                'The minimal bvalue is greater than 20. This is highly '
                'suspicious. Please check your data to ensure everything is '
                'correct.\nValue found: {}'.format(bvals.min()))
        else:
            logging.warning('Warning: no b=0 image. Setting b0_threshold to '
                            'bvals.min() = %s', bvals.min())
            gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())
    else:
        gtab = gradient_table(bvals, bvecs)

    if args.mask is None:
        mask = None
    else:
        # BUGFIX: np.bool -> bool (see above).
        mask = nib.load(args.mask).get_data().astype(bool)

    # Raise warning for sh order if there is not enough DWIs
    if data.shape[-1] < (args.sh_order + 1) * (args.sh_order + 2) / 2:
        # BUGFIX: the original call passed the volume count as
        # warnings.warn's `category` argument (a TypeError at runtime) and
        # left data.shape[-1] dangling in a tuple expression. Interpolate
        # both values into the message instead.
        warnings.warn(
            'We recommend having at least %s unique DWIs volumes, but you '
            'currently have %s volumes. Try lowering the parameter --sh_order '
            'in case of non convergence.' %
            ((args.sh_order + 1) * (args.sh_order + 2) / 2, data.shape[-1]))

    fa_thresh = args.fa_thresh

    # If threshold is too high, try lower until enough indices are found
    # (estimating a response function with fa < 0.5 does not make sense).
    nvox = 0
    while nvox < 300 and fa_thresh > 0.5:
        response, ratio, nvox = auto_response(
            gtab, data_in_wm,
            roi_center=args.roi_center,
            roi_radius=args.roi_radius,
            fa_thr=fa_thresh,
            return_number_of_voxels=True)
        logging.info('Number of indices is %s with threshold of %s',
                     nvox, fa_thresh)
        fa_thresh -= 0.05

    # BUGFIX: the original guard `if fa_thresh <= 0` was unreachable (the
    # loop already stops once fa_thresh drops to 0.5), so an insufficient
    # voxel count slipped through silently. Check the voxel count itself.
    if nvox < 300:
        raise ValueError(
            'Could not find at least 300 voxels for estimating the frf!')

    logging.info('Found %s valid voxels for frf estimation.', nvox)

    response = list(response)
    logging.info('Response function is %s', response)

    if args.frf is not None:
        # Manually-provided FRF eigenvalues override the estimation.
        l01 = np.array(literal_eval(args.frf), dtype=np.float64)
        if not args.no_factor:
            l01 *= 10 ** -4

        response[0] = np.array([l01[0], l01[1], l01[1]])
        ratio = l01[1] / l01[0]

    logging.info("Eigenvalues for the frf of the input data are: %s",
                 response[0])
    logging.info("Ratio for smallest to largest eigen value is %s", ratio)
    np.savetxt(frf_filename, response[0])

    if not args.frf_only:
        reg_sphere = get_sphere('symmetric362')
        peaks_sphere = get_sphere('symmetric724')

        csd_model = ConstrainedSphericalDeconvModel(
            gtab, response,
            reg_sphere=reg_sphere,
            sh_order=args.sh_order)

        peaks_csd = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=peaks_sphere,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     mask=mask,
                                     return_sh=True,
                                     sh_basis_type=args.basis,
                                     sh_order=args.sh_order,
                                     normalize_peaks=True,
                                     parallel=parallel,
                                     nbr_processes=nbr_processes)

        if args.fodf:
            nib.save(nib.Nifti1Image(peaks_csd.shm_coeff.astype(np.float32),
                                     vol.affine), args.fodf)

        if args.peaks:
            nib.save(nib.Nifti1Image(reshape_peaks_for_visualization(peaks_csd),
                                     vol.affine), args.peaks)

        if args.peak_indices:
            nib.save(nib.Nifti1Image(peaks_csd.peak_indices, vol.affine),
                     args.peak_indices)
def tracking(image, bvecs, bvals, wm, seeds, fibers,
             prune_length=3, rseed=42, plot=False, proba=False,
             verbose=False):
    """Run deterministic (CSA) or probabilistic (CSD) tractography.

    Pipelines transcribed from:
    https://dipy.org/documentation/1.1.1./examples_built/tracking_introduction_eudx/#example-tracking-introduction-eudx
    https://dipy.org/documentation/1.1.1./examples_built/tracking_probabilistic/
    """
    # Load the DWI, white-matter and seed volumes.
    dwi_img = nib.load(image)
    dwi = dwi_img.get_fdata()
    wm_vol = nib.load(wm).get_fdata()
    seed_mask = nib.load(seeds).get_fdata()
    seed_points = utils.seeds_from_mask(seed_mask, dwi_img.affine, density=2)

    # Load b-values & b-vectors.
    # NB. Use aligned b-vecs if providing eddy-aligned data.
    bvals, bvecs = read_bvals_bvecs(bvals, bvecs)
    gtab = gradient_table(bvals, bvecs)

    # A CSA model supplies the GFA map used as the stopping criterion.
    csa_model = CsaOdfModel(gtab, sh_order=6)
    gfa_map = csa_model.fit(dwi, mask=wm_vol).gfa
    stop_criterion = ThresholdStoppingCriterion(gfa_map, .25)

    if proba:
        # Probabilistic: fit a CSD model and sample directions from its
        # (clipped, non-negative) ODF.
        response, _ = auto_response(gtab, dwi, roi_radius=10, fa_thr=0.7)
        csd_fit = ConstrainedSphericalDeconvModel(
            gtab, response, sh_order=6).fit(dwi, mask=wm_vol)
        pmf = csd_fit.odf(default_sphere).clip(min=0)
        dg = ProbabilisticDirectionGetter.from_pmf(pmf, max_angle=30.,
                                                   sphere=default_sphere)
    else:
        # Deterministic: the CSA peaks act as the direction getter.
        dg = peaks_from_model(csa_model, dwi, default_sphere,
                              relative_peak_threshold=0.8,
                              min_separation_angle=45,
                              mask=wm_vol)

    # Trace the streamlines, then drop any that are too short.
    generator = LocalTracking(dg, stop_criterion, seed_points,
                              dwi_img.affine, 0.5, random_seed=rseed)
    kept = ArraySequence([line for line in Streamlines(generator)
                          if len(line) > prune_length])

    # Save the tractogram.
    sft = StatefulTractogram(kept, dwi_img, Space.RASMM)
    save_trk(sft, fibers + ".trk")

    # Optionally render the fibers to a PNG.
    if plot and has_fury:
        from dipy.viz import window, actor, colormap as cmap

        scene = window.Renderer()
        scene.add(actor.line(kept, cmap.line_colors(kept)))
        window.record(scene, out_path=fibers + '.png', size=(800, 800))
def run(self, input_files, bvalues_files, bvectors_files, mask_files,
        b0_threshold=50.0, bvecs_tol=0.01, roi_center=None, roi_radius=10,
        fa_thr=0.7, frf=None, extract_pam_values=False, sh_order=8,
        odf_to_sh_order=8, parallel=False, nbr_processes=None,
        out_dir='', out_pam='peaks.pam5', out_shm='shm.nii.gz',
        out_peaks_dir='peaks_dirs.nii.gz',
        out_peaks_values='peaks_values.nii.gz',
        out_peaks_indices='peaks_indices.nii.gz',
        out_gfa='gfa.nii.gz'):
    """ Constrained spherical deconvolution

    Parameters
    ----------
    input_files : string
        Path to the input volumes. This path may contain wildcards to
        process multiple inputs at once.
    bvalues_files : string
        Path to the bvalues files. This path may contain wildcards to use
        multiple bvalues files at once.
    bvectors_files : string
        Path to the bvectors files. This path may contain wildcards to use
        multiple bvectors files at once.
    mask_files : string
        Path to the input masks. This path may contain wildcards to use
        multiple masks at once. (default: No mask used)
    b0_threshold : float, optional
        Threshold used to find b=0 directions
    bvecs_tol : float, optional
        Bvecs should be unit vectors. (default:0.01)
    roi_center : variable int, optional
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]` (default None)
    roi_radius : int, optional
        radius of cubic ROI in voxels (default 10)
    fa_thr : float, optional
        FA threshold for calculating the response function (default 0.7)
    frf : variable float, optional
        Fiber response function can be for example input as 15 4 4 (from
        the command line) or [15, 4, 4] from a Python script to be
        converted to float and multiplied by 10**-4 . If None the fiber
        response function will be computed automatically (default: None).
    extract_pam_values : bool, optional
        Save or not to save pam volumes as single nifti files.
    sh_order : int, optional
        Spherical harmonics order (default 8) used in the CSD fit.
    odf_to_sh_order : int, optional
        Spherical harmonics order used for peak_from_model to compress
        the ODF to spherical harmonics coefficients (default 8)
    parallel : bool, optional
        Whether to use parallelization in peak-finding during the
        calibration procedure. Default: False
    nbr_processes: int, optional
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).
    out_dir : string, optional
        Output directory (default input file directory)
    out_pam : string, optional
        Name of the peaks volume to be saved (default 'peaks.pam5')
    out_shm : string, optional
        Name of the spherical harmonics volume to be saved
        (default 'shm.nii.gz')
    out_peaks_dir : string, optional
        Name of the peaks directions volume to be saved
        (default 'peaks_dirs.nii.gz')
    out_peaks_values : string, optional
        Name of the peaks values volume to be saved
        (default 'peaks_values.nii.gz')
    out_peaks_indices : string, optional
        Name of the peaks indices volume to be saved
        (default 'peaks_indices.nii.gz')
    out_gfa : string, optional
        Name of the generalized FA volume to be saved
        (default 'gfa.nii.gz')

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of
       the fibre orientation distribution in diffusion MRI: Non-negativity
       constrained super-resolved spherical deconvolution.
    """
    io_it = self.get_io_iterator()

    for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir, opeaks_values,
         opeaks_indices, ogfa) in io_it:

        logging.info('Loading {0}'.format(dwi))
        data, affine = load_nifti(dwi)

        bvals, bvecs = read_bvals_bvecs(bval, bvec)
        # BUGFIX: removed a stray debug `print(b0_threshold, bvals.min())`.
        if b0_threshold < bvals.min():
            # BUGFIX: message previously read "It should higher than".
            warn("b0_threshold (value: {0}) is too low, increase your "
                 "b0_threshold. It should be higher than the first b0 value "
                 "({1}).".format(b0_threshold, bvals.min()))
        gtab = gradient_table(bvals, bvecs,
                              b0_threshold=b0_threshold, atol=bvecs_tol)
        # BUGFIX: np.bool is deprecated/removed in recent NumPy; use bool.
        mask_vol = nib.load(maskfile).get_data().astype(bool)

        # Number of SH coefficients required by the requested order.
        n_params = ((sh_order + 1) * (sh_order + 2)) / 2
        if data.shape[-1] < n_params:
            raise ValueError(
                'You need at least {0} unique DWI volumes to '
                'compute fiber odfs. You currently have: {1}'
                ' DWI volumes.'.format(n_params, data.shape[-1]))

        if frf is None:
            # Estimate the fiber response function automatically.
            logging.info('Computing response function')
            if roi_center is not None:
                logging.info('Response ROI center:\n{0}'
                             .format(roi_center))
                logging.info('Response ROI radius:\n{0}'
                             .format(roi_radius))
            response, ratio, nvox = auto_response(
                gtab, data,
                roi_center=roi_center,
                roi_radius=roi_radius,
                fa_thr=fa_thr,
                return_number_of_voxels=True)
            response = list(response)
        else:
            # Use the user-supplied eigenvalues (scaled by 1e-4).
            logging.info('Using response function')
            if isinstance(frf, str):
                l01 = np.array(literal_eval(frf), dtype=np.float64)
            else:
                l01 = np.array(frf, dtype=np.float64)

            l01 *= 10 ** -4
            response = np.array([l01[0], l01[1], l01[1]])
            ratio = l01[1] / l01[0]
            response = (response, ratio)

        logging.info("Eigenvalues for the frf of the input"
                     " data are :{0}".format(response[0]))
        logging.info('Ratio for smallest to largest eigen value is {0}'
                     .format(ratio))

        peaks_sphere = get_sphere('repulsion724')

        logging.info('CSD computation started.')
        csd_model = ConstrainedSphericalDeconvModel(gtab, response,
                                                    sh_order=sh_order)

        peaks_csd = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=peaks_sphere,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     mask=mask_vol,
                                     return_sh=True,
                                     sh_order=sh_order,
                                     normalize_peaks=True,
                                     parallel=parallel,
                                     nbr_processes=nbr_processes)
        peaks_csd.affine = affine

        save_peaks(opam, peaks_csd)
        logging.info('CSD computation completed.')

        if extract_pam_values:
            peaks_to_niftis(peaks_csd, oshm, opeaks_dir, opeaks_values,
                            opeaks_indices, ogfa, reshape_dirs=True)

        dname_ = os.path.dirname(opam)
        if dname_ == '':
            logging.info('Pam5 file saved in current directory')
        else:
            logging.info('Pam5 file saved in {0}'.format(dname_))

    return io_it
def test_csdeconv():
    """Exercise CSD fitting on simulated two-fiber data.

    Builds a 50/50 two-tensor signal crossing at 60 degrees, fits a
    ConstrainedSphericalDeconvModel, and checks that the recovered fODF
    peaks match the ground-truth directions.  Also checks the sh_order
    UserWarning behaviour and the auto_response ROI voxel counts.
    """
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_fnames('small_64D')

    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs, b0_threshold=0)

    # Two identical prolate tensors, crossing at 60 degrees, equal fractions.
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]
    S, sticks = multi_tensor(gtab, mevals, S0, angles=angles,
                             fractions=[50, 50], snr=SNR)

    sphere = get_sphere('symmetric362')
    # Ground-truth ODF for comparison against the CSD fODF.
    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])

    # Single-fiber response: (eigenvalues, b0 signal).
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)

    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(S)
    assert_equal(csd_fit.shm_coeff[0] > 0, True)
    fodf = csd_fit.odf(sphere)

    directions, _, _ = peak_directions(odf_gt, sphere)
    directions2, _, _ = peak_directions(fodf, sphere)

    ang_sim = angular_similarity(directions, directions2)

    # Two well-recovered peaks give an angular similarity close to 2.
    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions.shape[0], 2)
    assert_equal(directions2.shape[0], 2)

    # sh_order=10 needs more unique gradients than this scheme has, so a
    # UserWarning must be emitted.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        _ = ConstrainedSphericalDeconvModel(gtab, response, sh_order=10)
        assert_greater(len([lw for lw in w if issubclass(lw.category,
                                                         UserWarning)]), 0)

    # sh_order=8 is supported: no UserWarning expected.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        assert_equal(len([lw for lw in w if issubclass(lw.category,
                                                       UserWarning)]), 0)

    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    # Homogeneous 10x10x10 volume of a single-tensor signal (S0=100).
    S2 = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    big_S = np.zeros((10, 10, 10, len(S2)))
    big_S[:] = S2

    aresponse, aratio = auto_response(gtab, big_S, roi_center=(5, 5, 4),
                                      roi_radius=3, fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])
    assert_almost_equal(aresponse[1], 100)
    assert_almost_equal(aratio, response[0][1] / response[0][0])

    # NOTE(review): this call's return value is discarded, so the next
    # assertion re-checks the previous `aresponse` — presumably it only
    # verifies that the default roi_center does not raise; confirm intent.
    auto_response(gtab, big_S, roi_radius=3, fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])

    # roi_radius=30 covers the whole 10^3 volume: all 1000 voxels pass
    # fa_thr=0.5, none pass fa_thr=1.
    _, _, nvoxels = auto_response(gtab, big_S, roi_center=(5, 5, 4),
                                  roi_radius=30, fa_thr=0.5,
                                  return_number_of_voxels=True)
    assert_equal(nvoxels, 1000)
    _, _, nvoxels = auto_response(gtab, big_S, roi_center=(5, 5, 4),
                                  roi_radius=30, fa_thr=1,
                                  return_number_of_voxels=True)
    assert_equal(nvoxels, 0)
def generate_response(self): response, ratio = auto_response(self.gtab, self.data, roi_radius=self.roi_radius, fa_thr=self.fa_thr) return response, ratio
mask_vol, mask_affine = load_nifti(fmask) sh_order = 8 if data.shape[-1] < 15: raise ValueError( "You need at least 15 unique DWI volumes to " "compute fiber ODFs. You currently have: {0}" " DWI volumes.".format(data.shape[-1]) ) elif data.shape[-1] < 30: sh_order = 6 from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, auto_response response, ratio = auto_response(gtab, data) response = list(response) peaks_sphere = get_sphere("symmetric362") model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=sh_order) peaks_csd = peaks_from_model( model=model, data=data, sphere=peaks_sphere, relative_peak_threshold=0.5, min_separation_angle=25, mask=mask_vol, return_sh=True, sh_order=sh_order,
def fiber_tracking(subject):
    """Run whole-brain fiber tracking for one subject.

    Parameters
    ----------
    subject : str
        Subject name; used by ``get_file_names`` to resolve the input
        DWI, bval, bvec and label file paths.

    Returns
    -------
    dict
        Keys: ``subject``, ``streamlines``, ``sft`` (StatefulTractogram),
        ``affine`` (the image affine), ``data``, ``img``, ``labels``.

    Notes
    -----
    The direction criterion depends on ``algo``: ``'deterministic'`` uses
    CSA peaks (EuDX-style); any other value fits a CSD model and uses a
    probabilistic direction getter.  (Fix: the original placed this
    description in a string *after* executable statements — so it was not
    a docstring — and it contained invalid ``\\in`` escape sequences.)
    """
    # Algorithm selector, one of ['deterministic', 'probabilitic'].
    algo = 'deterministic'
    # algo = 'probabilitic'

    print('processing for', subject)
    fname, bval_fname, bvec_fname, label_fname = get_file_names(subject)

    data, sub_affine, img = load_nifti(fname, return_img=True)
    bvals, bvecs = read_bvals_bvecs(bval_fname, bvec_fname)
    gtab = gradient_table(bvals, bvecs)
    labels = load_nifti_data(label_fname)

    print('data loading complete.\n')
    ##################################################################
    # set mask(s) and seed(s)
    # global_mask = binary_dilation((data[:, :, :, 0] != 0))
    global_mask = binary_dilation((labels == 1) | (labels == 2))
    # global_mask = binary_dilation((labels == 2) | (labels == 32) | (labels == 76))
    affine = np.eye(4)
    seeds = utils.seeds_from_mask(global_mask, affine, density=1)
    print('mask(s) and seed(s) set complete.\n')

    ##################################################################
    print('getting directions from diffusion dataset...')

    # Tracking mask via Constant Solid Angle (CSA) odf model.
    csamodel = CsaOdfModel(gtab, 6)
    stopping_criterion = BinaryStoppingCriterion(global_mask)

    # Direction criterion is filled in by the chosen algorithm below.
    direction_criterion = None
    print('Compute directions...')
    if algo == "deterministic":
        # EuDX-style deterministic tracking from CSA peaks.
        direction_criterion = peaks.peaks_from_model(
            model=csamodel,
            data=data,
            sphere=peaks.default_sphere,
            relative_peak_threshold=.8,
            min_separation_angle=45,
            mask=global_mask)

        # # Deterministic Algorithm (select direction with max probability)
        # direction_criterion = DeterministicMaximumDirectionGetter.from_shcoeff(
        #     csd_fit.shm_coeff,
        #     max_angle=30.,
        #     sphere=default_sphere)
    else:
        # Estimate the single-fiber response, then fit the reconstruction
        # model with Constrained Spherical Deconvolution (CSD).
        response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
        csd_fit = csd_model.fit(data, mask=global_mask)

        # gfa = csamodel.fit(data, mask=global_mask).gfa
        # stopping_criterion = ThresholdStoppingCriterion(gfa, .25)

        # Probabilistic direction getter on the CSD coefficients.
        direction_criterion = ProbabilisticDirectionGetter.from_shcoeff(
            csd_fit.shm_coeff,
            max_angle=30.,
            sphere=default_sphere)

    print('direction computation complete.\n')

    ##################################################################
    print('start tracking process...')
    # start tracking
    streamline_generator = LocalTracking(direction_criterion,
                                         stopping_criterion,
                                         seeds,
                                         affine=affine,
                                         step_size=0.5)

    # Generate streamlines object
    streamlines = Streamlines(streamline_generator)
    sft = StatefulTractogram(streamlines, img, Space.RASMM)
    print('traking complete.\n')

    ##################################################################
    return {
        "subject": subject,
        "streamlines": streamlines,
        "sft": sft,
        "affine": sub_affine,
        "data": data,
        "img": img,
        "labels": labels
    }
FA_img_test.SetDirection(ref_dir_test) FA_img_test.SetOrigin(ref_org_test) FA_img_test.SetSpacing(ref_spc_test) sitk.WriteImage(FA_img_test, test_dir + 'results/test/FA_test.mhd') sitk.WriteImage(FA_img_test, test_dir + 'results/test/FA_test.nii.gz' ) FA_img_test_nii= nib.load( test_dir + 'results/test/FA_test.nii.gz' ) FA_img_test_nii.header['srow_x']= FA_img_test_nii.affine[0,:] FA_img_test_nii.header['srow_y']= FA_img_test_nii.affine[1,:] FA_img_test_nii.header['srow_z']= FA_img_test_nii.affine[2,:] affine_test= FA_img_test_nii.affine''' response_test, ratio_test = auto_response(gtab_test, hardi_d_img_test_np, roi_radius=10, fa_thr=0.7) # Tensor fitting methods # F- test and bootstrap b_vals, b_vecs = b_vals_test.copy(), b_vecs_test.copy() Lam = np.array([response_test[0][0], response_test[0][1]]) d_iso = 0.003 img_err_sd = np.zeros((sx, sy, sz, 5)) img_err_mf = np.zeros((sx, sy, sz, 5)) prd_err_sd = np.zeros((sx, sy, sz)) prd_err_mf = np.zeros((sx, sy, sz))
def track(dname, fdwi, fbval, fbvec, fmask=None, seed_density=1, show=False):
    """Fit DTI, extract peaks, and run local tractography on one dataset.

    Parameters
    ----------
    dname : str
        Output prefix; result filenames are produced by concatenation
        (e.g. ``dname + 'tensor_fa.nii.gz'``).
    fdwi, fbval, fbvec : str
        Paths to the DWI image, b-values and b-vectors.
    fmask : str, optional
        Brain-mask NIfTI path; if None a mask is computed with median_otsu.
    seed_density : int, optional
        Seeds per voxel passed to ``utils.seeds_from_mask``.
    show : bool, optional
        If True, render the results with ``show_results``.

    Side effects: writes ``tensor_fa.nii.gz``, ``peaks.npz`` and
    ``tractogram.trk`` under the ``dname`` prefix.
    """
    data, affine = load_nifti(fdwi)
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=50)

    if fmask is None:
        from dipy.segment.mask import median_otsu
        b0_mask, mask = median_otsu(
            data)  # TODO: check parameters to improve the mask
    else:
        mask, mask_affine = load_nifti(fmask)
        mask = np.squeeze(mask)  # fix mask dimensions

    # compute DTI model
    from dipy.reconst.dti import TensorModel
    tenmodel = TensorModel(gtab)  # , fit_method='OLS') #, min_signal=5000)

    # fit the dti model
    tenfit = tenmodel.fit(data, mask=mask)

    # save fa
    ffa = dname + 'tensor_fa.nii.gz'

    fa_img = nib.Nifti1Image(tenfit.fa.astype(np.float32), affine)
    nib.save(fa_img, ffa)

    # Drop the SH order when there are too few unique DWI volumes.
    sh_order = 8  # TODO: check what that does
    if data.shape[-1] < 15:
        raise ValueError('You need at least 15 unique DWI volumes to '
                         'compute fiber ODFs. You currently have: {0}'
                         ' DWI volumes.'.format(data.shape[-1]))
    elif data.shape[-1] < 30:
        sh_order = 6

    # compute the response equation ?
    from dipy.reconst.csdeconv import auto_response
    response, ratio = auto_response(gtab, data)
    response = list(response)

    peaks_sphere = get_sphere('symmetric362')

    # NOTE(review): `response` is computed above but never used, and
    # peaks_from_model receives the *tensor* model (tenmodel), not a CSD
    # model, despite the `peaks_csd` name — confirm this is intended.
    # TODO: check what that does
    peaks_csd = peaks_from_model(model=tenmodel,
                                 data=data,
                                 sphere=peaks_sphere,
                                 relative_peak_threshold=.5,  # .5
                                 min_separation_angle=25,
                                 mask=mask,
                                 return_sh=True,
                                 sh_order=sh_order,
                                 normalize_peaks=True,
                                 parallel=False)

    peaks_csd.affine = affine
    fpeaks = dname + 'peaks.npz'
    save_peaks(fpeaks, peaks_csd)

    # NOTE(review): `save_trk` is imported but unused; `save_trk_old_style`
    # (defined elsewhere in this file) is what actually writes the .trk.
    from dipy.io.trackvis import save_trk
    from dipy.tracking import utils
    from dipy.tracking.local import (ThresholdTissueClassifier,
                                     LocalTracking)

    stopping_thr = 0.25  # 0.25

    pam = load_peaks(fpeaks)

    # ffa = dname + 'tensor_fa_nomask.nii.gz'
    fa, fa_affine = load_nifti(ffa)

    classifier = ThresholdTissueClassifier(fa, stopping_thr)

    # seeds
    seed_mask = fa > 0.4  # 0.4 #TODO: check this parameter
    seeds = utils.seeds_from_mask(seed_mask,
                                  density=seed_density,
                                  affine=affine)

    # tractography, if affine then in world coordinates
    streamlines = LocalTracking(pam, classifier, seeds, affine=affine,
                                step_size=.5)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    ftractogram = dname + 'tractogram.trk'

    # save .trk
    save_trk_old_style(ftractogram, streamlines, affine, fa.shape)

    if show:
        # render
        show_results(data, streamlines, fa, fa_affine)
selected_csf = np.zeros(FA.shape, dtype='bool') selected_gm = np.zeros(FA.shape, dtype='bool') selected_csf[indices_csf] = True selected_gm[indices_gm] = True csf_md = np.mean(MD[selected_csf]) gm_md = np.mean(MD[selected_gm]) """ The ``auto_response`` function will calculate FA for an ROI of radius equal to ``roi_radius`` in the center of the volume and return the response function estimated in that region for the voxels with FA higher than 0.7. """ response, ratio = auto_response(gtab, denoised_arr, roi_radius=10, fa_thr=0.7) evals_d = response[0] """ We will now use the evals obtained from the ``auto_response`` to generate the ``multi_shell_fiber_response`` required by the MSMT-CSD model. Note that we use the mean diffusivities of ``csf`` and ``gm`` as inputs to generate the response. """ response_mcsd = multi_shell_fiber_response(sh_order=8, bvals=bvals, evals=evals_d, csf_md=csf_md, gm_md=gm_md) """ Now we build the MSMT-CSD model with the ``response_mcsd`` as input. We then call the ``fit`` function to fit one slice of the 3D data and visualize it.
def main():
    """Probabilistic whole-brain tractography driven by ``config.json``.

    Reads the DWI and a label volume, builds a white-matter mask from a
    fixed FreeSurfer label list, fits SHORE (for the stopping criterion)
    and CSD (for the probabilistic direction getter), tracks with
    LocalTracking, and writes the streamlines to ``track.tck``.
    Progress and timings are printed to stdout.
    """
    start = time.time()
    with open('config.json') as config_json:
        config = json.load(config_json)

    # Load the data
    dmri_image = nib.load(config['data_file'])
    dmri = dmri_image.get_data()
    affine = dmri_image.affine
    #aparc_im = nib.load(config['freesurfer'])
    aparc_im = nib.load('volume.nii.gz')
    aparc = aparc_im.get_data()
    end = time.time()
    print('Loaded Files: ' + str((end - start)))
    print(dmri.shape)
    print(aparc.shape)

    # Create the white matter and callosal masks
    # NOTE(review): the label list contains duplicates (12, 52 appear
    # twice); harmless, since each label only sets mask voxels to 1.
    start = time.time()
    wm_regions = [
        2, 41, 16, 17, 28, 60, 51, 53, 12, 52, 12, 52, 13, 18, 54, 50,
        11, 251, 252, 253, 254, 255, 10, 49, 46, 7
    ]

    wm_mask = np.zeros(aparc.shape)
    for l in wm_regions:
        wm_mask[aparc == l] = 1
    #np.save('wm_mask',wm_mask)
    #p = os.getcwd()+'wm.json'
    #json.dump(wm_mask, codecs.open(p, 'w', encoding='utf-8'), separators=(',', ':'), sort_keys=True, indent=4)
    #with open('wm_mask.txt', 'wb') as wm:
    #np.savetxt('wm.txt', wm_mask, fmt='%5s')
    #print(wm_mask)

    # Create the gradient table from the bvals and bvecs
    bvals, bvecs = read_bvals_bvecs(config['data_bval'],
                                    config['data_bvec'])
    gtab = gradient_table(bvals, bvecs, b0_threshold=100)
    end = time.time()
    print('Created Gradient Table: ' + str((end - start)))

    ##The probabilistic model##
    """
    # Use the Constant Solid Angle (CSA) to find the Orientation Dist. Function
    # Helps orient the wm tracts
    start = time.time()
    csa_model = CsaOdfModel(gtab, sh_order=6)
    csa_peaks = peaks_from_model(csa_model, dmri, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=wm_mask)
    print('Creating CSA Model: ' + str(time.time() - start))
    """
    # Use the SHORE model to find Orientation Dist. Function
    start = time.time()
    shore_model = ShoreModel(gtab)
    shore_peaks = peaks_from_model(shore_model, dmri, default_sphere,
                                   relative_peak_threshold=.8,
                                   min_separation_angle=45,
                                   mask=wm_mask)
    print('Creating Shore Model: ' + str(time.time() - start))

    # Begins the seed in the wm tracts
    seeds = utils.seeds_from_mask(wm_mask, density=[1, 1, 1], affine=affine)
    print('Created White Matter seeds: ' + str(time.time() - start))

    # Create a CSD model to measure Fiber Orientation Dist
    print('Begin the probabilistic model')
    response, ratio = auto_response(gtab, dmri, roi_radius=10, fa_thr=0.7)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
    csd_fit = csd_model.fit(data=dmri, mask=wm_mask)
    print('Created the CSD model: ' + str(time.time() - start))

    # Set the Direction Getter to randomly choose directions
    prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                                        max_angle=30.,
                                                        sphere=default_sphere)
    print('Created the Direction Getter: ' + str(time.time() - start))

    # Restrict the white matter tracking
    classifier = ThresholdTissueClassifier(shore_peaks.gfa, .25)
    print('Created the Tissue Classifier: ' + str(time.time() - start))

    # Create the probabilistic model
    streamlines = LocalTracking(prob_dg, tissue_classifier=classifier,
                                seeds=seeds, step_size=.5, max_cross=1,
                                affine=affine)
    print('Created the probabilistic model: ' + str(time.time() - start))

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)
    print('Computed streamlines: ' + str(time.time() - start))

    #from dipy.tracking.streamline import transform_streamlines
    #streamlines = transform_streamlines(streamlines, np.linalg.inv(affine))

    # Create a tractogram from the streamlines and save it
    tractogram = Tractogram(streamlines, affine_to_rasmm=affine)
    save(tractogram, 'track.tck')
    end = time.time()
    print("Created the tck file: " + str((end - start)))
def main():
    """Estimate the fiber response function (FRF) and save it to disk.

    Iteratively lowers the FA threshold until at least ``min_nvox`` voxels
    are available for the ``auto_response`` estimation, then writes the
    eigenvalues plus the mean b=0 signal to ``args.frf_file``.

    Raises
    ------
    ValueError
        If fewer than ``args.min_nvox`` voxels pass even at the minimum
        FA threshold.

    Fixes: ``np.bool`` (alias removed in NumPy 1.24) replaced with the
    builtin ``bool``; deprecated ``logging.warn`` replaced with
    ``logging.warning``.
    """
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)
    else:
        logging.basicConfig(level=logging.INFO)

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs])
    assert_outputs_exists(parser, args, [args.frf_file])

    vol = nib.load(args.input)
    data = vol.get_data()

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    check_b0_threshold(args, bvals.min())
    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    if args.min_fa_thresh < 0.4:
        logging.warning(
            'Minimal FA threshold ({}) seems really small. Make sure it '
            'makes sense for this dataset.'.format(args.min_fa_thresh))

    if args.mask:
        mask = nib.load(args.mask).get_data().astype(bool)
        data = applymask(data, mask)

    if args.mask_wm:
        wm_mask = nib.load(args.mask_wm).get_data().astype('bool')
    else:
        wm_mask = np.ones_like(data[..., 0], dtype=bool)
        logging.warning(
            'No white matter mask specified! mask_data will be used instead, '
            'if it has been supplied. \nBe *VERY* careful about the '
            'estimation of the fiber response function to ensure no invalid '
            'voxel was used.')

    data_in_wm = applymask(data, wm_mask)

    fa_thresh = args.fa_thresh
    # Iteratively trying to fit at least 300 voxels. Lower the FA threshold
    # when it doesn't work. Fail if the fa threshold is smaller than
    # the min_threshold.
    # We use an epsilon since the -= 0.05 might incurs numerical imprecision.
    nvox = 0
    while nvox < args.min_nvox and \
            fa_thresh >= args.min_fa_thresh - 0.00001:
        response, ratio, nvox = auto_response(gtab, data_in_wm,
                                              roi_center=args.roi_center,
                                              roi_radius=args.roi_radius,
                                              fa_thr=fa_thresh,
                                              return_number_of_voxels=True)

        logging.debug('Number of indices is %s with threshold of %s',
                      nvox, fa_thresh)
        fa_thresh -= 0.05

    if nvox < args.min_nvox:
        raise ValueError(
            "Could not find at least {} voxels with sufficient FA "
            "to estimate the FRF!".format(args.min_nvox))

    logging.debug("Found %i voxels with FA threshold %f for FRF estimation",
                  nvox, fa_thresh + 0.05)
    logging.debug("FRF eigenvalues: %s", str(response[0]))
    logging.debug("Ratio for smallest to largest eigen value is %f", ratio)
    logging.debug("Mean of the b=0 signal for voxels used for FRF: %f",
                  response[1])

    full_response = np.array(
        [response[0][0], response[0][1], response[0][2], response[1]])

    np.savetxt(args.frf_file, full_response)
def _run_interface(self, runtime):
    """Estimate the CSD fiber response function and write it to disk.

    Three strategies, selected by the interface inputs: ``auto``
    (dipy ``auto_response``), ``recursive`` (``recursive_response``),
    or the default eigenvalue-sorting estimate from high-FA voxels.
    Also writes the binary WM mask of the voxels used.

    Fixes: the no-mask branch referenced an undefined name ``imref``
    (NameError at runtime) — it now uses the loaded ``img``; deprecated
    ``Logger.warn`` replaced with ``Logger.warning``.
    """
    from dipy.core.gradients import GradientTable
    from dipy.reconst.dti import fractional_anisotropy, mean_diffusivity
    from dipy.reconst.csdeconv import recursive_response, auto_response

    img = nb.load(self.inputs.in_file)
    affine = img.get_affine()

    if isdefined(self.inputs.in_mask):
        msk = nb.load(self.inputs.in_mask).get_data()
        # Binarize: anything positive is in-mask.
        msk[msk > 0] = 1
        msk[msk < 0] = 0
    else:
        # No mask supplied: use every voxel of the input image.
        # (was: np.ones(imref.get_shape()) — `imref` was never defined)
        msk = np.ones(img.shape)

    data = img.get_data().astype(np.float32)
    gtab = self._get_gradient_table()

    # Mean b=0 signal over the voxels above the FA threshold.
    evals = np.nan_to_num(nb.load(self.inputs.in_evals).get_data())
    FA = np.nan_to_num(fractional_anisotropy(evals)) * msk
    indices = np.where(FA > self.inputs.fa_thresh)
    S0s = data[indices][:, np.nonzero(gtab.b0s_mask)[0]]
    S0 = np.mean(S0s)

    if self.inputs.auto:
        response, ratio = auto_response(gtab, data,
                                        roi_radius=self.inputs.roi_radius,
                                        fa_thr=self.inputs.fa_thresh)
        response = response[0].tolist() + [S0]
    elif self.inputs.recursive:
        MD = np.nan_to_num(mean_diffusivity(evals)) * msk
        indices = np.logical_or(
            FA >= 0.4, (np.logical_and(FA >= 0.15, MD >= 0.0011)))
        data = nb.load(self.inputs.in_file).get_data()
        response = recursive_response(gtab, data, mask=indices, sh_order=8,
                                      peak_thr=0.01, init_fa=0.08,
                                      init_trace=0.0021, iter=8,
                                      convergence=0.001,
                                      parallel=True)
        ratio = abs(response[1] / response[0])
    else:
        # Default: sort the mean eigenvalues of the high-FA voxels.
        lambdas = evals[indices]
        l01 = np.sort(np.mean(lambdas, axis=0))
        response = np.array([l01[-1], l01[-2], l01[-2], S0])
        ratio = abs(response[1] / response[0])

    if ratio > 0.25:
        IFLOGGER.warning(('Estimated response is not prolate enough. '
                          'Ratio=%0.3f.') % ratio)
    elif ratio < 1.e-5 or np.any(np.isnan(response)):
        # Fall back to a canonical prolate response.
        response = np.array([1.8e-3, 3.6e-4, 3.6e-4, S0])
        IFLOGGER.warning(
            ('Estimated response is not valid, using a default one'))
    else:
        IFLOGGER.info(('Estimated response: %s') % str(response[:3]))

    np.savetxt(op.abspath(self.inputs.response), response)

    # Persist the mask of voxels actually used for the estimation.
    wm_mask = np.zeros_like(FA)
    wm_mask[indices] = 1
    nb.Nifti1Image(
        wm_mask.astype(np.uint8), affine,
        None).to_filename(op.abspath(self.inputs.out_mask))
    return runtime
fmask = dname + 'mask_extern.nii' mask_vol, mask_affine = load_nifti(fmask) sh_order = 8 if data.shape[-1] < 15: raise ValueError('You need at least 15 unique DWI volumes to ' 'compute fiber ODFs. You currently have: {0}' ' DWI volumes.'.format(data.shape[-1])) elif data.shape[-1] < 30: sh_order = 6 from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel, auto_response) response, ratio = auto_response(gtab, data) response = list(response) peaks_sphere = get_sphere('symmetric362') model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=sh_order) peaks_csd = peaks_from_model(model=model, data=data, sphere=peaks_sphere, relative_peak_threshold=.5, min_separation_angle=25, mask=mask_vol, return_sh=True, sh_order=sh_order,
auto_response) from dipy.tracking.local import LocalTracking, ParticleFilteringTracking from dipy.tracking import utils from dipy.viz import window, actor, colormap as cmap renderer = window.Renderer() img_pve_csf, img_pve_gm, img_pve_wm = read_stanford_pve_maps() hardi_img, gtab, labels_img = read_stanford_labels() data = hardi_img.get_data() labels = labels_img.get_data() affine = hardi_img.affine shape = labels.shape response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7) csd_model = ConstrainedSphericalDeconvModel(gtab, response) csd_fit = csd_model.fit(data, mask=img_pve_wm.get_data()) dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff, max_angle=20., sphere=default_sphere) """ CMC/ACT Tissue Classifiers -------------------------- Continuous map criterion (CMC) [Girard2014]_ and Anatomically-constrained tractography (ACT) [Smith2012]_ both use PVE information from anatomical images to determine when the tractography stops. Both tissue classifiers use a trilinear interpolation at the tracking position. CMC tissue classifier uses a probability derived from the PVE maps to determine if the streamline reaches a 'valid' or 'invalid'
def compCsdPeaks(basename, output, mask=None):
    """Compute CSD peaks for one dataset and pickle them to `output`.

    Python 2 code (print statements, cPickle).

    Parameters:
        basename -- input prefix; reads basename + .nii.gz/.bval/.bvec
        output   -- path the csd_peaks object is cPickle-dumped to
        mask     -- optional mask NIfTI path; if None, median_otsu builds one

    Returns the PeaksAndMetrics object from peaks_from_model.
    """
    home = os.getcwd()
    fbase = basename

    fdwi = fbase+".nii.gz"
    fbval = fbase+".bval"
    fbvec = fbase+".bvec"

    print fdwi,fbval,fbvec

    img = nib.load(fdwi)
    data = img.get_data()
    zooms = img.get_header().get_zooms()[:3]
    affine = img.get_affine()

    # reslice image into 1x1x1 iso voxel
    #    new_zooms = (1., 1., 1.)
    #    data, affine = resample(data, affine, zooms, new_zooms)
    #    img = nib.Nifti1Image(data, affine)
    #
    #    print data.shape
    #    print img.get_header().get_zooms()
    #    print "###"
    #
    #    nib.save(img, 'C5_iso.nii.gz')

    bval, bvec = dio.read_bvals_bvecs(fbval, fbvec)

    # invert bvec z for GE scanner
    # NOTE(review): despite the comment saying z, this flips column 1 (y);
    # confirm which axis the scanner convention requires.
    bvec[:,1]*= -1

    gtab = dgrad.gradient_table(bval, bvec)

    if mask is None:
        print 'generate mask'
        maskdata, mask = median_otsu(data, 3, 1, False,
                                     vol_idx=range(10, 50), dilate=2)
    else:
        mask = nib.load(mask).get_data()
        maskdata = applymask(data, mask)

    #    tenmodel = dti.TensorModel(gtab)
    #    tenfit = tenmodel.fit(data)
    #    print('Computing anisotropy measures (FA, MD, RGB)')
    #
    #
    #    FA = fractional_anisotropy(tenfit.evals)
    #    FA[np.isnan(FA)] = 0
    #
    #    fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine())
    #    nib.save(fa_img, 'FA.nii.gz')
    #
    #    return

    # estimate response function, ratio should be ~0.2
    response, ratio = auto_response(gtab, maskdata, roi_radius=10,
                                    fa_thr=0.7)
    print response, ratio

    # reconstruct csd model
    print "estimate csd_model"
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)

    #a_data = maskdata[40:80, 40:80, 60:61]
    #c_data = maskdata[40:80, 59:60, 50:80]
    #s_data = maskdata[59:60, 40:70, 30:80]
    #data_small = a_data
    #
    #    evals = response[0]
    #    evecs = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).T
    #sphere = get_sphere('symmetric362')
    #csd_fit = csd_model.fit(data_small)
    #csd_odf = csd_fit.odf(sphere)
    #
    #
    #fodf_spheres = fvtk.sphere_funcs(csd_odf, sphere, scale=1, norm=False)
    ##fodf_spheres.GetProperty().SetOpacity(0.4)
    ##
    #fvtk.add(ren, fodf_spheres)
    ##fvtk.add(ren, fodf_peaks)
    #fvtk.show(ren)
    #
    #sys.exit()

    # fit csd peaks
    print "fit csd peaks"
    print "peaks_from_model using core# =" + str(multiprocessing.cpu_count())
    sphere = get_sphere('symmetric724')
    csd_peaks = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 mask=mask,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=True,
                                 nbr_processes=10)

    #fodf_peaks = fvtk.peaks(csd_peaks.peak_dirs, csd_peaks.peak_values, scale=1)
    #    fd, fname = mkstemp()
    #    pickle.save_pickle(fname, csd_peaks)
    #
    #    os.close(fd)
    #pickle.dump(csd_peaks, open("csd.p", "wb"))
    with open(output, 'wb') as fout:
        cPickle.dump(csd_peaks, fout, -1)

    print "done writing to file %s"% (output)
    return csd_peaks