def test_peaks_shm_coeff():
    SNR = 100
    S0 = 100
    _, fbvals, fbvecs = get_data('small_64D')

    from dipy.data import get_sphere
    sphere = get_sphere('repulsion724')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))

    data, _ = multi_tensor(gtab, mevals, S0, angles=[(0, 0), (60, 0)],
                           fractions=[50, 50], snr=SNR)

    from dipy.reconst.shm import CsaOdfModel
    model = CsaOdfModel(gtab, 4)

    pam = peaks_from_model(model, data[None, :], sphere, .5, 45,
                           return_odf=True, return_sh=True)
    # Test that spherical harmonic coefficients return back correctly
    odf2 = np.dot(pam.shm_coeff, pam.B)
    assert_array_almost_equal(pam.odf, odf2)
    assert_equal(pam.shm_coeff.shape[-1], 45)

    pam = peaks_from_model(model, data[None, :], sphere, .5, 45,
                           return_odf=True, return_sh=False)
    assert_equal(pam.shm_coeff, None)

    pam = peaks_from_model(model, data[None, :], sphere, .5, 45,
                           return_odf=True, return_sh=True,
                           sh_basis_type='mrtrix')
    odf2 = np.dot(pam.shm_coeff, pam.B)
    assert_array_almost_equal(pam.odf, odf2)
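# A minimal sketch of the SH synthesis identity the test above relies on:
# an ODF sampled on a sphere is the matrix product of its SH coefficients
# with the sampled basis, odf = shm_coeff . B. This assumes
# dipy.reconst.shm.real_sym_sh_basis is available; the sphere choice and
# the random coefficients are illustrative only.
import numpy as np
from dipy.core.sphere import unit_icosahedron
from dipy.reconst.shm import real_sym_sh_basis

sph = unit_icosahedron.subdivide(2)                 # any sampling sphere
B, m, n = real_sym_sh_basis(4, sph.theta, sph.phi)  # (n_vertices, n_coeffs)
coeff = np.random.rand(B.shape[1])                  # stand-in SH coefficients
odf = np.dot(coeff, B.T)  # same contraction as np.dot(pam.shm_coeff, pam.B)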
def test_peaksFromModelParallel():
    SNR = 100
    S0 = 100
    _, fbvals, fbvecs = get_data('small_64D')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    data, _ = multi_tensor(gtab, mevals, S0, angles=[(0, 0), (60, 0)],
                           fractions=[50, 50], snr=SNR)

    # test equality with/without multiprocessing
    model = SimpleOdfModel(gtab)
    pam_multi = peaks_from_model(model, data, _sphere, .5, 45,
                                 normalize_peaks=True, return_odf=True,
                                 return_sh=True, parallel=True)
    pam_single = peaks_from_model(model, data, _sphere, .5, 45,
                                  normalize_peaks=True, return_odf=True,
                                  return_sh=True, parallel=False)

    assert_equal(pam_multi.gfa.dtype, pam_single.gfa.dtype)
    assert_equal(pam_multi.gfa.shape, pam_single.gfa.shape)
    assert_array_almost_equal(pam_multi.gfa, pam_single.gfa)

    assert_equal(pam_multi.qa.dtype, pam_single.qa.dtype)
    assert_equal(pam_multi.qa.shape, pam_single.qa.shape)
    assert_array_almost_equal(pam_multi.qa, pam_single.qa)

    assert_equal(pam_multi.peak_values.dtype, pam_single.peak_values.dtype)
    assert_equal(pam_multi.peak_values.shape, pam_single.peak_values.shape)
    assert_array_almost_equal(pam_multi.peak_values, pam_single.peak_values)

    assert_equal(pam_multi.peak_indices.dtype, pam_single.peak_indices.dtype)
    assert_equal(pam_multi.peak_indices.shape, pam_single.peak_indices.shape)
    assert_array_equal(pam_multi.peak_indices, pam_single.peak_indices)

    assert_equal(pam_multi.peak_dirs.dtype, pam_single.peak_dirs.dtype)
    assert_equal(pam_multi.peak_dirs.shape, pam_single.peak_dirs.shape)
    assert_array_almost_equal(pam_multi.peak_dirs, pam_single.peak_dirs)

    assert_equal(pam_multi.shm_coeff.dtype, pam_single.shm_coeff.dtype)
    assert_equal(pam_multi.shm_coeff.shape, pam_single.shm_coeff.shape)
    assert_array_almost_equal(pam_multi.shm_coeff, pam_single.shm_coeff)

    assert_equal(pam_multi.odf.dtype, pam_single.odf.dtype)
    assert_equal(pam_multi.odf.shape, pam_single.odf.shape)
    assert_array_almost_equal(pam_multi.odf, pam_single.odf)
def _run_interface(self, runtime):
    from dipy.reconst import shm
    from dipy.data import get_sphere
    from dipy.reconst.peaks import peaks_from_model

    gtab = self._get_gradient_table()

    img = nb.load(self.inputs.in_file)
    data = img.get_data()
    affine = img.affine
    mask = None
    if isdefined(self.inputs.mask_file):
        mask = nb.load(self.inputs.mask_file).get_data()

    # Fit it
    model = shm.QballModel(gtab, 8)
    sphere = get_sphere('symmetric724')
    peaks = peaks_from_model(
        model=model, data=data, relative_peak_threshold=.5,
        min_separation_angle=25, sphere=sphere, mask=mask)
    apm = shm.anisotropic_power(peaks.shm_coeff)
    out_file = self._gen_filename('apm')
    nb.Nifti1Image(apm.astype("float32"), affine).to_filename(out_file)
    IFLOGGER.info('APM qball image saved as %s', out_file)

    return runtime
def test_peaksFromModel():
    data = np.zeros((10, 2))

    # Test basic case
    model = SimpleOdfModel(_gtab)
    odf_argmax = _odf.argmax()
    pam = peaks_from_model(model, data, _sphere, .5, 45,
                           normalize_peaks=True)

    assert_array_equal(pam.gfa, gfa(_odf))
    assert_array_equal(pam.peak_values[:, 0], 1.)
    assert_array_equal(pam.peak_values[:, 1:], 0.)
    mn, mx = _odf.min(), _odf.max()
    assert_array_equal(pam.qa[:, 0], (mx - mn) / mx)
    assert_array_equal(pam.qa[:, 1:], 0.)
    assert_array_equal(pam.peak_indices[:, 0], odf_argmax)
    assert_array_equal(pam.peak_indices[:, 1:], -1)

    # Test that odf array matches and is right shape
    pam = peaks_from_model(model, data, _sphere, .5, 45, return_odf=True)
    expected_shape = (len(data), len(_odf))
    assert_equal(pam.odf.shape, expected_shape)
    assert_((_odf == pam.odf).all())
    assert_array_equal(pam.peak_values[:, 0], _odf.max())

    # Test mask
    mask = (np.arange(10) % 2) == 1
    pam = peaks_from_model(model, data, _sphere, .5, 45, mask=mask,
                           normalize_peaks=True)
    assert_array_equal(pam.gfa[~mask], 0)
    assert_array_equal(pam.qa[~mask], 0)
    assert_array_equal(pam.peak_values[~mask], 0)
    assert_array_equal(pam.peak_indices[~mask], -1)

    assert_array_equal(pam.gfa[mask], gfa(_odf))
    assert_array_equal(pam.peak_values[mask, 0], 1.)
    assert_array_equal(pam.peak_values[mask, 1:], 0.)
    mn, mx = _odf.min(), _odf.max()
    assert_array_equal(pam.qa[mask, 0], (mx - mn) / mx)
    assert_array_equal(pam.qa[mask, 1:], 0.)
    assert_array_equal(pam.peak_indices[mask, 0], odf_argmax)
    assert_array_equal(pam.peak_indices[mask, 1:], -1)
def test_peaks_shm_coeff():
    SNR = 100
    S0 = 100
    _, fbvals, fbvecs = get_data('small_64D')

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))

    data, _ = multi_tensor(gtab, mevals, S0, angles=[(0, 0), (60, 0)],
                           fractions=[50, 50], snr=SNR)

    from dipy.reconst.shm import CsaOdfModel
    model = CsaOdfModel(gtab, 4)

    pam = peaks_from_model(model, data[None, :], sphere, .5, 45,
                           return_odf=True, return_sh=True)
    # Test that spherical harmonic coefficients return back correctly
    odf2 = np.dot(pam.shm_coeff, pam.B)
    assert_array_almost_equal(pam.odf, odf2)
    assert_equal(pam.shm_coeff.shape[-1], 45)

    pam = peaks_from_model(model, data[None, :], sphere, .5, 45,
                           return_odf=True, return_sh=False)
    assert_equal(pam.shm_coeff, None)

    pam = peaks_from_model(model, data[None, :], sphere, .5, 45,
                           return_odf=True, return_sh=True,
                           sh_basis_type='mrtrix')
    odf2 = np.dot(pam.shm_coeff, pam.B)
    assert_array_almost_equal(pam.odf, odf2)
def test_peaksFromModel():
    data = np.zeros((10, 2))

    # Test basic case
    model = SimpleOdfModel(_gtab)
    odf_argmax = _odf.argmax()
    pam = peaks_from_model(model, data, _sphere, .5, 45,
                           normalize_peaks=True)

    assert_array_equal(pam.gfa, gfa(_odf))
    assert_array_equal(pam.peak_values[:, 0], 1.)
    assert_array_equal(pam.peak_values[:, 1:], 0.)
    mn, mx = _odf.min(), _odf.max()
    assert_array_equal(pam.qa[:, 0], (mx - mn) / mx)
    assert_array_equal(pam.qa[:, 1:], 0.)
    assert_array_equal(pam.peak_indices[:, 0], odf_argmax)
    assert_array_equal(pam.peak_indices[:, 1:], -1)

    # Test that odf array matches and is right shape
    pam = peaks_from_model(model, data, _sphere, .5, 45, return_odf=True)
    expected_shape = (len(data), len(_odf))
    assert_equal(pam.odf.shape, expected_shape)
    assert_true((_odf == pam.odf).all())
    assert_array_equal(pam.peak_values[:, 0], _odf.max())

    # Test mask
    mask = (np.arange(10) % 2) == 1
    pam = peaks_from_model(model, data, _sphere, .5, 45, mask=mask,
                           normalize_peaks=True)
    assert_array_equal(pam.gfa[~mask], 0)
    assert_array_equal(pam.qa[~mask], 0)
    assert_array_equal(pam.peak_values[~mask], 0)
    assert_array_equal(pam.peak_indices[~mask], -1)

    assert_array_equal(pam.gfa[mask], gfa(_odf))
    assert_array_equal(pam.peak_values[mask, 0], 1.)
    assert_array_equal(pam.peak_values[mask, 1:], 0.)
    mn, mx = _odf.min(), _odf.max()
    assert_array_equal(pam.qa[mask, 0], (mx - mn) / mx)
    assert_array_equal(pam.qa[mask, 1:], 0.)
    assert_array_equal(pam.peak_indices[mask, 0], odf_argmax)
    assert_array_equal(pam.peak_indices[mask, 1:], -1)
def tracking_eudx4csd(dir_src, dir_out, verbose=False):

    # Load data
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi = pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    #fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')
    fmask = pjoin(dir_src, 'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    sphere = get_sphere('symmetric724')

    response, ratio = auto_response(gtab, data, roi_radius=par_ar_radius,
                                    fa_thr=par_ar_fa_th)
    # print('Response function', response)

    # Model fitting
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_peaks = peaks_from_model(csd_model, data, sphere,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=False)

    # Computation of streamlines
    streamlines = EuDX(csd_peaks.peak_values,
                       csd_peaks.peak_indices,
                       seeds=par_eudx_seeds,
                       odf_vertices=sphere.vertices,
                       a_low=par_eudx_threshold)

    # Saving tractography
    voxel_size = (par_dim_vox,) * 3
    dims = mask.shape[:3]
    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = voxel_size
    hdr['voxel_order'] = 'LAS'
    hdr['dim'] = dims
    hdr['vox_to_ras'] = affine
    strm = ((sl, None, None) for sl in streamlines)
    trk_name = ('tractogram_' + par_b_tag + '_' + par_dim_tag + '_' +
                par_csd_tag + '_' + par_eudx_tag + '.trk')
    trk_out = os.path.join(dir_out, trk_name)
    nib.trackvis.write(trk_out, strm, hdr, points_space='voxel')
import multiprocessing

import nibabel as nib
import dipy.io as dio
import dipy.core.gradients as dgrad
from dipy.data import get_sphere
from dipy.segment.mask import median_otsu
from dipy.reconst.peaks import peaks_from_model


def peaks_from_nifti(fdwi, fbvec=None, fbval=None, mask=None):
    if '.' not in fdwi:
        fbase = fdwi
        fdwi = fdwi + ".nii.gz"
        if not fbval:
            fbval = fbase + ".bval"
        if not fbvec:
            fbvec = fbase + ".bvec"
    print(fdwi)
    img = nib.load(fdwi)
    data = img.get_data()
    zooms = img.get_header().get_zooms()[:3]
    affine = img.get_affine()

    bval, bvec = dio.read_bvals_bvecs(fbval, fbvec)
    gtab = dgrad.gradient_table(bval, bvec)

    if not mask:
        print('generate mask')
        maskdata, mask = median_otsu(data, 3, 1, False,
                                     vol_idx=range(10, 50), dilate=2)
    else:
        mask_img = nib.load(mask)
        mask = mask_img.get_data()
        from dipy.segment.mask import applymask
        maskdata = applymask(data, mask)
    print(maskdata.shape, mask.shape)

    from dipy.reconst.shm import QballModel, CsaOdfModel
    model = QballModel(gtab, 6)
    sphere = get_sphere('symmetric724')

    print("fit Qball peaks")
    proc_num = multiprocessing.cpu_count() - 1
    print("peaks_from_model using core# =" + str(proc_num))

    peaks = peaks_from_model(model=model, data=maskdata,
                             relative_peak_threshold=.5,
                             min_separation_angle=25,
                             sphere=sphere, mask=mask,
                             parallel=True, nbr_processes=proc_num)

    return peaks
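# Hypothetical usage of the helper above; 'dwi' is an assumed basename, so
# the function expands it to dwi.nii.gz / dwi.bval / dwi.bvec and builds a
# brain mask with median_otsu before fitting Qball peaks in parallel.
pam = peaks_from_nifti('dwi')
print(pam.peak_dirs.shape, pam.gfa.shape)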
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                   auto_response)

response, ratio = auto_response(gtab, data)
response = list(response)

peaks_sphere = get_sphere('symmetric362')

model = ConstrainedSphericalDeconvModel(gtab, response,
                                        sh_order=sh_order)

peaks_csd = peaks_from_model(model=model,
                             data=data,
                             sphere=peaks_sphere,
                             relative_peak_threshold=.5,
                             min_separation_angle=25,
                             mask=mask_vol,
                             return_sh=True,
                             sh_order=sh_order,
                             normalize_peaks=True,
                             parallel=False)

peaks_csd.affine = affine
fpeaks = dname + 'peaks.npz'
save_peaks(fpeaks, peaks_csd)

from dipy.io.trackvis import save_trk
from dipy.tracking import utils
from dipy.tracking.local import (ThresholdTissueClassifier,
                                 LocalTracking)
print('\tCreating Mask')
data_masked, mask = median_otsu(data, 2, 1)

'''
We've loaded an image called `atlas` which is a map of tissue types such
that every integer value in the array `labels` represents an anatomical
structure or tissue type [#]_. We'll use `peaks_from_model` to apply the
`CsaOdfModel` to each white matter voxel and estimate fiber orientations
which we can use for tracking.
'''
print('\tCalculating peaks')
csamodel = shm.CsaOdfModel(gtab, 6)
csapeaks = peaks.peaks_from_model(model=csamodel,
                                  data=data,
                                  sphere=peaks.default_sphere,
                                  relative_peak_threshold=.5,
                                  min_separation_angle=25,
                                  mask=mask)

'''
Brief interlude to make sure we don't seed from low-FA voxels.
'''
print('\tTensor Fitting')
tensor_model = TensorModel(gtab, fit_method='WLS')
tensor_fit = tensor_model.fit(data, mask)
FA = fractional_anisotropy(tensor_fit.evals)

stopping_values = np.zeros(csapeaks.peak_values.shape)
stopping_values[:] = FA[..., None]
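# A sketch of how the FA-valued stopping array built above is typically
# consumed next: EuDX propagates along the CSA peak indices but stops where
# stopping_values (i.e. FA) drops below a_low. The seed count and a_low
# here are illustrative, not values from the original script.
from dipy.tracking.eudx import EuDX

eu = EuDX(stopping_values,
          csapeaks.peak_indices,
          seeds=10**4,
          odf_vertices=peaks.default_sphere.vertices,
          a_low=0.1)
streamlines = list(eu)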
def run(self, input_files, bvalues, bvectors, mask_files,
        b0_threshold=0.0, bvecs_tol=0.01, roi_center=None, roi_radius=10,
        fa_thr=0.7, frf=None, extract_pam_values=False, sh_order=8,
        odf_to_sh_order=8, out_dir='',
        out_pam='peaks.pam5', out_shm='shm.nii.gz',
        out_peaks_dir='peaks_dirs.nii.gz',
        out_peaks_values='peaks_values.nii.gz',
        out_peaks_indices='peaks_indices.nii.gz',
        out_gfa='gfa.nii.gz'):
    """ Constrained spherical deconvolution

    Parameters
    ----------
    input_files : string
        Path to the input volumes. This path may contain wildcards to
        process multiple inputs at once.
    bvalues : string
        Path to the bvalues files. This path may contain wildcards to use
        multiple bvalues files at once.
    bvectors : string
        Path to the bvectors files. This path may contain wildcards to use
        multiple bvectors files at once.
    mask_files : string
        Path to the input masks. This path may contain wildcards to use
        multiple masks at once. (default: No mask used)
    b0_threshold : float, optional
        Threshold used to find b=0 directions.
    bvecs_tol : float, optional
        Bvecs should be unit vectors. (default: 0.01)
    roi_center : variable int, optional
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]` (default None).
    roi_radius : int, optional
        Radius of cubic ROI in voxels (default 10).
    fa_thr : float, optional
        FA threshold for calculating the response function (default 0.7).
    frf : variable float, optional
        Fiber response function. Can be, for example, input as 15 4 4
        (from the command line) or [15, 4, 4] from a Python script, to be
        converted to float and multiplied by 10**-4. If None the fiber
        response function will be computed automatically (default: None).
    extract_pam_values : bool, optional
        Whether or not to save pam volumes as single nifti files.
    sh_order : int, optional
        Spherical harmonics order (default 8) used in the CSD fit.
    odf_to_sh_order : int, optional
        Spherical harmonics order used for peak_from_model to compress the
        ODF to spherical harmonics coefficients (default 8).
    out_dir : string, optional
        Output directory (default input file directory).
    out_pam : string, optional
        Name of the peaks volume to be saved (default 'peaks.pam5').
    out_shm : string, optional
        Name of the spherical harmonics volume to be saved
        (default 'shm.nii.gz').
    out_peaks_dir : string, optional
        Name of the peaks directions volume to be saved
        (default 'peaks_dirs.nii.gz').
    out_peaks_values : string, optional
        Name of the peaks values volume to be saved
        (default 'peaks_values.nii.gz').
    out_peaks_indices : string, optional
        Name of the peaks indices volume to be saved
        (default 'peaks_indices.nii.gz').
    out_gfa : string, optional
        Name of the generalized FA volume to be saved
        (default 'gfa.nii.gz').

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of
       the fibre orientation distribution in diffusion MRI: Non-negativity
       constrained super-resolved spherical deconvolution.
    """
    io_it = self.get_io_iterator()

    for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir, opeaks_values,
         opeaks_indices, ogfa) in io_it:

        logging.info('Loading {0}'.format(dwi))
        img = nib.load(dwi)
        data = img.get_data()
        affine = img.affine

        bvals, bvecs = read_bvals_bvecs(bval, bvec)
        gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold,
                              atol=bvecs_tol)
        mask_vol = nib.load(maskfile).get_data().astype(np.bool)

        sh_order = 8
        if data.shape[-1] < 15:
            raise ValueError(
                'You need at least 15 unique DWI volumes to compute '
                'fiber odfs. You currently have: {0} DWI volumes.'
                .format(data.shape[-1]))
        elif data.shape[-1] < 30:
            sh_order = 6

        if frf is None:
            logging.info('Computing response function')
            if roi_center is not None:
                logging.info('Response ROI center:\n{0}'
                             .format(roi_center))
                logging.info('Response ROI radius:\n{0}'
                             .format(roi_radius))
            response, ratio, nvox = auto_response(
                gtab, data,
                roi_center=roi_center,
                roi_radius=roi_radius,
                fa_thr=fa_thr,
                return_number_of_voxels=True)
            response = list(response)
        else:
            logging.info('Using response function')
            if isinstance(frf, str):
                l01 = np.array(literal_eval(frf), dtype=np.float64)
            else:
                l01 = np.array(frf, dtype=np.float64)

            l01 *= 10 ** -4
            response = np.array([l01[0], l01[1], l01[1]])
            ratio = l01[1] / l01[0]
            response = (response, ratio)

        logging.info('Eigenvalues for the frf of the input data are: {0}'
                     .format(response[0]))
        logging.info('Ratio for smallest to largest eigen value is {0}'
                     .format(ratio))

        peaks_sphere = get_sphere('repulsion724')

        logging.info('CSD computation started.')
        csd_model = ConstrainedSphericalDeconvModel(gtab, response,
                                                    sh_order=sh_order)

        peaks_csd = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=peaks_sphere,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     mask=mask_vol,
                                     return_sh=True,
                                     sh_order=sh_order,
                                     normalize_peaks=True,
                                     parallel=False)
        peaks_csd.affine = affine

        save_peaks(opam, peaks_csd)
        logging.info('CSD computation completed.')

        if extract_pam_values:
            peaks_to_niftis(peaks_csd, oshm, opeaks_dir, opeaks_values,
                            opeaks_indices, ogfa, reshape_dirs=True)

        dname_ = os.path.dirname(opam)
        if dname_ == '':
            logging.info('Pam5 file saved in current directory')
        else:
            logging.info('Pam5 file saved in {0}'.format(dname_))

    return io_it
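# Sketch of the frf string parsing described in the docstring above: the
# command-line value "15, 4, 4" becomes axial/radial eigenvalues scaled by
# 10**-4 plus a ratio, matching the (response, ratio) tuple handed to the
# CSD model. The literal string here is illustrative.
from ast import literal_eval
import numpy as np

l01 = np.array(literal_eval('[15, 4, 4]'), dtype=np.float64) * 10 ** -4
response = (np.array([l01[0], l01[1], l01[1]]), l01[1] / l01[0])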
def dmri_recon(sid, data_dir, out_dir, resolution, recon='csd',
               dirs='', num_threads=2):
    import tempfile
    #tempfile.tempdir = '/om/scratch/Fri/ksitek/'

    import os
    oldval = None
    if 'MKL_NUM_THREADS' in os.environ:
        oldval = os.environ['MKL_NUM_THREADS']
    os.environ['MKL_NUM_THREADS'] = '%d' % num_threads
    ompoldval = None
    if 'OMP_NUM_THREADS' in os.environ:
        ompoldval = os.environ['OMP_NUM_THREADS']
    os.environ['OMP_NUM_THREADS'] = '%d' % num_threads

    import nibabel as nib
    import numpy as np
    from glob import glob

    if resolution == '0.2mm':
        filename = 'Reg_S64550_nii4d.nii'
        #filename = 'angular_resample/dwi_%s.nii.gz'%dirs
        fimg = os.path.abspath(glob(os.path.join(data_dir, filename))[0])
    else:
        filename = 'Reg_S64550_nii4d_resamp-%s.nii.gz' % (resolution)
        fimg = os.path.abspath(
            glob(os.path.join(data_dir, 'resample', filename))[0])
    print("dwi file = %s" % fimg)

    fbval = os.path.abspath(
        glob(os.path.join(data_dir, 'bvecs', 'camino_120_RAS.bvals'))[0])
    print("bval file = %s" % fbval)
    fbvec = os.path.abspath(
        glob(os.path.join(data_dir, 'bvecs',
                          'camino_120_RAS_flipped-xy.bvecs'))[0])
    # 'angular_resample',
    # 'dwi_%s.bvecs'%dirs))[0])
    print("bvec file = %s" % fbvec)

    img = nib.load(fimg)
    data = img.get_fdata()
    affine = img.get_affine()

    prefix = sid

    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    '''
    from dipy.core.gradients import vector_norm
    b0idx = []
    for idx, val in enumerate(bvals):
        if val < 1:
            pass
            #bvecs[idx] = [1, 0, 0]
        else:
            b0idx.append(idx)
            #print "b0idx=%d"%idx
    #print "input bvecs:"
    #print bvecs
    bvecs[b0idx, :] = bvecs[b0idx, :]/vector_norm(bvecs[b0idx])[:, None]
    #print "bvecs after normalization:"
    #print bvecs
    '''

    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)
    gtab.bvecs.shape == bvecs.shape
    gtab.bvecs
    gtab.bvals.shape == bvals.shape
    gtab.bvals

    #from dipy.segment.mask import median_otsu
    #b0_mask, mask = median_otsu(data[:, :, :, b0idx].mean(axis=3).squeeze(), 4, 4)

    if resolution == '0.2mm':
        mask_name = 'Reg_S64550_nii_b0-slice_mask.nii.gz'
        fmask1 = os.path.join(data_dir, mask_name)
    else:
        mask_name = 'Reg_S64550_nii_b0-slice_mask_resamp-%s.nii.gz' % (
            resolution)
        fmask1 = os.path.join(data_dir, 'resample', mask_name)
    print("fmask file = %s" % fmask1)
    mask = nib.load(fmask1).get_fdata()

    ''' DTI model & save metrics '''
    from dipy.reconst.dti import TensorModel
    print("running tensor model")
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    from dipy.reconst.dti import fractional_anisotropy
    print("running FA")
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    tensor_fa_file = os.path.abspath('%s_tensor_fa.nii.gz' % (prefix))
    nib.save(fa_img, tensor_fa_file)

    from dipy.reconst.dti import axial_diffusivity
    print("running AD")
    AD = axial_diffusivity(tenfit.evals)
    AD[np.isnan(AD)] = 0
    ad_img = nib.Nifti1Image(AD, img.get_affine())
    tensor_ad_file = os.path.abspath('%s_tensor_ad.nii.gz' % (prefix))
    nib.save(ad_img, tensor_ad_file)

    from dipy.reconst.dti import radial_diffusivity
    print("running RD")
    RD = radial_diffusivity(tenfit.evals)
    RD[np.isnan(RD)] = 0
    rd_img = nib.Nifti1Image(RD, img.get_affine())
    tensor_rd_file = os.path.abspath('%s_tensor_rd.nii.gz' % (prefix))
    nib.save(rd_img, tensor_rd_file)

    from dipy.reconst.dti import mean_diffusivity
    print("running MD")
    MD = mean_diffusivity(tenfit.evals)
    MD[np.isnan(MD)] = 0
    md_img = nib.Nifti1Image(MD, img.get_affine())
    tensor_md_file = os.path.abspath('%s_tensor_md.nii.gz' % (prefix))
    nib.save(md_img, tensor_md_file)

    evecs = tenfit.evecs
    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    tensor_evec_file = os.path.abspath('%s_tensor_evec.nii.gz' % (prefix))
    nib.save(evec_img, tensor_evec_file)

    ''' ODF model '''
    useFA = True
    print("creating %s model" % recon)
    if recon == 'csd':
        from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
        from dipy.reconst.csdeconv import auto_response
        response, ratio = auto_response(gtab, data, roi_radius=10,
                                        fa_thr=0.5)  # 0.7
        model = ConstrainedSphericalDeconvModel(gtab, response)
        useFA = True
        return_sh = True
    elif recon == 'csa':
        from dipy.reconst.shm import CsaOdfModel, normalize_data
        model = CsaOdfModel(gtab, sh_order=8)
        useFA = True
        return_sh = True
    elif recon == 'gqi':
        from dipy.reconst.gqi import GeneralizedQSamplingModel
        model = GeneralizedQSamplingModel(gtab)
        return_sh = False
    else:
        raise ValueError('only csd, csa, gqi supported currently')

    # Leftover DSI experiment; if re-enabled it would override the model
    # selected above, so it stays commented out here.
    #from dipy.reconst.dsi import (DiffusionSpectrumDeconvModel,
    #                              DiffusionSpectrumModel)
    #model = DiffusionSpectrumDeconvModel(gtab)

    '''reconstruct ODFs'''
    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    #odfs = fit.odf(sphere)

    # with CSD/GQI, uses > 50GB per core; don't get greedy with cores!
    from dipy.reconst.peaks import peaks_from_model
    print("running peaks_from_model")
    peaks = peaks_from_model(
        model=model,
        data=data,
        sphere=sphere,
        mask=mask,
        return_sh=return_sh,
        return_odf=False,
        normalize_peaks=True,
        npeaks=5,
        relative_peak_threshold=.5,
        min_separation_angle=10,  # 25,
        parallel=num_threads > 1,
        nbr_processes=num_threads)

    # save the peaks
    from dipy.io.peaks import save_peaks
    peaks_file = os.path.abspath('%s_peaks.pam5' % (prefix))
    save_peaks(peaks_file, peaks)

    # save the spherical harmonics
    shm_coeff_file = os.path.abspath('%s_shm_coeff.nii.gz' % (prefix))
    if return_sh:
        shm_coeff = peaks.shm_coeff
        nib.save(nib.Nifti1Image(shm_coeff, img.get_affine()),
                 shm_coeff_file)
    else:
        # if it's not a spherical model, output an essentially null file
        np.savetxt(shm_coeff_file, [0])

    # save the generalized fractional anisotropy image
    gfa_img = nib.Nifti1Image(peaks.gfa, img.get_affine())
    model_gfa_file = os.path.abspath('%s_%s_gfa.nii.gz' % (prefix, recon))
    nib.save(gfa_img, model_gfa_file)

    #from dipy.reconst.dti import quantize_evecs
    #peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)
    #eu = EuDX(FA, peak_indices, odf_vertices=sphere.vertices,
    #          a_low=0.2, seeds=10**6, ang_thr=35)

    ''' probabilistic tracking '''
    '''
    from dipy.direction import ProbabilisticDirectionGetter
    from dipy.tracking.local import LocalTracking
    from dipy.tracking.streamline import Streamlines
    from dipy.io.streamline import save_trk

    prob_dg = ProbabilisticDirectionGetter.from_shcoeff(shm_coeff,
                                                        max_angle=45.,
                                                        sphere=sphere)
    streamlines_generator = LocalTracking(prob_dg, affine, step_size=.5,
                                          max_cross=1)

    # Generate streamlines object
    streamlines = Streamlines(streamlines_generator)

    affine = img.get_affine()
    vox_size = fa_img.get_header().get_zooms()[:3]

    fname = os.path.abspath('%s_%s_prob_streamline.trk' % (prefix, recon))
    save_trk(fname, streamlines, affine, vox_size=vox_size)
    '''

    ''' deterministic tracking with EuDX method'''
    from dipy.tracking.eudx import EuDX
    print("reconstructing with EuDX")
    if useFA:
        eu = EuDX(FA,
                  peaks.peak_indices[..., 0],
                  odf_vertices=sphere.vertices,
                  a_low=0.001,  # default is 0.0239
                  seeds=10**6,
                  ang_thr=75)
    else:
        eu = EuDX(peaks.gfa,
                  peaks.peak_indices[..., 0],
                  odf_vertices=sphere.vertices,
                  #a_low=0.1,
                  seeds=10**6,
                  ang_thr=45)

    sl_fname = os.path.abspath('%s_%s_det_streamline.trk' % (prefix, recon))

    # trying new dipy.io.streamline module, per email to neuroimaging list
    # 2018.04.05
    from nibabel.streamlines import Field
    from nibabel.orientations import aff2axcodes
    affine = img.get_affine()
    vox_size = fa_img.get_header().get_zooms()[:3]
    fov_shape = FA.shape[:3]

    if vox_size is not None and fov_shape is not None:
        hdr = {}
        hdr[Field.VOXEL_TO_RASMM] = affine.copy()
        hdr[Field.VOXEL_SIZES] = vox_size
        hdr[Field.DIMENSIONS] = fov_shape
        hdr[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

        tractogram = nib.streamlines.Tractogram(eu)
        tractogram.affine_to_rasmm = affine
        trk_file = nib.streamlines.TrkFile(tractogram, header=hdr)
        nib.streamlines.save(trk_file, sl_fname)

    if oldval:
        os.environ['MKL_NUM_THREADS'] = oldval
    else:
        del os.environ['MKL_NUM_THREADS']
    if ompoldval:
        os.environ['OMP_NUM_THREADS'] = ompoldval
    else:
        del os.environ['OMP_NUM_THREADS']

    print('all output files created')

    return (tensor_fa_file, tensor_evec_file, model_gfa_file, sl_fname,
            affine, tensor_ad_file, tensor_rd_file, tensor_md_file,
            shm_coeff_file, peaks_file)
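# Hypothetical invocation of dmri_recon above; the subject id, directories
# and resolution tag are placeholders, not paths from the original script.
outputs = dmri_recon('S64550', '/data/dwi', '/data/out', '0.5mm',
                     recon='csd', num_threads=4)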
sf_model = sfm.SparseFascicleModel(gtab, sphere=sphere,
                                   l1_ratio=0.5, alpha=0.001,
                                   response=response[0])

"""
We fit this model to the data in each voxel in the white-matter mask, so that
we can use these directions in tracking:
"""

from dipy.reconst.peaks import peaks_from_model

pnm = peaks_from_model(sf_model, data, sphere,
                       relative_peak_threshold=.5,
                       min_separation_angle=25,
                       mask=white_matter,
                       parallel=True)

"""
A ThresholdTissueClassifier object is used to segment the data to track only
through areas in which the Generalized Fractional Anisotropy (GFA) is
sufficiently high.
"""

from dipy.tracking.local import ThresholdTissueClassifier

classifier = ThresholdTissueClassifier(pnm.gfa, .25)

"""
Tracking will be started from a set of seeds evenly distributed in the white
matter:
"""
sf_odf = sf_fit.odf(sphere)

fodf_spheres = fvtk.sphere_funcs(sf_odf, sphere, scale=1.3, norm=True)

ren = fvtk.ren()
fvtk.add(ren, fodf_spheres)

print('Saving illustration as sf_odfs.png')
fvtk.record(ren, out_path='sf_odfs.png', size=(1000, 1000))

"""
We can extract the peaks from the ODF, and plot these as well
"""

sf_peaks = dpp.peaks_from_model(sf_model,
                                data_small,
                                sphere,
                                relative_peak_threshold=.5,
                                min_separation_angle=25,
                                return_sh=False)

fvtk.clear(ren)
fodf_peaks = fvtk.peaks(sf_peaks.peak_dirs, sf_peaks.peak_values, scale=1.3)
fvtk.add(ren, fodf_peaks)

print('Saving illustration as sf_peaks.png')
fvtk.record(ren, out_path='sf_peaks.png', size=(1000, 1000))

"""
Finally, we plot both the peaks and the ODFs, overlaid:
"""

fodf_spheres.GetProperty().SetOpacity(0.4)
fvtk.add(ren, fodf_spheres)
def run(self, input_files, bvalues_files, bvectors_files, mask_files,
        sh_order=6, odf_to_sh_order=8, b0_threshold=0.0, bvecs_tol=0.01,
        extract_pam_values=False, out_dir='',
        out_pam='peaks.pam5', out_shm='shm.nii.gz',
        out_peaks_dir='peaks_dirs.nii.gz',
        out_peaks_values='peaks_values.nii.gz',
        out_peaks_indices='peaks_indices.nii.gz',
        out_gfa='gfa.nii.gz'):
    """ Constant Solid Angle.

    Parameters
    ----------
    input_files : string
        Path to the input volumes. This path may contain wildcards to
        process multiple inputs at once.
    bvalues_files : string
        Path to the bvalues files. This path may contain wildcards to use
        multiple bvalues files at once.
    bvectors_files : string
        Path to the bvectors files. This path may contain wildcards to use
        multiple bvectors files at once.
    mask_files : string
        Path to the input masks. This path may contain wildcards to use
        multiple masks at once. (default: No mask used)
    sh_order : int, optional
        Spherical harmonics order (default 6) used in the CSA fit.
    odf_to_sh_order : int, optional
        Spherical harmonics order used for peak_from_model to compress the
        ODF to spherical harmonics coefficients (default 8).
    b0_threshold : float, optional
        Threshold used to find b=0 directions.
    bvecs_tol : float, optional
        Threshold used so that norm(bvec)=1 (default 0.01).
    extract_pam_values : bool, optional
        Whether or not to save pam volumes as single nifti files.
    out_dir : string, optional
        Output directory (default input file directory).
    out_pam : string, optional
        Name of the peaks volume to be saved (default 'peaks.pam5').
    out_shm : string, optional
        Name of the spherical harmonics volume to be saved
        (default 'shm.nii.gz').
    out_peaks_dir : string, optional
        Name of the peaks directions volume to be saved
        (default 'peaks_dirs.nii.gz').
    out_peaks_values : string, optional
        Name of the peaks values volume to be saved
        (default 'peaks_values.nii.gz').
    out_peaks_indices : string, optional
        Name of the peaks indices volume to be saved
        (default 'peaks_indices.nii.gz').
    out_gfa : string, optional
        Name of the generalized FA volume to be saved
        (default 'gfa.nii.gz').

    References
    ----------
    .. [1] Aganj, I., et al. 2009. ODF Reconstruction in Q-Ball Imaging
       with Solid Angle Consideration.
    """
    io_it = self.get_io_iterator()

    for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir,
         opeaks_values, opeaks_indices, ogfa) in io_it:

        logging.info('Loading {0}'.format(dwi))
        vol = nib.load(dwi)
        data = vol.get_data()
        affine = vol.affine

        bvals, bvecs = read_bvals_bvecs(bval, bvec)
        if b0_threshold < bvals.min():
            warn("b0_threshold (value: {0}) is too low, increase your "
                 "b0_threshold. It should be higher than the first b0 "
                 "value ({1}).".format(b0_threshold, bvals.min()))
        gtab = gradient_table(bvals, bvecs,
                              b0_threshold=b0_threshold, atol=bvecs_tol)
        mask_vol = nib.load(maskfile).get_data().astype(np.bool)

        peaks_sphere = get_sphere('repulsion724')

        logging.info('Starting CSA computations {0}'.format(dwi))
        csa_model = CsaOdfModel(gtab, sh_order)

        peaks_csa = peaks_from_model(model=csa_model,
                                     data=data,
                                     sphere=peaks_sphere,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     mask=mask_vol,
                                     return_sh=True,
                                     sh_order=odf_to_sh_order,
                                     normalize_peaks=True,
                                     parallel=False)
        peaks_csa.affine = affine

        save_peaks(opam, peaks_csa)
        logging.info('Finished CSA {0}'.format(dwi))

        if extract_pam_values:
            peaks_to_niftis(peaks_csa, oshm, opeaks_dir, opeaks_values,
                            opeaks_indices, ogfa, reshape_dirs=True)

        dname_ = os.path.dirname(opam)
        if dname_ == '':
            logging.info('Pam5 file saved in current directory')
        else:
            logging.info('Pam5 file saved in {0}'.format(dname_))

    return io_it
def deterministic(diffusion_file, bvecs_file, bvals_file, outdir,
                  mask_file=None, order=4, nb_seeds_per_voxel=1, step=0.5,
                  fmt="%.4f"):
    """ Compute a deterministic tractography using an ODF model.

    Parameters
    ----------
    diffusion_file: str (mandatory)
        a file containing the preprocessed diffusion data.
    bvecs_file: str (mandatory)
        a file containing the diffusion gradient directions.
    bvals_file: str (mandatory)
        a file containing the diffusion b-values.
    outdir: str (mandatory)
        the output directory.
    mask_file: str (optional, default None)
        an image used to mask the diffusion data during the tractography.
        If not set, all the image voxels are considered.
    order: int (optional, default 4)
        the order of the ODF model.
    nb_seeds_per_voxel: int (optional, default 1)
        the number of seeds per voxel used during the propagation.
    step: float (optional, default 0.5)
        the integration step in voxel fraction used during the propagation.
    fmt: str (optional, default '%.4f')
        the saved track elements format.

    Returns
    -------
    track_file: str
        a deterministic model of the white matter organization.
    """
    # Read diffusion sequence
    bvals, bvecs = read_bvals_bvecs(bvals_file, bvecs_file)
    gtab = gradient_table(bvals, bvecs)
    diffusion_array = nibabel.load(diffusion_file).get_data()
    if mask_file is not None:
        mask_array = nibabel.load(mask_file).get_data()
    else:
        mask_array = numpy.ones(diffusion_array.shape[:3],
                                dtype=numpy.uint8)

    # Estimate ODF model
    csamodel = shm.CsaOdfModel(gtab, order)
    csapeaks = peaks.peaks_from_model(
        model=csamodel,
        data=diffusion_array,
        sphere=peaks.default_sphere,
        relative_peak_threshold=.8,
        min_separation_angle=45,
        mask=mask_array)

    # Compute deterministic tractography in voxel space so affine is equal
    # to identity
    seeds = utils.seeds_from_mask(mask_array, density=nb_seeds_per_voxel)
    streamline_generator = EuDX(csapeaks.peak_values,
                                csapeaks.peak_indices,
                                odf_vertices=peaks.default_sphere.vertices,
                                a_low=.05,
                                step_sz=step,
                                seeds=seeds)
    affine = streamline_generator.affine
    streamlines = list(streamline_generator)

    # Save the tracks
    track_file = os.path.join(outdir, "fibers.txt")
    savetxt(track_file, streamlines, fmt=fmt)

    return track_file
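# Hypothetical call of the helper above; the file names are placeholders.
# The returned path points at the "fibers.txt" written inside outdir.
track_file = deterministic('dwi_preproc.nii.gz', 'dwi.bvec', 'dwi.bval',
                           outdir='tractography',
                           mask_file='wm_mask.nii.gz',
                           nb_seeds_per_voxel=2)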
# Loading values, vectors, image and mask
sphere = get_sphere('symmetric362')
print("loading bval/bvec files")
bvals, bvecs = read_bvals_bvecs("tp3_data//bvals2000", "tp3_data//bvecs2000")
gtab = gradient_table(bvals, bvecs)
print("loading nifti files")
img = nib.load("tp3_data//dwi2000.nii.gz")
affine = img.get_affine()
data = img.get_data()
mask = nib.load("tp3_data//_binary_mask.nii.gz").get_data()

## Apply mask
data_in_wm = applymask(data, mask)

response, ratio = auto_response(gtab, data_in_wm)

# Computing ODF
print("computing fODF... please wait an hour")
csd_model = ConstrainedSphericalDeconvModel(gtab, response,
                                            reg_sphere=sphere)
peaks_csd = peaks_from_model(model=csd_model,
                             data=data,
                             sphere=sphere,
                             relative_peak_threshold=.25,
                             min_separation_angle=25,
                             mask=mask,
                             normalize_peaks=True,
                             parallel=True)

# Saving files
print("saving files")
nib.save(nib.Nifti1Image(peaks_csd.shm_coeff.astype(np.float32), affine),
         "tp3_data//_fodf.nii.gz")
nib.save(nib.Nifti1Image(reshape_peaks_for_visualization(peaks_csd), affine),
         "tp3_data//_fodfpeaks.nii.gz")
def recursive_response(gtab, data, mask=None, sh_order=8, peak_thr=0.01,
                       init_fa=0.08, init_trace=0.0021, iter=8,
                       convergence=0.001, parallel=True, nbr_processes=None,
                       sphere=default_sphere):
    """ Recursive calibration of response function using peak threshold

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data
    mask : ndarray, optional
        mask for recursive calibration, for example a white matter mask. It
        has shape `data.shape[0:3]` and dtype=bool. Default: use the entire
        data array.
    sh_order : int, optional
        maximal spherical harmonics order. Default: 8
    peak_thr : float, optional
        peak threshold, how large the second peak can be relative to the
        first peak in order to call it a single fiber population [1].
        Default: 0.01
    init_fa : float, optional
        FA of the initial 'fat' response function (tensor). Default: 0.08
    init_trace : float, optional
        trace of the initial 'fat' response function (tensor).
        Default: 0.0021
    iter : int, optional
        maximum number of iterations for calibration. Default: 8.
    convergence : float, optional
        convergence criterion, maximum relative change of SH coefficients.
        Default: 0.001.
    parallel : bool, optional
        Whether to use parallelization in peak-finding during the
        calibration procedure. Default: True
    nbr_processes : int
        If `parallel` is True, the number of subprocesses to use (default
        multiprocessing.cpu_count()).
    sphere : Sphere, optional.
        The sphere used for peak finding. Default: default_sphere.

    Returns
    -------
    response : ndarray
        response function in SH coefficients

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. Using an FA threshold is not a very robust
    method. It is dependent on the dataset (an uninformed, subjective
    choice), and still depends on the diffusion tensor (FA and first
    eigenvector), which has low accuracy at high b-value. This function
    recursively calibrates the response function; for more information
    see [1].

    References
    ----------
    .. [1] Tax, C.M.W., et al. NeuroImage 2014. Recursive calibration of
       the fiber response function for spherical deconvolution of
       diffusion MRI data.
    """
    S0 = 1
    evals = fa_trace_to_lambdas(init_fa, init_trace)
    res_obj = (evals, S0)

    if mask is None:
        data = data.reshape(-1, data.shape[-1])
    else:
        data = data[mask]

    n = np.arange(0, sh_order + 1, 2)
    where_dwi = lazy_index(~gtab.b0s_mask)
    response_p = np.ones(len(n))

    for num_it in range(1, iter):
        r_sh_all = np.zeros(len(n))
        csd_model = ConstrainedSphericalDeconvModel(gtab, res_obj,
                                                    sh_order=sh_order)

        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     relative_peak_threshold=peak_thr,
                                     min_separation_angle=25,
                                     parallel=parallel,
                                     nbr_processes=nbr_processes)

        dirs = csd_peaks.peak_dirs
        vals = csd_peaks.peak_values
        single_peak_mask = (vals[:, 1] / vals[:, 0]) < peak_thr
        data = data[single_peak_mask]
        dirs = dirs[single_peak_mask]

        for num_vox in range(0, data.shape[0]):
            rotmat = vec2vec_rotmat(dirs[num_vox, 0], np.array([0, 0, 1]))
            rot_gradients = np.dot(rotmat, gtab.gradients.T).T
            x, y, z = rot_gradients[where_dwi].T
            r, theta, phi = cart2sphere(x, y, z)
            # for the gradient sphere
            B_dwi = real_sph_harm(0, n, theta[:, None], phi[:, None])
            r_sh_all += np.linalg.lstsq(B_dwi, data[num_vox, where_dwi])[0]

        response = r_sh_all / data.shape[0]
        res_obj = AxSymShResponse(data[:, gtab.b0s_mask].mean(), response)

        change = abs((response_p - response) / response_p)
        if all(change < convergence):
            break
        response_p = response

    return res_obj
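# A minimal sketch of how the calibrated response is typically consumed
# (it mirrors the usage later in this collection); gtab, data and wm_mask
# are assumed to exist, with wm_mask a boolean white-matter mask.
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel

response = recursive_response(gtab, data, mask=wm_mask, sh_order=8,
                              peak_thr=0.01, init_fa=0.08,
                              init_trace=0.0021, iter=8, convergence=0.001,
                              parallel=False)
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
csd_fit = csd_model.fit(data)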
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)

    if not args.not_all:
        args.fodf = args.fodf or 'fodf.nii.gz'
        args.peaks = args.peaks or 'peaks.nii.gz'
        args.peak_indices = args.peak_indices or 'peak_indices.nii.gz'

    arglist = [args.fodf, args.peaks, args.peak_indices]
    if args.not_all and not any(arglist):
        parser.error('When using --not_all, you need to specify at least '
                     'one file to output.')

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs])
    assert_outputs_exists(parser, args, arglist)

    nbr_processes = args.nbr_processes
    parallel = True
    if nbr_processes <= 0:
        nbr_processes = None
    elif nbr_processes == 1:
        parallel = False

    # Check for FRF filename
    base_odf_name, _ = split_name_with_nii(args.fodf)
    frf_filename = base_odf_name + '_frf.txt'
    if os.path.isfile(frf_filename) and not args.overwrite:
        parser.error('Cannot save frf file, "{0}" already exists. '
                     'Use -f to overwrite.'.format(frf_filename))

    vol = nib.load(args.input)
    data = vol.get_data()

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if args.mask_wm is not None:
        wm_mask = nib.load(args.mask_wm).get_data().astype('bool')
    else:
        wm_mask = np.ones_like(data[..., 0], dtype=np.bool)
        logging.info(
            'No white matter mask specified! mask_data will be used '
            'instead, if it has been supplied. \nBe *VERY* careful about '
            'the estimation of the fiber response function for the CSD.')

    data_in_wm = applymask(data, wm_mask)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    if bvals.min() != 0:
        if bvals.min() > 20:
            raise ValueError(
                'The minimal bvalue is greater than 20. This is highly '
                'suspicious. Please check your data to ensure everything '
                'is correct.\nValue found: {}'.format(bvals.min()))
        else:
            logging.warning('Warning: no b=0 image. Setting b0_threshold '
                            'to bvals.min() = %s', bvals.min())
            gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())
    else:
        gtab = gradient_table(bvals, bvecs)

    if args.mask is None:
        mask = None
    else:
        mask = nib.load(args.mask).get_data().astype(np.bool)

    # Raise warning for sh order if there is not enough DWIs
    if data.shape[-1] < (args.sh_order + 1) * (args.sh_order + 2) / 2:
        warnings.warn(
            'We recommend having at least {0} unique DWI volumes, but you '
            'currently have {1} volumes. Try lowering the parameter '
            '--sh_order in case of non-convergence.'.format(
                (args.sh_order + 1) * (args.sh_order + 2) / 2,
                data.shape[-1]))

    fa_thresh = args.fa_thresh

    # If threshold is too high, try lower until enough indices are found;
    # estimating a response function with fa < 0.5 does not make sense
    nvox = 0
    while nvox < 300 and fa_thresh > 0.5:
        response, ratio, nvox = auto_response(
            gtab, data_in_wm,
            roi_center=args.roi_center,
            roi_radius=args.roi_radius,
            fa_thr=fa_thresh,
            return_number_of_voxels=True)

        logging.info('Number of indices is %s with threshold of %s',
                     nvox, fa_thresh)
        fa_thresh -= 0.05

    if nvox < 300:
        raise ValueError(
            'Could not find at least 300 voxels for estimating the frf!')

    logging.info('Found %s valid voxels for frf estimation.', nvox)

    response = list(response)
    logging.info('Response function is %s', response)

    if args.frf is not None:
        l01 = np.array(literal_eval(args.frf), dtype=np.float64)
        if not args.no_factor:
            l01 *= 10**-4

        response[0] = np.array([l01[0], l01[1], l01[1]])
        ratio = l01[1] / l01[0]

    logging.info("Eigenvalues for the frf of the input data are: %s",
                 response[0])
    logging.info("Ratio for smallest to largest eigen value is %s", ratio)
    np.savetxt(frf_filename, response[0])

    if not args.frf_only:
        reg_sphere = get_sphere('symmetric362')
        peaks_sphere = get_sphere('symmetric724')

        csd_model = ConstrainedSphericalDeconvModel(
            gtab, response, reg_sphere=reg_sphere, sh_order=args.sh_order)

        peaks_csd = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=peaks_sphere,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     mask=mask,
                                     return_sh=True,
                                     sh_basis_type=args.basis,
                                     sh_order=args.sh_order,
                                     normalize_peaks=True,
                                     parallel=parallel,
                                     nbr_processes=nbr_processes)

        if args.fodf:
            nib.save(nib.Nifti1Image(peaks_csd.shm_coeff.astype(np.float32),
                                     vol.affine),
                     args.fodf)

        if args.peaks:
            nib.save(nib.Nifti1Image(
                reshape_peaks_for_visualization(peaks_csd), vol.affine),
                args.peaks)

        if args.peak_indices:
            nib.save(nib.Nifti1Image(peaks_csd.peak_indices, vol.affine),
                     args.peak_indices)
sphere, like ``default_sphere`` which has 362 directions on the hemisphere,
without having to worry about memory limitations.
"""

from dipy.data import default_sphere

prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                                    max_angle=30.,
                                                    sphere=default_sphere)
streamlines = LocalTracking(prob_dg, classifier, seeds, affine, step_size=.5)

save_trk("probabilistic_shm_coeff.trk", streamlines, affine, labels.shape)

"""
Not all model fits have the ``shm_coeff`` attribute because not all models
use this basis to represent the data internally. However we can fit the ODF
of any model to the spherical harmonic basis using the ``peaks_from_model``
function.
"""

from dipy.reconst.peaks import peaks_from_model

peaks = peaks_from_model(csd_model, data, default_sphere, .5, 25,
                         mask=white_matter, return_sh=True, parallel=True)
fod_coeff = peaks.shm_coeff

prob_dg = ProbabilisticDirectionGetter.from_shcoeff(fod_coeff,
                                                    max_angle=30.,
                                                    sphere=default_sphere)
streamlines = LocalTracking(prob_dg, classifier, seeds, affine, step_size=.5)

save_trk("probabilistic_peaks_from_model.trk", streamlines, affine,
         labels.shape)
def dmri_recon(sid, data_dir, out_dir, resolution, recon='csd',
               num_threads=2):
    import tempfile
    #tempfile.tempdir = '/om/scratch/Fri/ksitek/'

    import os
    oldval = None
    if 'MKL_NUM_THREADS' in os.environ:
        oldval = os.environ['MKL_NUM_THREADS']
    os.environ['MKL_NUM_THREADS'] = '%d' % num_threads
    ompoldval = None
    if 'OMP_NUM_THREADS' in os.environ:
        ompoldval = os.environ['OMP_NUM_THREADS']
    os.environ['OMP_NUM_THREADS'] = '%d' % num_threads

    import nibabel as nib
    import numpy as np
    from glob import glob

    if resolution == '0.2mm':
        filename = 'Reg_S64550_nii4d.nii'
        fimg = os.path.abspath(glob(os.path.join(data_dir, filename))[0])
    else:
        filename = 'Reg_S64550_nii4d_resamp-%s.nii.gz' % (resolution)
        fimg = os.path.abspath(
            glob(os.path.join(data_dir, 'resample', filename))[0])
    print("dwi file = %s" % fimg)

    fbvec = os.path.abspath(
        glob(os.path.join(data_dir, 'bvecs',
                          'camino_120_RAS_flipped-xy.bvecs'))[0])
    print("bvec file = %s" % fbvec)
    fbval = os.path.abspath(
        glob(os.path.join(data_dir, 'bvecs', 'camino_120_RAS.bvals'))[0])
    print("bval file = %s" % fbval)

    img = nib.load(fimg)
    data = img.get_data()
    affine = img.get_affine()

    prefix = sid

    from dipy.io import read_bvals_bvecs
    from dipy.core.gradients import vector_norm
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

    b0idx = []
    for idx, val in enumerate(bvals):
        if val < 1:
            pass
            #bvecs[idx] = [1, 0, 0]
        else:
            b0idx.append(idx)
            #print("b0idx=%d" % idx)
    #print("input bvecs:")
    #print(bvecs)

    bvecs[b0idx, :] = bvecs[b0idx, :] / vector_norm(bvecs[b0idx])[:, None]
    #print("bvecs after normalization:")
    #print(bvecs)

    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)
    gtab.bvecs.shape == bvecs.shape
    gtab.bvecs
    gtab.bvals.shape == bvals.shape
    gtab.bvals

    from dipy.reconst.csdeconv import auto_response
    response, ratio = auto_response(gtab, data, roi_radius=10,
                                    fa_thr=0.1)  # 0.7

    #from dipy.segment.mask import median_otsu
    #b0_mask, mask = median_otsu(data[:, :, :, b0idx].mean(axis=3).squeeze(), 4, 4)

    if resolution == '0.2mm':
        mask_name = 'Reg_S64550_nii_b0-slice_mask.nii.gz'
        fmask1 = os.path.join(data_dir, mask_name)
    else:
        mask_name = 'Reg_S64550_nii_b0-slice_mask_resamp-%s.nii.gz' % (
            resolution)
        fmask1 = os.path.join(data_dir, 'resample', mask_name)
    print("fmask file = %s" % fmask1)
    mask = nib.load(fmask1).get_data()

    useFA = True
    print("creating model")
    if recon == 'csd':
        from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
        model = ConstrainedSphericalDeconvModel(gtab, response)
        useFA = True
    elif recon == 'csa':
        from dipy.reconst.shm import CsaOdfModel, normalize_data
        model = CsaOdfModel(gtab, 4)
        useFA = False
    else:
        raise ValueError('only csd, csa supported currently')

    # Leftover DSI experiment; if re-enabled it would override the model
    # selected above (and its fit has no shm_coeff), so it stays commented
    # out here.
    #from dipy.reconst.dsi import (DiffusionSpectrumDeconvModel,
    #                              DiffusionSpectrumModel)
    #model = DiffusionSpectrumDeconvModel(gtab)

    fit = model.fit(data)

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    #odfs = fit.odf(sphere)

    from dipy.reconst.peaks import peaks_from_model
    print("running peaks_from_model")
    peaks = peaks_from_model(model=model,
                             data=data,
                             sphere=sphere,
                             mask=mask,
                             return_sh=True,
                             return_odf=False,
                             normalize_peaks=True,
                             npeaks=5,
                             relative_peak_threshold=.5,
                             min_separation_angle=25,
                             parallel=num_threads > 1,
                             nbr_processes=num_threads)

    from dipy.reconst.dti import TensorModel
    print("running tensor model")
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    from dipy.reconst.dti import fractional_anisotropy
    print("running FA")
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    tensor_fa_file = os.path.abspath('%s_tensor_fa.nii.gz' % (prefix))
    nib.save(fa_img, tensor_fa_file)

    from dipy.reconst.dti import axial_diffusivity
    print("running AD")
    AD = axial_diffusivity(tenfit.evals)
    AD[np.isnan(AD)] = 0
    ad_img = nib.Nifti1Image(AD, img.get_affine())
    tensor_ad_file = os.path.abspath('%s_tensor_ad.nii.gz' % (prefix))
    nib.save(ad_img, tensor_ad_file)

    from dipy.reconst.dti import radial_diffusivity
    print("running RD")
    RD = radial_diffusivity(tenfit.evals)
    RD[np.isnan(RD)] = 0
    rd_img = nib.Nifti1Image(RD, img.get_affine())
    tensor_rd_file = os.path.abspath('%s_tensor_rd.nii.gz' % (prefix))
    nib.save(rd_img, tensor_rd_file)

    from dipy.reconst.dti import mean_diffusivity
    print("running MD")
    MD = mean_diffusivity(tenfit.evals)
    MD[np.isnan(MD)] = 0
    md_img = nib.Nifti1Image(MD, img.get_affine())
    tensor_md_file = os.path.abspath('%s_tensor_md.nii.gz' % (prefix))
    nib.save(md_img, tensor_md_file)

    evecs = tenfit.evecs
    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    tensor_evec_file = os.path.abspath('%s_tensor_evec.nii.gz' % (prefix))
    nib.save(evec_img, tensor_evec_file)

    shm_coeff = fit.shm_coeff
    shm_coeff_file = os.path.abspath('%s_shm_coeff.nii.gz' % (prefix))
    nib.save(nib.Nifti1Image(shm_coeff, img.get_affine()), shm_coeff_file)

    #from dipy.reconst.dti import quantize_evecs
    #peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)
    #eu = EuDX(FA, peak_indices, odf_vertices=sphere.vertices,
    #          a_low=0.2, seeds=10**6, ang_thr=35)

    fa_img = nib.Nifti1Image(peaks.gfa, img.get_affine())
    model_gfa_file = os.path.abspath('%s_%s_gfa.nii.gz' % (prefix, recon))
    nib.save(fa_img, model_gfa_file)

    from dipy.tracking.eudx import EuDX
    print("reconstructing with EuDX")
    if useFA:
        eu = EuDX(FA,
                  peaks.peak_indices[..., 0],
                  odf_vertices=sphere.vertices,
                  #a_low=0.1,
                  seeds=10**6,
                  ang_thr=45)
    else:
        eu = EuDX(peaks.gfa,
                  peaks.peak_indices[..., 0],
                  odf_vertices=sphere.vertices,
                  #a_low=0.1,
                  seeds=10**6,
                  ang_thr=45)

    sl_fname = os.path.abspath('%s_%s_streamline.trk' % (prefix, recon))
    """
    #import dipy.tracking.metrics as dmetrics
    streamlines = ((sl, None, None) for sl in eu)  # if dmetrics.length(sl) > 15)

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = fa_img.get_header().get_zooms()[:3]
    hdr['voxel_order'] = 'RAS'  # LAS
    hdr['dim'] = FA.shape[:3]

    nib.trackvis.write(sl_fname, streamlines, hdr, points_space='voxel')
    """
    # trying new dipy.io.streamline module, per email to neuroimaging list
    # 2018.04.05
    from nibabel.streamlines import Field
    from nibabel.orientations import aff2axcodes
    affine = img.get_affine()
    vox_size = fa_img.get_header().get_zooms()[:3]
    fov_shape = FA.shape[:3]

    if vox_size is not None and fov_shape is not None:
        hdr = {}
        hdr[Field.VOXEL_TO_RASMM] = affine.copy()
        hdr[Field.VOXEL_SIZES] = vox_size
        hdr[Field.DIMENSIONS] = fov_shape
        hdr[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

        tractogram = nib.streamlines.Tractogram(eu)
        tractogram.affine_to_rasmm = affine
        trk_file = nib.streamlines.TrkFile(tractogram, header=hdr)
        nib.streamlines.save(trk_file, sl_fname)

    if oldval:
        os.environ['MKL_NUM_THREADS'] = oldval
    else:
        del os.environ['MKL_NUM_THREADS']
    if ompoldval:
        os.environ['OMP_NUM_THREADS'] = ompoldval
    else:
        del os.environ['OMP_NUM_THREADS']

    assert tensor_fa_file
    assert tensor_evec_file
    assert model_gfa_file
    assert tensor_ad_file
    assert tensor_rd_file
    assert tensor_md_file
    assert shm_coeff_file
    print('all output files created')

    return (tensor_fa_file, tensor_evec_file, model_gfa_file, sl_fname,
            affine, tensor_ad_file, tensor_rd_file, tensor_md_file,
            shm_coeff_file)
def track(dname, fdwi, fbval, fbvec, fmask=None, seed_density=1, show=False):

    data, affine = load_nifti(fdwi)
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=50)

    if fmask is None:
        from dipy.segment.mask import median_otsu
        b0_mask, mask = median_otsu(data)  # TODO: check parameters to improve the mask
    else:
        mask, mask_affine = load_nifti(fmask)
        mask = np.squeeze(mask)  # fix mask dimensions

    # compute DTI model
    from dipy.reconst.dti import TensorModel
    tenmodel = TensorModel(gtab)  # , fit_method='OLS') #, min_signal=5000)

    # fit the dti model
    tenfit = tenmodel.fit(data, mask=mask)

    # save fa
    ffa = dname + 'tensor_fa.nii.gz'
    fa_img = nib.Nifti1Image(tenfit.fa.astype(np.float32), affine)
    nib.save(fa_img, ffa)

    sh_order = 8  # TODO: check what that does
    if data.shape[-1] < 15:
        raise ValueError('You need at least 15 unique DWI volumes to '
                         'compute fiber ODFs. You currently have: {0}'
                         ' DWI volumes.'.format(data.shape[-1]))
    elif data.shape[-1] < 30:
        sh_order = 6

    # compute the response equation ?
    from dipy.reconst.csdeconv import auto_response
    response, ratio = auto_response(gtab, data)
    response = list(response)

    peaks_sphere = get_sphere('symmetric362')  # TODO: check what that does

    peaks_csd = peaks_from_model(model=tenmodel,
                                 data=data,
                                 sphere=peaks_sphere,
                                 relative_peak_threshold=.5,  # .5
                                 min_separation_angle=25,
                                 mask=mask,
                                 return_sh=True,
                                 sh_order=sh_order,
                                 normalize_peaks=True,
                                 parallel=False)
    peaks_csd.affine = affine

    fpeaks = dname + 'peaks.npz'
    save_peaks(fpeaks, peaks_csd)

    from dipy.io.trackvis import save_trk
    from dipy.tracking import utils
    from dipy.tracking.local import (ThresholdTissueClassifier,
                                     LocalTracking)

    stopping_thr = 0.25  # 0.25

    pam = load_peaks(fpeaks)

    #ffa = dname + 'tensor_fa_nomask.nii.gz'
    fa, fa_affine = load_nifti(ffa)

    classifier = ThresholdTissueClassifier(fa, stopping_thr)

    # seeds
    seed_mask = fa > 0.4  # 0.4 #TODO: check this parameter
    seeds = utils.seeds_from_mask(seed_mask, density=seed_density,
                                  affine=affine)

    # tractography, if affine then in world coordinates
    streamlines = LocalTracking(pam, classifier, seeds, affine=affine,
                                step_size=.5)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    ftractogram = dname + 'tractogram.trk'

    # save .trk
    save_trk_old_style(ftractogram, streamlines, affine, fa.shape)

    if show:
        # render
        show_results(data, streamlines, fa, fa_affine)
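# Hypothetical call of track() above; dname is the output prefix directory
# the function concatenates file names onto, so the trailing slash matters.
# All file names here are placeholders.
track('out/', 'dwi.nii.gz', 'dwi.bval', 'dwi.bvec',
      fmask='brain_mask.nii.gz', seed_density=1, show=False)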
fbvec = "original.bvec"
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
gtab = gradient_table(bvals, bvecs)

ten_model = TensorModel(gtab)
ten_fit = ten_model.fit(data, mask)

fa = fractional_anisotropy(ten_fit.evals)
cfa = color_fa(fa, ten_fit.evecs)

csamodel = CsaOdfModel(gtab, 6)
sphere = get_sphere('symmetric724')
pmd = peaks_from_model(model=csamodel,
                       data=data,
                       sphere=sphere,
                       relative_peak_threshold=.5,
                       min_separation_angle=25,
                       mask=mask,
                       return_odf=False)

# Deterministic tractography
eu = EuDX(a=fa, ind=pmd.peak_indices[..., 0], seeds=2000000,
          odf_vertices=sphere.vertices, a_low=0.2)
affine = eu.affine
csd_streamlines = list(eu)

# Remove tracts shorter than 30mm
#print(np.shape(csd_streamlines))
from dipy.tracking.utils import length
parcellation_data = parcellation_data * mask_data_bin
parcellation_wm_data = parcellation_data * wm_data_bin
parcellation_wm_data = parcellation_wm_data.astype(np.int)

#=============================================================================
# Track all of white matter using EuDX
#=============================================================================
if not os.path.exists(Msym_file) and not os.path.exists(Mdir_file):
    print('\tCalculating peaks')
    csamodel = shm.CsaOdfModel(gtab, 6)
    csapeaks = peaks.peaks_from_model(model=csamodel,
                                      data=dwi_data,
                                      sphere=peaks.default_sphere,
                                      relative_peak_threshold=.8,
                                      min_separation_angle=45,
                                      mask=wm_data_bin)

    print('\tTracking')
    seeds = utils.seeds_from_mask(parcellation_wm_data, density=2)
    condition_seeds = condition_seeds(seeds, np.eye(4),
                                      csapeaks.peak_values.shape[:3])
    streamline_generator = EuDX(csapeaks.peak_values,
                                csapeaks.peak_indices,
                                odf_vertices=peaks.default_sphere.vertices,
                                a_low=.05, step_sz=.5,
                                seeds=condition_seeds)
    affine = streamline_generator.affine
    streamlines = list(streamline_generator)
else:
    print('\tTracking already complete')
                             relative_peak_threshold=.5,
                             min_separation_angle=25,
                             parallel=False)
GFA = csd_peaks.gfa
"""
sphere = get_sphere('symmetric724')
csamodel = CsaOdfModel(gtab, 4)
#csafit = csamodel.fit(data_small)
csapeaks = peaks_from_model(model=csamodel,
                            data=maskdata,
                            sphere=sphere,
                            relative_peak_threshold=.5,
                            min_separation_angle=25,
                            mask=mask,
                            return_odf=False,
                            normalize_peaks=True)
GFA = csapeaks.gfa
GFA_img = nib.Nifti1Image(GFA.astype(np.float32), affine)
print(GFA.shape)
nib.save(GFA_img, os.getcwd() + '/zhibiao/' + f_name + '_GFA.nii.gz')
print('Saving "GFA.nii.gz" successful.')

from dipy.reconst.shore import ShoreModel
asm = ShoreModel(gtab)
print('Calculating...SHORE msd')
asmfit = asm.fit(data, mask)
def deterministic(diffusion_file, bvecs_file, bvals_file, trackfile,
                  mask_file=None, order=4, nb_seeds_per_voxel=1, step=0.5):
    """ Compute a deterministic tractography using an ODF model.

    Parameters
    ----------
    diffusion_file: str (mandatory)
        a file containing the preprocessed diffusion data.
    bvecs_file: str (mandatory)
        a file containing the diffusion gradient directions.
    bvals_file: str (mandatory)
        a file containing the diffusion b-values.
    trackfile: str (mandatory)
        a file path where the fibers will be saved in trackvis format.
    mask_file: str (optional, default None)
        an image used to mask the diffusion data during the tractography. If
        not set, all the image voxels are considered.
    order: int (optional, default 4)
        the order of the ODF model.
    nb_seeds_per_voxel: int (optional, default 1)
        the number of seeds per voxel used during the propagation.
    step: float (optional, default 0.5)
        the integration step in voxel fraction used during the propagation.

    Returns
    -------
    streamlines: list of 3-tuples
        the computed fiber tracks in trackvis format: (points: ndarray of
        shape (N, 3) where N is the number of points, scalars: None or
        ndarray of shape (N, M) where M is the number of scalars per point,
        properties: None or ndarray of shape (P,) where P is the number of
        properties).
    hdr: structured array
        structured array with trackvis header fields (voxel size, voxel
        order, dim).
    """
    # Read diffusion sequence
    bvals, bvecs = read_bvals_bvecs(bvals_file, bvecs_file)
    gtab = gradient_table(bvals, bvecs)
    diffusion_image = nibabel.load(diffusion_file)
    diffusion_array = diffusion_image.get_data()
    if mask_file is not None:
        mask_array = nibabel.load(mask_file).get_data()
    else:
        mask_array = numpy.ones(diffusion_array.shape[:3], dtype=numpy.uint8)

    # Estimate ODF model
    csamodel = shm.CsaOdfModel(gtab, order)
    csapeaks = peaks.peaks_from_model(model=csamodel,
                                      data=diffusion_array,
                                      sphere=peaks.default_sphere,
                                      relative_peak_threshold=.8,
                                      min_separation_angle=45,
                                      mask=mask_array)

    # Compute deterministic tractography in voxel space so affine is equal
    # to identity
    seeds = utils.seeds_from_mask(mask_array, density=nb_seeds_per_voxel)
    streamline_generator = EuDX(csapeaks.peak_values,
                                csapeaks.peak_indices,
                                odf_vertices=peaks.default_sphere.vertices,
                                a_low=.05, step_sz=step, seeds=seeds)
    # affine = streamline_generator.affine

    # Save the tracks in trackvis format. Build the list of 3-tuples first,
    # so the same (unconsumed) sequence can be returned to the caller.
    hdr = nibabel.trackvis.empty_header()
    hdr["voxel_size"] = diffusion_image.get_header().get_zooms()[:3]
    hdr["voxel_order"] = "LAS"
    hdr["dim"] = diffusion_array.shape[:3]
    streamlines = [track for track in streamline_generator]
    random.shuffle(streamlines)
    streamlines = [(track, None, None) for track in streamlines]
    nibabel.trackvis.write(trackfile, streamlines, hdr, points_space="voxel")

    return streamlines, hdr
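# A minimal usage sketch for ``deterministic`` above; all file names are
# hypothetical placeholders:
streamlines, hdr = deterministic("dwi_preproc.nii.gz", "dwi.bvec",
                                 "dwi.bval", "fibers.trk",
                                 mask_file="brain_mask.nii.gz",
                                 nb_seeds_per_voxel=2)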
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if not args.not_all:
        args.gfa = args.gfa or 'gfa.nii.gz'
        args.peaks = args.peaks or 'peaks.nii.gz'
        args.peak_indices = args.peak_indices or 'peaks_indices.nii.gz'
        args.sh = args.sh or 'sh.nii.gz'
        args.nufo = args.nufo or 'nufo.nii.gz'
        args.a_power = args.a_power or 'anisotropic_power.nii.gz'

    arglist = [args.gfa, args.peaks, args.peak_indices, args.sh, args.nufo,
               args.a_power]
    if args.not_all and not any(arglist):
        parser.error('When using --not_all, you need to specify at least '
                     'one file to output.')

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs])
    assert_outputs_exists(parser, args, arglist)

    nbr_processes = args.nbr_processes
    parallel = True
    if nbr_processes <= 0:
        nbr_processes = None
    elif nbr_processes == 1:
        parallel = False

    # Load data
    img = nib.load(args.input)
    data = img.get_data()
    affine = img.get_affine()

    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    if bvals.min() != 0:
        if bvals.min() > 20:
            raise ValueError(
                'The minimal bvalue is greater than 20. This is highly '
                'suspicious. Please check your data to ensure everything is '
                'correct.\nValue found: {0}'.format(bvals.min()))
        else:
            logging.warning('Warning: no b=0 image. Setting b0_threshold to '
                            'bvals.min() = %s', bvals.min())
        gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())
    else:
        gtab = gradient_table(bvals, bvecs)

    sphere = get_sphere('symmetric724')

    if args.mask is None:
        mask = None
    else:
        mask = nib.load(args.mask).get_data().astype(np.bool)

    if args.use_qball:
        model = QballModel(gtab, sh_order=int(args.sh_order), smooth=0.006)
    else:
        model = CsaOdfModel(gtab, sh_order=int(args.sh_order), smooth=0.006)

    odfpeaks = peaks_from_model(model=model,
                                data=data,
                                sphere=sphere,
                                relative_peak_threshold=.5,
                                min_separation_angle=25,
                                mask=mask,
                                return_odf=False,
                                normalize_peaks=True,
                                return_sh=True,
                                sh_order=int(args.sh_order),
                                sh_basis_type=args.basis,
                                npeaks=5,
                                parallel=parallel,
                                nbr_processes=nbr_processes)

    if args.gfa:
        nib.save(nib.Nifti1Image(odfpeaks.gfa.astype(np.float32), affine),
                 args.gfa)

    if args.peaks:
        nib.save(nib.Nifti1Image(reshape_peaks_for_visualization(odfpeaks),
                                 affine), args.peaks)

    if args.peak_indices:
        nib.save(nib.Nifti1Image(odfpeaks.peak_indices, affine),
                 args.peak_indices)

    if args.sh:
        nib.save(nib.Nifti1Image(odfpeaks.shm_coeff.astype(np.float32),
                                 affine), args.sh)

    if args.nufo:
        peaks_count = (odfpeaks.peak_indices > -1).sum(3)
        nib.save(nib.Nifti1Image(peaks_count.astype(np.int32), affine),
                 args.nufo)

    if args.a_power:
        odf_a_power = anisotropic_power(odfpeaks.shm_coeff)
        nib.save(nib.Nifti1Image(odf_a_power.astype(np.float32), affine),
                 args.a_power)
""" .. figure:: csd_odfs.png :align: center **CSD ODFs**. In Dipy we also provide tools for finding the peak directions (maxima) of the ODFs. For this purpose we strongly recommend using ``peaks_from_model``. """ from dipy.reconst.peaks import peaks_from_model csd_peaks = peaks_from_model(model=csd_model, data=data_small, sphere=sphere, relative_peak_threshold=.5, min_separation_angle=25, parallel=False) fvtk.clear(ren) fodf_peaks = fvtk.peaks(csd_peaks.peak_dirs, csd_peaks.peak_values, scale=1.3) fvtk.add(ren, fodf_peaks) print('Saving illustration as csd_peaks.png') fvtk.record(ren, out_path='csd_peaks.png', size=(600, 600)) """ .. figure:: csd_peaks.png :align: center
# # white matter mask
# logic1 = np.logical_and(labels_ > 117, labels_ < 148)
# logic2 = np.logical_and(labels_ > 283, labels_ < 314)
# logic = np.logical_or(logic1, logic2)
# logic_ = np.logical_or(labels_ == 150, labels_ == 316)
# wm = np.where(logic, True, np.where(logic_, True, False))

print('fitting CSA model')
st2 = time.time()
csamodel = shm.CsaOdfModel(gtab, 6)
csapeaks = peaks.peaks_from_model(model=csamodel,
                                  data=data,
                                  sphere=peaks.default_sphere,
                                  relative_peak_threshold=.1,
                                  min_separation_angle=25,
                                  mask=wm,
                                  sh_order=8,
                                  npeaks=5,
                                  parallel=True)
et2 = time.time() - st2
print('fitting CSA model finished, running time is {}'.format(et2))

# plot peaks
# interactive = False
# ren = window.Renderer()
# slice_actor = actor.peak_slicer(csd_peaks.peak_dirs,
#                                 csd_peaks.peak_values,
#                                 colors=None)
# slice_actor.RotateX(90)
fodf_spheres = fvtk.sphere_funcs(sf_odf, sphere, scale=1.3, norm=True)

ren = fvtk.ren()
fvtk.add(ren, fodf_spheres)

print('Saving illustration as sf_odfs.png')
fvtk.record(ren, out_path='sf_odfs.png', size=(1000, 1000))

"""
We can extract the peaks from the ODF, and plot these as well.
"""

sf_peaks = dpp.peaks_from_model(sf_model,
                                data_small,
                                sphere,
                                relative_peak_threshold=.5,
                                min_separation_angle=25,
                                return_sh=False)

fvtk.clear(ren)
fodf_peaks = fvtk.peaks(sf_peaks.peak_dirs, sf_peaks.peak_values, scale=1.3)
fvtk.add(ren, fodf_peaks)

print('Saving illustration as sf_peaks.png')
fvtk.record(ren, out_path='sf_peaks.png', size=(1000, 1000))

"""
Finally, we plot both the peaks and the ODFs, overlaid:
"""
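# The overlay figure announced above was cut off. A sketch of it, re-adding
# the ODF spheres (made semi-transparent) on top of the peaks actor already
# in the scene; the output file name is an assumption:
fodf_spheres.GetProperty().SetOpacity(0.4)
fvtk.add(ren, fodf_spheres)
print('Saving illustration as sf_both.png')
fvtk.record(ren, out_path='sf_both.png', size=(1000, 1000))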
if __name__ == '__main__':

    freeze_support()

    img, gtab = read_stanford_hardi()

    data = img.get_data()
    maskdata, mask = median_otsu(data, 3, 1, False,
                                 vol_idx=range(10, 50), dilate=2)

    response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)

    sphere = get_sphere('symmetric724')
    csd_peaks = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 mask=mask,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=True)

    tensor_model = TensorModel(gtab, fit_method='WLS')
    tensor_fit = tensor_model.fit(data, mask)

    FA = fractional_anisotropy(tensor_fit.evals)

    stopping_values = np.zeros(csd_peaks.peak_values.shape)
    stopping_values[:] = FA[..., None]

    ren = fvtk.ren()
    slice_no = data.shape[2] // 2  # integer index of the mid-axial slice

    fvtk.add(ren,
             fvtk.peaks(csd_peaks.peak_dirs[:, :, slice_no:slice_no + 1],
                        stopping_values[:, :, slice_no:slice_no + 1]))
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                   auto_response)

response, ratio = auto_response(gtab, data)
response = list(response)

peaks_sphere = get_sphere("symmetric362")

model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=sh_order)

peaks_csd = peaks_from_model(model=model,
                             data=data,
                             sphere=peaks_sphere,
                             relative_peak_threshold=0.5,
                             min_separation_angle=25,
                             mask=mask_vol,
                             return_sh=True,
                             sh_order=sh_order,
                             normalize_peaks=True,
                             parallel=False)
peaks_csd.affine = affine

fpeaks = dname + "peaks.npz"
save_peaks(fpeaks, peaks_csd)

from dipy.io.trackvis import save_trk
from dipy.tracking import utils
from dipy.tracking.local import ThresholdTissueClassifier, LocalTracking
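# The fragment stops at the imports. A sketch of the tracking stage they set
# up, mirroring the ``track`` function elsewhere in this collection; the FA
# volume ``fa``, the 0.25/0.4 thresholds and the seed density are
# assumptions:
pam = load_peaks(fpeaks)
classifier = ThresholdTissueClassifier(fa, 0.25)
seeds = utils.seeds_from_mask(fa > 0.4, density=1, affine=affine)
streamlines = list(LocalTracking(pam, classifier, seeds, affine=affine,
                                 step_size=.5))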
""" 1. The first thing we need to begin fiber tracking is a way of getting directions from this diffusion data set. In order to do that, we can fit the data to a Constant Solid Angle ODF Model. This model will estimate the orientation distribution function (ODF) at each voxel. The ODF is the distribution of water diffusion as a function of direction. The peaks of an ODF are good estimates for the orientation of tract segments at a point in the image. """ from dipy.reconst.shm import CsaOdfModel from dipy.reconst.peaks import peaks_from_model, default_sphere csa_model = CsaOdfModel(gtab, sh_order=6) csa_peaks = peaks_from_model(csa_model, data, default_sphere, relative_peak_threshold=.8, min_separation_angle=45, mask=white_matter) """ 2. Next we need some way of restricting the fiber tracking to areas with good directionality information. We've already created the white matter mask, but we can go a step further and restrict fiber tracking to those areas where the ODF shows significant restricted diffusion by thresholding on the general fractional anisotropy (GFA). """ from dipy.tracking.local import ThresholdTissueClassifier classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25) """
def test_PeaksAndMetricsDirectionGetter():

    class SillyModel(object):
        def fit(self, data, mask=None):
            return SillyFit(self)

    class SillyFit(object):
        def __init__(self, model):
            self.model = model

        def odf(self, sphere):
            odf = np.zeros(sphere.theta.shape)
            r = np.random.randint(0, len(odf))
            odf[r] = 1
            return odf

    def get_direction(dg, point, direction):
        newdir = direction.copy()
        state = dg.get_direction(point, newdir)
        return (state, np.array(newdir))

    data = np.random.random((3, 4, 5, 2))
    peaks = peaks_from_model(SillyModel(), data, default_sphere,
                             relative_peak_threshold=.5,
                             min_separation_angle=25)
    peaks._initialize()

    up = np.zeros(3)
    up[2] = 1.
    down = -up

    for i in range(3 - 1):
        for j in range(4 - 1):
            for k in range(5 - 1):
                point = np.array([i, j, k], dtype=float)

                # Test that the angle threshold rejects points
                peaks.ang_thr = 0.
                state, nd = get_direction(peaks, point, up)
                npt.assert_equal(state, 1)

                # Here we leverage the fact that we know Hemispheres project
                # all their vertices into the z >= 0 half of the sphere.
                peaks.ang_thr = 90.
                state, nd = get_direction(peaks, point, up)
                npt.assert_equal(state, 0)
                expected_dir = peaks.peak_dirs[i, j, k, 0]
                npt.assert_array_almost_equal(nd, expected_dir)
                state, nd = get_direction(peaks, point, down)
                npt.assert_array_almost_equal(nd, -expected_dir)

                # Check that we can get directions at non-integer points
                point += np.random.random(3)
                state, nd = get_direction(peaks, point, up)
                npt.assert_equal(state, 0)

                # Check that points are rounded to get initial direction
                point -= .5
                initial = peaks.initial_direction(point)
                # initial should be a (1, 3) array
                npt.assert_array_almost_equal(initial, [expected_dir])
    def run(self, input_files, bvalues, bvectors, mask_files, sh_order=6,
            odf_to_sh_order=8, b0_threshold=0.0, bvecs_tol=0.01,
            extract_pam_values=False, out_dir='',
            out_pam='peaks.pam5', out_shm='shm.nii.gz',
            out_peaks_dir='peaks_dirs.nii.gz',
            out_peaks_values='peaks_values.nii.gz',
            out_peaks_indices='peaks_indices.nii.gz', out_gfa='gfa.nii.gz'):
        """ Constant Solid Angle.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        bvalues : string
            Path to the bvalues files. This path may contain wildcards to use
            multiple bvalues files at once.
        bvectors : string
            Path to the bvectors files. This path may contain wildcards to
            use multiple bvectors files at once.
        mask_files : string
            Path to the input masks. This path may contain wildcards to use
            multiple masks at once. (default: No mask used)
        sh_order : int, optional
            Spherical harmonics order (default 6) used in the CSA fit.
        odf_to_sh_order : int, optional
            Spherical harmonics order used for peaks_from_model to compress
            the ODF to spherical harmonics coefficients (default 8)
        b0_threshold : float, optional
            Threshold used to find b=0 directions
        bvecs_tol : float, optional
            Threshold used so that norm(bvec)=1 (default 0.01)
        extract_pam_values : bool, optional
            Whether or not to save pam volumes as single nifti files.
        out_dir : string, optional
            Output directory (default input file directory)
        out_pam : string, optional
            Name of the peaks volume to be saved (default 'peaks.pam5')
        out_shm : string, optional
            Name of the spherical harmonics volume to be saved
            (default 'shm.nii.gz')
        out_peaks_dir : string, optional
            Name of the peaks directions volume to be saved
            (default 'peaks_dirs.nii.gz')
        out_peaks_values : string, optional
            Name of the peaks values volume to be saved
            (default 'peaks_values.nii.gz')
        out_peaks_indices : string, optional
            Name of the peaks indices volume to be saved
            (default 'peaks_indices.nii.gz')
        out_gfa : string, optional
            Name of the generalised FA volume to be saved
            (default 'gfa.nii.gz')

        References
        ----------
        .. [1] Aganj, I., et al. 2009. ODF Reconstruction in Q-Ball Imaging
           with Solid Angle Consideration.
        """
        io_it = self.get_io_iterator()

        for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir,
             opeaks_values, opeaks_indices, ogfa) in io_it:

            logging.info('Loading {0}'.format(dwi))
            vol = nib.load(dwi)
            data = vol.get_data()
            affine = vol.affine

            bvals, bvecs = read_bvals_bvecs(bval, bvec)
            gtab = gradient_table(bvals, bvecs,
                                  b0_threshold=b0_threshold, atol=bvecs_tol)
            mask_vol = nib.load(maskfile).get_data().astype(np.bool)

            peaks_sphere = get_sphere('repulsion724')

            logging.info('Starting CSA computations {0}'.format(dwi))
            csa_model = CsaOdfModel(gtab, sh_order)

            peaks_csa = peaks_from_model(model=csa_model,
                                         data=data,
                                         sphere=peaks_sphere,
                                         relative_peak_threshold=.5,
                                         min_separation_angle=25,
                                         mask=mask_vol,
                                         return_sh=True,
                                         sh_order=odf_to_sh_order,
                                         normalize_peaks=True,
                                         parallel=False)
            peaks_csa.affine = affine

            save_peaks(opam, peaks_csa)

            logging.info('Finished CSA {0}'.format(dwi))

            if extract_pam_values:
                peaks_to_niftis(peaks_csa, oshm, opeaks_dir, opeaks_values,
                                opeaks_indices, ogfa, reshape_dirs=True)

            dname_ = os.path.dirname(opam)
            if dname_ == '':
                logging.info('Pam5 file saved in current directory')
            else:
                logging.info('Pam5 file saved in {0}'.format(dname_))

        return io_it
def deterministic(diffusion_file, bvecs_file, bvals_file, outdir,
                  mask_file=None, order=4, nb_seeds_per_voxel=1, step=0.5,
                  fmt="%.4f"):
    """ Compute a deterministic tractography using an ODF model.

    Parameters
    ----------
    diffusion_file: str (mandatory)
        a file containing the preprocessed diffusion data.
    bvecs_file: str (mandatory)
        a file containing the diffusion gradient directions.
    bvals_file: str (mandatory)
        a file containing the diffusion b-values.
    outdir: str (mandatory)
        the output directory.
    mask_file: str (optional, default None)
        an image used to mask the diffusion data during the tractography. If
        not set, all the image voxels are considered.
    order: int (optional, default 4)
        the order of the ODF model.
    nb_seeds_per_voxel: int (optional, default 1)
        the number of seeds per voxel used during the propagation.
    step: float (optional, default 0.5)
        the integration step in voxel fraction used during the propagation.
    fmt: str (optional, default '%.4f')
        the saved track elements format.

    Returns
    -------
    track_file: str
        a deterministic model of the white matter organization.
    """
    # Read diffusion sequence
    bvals, bvecs = read_bvals_bvecs(bvals_file, bvecs_file)
    gtab = gradient_table(bvals, bvecs)
    diffusion_array = nibabel.load(diffusion_file).get_data()
    if mask_file is not None:
        mask_array = nibabel.load(mask_file).get_data()
    else:
        mask_array = numpy.ones(diffusion_array.shape[:3], dtype=numpy.uint8)

    # Estimate ODF model
    csamodel = shm.CsaOdfModel(gtab, order)
    csapeaks = peaks.peaks_from_model(model=csamodel,
                                      data=diffusion_array,
                                      sphere=peaks.default_sphere,
                                      relative_peak_threshold=.8,
                                      min_separation_angle=45,
                                      mask=mask_array)

    # Compute deterministic tractography in voxel space so affine is equal
    # to identity
    seeds = utils.seeds_from_mask(mask_array, density=nb_seeds_per_voxel)
    streamline_generator = EuDX(csapeaks.peak_values,
                                csapeaks.peak_indices,
                                odf_vertices=peaks.default_sphere.vertices,
                                a_low=.05, step_sz=step, seeds=seeds)
    affine = streamline_generator.affine
    streamlines = list(streamline_generator)

    # Save the tracks
    track_file = os.path.join(outdir, "fibers.txt")
    savetxt(track_file, streamlines, fmt=fmt)

    return track_file
def track(dname, fdwi, fbval, fbvec, fmask=None, seed_density=1, show=False):

    data, affine = load_nifti(fdwi)
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=50)

    if fmask is None:
        from dipy.segment.mask import median_otsu
        # TODO: check parameters to improve the mask
        b0_mask, mask = median_otsu(data)
    else:
        mask, mask_affine = load_nifti(fmask)
        mask = np.squeeze(mask)  # fix mask dimensions

    # compute DTI model
    from dipy.reconst.dti import TensorModel
    tenmodel = TensorModel(gtab)  # , fit_method='OLS') #, min_signal=5000)

    # fit the dti model
    tenfit = tenmodel.fit(data, mask=mask)

    # save fa
    ffa = dname + 'tensor_fa.nii.gz'
    fa_img = nib.Nifti1Image(tenfit.fa.astype(np.float32), affine)
    nib.save(fa_img, ffa)

    sh_order = 8  # TODO: check what that does
    if data.shape[-1] < 15:
        raise ValueError('You need at least 15 unique DWI volumes to '
                         'compute fiber ODFs. You currently have: {0}'
                         ' DWI volumes.'.format(data.shape[-1]))
    elif data.shape[-1] < 30:
        sh_order = 6

    # compute the CSD fiber response function
    from dipy.reconst.csdeconv import auto_response
    response, ratio = auto_response(gtab, data)
    response = list(response)

    peaks_sphere = get_sphere('symmetric362')  # TODO: check what that does

    # NOTE: the CSD response computed above is never passed below; the
    # peaks are extracted from the tensor model.
    peaks_csd = peaks_from_model(model=tenmodel,
                                 data=data,
                                 sphere=peaks_sphere,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 mask=mask,
                                 return_sh=True,
                                 sh_order=sh_order,
                                 normalize_peaks=True,
                                 parallel=False)
    peaks_csd.affine = affine

    fpeaks = dname + 'peaks.npz'
    save_peaks(fpeaks, peaks_csd)

    from dipy.io.trackvis import save_trk
    from dipy.tracking import utils
    from dipy.tracking.local import (ThresholdTissueClassifier,
                                     LocalTracking)

    stopping_thr = 0.25

    pam = load_peaks(fpeaks)

    #ffa = dname + 'tensor_fa_nomask.nii.gz'
    fa, fa_affine = load_nifti(ffa)

    classifier = ThresholdTissueClassifier(fa, stopping_thr)

    # seeds
    seed_mask = fa > 0.4  # TODO: check this parameter
    seeds = utils.seeds_from_mask(seed_mask, density=seed_density,
                                  affine=affine)

    # tractography; with an affine given, tracking runs in world coordinates
    streamlines = LocalTracking(pam, classifier, seeds, affine=affine,
                                step_size=.5)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    ftractogram = dname + 'tractogram.trk'

    # save .trk
    save_trk_old_style(ftractogram, streamlines, affine, fa.shape)

    if show:
        # render
        show_results(data, streamlines, fa, fa_affine)
    def run(self, input_files, bvalues_files, bvectors_files, mask_files,
            b0_threshold=0.0, bvecs_tol=0.01, roi_center=None, roi_radius=10,
            fa_thr=0.7, frf=None, extract_pam_values=False, sh_order=8,
            odf_to_sh_order=8, out_dir='',
            out_pam='peaks.pam5', out_shm='shm.nii.gz',
            out_peaks_dir='peaks_dirs.nii.gz',
            out_peaks_values='peaks_values.nii.gz',
            out_peaks_indices='peaks_indices.nii.gz', out_gfa='gfa.nii.gz'):
        """ Constrained spherical deconvolution.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        bvalues_files : string
            Path to the bvalues files. This path may contain wildcards to use
            multiple bvalues files at once.
        bvectors_files : string
            Path to the bvectors files. This path may contain wildcards to
            use multiple bvectors files at once.
        mask_files : string
            Path to the input masks. This path may contain wildcards to use
            multiple masks at once. (default: No mask used)
        b0_threshold : float, optional
            Threshold used to find b=0 directions
        bvecs_tol : float, optional
            Bvecs should be unit vectors. (default: 0.01)
        roi_center : variable int, optional
            Center of ROI in data. If center is None, it is assumed that it
            is the center of the volume with shape `data.shape[:3]`
            (default None)
        roi_radius : int, optional
            radius of cubic ROI in voxels (default 10)
        fa_thr : float, optional
            FA threshold for calculating the response function (default 0.7)
        frf : variable float, optional
            Fiber response function. Can for example be input as 15 4 4
            (from the command line) or [15, 4, 4] (from a Python script);
            the values are converted to float and multiplied by 10**-4. If
            None, the fiber response function will be computed automatically
            (default: None).
        extract_pam_values : bool, optional
            Whether or not to save pam volumes as single nifti files.
        sh_order : int, optional
            Spherical harmonics order (default 8) used in the CSD fit.
        odf_to_sh_order : int, optional
            Spherical harmonics order used for peaks_from_model to compress
            the ODF to spherical harmonics coefficients (default 8)
        out_dir : string, optional
            Output directory (default input file directory)
        out_pam : string, optional
            Name of the peaks volume to be saved (default 'peaks.pam5')
        out_shm : string, optional
            Name of the spherical harmonics volume to be saved
            (default 'shm.nii.gz')
        out_peaks_dir : string, optional
            Name of the peaks directions volume to be saved
            (default 'peaks_dirs.nii.gz')
        out_peaks_values : string, optional
            Name of the peaks values volume to be saved
            (default 'peaks_values.nii.gz')
        out_peaks_indices : string, optional
            Name of the peaks indices volume to be saved
            (default 'peaks_indices.nii.gz')
        out_gfa : string, optional
            Name of the generalised FA volume to be saved
            (default 'gfa.nii.gz')

        References
        ----------
        .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination
           of the fibre orientation distribution in diffusion MRI:
           Non-negativity constrained super-resolved spherical
           deconvolution.
        """
        io_it = self.get_io_iterator()

        for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir,
             opeaks_values, opeaks_indices, ogfa) in io_it:

            logging.info('Loading {0}'.format(dwi))
            img = nib.load(dwi)
            data = img.get_data()
            affine = img.affine

            bvals, bvecs = read_bvals_bvecs(bval, bvec)
            if b0_threshold < bvals.min():
                warn("b0_threshold (value: {0}) is too low, increase your "
                     "b0_threshold. It should be higher than the first b0 "
                     "value ({1}).".format(b0_threshold, bvals.min()))
            gtab = gradient_table(bvals, bvecs,
                                  b0_threshold=b0_threshold, atol=bvecs_tol)
            mask_vol = nib.load(maskfile).get_data().astype(np.bool)

            n_params = ((sh_order + 1) * (sh_order + 2)) // 2
            if data.shape[-1] < n_params:
                raise ValueError(
                    'You need at least {0} unique DWI volumes to compute '
                    'fiber odfs. You currently have: {1} DWI volumes.'
                    .format(n_params, data.shape[-1]))

            if frf is None:
                logging.info('Computing response function')
                if roi_center is not None:
                    logging.info(
                        'Response ROI center:\n{0}'.format(roi_center))
                    logging.info(
                        'Response ROI radius:\n{0}'.format(roi_radius))
                response, ratio, nvox = auto_response(
                    gtab, data,
                    roi_center=roi_center,
                    roi_radius=roi_radius,
                    fa_thr=fa_thr,
                    return_number_of_voxels=True)
                response = list(response)
            else:
                logging.info('Using response function')
                if isinstance(frf, str):
                    l01 = np.array(literal_eval(frf), dtype=np.float64)
                else:
                    l01 = np.array(frf, dtype=np.float64)
                l01 *= 10 ** -4
                response = np.array([l01[0], l01[1], l01[1]])
                ratio = l01[1] / l01[0]
                response = (response, ratio)

            logging.info(
                'Eigenvalues for the frf of the input data are: {0}'
                .format(response[0]))
            logging.info(
                'Ratio for smallest to largest eigenvalue is {0}'
                .format(ratio))

            peaks_sphere = get_sphere('repulsion724')

            logging.info('CSD computation started.')
            csd_model = ConstrainedSphericalDeconvModel(gtab, response,
                                                        sh_order=sh_order)

            peaks_csd = peaks_from_model(model=csd_model,
                                         data=data,
                                         sphere=peaks_sphere,
                                         relative_peak_threshold=.5,
                                         min_separation_angle=25,
                                         mask=mask_vol,
                                         return_sh=True,
                                         sh_order=sh_order,
                                         normalize_peaks=True,
                                         parallel=False)
            peaks_csd.affine = affine

            save_peaks(opam, peaks_csd)
            logging.info('CSD computation completed.')

            if extract_pam_values:
                peaks_to_niftis(peaks_csd, oshm, opeaks_dir, opeaks_values,
                                opeaks_indices, ogfa, reshape_dirs=True)

            dname_ = os.path.dirname(opam)
            if dname_ == '':
                logging.info('Pam5 file saved in current directory')
            else:
                logging.info('Pam5 file saved in {0}'.format(dname_))

        return io_it
t1 = read_stanford_t1()
t1_data = t1.get_data()

"""
We've loaded an image called ``labels_img`` which is a map of tissue types
such that every integer value in the array ``labels`` represents an
anatomical structure or tissue type [#]_. For this example, the image was
created so that white matter voxels have values of either 1 or 2. We'll use
``peaks_from_model`` to apply the ``CsaOdfModel`` to each white matter voxel
and estimate fiber orientations which we can use for tracking.
"""

white_matter = (labels == 1) | (labels == 2)
csamodel = shm.CsaOdfModel(gtab, 6)
csapeaks = peaks.peaks_from_model(model=csamodel,
                                  data=data,
                                  sphere=peaks.default_sphere,
                                  relative_peak_threshold=.8,
                                  min_separation_angle=45,
                                  mask=white_matter)

"""
Now we can use EuDX to track all of the white matter. To keep things
reasonably fast we use ``density=2`` which will result in 8 seeds per voxel.
We'll set ``a_low`` (the parameter which determines the threshold of FA/QA
under which tracking stops) to be very low because we've already applied a
white matter mask.
"""

seeds = utils.seeds_from_mask(white_matter, density=2)
streamline_generator = EuDX(csapeaks.peak_values,
                            csapeaks.peak_indices,
                            odf_vertices=peaks.default_sphere.vertices,
                            a_low=.05, step_sz=.5, seeds=seeds)
""" We will first run `peaks_from_model` using parallelism with 2 processes. If `nbr_processes` is None (default option) then this function will find the total number of processors from the operating system and use this number as `nbr_processes`. Sometimes it makes sense to use only a few of the processes in order to allow resources for other applications. However, most of the times using the default option will be sufficient. """ csapeaks_parallel = peaks_from_model(model=csamodel, data=maskdata, sphere=sphere, relative_peak_threshold=.5, min_separation_angle=25, mask=mask, return_odf=False, normalize_peaks=True, npeaks=5, parallel=True, nbr_processes=2) time_parallel = time.time() - start_time print("peaks_from_model using 2 processes ran in : " + str(time_parallel) + " seconds") """ peaks_from_model using 2 process ran in : 114.333221912 seconds, using 2 process If we don't use parallelism then we need to set `parallel=False`: """
def compCsdPeaks(basename, output, mask=None):
    home = os.getcwd()

    fbase = basename
    fdwi = fbase + ".nii.gz"
    fbval = fbase + ".bval"
    fbvec = fbase + ".bvec"
    print(fdwi, fbval, fbvec)

    img = nib.load(fdwi)
    data = img.get_data()
    zooms = img.get_header().get_zooms()[:3]
    affine = img.get_affine()

    # reslice image into 1x1x1 iso voxel
    # new_zooms = (1., 1., 1.)
    # data, affine = resample(data, affine, zooms, new_zooms)
    # img = nib.Nifti1Image(data, affine)
    # print(data.shape)
    # print(img.get_header().get_zooms())
    # nib.save(img, 'C5_iso.nii.gz')

    bval, bvec = dio.read_bvals_bvecs(fbval, fbvec)

    # flip the sign of the second (y) bvec component for the GE scanner
    bvec[:, 1] *= -1
    gtab = dgrad.gradient_table(bval, bvec)

    if mask is None:
        print('generate mask')
        maskdata, mask = median_otsu(data, 3, 1, False,
                                     vol_idx=range(10, 50), dilate=2)
    else:
        mask = nib.load(mask).get_data()
        maskdata = applymask(data, mask)

    # tenmodel = dti.TensorModel(gtab)
    # tenfit = tenmodel.fit(data)
    # print('Computing anisotropy measures (FA, MD, RGB)')
    # FA = fractional_anisotropy(tenfit.evals)
    # FA[np.isnan(FA)] = 0
    # fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine())
    # nib.save(fa_img, 'FA.nii.gz')
    # return

    # estimate response function, ratio should be ~0.2
    response, ratio = auto_response(gtab, maskdata, roi_radius=10, fa_thr=0.7)
    print(response, ratio)

    # reconstruct csd model
    print("estimate csd_model")
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)

    #a_data = maskdata[40:80, 40:80, 60:61]
    #c_data = maskdata[40:80, 59:60, 50:80]
    #s_data = maskdata[59:60, 40:70, 30:80]
    #data_small = a_data
    # evals = response[0]
    # evecs = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).T
    #sphere = get_sphere('symmetric362')
    #csd_fit = csd_model.fit(data_small)
    #csd_odf = csd_fit.odf(sphere)
    #fodf_spheres = fvtk.sphere_funcs(csd_odf, sphere, scale=1, norm=False)
    ##fodf_spheres.GetProperty().SetOpacity(0.4)
    #fvtk.add(ren, fodf_spheres)
    ##fvtk.add(ren, fodf_peaks)
    #fvtk.show(ren)
    #sys.exit()

    # fit csd peaks
    print("fit csd peaks")
    print("peaks_from_model using core# = " + str(multiprocessing.cpu_count()))
    sphere = get_sphere('symmetric724')
    csd_peaks = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 mask=mask,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=True,
                                 nbr_processes=10)

    #fodf_peaks = fvtk.peaks(csd_peaks.peak_dirs, csd_peaks.peak_values, scale=1)
    # fd, fname = mkstemp()
    # pickle.save_pickle(fname, csd_peaks)
    # os.close(fd)
    #pickle.dump(csd_peaks, open("csd.p", "wb"))

    with open(output, 'wb') as fout:
        pickle.dump(csd_peaks, fout, -1)
    print("done writing to file %s" % output)
    return csd_peaks
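# Hypothetical invocation of the helper above; the base name and output path
# are placeholders:
csd_peaks = compCsdPeaks("subject01_dwi", "subject01_csd_peaks.pkl",
                         mask="subject01_mask.nii.gz")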
from dipy.data import get_sphere
sphere = get_sphere()

from dipy.reconst import sfm
sf_model = sfm.SparseFascicleModel(gtab, sphere=sphere,
                                   l1_ratio=0.5, alpha=0.001,
                                   response=response[0])

"""
We fit this model to the data in each voxel in the white-matter mask, so that
we can use these directions in tracking:
"""

from dipy.reconst.peaks import peaks_from_model

pnm = peaks_from_model(sf_model, data, sphere,
                       relative_peak_threshold=0.5,
                       min_separation_angle=25,
                       mask=white_matter,
                       parallel=True)

"""
A ThresholdTissueClassifier object is used to segment the data to track only
through areas in which the Generalized Fractional Anisotropy (GFA) is
sufficiently high.
"""

from dipy.tracking.local import ThresholdTissueClassifier
classifier = ThresholdTissueClassifier(pnm.gfa, 0.25)

"""
Tracking will be started from a set of seeds evenly distributed in the white
matter:
"""
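# The seeding code itself is missing from this fragment. A sketch matching
# the description above; the density and the ``affine`` of the data volume
# are assumptions:
from dipy.tracking import utils
seeds = utils.seeds_from_mask(white_matter, density=[2, 2, 2], affine=affine)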
def dodata(f_name, data_path):
    dipy_home = pjoin(os.path.expanduser('~'), 'dipy_data')
    folder = pjoin(dipy_home, data_path)
    fraw = pjoin(folder, f_name + '.nii.gz')
    fbval = pjoin(folder, f_name + '.bval')
    fbvec = pjoin(folder, f_name + '.bvec')
    flabels = pjoin(folder, f_name + '.nii-label.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs)
    img = nib.load(fraw)
    data = img.get_data()
    affine = img.get_affine()

    label_img = nib.load(flabels)
    labels = label_img.get_data()
    lap = through_label_sl.label_position(labels, labelValue=1)
    dataslice = data[40:80, 20:80, lap[2][2] // 2]
    #print(lap[2][2] // 2)
    #get_csd_gfa(f_name, data, gtab, dataslice)

    # keep the background (do not mask it out)
    maskdata, mask = median_otsu(data, 2, 1, False,
                                 vol_idx=range(10, 50), dilate=2)

    """ get fa and tensor evecs and ODF"""
    from dipy.reconst.dti import TensorModel, mean_diffusivity
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    sphere = get_sphere('symmetric724')

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    np.save(pjoin(os.getcwd(), 'zhibiao', f_name + '_FA.npy'), FA)
    fa_img = nib.Nifti1Image(FA.astype(np.float32), affine)
    nib.save(fa_img, pjoin(os.getcwd(), 'zhibiao', f_name + '_FA.nii.gz'))
    print('Saving "DTI_tensor_fa.nii.gz" successful.')

    evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), affine)
    nib.save(evecs_img,
             pjoin(os.getcwd(), 'zhibiao',
                   f_name + '_DTI_tensor_evecs.nii.gz'))
    print('Saving "DTI_tensor_evecs.nii.gz" successful.')

    MD = mean_diffusivity(tenfit.evals)
    nib.save(nib.Nifti1Image(MD.astype(np.float32), img.get_affine()),
             pjoin(os.getcwd(), 'zhibiao', f_name + '_MD.nii.gz'))

    #tensor_odfs = tenmodel.fit(data[20:50, 55:85, 38:39]).odf(sphere)
    #from dipy.reconst.odf import gfa
    #dti_gfa = gfa(tensor_odfs)

    wm_mask = np.logical_or(FA >= 0.4,
                            np.logical_and(FA >= 0.15, MD >= 0.0011))
    response = recursive_response(gtab, data, mask=wm_mask, sh_order=8,
                                  peak_thr=0.01, init_fa=0.08,
                                  init_trace=0.0021, iter=8,
                                  convergence=0.001, parallel=False)

    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    #csd_fit = csd_model.fit(data)
    from dipy.direction import peaks_from_model
    csd_peaks = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=False)
    GFA = csd_peaks.gfa
    nib.save(nib.Nifti1Image(GFA.astype(np.float32), affine),
             pjoin(os.getcwd(), 'zhibiao', f_name + '_GFA.nii.gz'))
    print('Saving "GFA.nii.gz" successful.')

    from dipy.reconst.shore import ShoreModel
    asm = ShoreModel(gtab)
    print('Calculating...SHORE msd')
    asmfit = asm.fit(data, mask)
    msd = asmfit.msd()
    msd[np.isnan(msd)] = 0
    print('Saving "MSD.nii.gz" successful.')
    nib.save(nib.Nifti1Image(msd.astype(np.float32), affine),
             pjoin(os.getcwd(), 'zhibiao', f_name + '_MSD.nii.gz'))
""" `Peaks_from_model` is used to calculate properties of the ODFs (Orientation Distribution Function) and return for example the peaks and their indices, or GFA which is similar to FA but for ODF based models. This function mainly needs a reconstruction model, the data and a sphere as input. The sphere is an object that represents the spherical discrete grid where the ODF values will be evaluated. """ sphere = get_sphere('symmetric724') csapeaks = peaks_from_model(model=csamodel, data=maskdata, sphere=sphere, relative_peak_threshold=.5, min_separation_angle=25, mask=mask, return_odf=False, normalize_peaks=True) GFA = csapeaks.gfa print('GFA.shape (%d, %d, %d)' % GFA.shape) """ GFA.shape ``(81, 106, 76)`` Apart from GFA, csapeaks also has the attributes peak_values, peak_indices and ODF. peak_values shows the maxima values of the ODF and peak_indices gives us their position on the discrete sphere that was used to do the reconstruction of the ODF. In order to obtain the full ODF, return_odf should be True. Before
ODF = gqfit.odf(sphere)

print('ODF.shape (%d, %d, %d)' % ODF.shape)

"""
ODF.shape ``(96, 96, 724)``

Using ``peaks_from_model`` we can find the main peaks of the ODFs and other
properties.
"""

gqpeaks = peaks_from_model(model=gqmodel,
                           data=dataslice,
                           sphere=sphere,
                           relative_peak_threshold=.5,
                           min_separation_angle=25,
                           mask=mask,
                           return_odf=False,
                           normalize_peaks=True)

gqpeak_values = gqpeaks.peak_values

"""
``gqpeak_indices`` show which sphere points have the maximum values.
"""

gqpeak_indices = gqpeaks.peak_indices

"""
It is also possible to calculate GFA.
"""
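# The GFA announced above comes straight off the peaks object:
GFA = gqpeaks.gfa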
    def _run_interface(self, runtime):
        from dipy.reconst.peaks import peaks_from_model
        from dipy.tracking.eudx import EuDX
        from dipy.data import get_sphere
        # import marshal as pickle
        import pickle
        import gzip

        if not (isdefined(self.inputs.in_model)
                or isdefined(self.inputs.in_peaks)):
            raise RuntimeError('At least one of in_model or in_peaks should '
                               'be supplied')

        img = nb.load(self.inputs.in_file)
        imref = nb.four_to_three(img)[0]
        affine = img.affine

        data = img.get_data().astype(np.float32)
        hdr = imref.header.copy()
        hdr.set_data_dtype(np.float32)
        hdr['data_type'] = 16

        sphere = get_sphere('symmetric724')

        self._save_peaks = False
        if isdefined(self.inputs.in_peaks):
            IFLOGGER.info('Peaks file found, skipping ODF peaks search...')
            f = gzip.open(self.inputs.in_peaks, 'rb')
            peaks = pickle.load(f)
            f.close()
        else:
            self._save_peaks = True
            IFLOGGER.info('Loading model and computing ODF peaks')
            f = gzip.open(self.inputs.in_model, 'rb')
            odf_model = pickle.load(f)
            f.close()

            peaks = peaks_from_model(
                model=odf_model,
                data=data,
                sphere=sphere,
                relative_peak_threshold=self.inputs.peak_threshold,
                min_separation_angle=self.inputs.min_angle,
                parallel=self.inputs.multiprocess)

            f = gzip.open(self._gen_filename('peaks', ext='.pklz'), 'wb')
            pickle.dump(peaks, f, -1)
            f.close()

        hdr.set_data_shape(peaks.gfa.shape)
        nb.Nifti1Image(peaks.gfa.astype(np.float32), affine,
                       hdr).to_filename(self._gen_filename('gfa'))

        IFLOGGER.info('Performing tractography')

        if isdefined(self.inputs.tracking_mask):
            msk = nb.load(self.inputs.tracking_mask).get_data()
            msk[msk > 0] = 1
            msk[msk < 0] = 0
        else:
            msk = np.ones(imref.shape)

        gfa = peaks.gfa * msk
        seeds = self.inputs.num_seeds

        if isdefined(self.inputs.seed_coord):
            seeds = np.loadtxt(self.inputs.seed_coord)
        elif isdefined(self.inputs.seed_mask):
            seedmsk = nb.load(self.inputs.seed_mask).get_data()
            assert seedmsk.shape == data.shape[:3]
            seedmsk[seedmsk > 0] = 1
            seedmsk[seedmsk < 1] = 0
            seedps = np.array(np.where(seedmsk == 1), dtype=np.float32).T
            vseeds = seedps.shape[0]
            nsperv = (seeds // vseeds) + 1
            IFLOGGER.info('Seed mask is provided (%d voxels inside '
                          'mask), computing seeds (%d seeds/voxel).',
                          vseeds, nsperv)
            if nsperv > 1:
                IFLOGGER.info('Needed %d seeds per selected voxel '
                              '(total %d).', nsperv, vseeds)
                seedps = np.vstack(np.array([seedps] * nsperv))

            voxcoord = seedps + np.random.uniform(-1, 1, size=seedps.shape)
            nseeds = voxcoord.shape[0]
            seeds = affine.dot(np.vstack((voxcoord.T,
                                          np.ones((1, nseeds)))))[:3, :].T

            if self.inputs.save_seeds:
                np.savetxt(self._gen_filename('seeds', ext='.txt'), seeds)

        if isdefined(self.inputs.tracking_mask):
            tmask = msk
            a_low = 0.1
        else:
            tmask = gfa
            a_low = self.inputs.gfa_thresh

        eu = EuDX(tmask,
                  peaks.peak_indices[..., 0],
                  seeds=seeds,
                  affine=affine,
                  odf_vertices=sphere.vertices,
                  a_low=a_low)

        ss_mm = [np.array(s) for s in eu]

        trkfilev = nb.trackvis.TrackvisFile([(s, None, None) for s in ss_mm],
                                            points_space='rasmm',
                                            affine=np.eye(4))
        trkfilev.to_file(self._gen_filename('tracked', ext='.trk'))
        return runtime
parallel. If ``nbr_processes`` is None it will figure out automatically the
number of CPUs available in your system. Alternatively, you can set
``nbr_processes`` manually. Here, we show an example where we compare the
duration of execution with or without parallelism.
"""

import time
from dipy.reconst.peaks import peaks_from_model

start_time = time.time()
csd_peaks_parallel = peaks_from_model(model=csd_model,
                                      data=data,
                                      sphere=sphere,
                                      relative_peak_threshold=.5,
                                      min_separation_angle=25,
                                      mask=mask,
                                      return_sh=True,
                                      return_odf=False,
                                      normalize_peaks=True,
                                      npeaks=5,
                                      parallel=True,
                                      nbr_processes=None)

time_parallel = time.time() - start_time
print("peaks_from_model using " + str(multiprocessing.cpu_count()) +
      " processes ran in : " + str(time_parallel) + " seconds")

"""
``peaks_from_model`` using 8 processes ran in 114.425682068 seconds.
"""

start_time = time.time()
csd_peaks = peaks_from_model(model=csd_model,
Distribution Function) and return for example the peaks and their indices, or
GFA which is similar to FA but for ODF based models. This function mainly
needs a reconstruction model, the data and a sphere as input. The sphere is
an object that represents the spherical discrete grid where the ODF values
will be evaluated.
"""

sphere = get_sphere('symmetric724')

start_time = time.time()
csapeaks_parallel = peaks_from_model(model=csamodel,
                                     data=data,
                                     sphere=sphere,
                                     relative_peak_threshold=.8,
                                     min_separation_angle=45,
                                     mask=None,
                                     return_odf=False,
                                     normalize_peaks=True,
                                     npeaks=5,
                                     parallel=True,
                                     nbr_processes=2)  # default: multiprocessing.cpu_count()

time_parallel = time.time() - start_time
print("peaks_from_model using 2 processes ran in : " + str(time_parallel) +
      " seconds")

"""
``peaks_from_model`` using 2 processes ran in 114.333221912 seconds.
"""

start_time = time.time()
csapeaks = peaks_from_model(model=csamodel,
    def run(self, input_files, bvalues, bvectors, mask_files,
            b0_threshold=0.0, extract_pam_values=False, out_dir='',
            out_pam='peaks.pam5', out_shm='shm.nii.gz',
            out_peaks_dir='peaks_dirs.nii.gz',
            out_peaks_values='peaks_values.nii.gz',
            out_peaks_indices='peaks_indices.nii.gz', out_gfa='gfa.nii.gz'):
        """ Workflow for peaks computation. Peaks computation is done by
        'globing' ``input_files`` and saves the peaks in a directory
        specified by ``out_dir``.

        Parameters
        ----------
        input_files : string
            Path to the input volumes. This path may contain wildcards to
            process multiple inputs at once.
        bvalues : string
            Path to the bvalues files. This path may contain wildcards to use
            multiple bvalues files at once.
        bvectors : string
            Path to the bvectors files. This path may contain wildcards to
            use multiple bvectors files at once.
        mask_files : string
            Path to the input masks. This path may contain wildcards to use
            multiple masks at once. (default: No mask used)
        b0_threshold : float, optional
            Threshold used to find b=0 directions
        extract_pam_values : bool, optional
            Whether or not to save pam volumes as single nifti files.
        out_dir : string, optional
            Output directory (default input file directory)
        out_pam : string, optional
            Name of the peaks volume to be saved (default 'peaks.pam5')
        out_shm : string, optional
            Name of the spherical harmonics volume to be saved
            (default 'shm.nii.gz')
        out_peaks_dir : string, optional
            Name of the peaks directions volume to be saved
            (default 'peaks_dirs.nii.gz')
        out_peaks_values : string, optional
            Name of the peaks values volume to be saved
            (default 'peaks_values.nii.gz')
        out_peaks_indices : string, optional
            Name of the peaks indices volume to be saved
            (default 'peaks_indices.nii.gz')
        out_gfa : string, optional
            Name of the generalised FA volume to be saved
            (default 'gfa.nii.gz')
        """
        io_it = self.get_io_iterator()

        for (dwi, bval, bvec, maskfile, opam, oshm, opeaks_dir,
             opeaks_values, opeaks_indices, ogfa) in io_it:

            logging.info('Computing fiber odfs for {0}'.format(dwi))
            vol = nib.load(dwi)
            data = vol.get_data()
            affine = vol.get_affine()

            bvals, bvecs = read_bvals_bvecs(bval, bvec)
            gtab = gradient_table(bvals, bvecs, b0_threshold=b0_threshold)
            mask_vol = nib.load(maskfile).get_data().astype(np.bool)

            sh_order = 8
            if data.shape[-1] < 15:
                raise ValueError('You need at least 15 unique DWI volumes '
                                 'to compute fiber odfs. You currently '
                                 'have: {0} DWI volumes.'
                                 .format(data.shape[-1]))
            elif data.shape[-1] < 30:
                sh_order = 6

            response, ratio = auto_response(gtab, data)
            response = list(response)

            logging.info(
                'Eigenvalues for the frf of the input data are: {0}'
                .format(response[0]))
            logging.info(
                'Ratio for smallest to largest eigenvalue is {0}'
                .format(ratio))

            peaks_sphere = get_sphere('symmetric362')

            csa_model = CsaOdfModel(gtab, sh_order)

            peaks_csa = peaks_from_model(model=csa_model,
                                         data=data,
                                         sphere=peaks_sphere,
                                         relative_peak_threshold=.5,
                                         min_separation_angle=25,
                                         mask=mask_vol,
                                         return_sh=True,
                                         sh_order=sh_order,
                                         normalize_peaks=True,
                                         parallel=False)
            peaks_csa.affine = affine

            save_peaks(opam, peaks_csa)

            if extract_pam_values:
                peaks_to_niftis(peaks_csa, oshm, opeaks_dir, opeaks_values,
                                opeaks_indices, ogfa, reshape_dirs=True)

            logging.info('Peaks saved in {0}'.format(os.path.dirname(opam)))

        return io_it