def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the
    correct eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.
    """
    ### Defining Test Voxel (avoid nibabel dependency) ###
    # Recall: D = [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz, log(S_0)] and D ~ 10^-4 mm^2/s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    X = dti.design_matrix(bvec, bval)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape
    gtab = grad.gradient_table(bval, bvec)

    ### Testing WLS Fit on Single Voxel ###
    # Estimate tensor from test signals
    model = TensorModel(gtab, min_signal=1e-8, fit_method='WLS')
    tensor_est = model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                              err_msg="Calculation of tensor from Y does not "
                                      "compare to analytical solution")
    assert_almost_equal(tensor_est.md[0], md)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, min_signal=1e-8, fit_method='LS')
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
def compute_tensor_model(dir_src, dir_out, verbose=False):
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi = pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    ten_model = TensorModel(gtab)
    ten_fit = ten_model.fit(data, mask)

    FA = ten_fit.fa
    MD = ten_fit.md
    EV = ten_fit.evecs.astype(np.float32)

    fa_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_FA.nii.gz'
    save_nifti(pjoin(dir_out, fa_name), FA, affine)
    md_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_MD.nii.gz'
    save_nifti(pjoin(dir_out, md_name), MD, affine)
    ev_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_EV.nii.gz'
    save_nifti(pjoin(dir_out, ev_name), EV, affine)
def test_eudx_further():
    """ Cause we love testin.. ;-) """
    fimg, fbvals, fbvecs = get_data('small_101D')
    img = ni.load(fimg)
    affine = img.get_affine()
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    x, y, z = data.shape[:3]
    seeds = np.zeros((10**4, 3))
    for i in range(10**4):
        rx = (x - 1) * np.random.rand()
        ry = (y - 1) * np.random.rand()
        rz = (z - 1) * np.random.rand()
        seeds[i] = np.ascontiguousarray(np.array([rx, ry, rz]),
                                        dtype=np.float64)
    ind = quantize_evecs(ten.evecs)
    eu = EuDX(a=ten.fa, ind=ind, seeds=seeds, a_low=.2)
    T = [e for e in eu]

    # check that there are no negative elements
    for t in T:
        assert_equal(np.sum(t.ravel() < 0), 0)
def test_eudx_bad_seed():
    """Test passing a bad seed to eudx"""
    fimg, fbvals, fbvecs = get_data('small_101D')
    img = ni.load(fimg)
    affine = img.affine
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    ind = quantize_evecs(ten.evecs)

    sphere = get_sphere('symmetric724')
    seed = [1000000., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    assert_raises(ValueError, list, eu)

    print(data.shape)
    seed = [1., 5., 8.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    track = list(eu)

    seed = [-1., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    assert_raises(ValueError, list, eu)
def test_eudx_bad_seed():
    """Test passing a bad seed to eudx"""
    fimg, fbvals, fbvecs = get_data('small_101D')
    img = ni.load(fimg)
    affine = img.get_affine()
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    ind = quantize_evecs(ten.evecs)

    seed = [1000000., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed], a_low=.2)
    try:
        track = list(eu)
    except ValueError as ve:
        if ve.args[0] == 'Seed outside boundaries':
            print(ve)

    print(data.shape)
    seed = [1., 5., 8.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed], a_low=.2)
    track = list(eu)

    seed = [-1., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed], a_low=.2)
    try:
        track = list(eu)
    except ValueError as ve:
        if ve.args[0] == 'Seed outside boundaries':
            print(ve)
def test_response_from_mask():
    fdata, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    data = nib.load(fdata).get_data()
    gtab = gradient_table(bvals, bvecs)
    ten = TensorModel(gtab)
    tenfit = ten.fit(data)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    radius = 3

    for fa_thr in np.arange(0, 1, 0.1):
        response_auto, ratio_auto, nvoxels = auto_response(
            gtab, data, roi_center=None, roi_radius=radius,
            fa_thr=fa_thr, return_number_of_voxels=True)

        # Integer division so the center can be used as an array index
        ci, cj, ck = np.array(data.shape[:3]) // 2
        mask = np.zeros(data.shape[:3])
        mask[ci - radius: ci + radius,
             cj - radius: cj + radius,
             ck - radius: ck + radius] = 1
        mask[FA <= fa_thr] = 0
        response_mask, ratio_mask = response_from_mask(gtab, data, mask)

        assert_equal(int(np.sum(mask)), nvoxels)
        assert_array_almost_equal(response_mask[0], response_auto[0])
        assert_almost_equal(response_mask[1], response_auto[1])
        assert_almost_equal(ratio_mask, ratio_auto)
def test_phantom():
    N = 50
    vol = orbital_phantom(gtab,
                          func=f,
                          t=np.linspace(0, 2 * np.pi, N),
                          datashape=(10, 10, 10, len(bvals)),
                          origin=(5, 5, 5),
                          scale=(3, 3, 3),
                          angles=np.linspace(0, 2 * np.pi, 16),
                          radii=np.linspace(0.2, 2, 6),
                          S0=100)

    m = TensorModel(gtab)
    t = m.fit(vol)
    FA = t.fa
    # print vol
    FA[np.isnan(FA)] = 0

    # 686 -> expected FA given diffusivities of [1500, 400, 400]
    l1, l2, l3 = 1500e-6, 400e-6, 400e-6
    expected_fa = (np.sqrt(0.5) *
                   np.sqrt((l1 - l2)**2 + (l2 - l3)**2 + (l3 - l1)**2) /
                   np.sqrt(l1**2 + l2**2 + l3**2))

    assert_array_almost_equal(FA.max(), expected_fa, decimal=2)
def test_masked_array_with_tensor():
    data = np.ones((2, 4, 56))
    mask = np.array([[True, False, False, True],
                     [True, False, True, False]])

    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)

    tensor_model = TensorModel(gtab)
    tensor = tensor_model.fit(data, mask=mask)
    assert_equal(tensor.shape, (2, 4))
    assert_equal(tensor.fa.shape, (2, 4))
    assert_equal(tensor.evals.shape, (2, 4, 3))
    assert_equal(tensor.evecs.shape, (2, 4, 3, 3))

    tensor = tensor[0]
    assert_equal(tensor.shape, (4,))
    assert_equal(tensor.fa.shape, (4,))
    assert_equal(tensor.evals.shape, (4, 3))
    assert_equal(tensor.evecs.shape, (4, 3, 3))

    tensor = tensor[0]
    assert_equal(tensor.shape, tuple())
    assert_equal(tensor.fa.shape, tuple())
    assert_equal(tensor.evals.shape, (3,))
    assert_equal(tensor.evecs.shape, (3, 3))
    assert_equal(type(tensor.model_params), np.ndarray)
def reconstruction(dwi, bval_file, bvec_file, mask=None, type='dti', b0=0.,
                   order=4):
    """
    Uses Dipy to reconstruct an fODF for each voxel.

    Parameters
    ----------
    dwi : numpy array (mandatory)
        Holds the diffusion weighted image in a 4D array (see nibabel).
    bval_file : string (mandatory)
        Path to the b-value file (FSL format).
    bvec_file : string (mandatory)
        Path to the b-vectors file (FSL format).
    mask : numpy array
        Holds the mask in a 3D array (see nibabel).
    type : string in {'dti', 'csd', 'csa'} (default = 'dti')
        The type of the ODF reconstruction.
    b0 : float (default = 0)
        Threshold to use for defining b0 images.
    order : int (default = 4)
        Order to use for constrained spherical deconvolution (csd) or
        constant solid angle (csa).

    Returns
    -------
    model_fit : Dipy object (depends on the type)
        Represents the fitted model for each voxel.
    """
    # b-values and b-vectors
    bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file)
    gtab = gradient_table(bvals, bvecs, b0_threshold=b0)

    # reconstruction
    if type == 'dti':
        model = TensorModel(gtab, fit_method='WLS')
    elif type == 'csd':
        response, ratio = auto_response(gtab, dwi, roi_radius=10, fa_thr=0.7)
        model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=order)
    elif type == 'csa':
        model = CsaOdfModel(gtab, order)

    if mask is not None:
        model_fit = model.fit(dwi, mask=mask)
    else:
        model_fit = model.fit(dwi)

    return model_fit
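A minimal usage sketch for the helper above. The file names (`dwi.nii.gz`, `brain_mask.nii.gz`, `bvals`, `bvecs`) are hypothetical placeholders, and the choice of a CSD reconstruction of order 6 is illustrative only.

# Sketch only: substitute your own files; names below are hypothetical.
import nibabel as nib

dwi_img = nib.load('dwi.nii.gz')
dwi = dwi_img.get_data()
mask = nib.load('brain_mask.nii.gz').get_data().astype(bool)

# Fit a CSD model of order 6; 'dti' or 'csa' work the same way.
fit = reconstruction(dwi, 'bvals', 'bvecs', mask=mask, type='csd', order=6)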
def DIPY_nii2streamlines(imgfile, maskfile, bvals, bvecs, output_prefix):
    import numpy as np
    import nibabel as nib
    import os
    from dipy.reconst.dti import TensorModel

    print("nii2streamlines")

    img = nib.load(imgfile)
    bvals = np.genfromtxt(bvals)
    bvecs = np.genfromtxt(bvecs)
    if bvecs.shape[1] != 3:
        bvecs = bvecs.T
    print(bvecs.shape)

    from nipype.utils.filemanip import split_filename
    _, prefix, _ = split_filename(imgfile)

    from dipy.data import gradient_table
    gtab = gradient_table(bvals, bvecs)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]
    new_zooms = (2., 2., 2.)
    data2, affine2 = data, affine
    mask = nib.load(maskfile).get_data().astype(np.bool)

    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data2, mask)

    from dipy.reconst.dti import fractional_anisotropy
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    nib.save(fa_img, experiment_dir + '/' + ('%s_tensor_fa.nii.gz' % prefix))

    evecs = tenfit.evecs
    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    nib.save(evec_img, experiment_dir + '/' + ('%s_tensor_evec.nii.gz' % prefix))

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    from dipy.reconst.dti import quantize_evecs

    peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)

    from dipy.tracking.eudx import EuDX
    eu = EuDX(FA, peak_indices, odf_vertices=sphere.vertices, a_low=0.2,
              seeds=10**6, ang_thr=35)
    tensor_streamlines = [streamline for streamline in eu]

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = new_zooms
    hdr['voxel_order'] = 'LPS'
    hdr['dim'] = data2.shape[:3]

    import dipy.tracking.metrics as dmetrics
    tensor_streamlines = ((sl, None, None) for sl in tensor_streamlines
                          if dmetrics.length(sl) > 15)

    ten_sl_fname = experiment_dir + '/' + ('%s_streamline.trk' % prefix)
    nib.trackvis.write(ten_sl_fname, tensor_streamlines, hdr,
                       points_space='voxel')
    return ten_sl_fname
def test_eudx_further():
    """ Cause we love testin.. ;-) """
    fimg, fbvals, fbvecs = get_data('small_101D')
    img = ni.load(fimg)
    affine = img.affine
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    x, y, z = data.shape[:3]
    seeds = np.zeros((10**4, 3))
    for i in range(10**4):
        rx = (x - 1) * np.random.rand()
        ry = (y - 1) * np.random.rand()
        rz = (z - 1) * np.random.rand()
        seeds[i] = np.ascontiguousarray(np.array([rx, ry, rz]),
                                        dtype=np.float64)

    sphere = get_sphere('symmetric724')
    ind = quantize_evecs(ten.evecs)
    eu = EuDX(a=ten.fa, ind=ind, seeds=seeds,
              odf_vertices=sphere.vertices, a_low=.2)
    T = [e for e in eu]

    # check that there are no negative elements
    for t in T:
        assert_equal(np.sum(t.ravel() < 0), 0)

    # Test eudx with affine
    def random_affine(seeds):
        affine = np.eye(4)
        affine[:3, :] = np.random.random((3, 4))
        seeds = np.dot(seeds, affine[:3, :3].T)
        seeds += affine[:3, 3]
        return affine, seeds

    # Make two random affines and move seeds
    affine1, seeds1 = random_affine(seeds)
    affine2, seeds2 = random_affine(seeds)

    # Make tracks using different affines
    eu1 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices,
               seeds=seeds1, a_low=.2, affine=affine1)
    eu2 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices,
               seeds=seeds2, a_low=.2, affine=affine2)

    # Move from eu2 affine2 to affine1
    eu2_to_eu1 = utils.move_streamlines(eu2, output_space=affine1,
                                        input_space=affine2)
    # Check that the tracks are the same
    for sl1, sl2 in zip(eu1, eu2_to_eu1):
        assert_array_almost_equal(sl1, sl2)
def test_single_tensor():
    evals = np.array([1.4, .35, .35]) * 10**(-3)
    evecs = np.eye(3)
    S = SingleTensor(gtab, 100, evals, evecs, snr=None)
    assert_array_almost_equal(S[gtab.b0s_mask], 100)
    assert_(np.mean(S[~gtab.b0s_mask]) < 100)

    from dipy.reconst.dti import TensorModel
    m = TensorModel(gtab)
    t = m.fit(S)
    assert_array_almost_equal(t.fa, 0.707, decimal=3)
def FA_RGB(data, gtab):
    """
    Input : data, gtab taken from the load_data.py script.
    Return : FA and RGB as two numpy ndarrays.
    """
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    FA = np.clip(FA, 0, 1)
    RGB = color_fa(FA, tenfit.evecs)
    return FA, RGB
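A short, hedged usage sketch for FA_RGB. The input file names are hypothetical, and saving the color-FA map as uint8 is one common convention, not part of the original snippet.

# Sketch only: hypothetical file names.
import nibabel as nib
from dipy.io import read_bvals_bvecs
from dipy.core.gradients import gradient_table

img = nib.load('dwi.nii.gz')
data = img.get_data()
bvals, bvecs = read_bvals_bvecs('bvals', 'bvecs')
gtab = gradient_table(bvals, bvecs)

FA, RGB = FA_RGB(data, gtab)
# color_fa returns values in [0, 1]; scale to uint8 before saving
nib.save(nib.Nifti1Image((255 * RGB).astype('uint8'), img.affine),
         'fa_rgb.nii.gz')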
def estimate_response(gtab, data, affine, mask, fa_thr=0.7):
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0

    mask[FA <= 0.1] = 0
    mask[FA > 1.] = 0
    indices = np.where(FA > fa_thr)

    lambdas = tenfit.evals[indices][:, :2]
    S0s = data[indices][:, 0]
    S0 = np.mean(S0s)
    l01 = np.mean(lambdas, axis=0)
    evals = np.array([l01[0], l01[1], l01[1]])
    ratio = evals[1] / evals[0]

    print('Response evals', evals, ' ratio: ', ratio, '\tMean S0', S0)

    return (evals, S0), ratio
def prepare(training, category, snr, denoised, odeconv, tv, method):

    data, affine, gtab = get_specific_data(training, category, snr, denoised)

    prefix = create_file_prefix(training, category, snr, denoised,
                                odeconv, tv, method)

    if training:
        mask = nib.load('wm_mask_hardi_01.nii.gz').get_data()
    else:
        # mask = np.ones(data.shape[:-1])
        mask = nib.load('test_hardi_30_den=1_fa_0025_dilate2_mask.nii.gz').get_data()

    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0

    mask[FA <= 0.1] = 0
    mask[FA > 1.] = 0
    indices = np.where(FA > 0.7)
    lambdas = tenfit.evals[indices][:, :2]
    S0s = data[indices][:, 0]
    S0 = np.mean(S0s)

    if S0 == 0:
        print('S0 equals 0, switching to 1')
        S0 = 1

    l01 = np.mean(lambdas, axis=0)
    evals = np.array([l01[0], l01[1], l01[1]])

    print(evals, S0)

    return data, affine, gtab, mask, evals, S0, prefix
def response_from_mask(gtab, data, mask):
    """ Estimate the response function from a given mask.

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        Diffusion data
    mask : ndarray
        Mask to use for the estimation of the response function. For example
        a mask of the white matter voxels with FA values higher than 0.7
        (see [1]_).

    Returns
    -------
    response : tuple, (2,)
        (`evals`, `S0`)
    ratio : float
        The ratio between smallest versus largest eigenvalue of the response.

    Notes
    -----
    See csdeconv.auto_response() or csdeconv.recursive_response() if you
    don't have a computed mask for the response function estimation.

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2004. Direct estimation of the
    fiber orientation density function from diffusion-weighted MRI data using
    spherical deconvolution
    """
    ten = TensorModel(gtab)
    indices = np.where(mask > 0)

    if indices[0].size == 0:
        msg = "No voxels in mask with value > 0 were found."
        warnings.warn(msg, UserWarning)
        return (np.nan, np.nan), np.nan

    tenfit = ten.fit(data[indices])
    lambdas = tenfit.evals[:, :2]
    S0s = data[indices][:, np.nonzero(gtab.b0s_mask)[0]]

    return _get_response(S0s, lambdas)
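A minimal sketch of how the function above can be driven from an FA map, assuming `gtab` and 4D `data` are already loaded; the 0.7 threshold mirrors the suggestion in the docstring.

# Sketch only: assumes `gtab` and `data` already exist in scope.
import numpy as np
from dipy.reconst.dti import TensorModel, fractional_anisotropy

tenfit = TensorModel(gtab).fit(data)
FA = fractional_anisotropy(tenfit.evals)
FA[np.isnan(FA)] = 0

wm_mask = FA > 0.7  # highly anisotropic voxels, presumably white matter
response, ratio = response_from_mask(gtab, data, wm_mask)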
def test_boot_pmf():
    """This tests the local model used for the bootstrapping.
    """
    hsph_updated = HemiSphere.from_sphere(unit_octahedron)
    vertices = hsph_updated.vertices
    bvecs = vertices
    bvals = np.ones(len(vertices)) * 1000
    bvecs = np.insert(bvecs, 0, np.array([0, 0, 0]), axis=0)
    bvals = np.insert(bvals, 0, 0)
    gtab = gradient_table(bvals, bvecs)
    voxel = single_tensor(gtab)
    data = np.tile(voxel, (3, 3, 3, 1))
    point = np.array([1., 1., 1.])
    tensor_model = TensorModel(gtab)

    boot_pmf_gen = BootPmfGen(data, model=tensor_model, sphere=hsph_updated)
    no_boot_pmf = boot_pmf_gen.get_pmf_no_boot(point)

    model_pmf = tensor_model.fit(voxel).odf(hsph_updated)

    npt.assert_equal(len(hsph_updated.vertices), no_boot_pmf.shape[0])
    npt.assert_array_almost_equal(no_boot_pmf, model_pmf)

    # test model spherical harmonic order different than bootstrap order
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        csd_model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=6)
        assert_greater(len([lw for lw in w
                            if issubclass(lw.category, UserWarning)]), 0)

    boot_pmf_gen_sh4 = BootPmfGen(data, model=csd_model, sphere=hsph_updated,
                                  sh_order=4)
    pmf_sh4 = boot_pmf_gen_sh4.get_pmf(point)
    npt.assert_equal(len(hsph_updated.vertices), pmf_sh4.shape[0])
    npt.assert_(np.sum(pmf_sh4.shape) > 0)

    boot_pmf_gen_sh8 = BootPmfGen(data, model=csd_model, sphere=hsph_updated,
                                  sh_order=8)
    pmf_sh8 = boot_pmf_gen_sh8.get_pmf(point)
    npt.assert_equal(len(hsph_updated.vertices), pmf_sh8.shape[0])
    npt.assert_(np.sum(pmf_sh8.shape) > 0)
def single_fiber_response(diffusionData, mask, gtable, fa_thr=0.7):
    from dipy.reconst.dti import TensorModel, fractional_anisotropy

    ten = TensorModel(gtable)
    tenfit = ten.fit(diffusionData, mask=mask)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0

    indices = np.where(FA > fa_thr)
    lambdas = tenfit.evals[indices][:, :2]
    S0s = diffusionData[indices][:, np.nonzero(gtable.b0s_mask)[0]]
    S0 = np.mean(S0s)
    l01 = np.mean(lambdas, axis=0)
    evals = np.array([l01[0], l01[1], l01[1]])
    response = (evals, S0)
    ratio = evals[1] / evals[0]

    return response, ratio
def tensor_model(input_filename_data, input_filename_bvecs,
                 input_filename_bvals, output_filename_fa=None,
                 output_filename_evecs=None):
    # print('Tensor model ...')
    # print('Loading data ...')
    img = nib.load(input_filename_data)
    data = img.get_data()
    affine = img.get_affine()

    bvals, bvecs = read_bvals_bvecs(input_filename_bvals,
                                    input_filename_bvecs)
    gtab = gradient_table(bvals, bvecs)

    mask = data[..., 0] > 50
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0

    if output_filename_fa is None:
        filename_save_fa = input_filename_data.split(".")[0] + "_tensor_fa.nii.gz"
    else:
        filename_save_fa = os.path.abspath(output_filename_fa)

    fa_img = nib.Nifti1Image(FA, img.get_affine())
    nib.save(fa_img, filename_save_fa)
    print("Saving fa to:", filename_save_fa)

    if output_filename_evecs is None:
        filename_save_evecs = input_filename_data.split(".")[0] + "_tensor_evecs.nii.gz"
    else:
        filename_save_evecs = os.path.abspath(output_filename_evecs)

    evecs_img = nib.Nifti1Image(tenfit.evecs, img.get_affine())
    nib.save(evecs_img, filename_save_evecs)
    print("Saving evecs to:", filename_save_evecs)

    return filename_save_fa, filename_save_evecs
def eudx_basic(self, dti_file, mask_file, gtab, stop_val=0.1):
    """
    Tracking with basic tensors and basic eudx - experimental
    We now force seeding at every voxel in the provided mask for
    simplicity. Future functionality will extend these options.

    **Positional Arguments:**

        dti_file:
            - File (registered) to use for tensor/fiber tracking
        mask_file:
            - Brain mask to keep tensors inside the brain
        gtab:
            - dipy formatted bval/bvec Structure

    **Optional Arguments:**

        stop_val:
            - Value to cutoff fiber track
    """
    img = nb.load(dti_file)
    data = img.get_data()

    img = nb.load(mask_file)
    mask = img.get_data()

    # use all points in mask
    seedIdx = np.where(mask > 0)  # seed everywhere not equal to zero
    seedIdx = np.transpose(seedIdx)

    model = TensorModel(gtab)
    ten = model.fit(data, mask)
    sphere = get_sphere('symmetric724')
    ind = quantize_evecs(ten.evecs, sphere.vertices)
    eu = EuDX(a=ten.fa, ind=ind, seeds=seedIdx,
              odf_vertices=sphere.vertices, a_low=stop_val)
    tracks = [e for e in eu]
    return (ten, tracks)
def auto_response(gtab, data, roi_center=None, roi_radius=10, fa_thr=0.7,
                  fa_callable=fa_superior, return_number_of_voxels=False):
    """ Automatic estimation of response function using FA.

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data
    roi_center : tuple, (3,)
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]`.
    roi_radius : int
        radius of cubic ROI
    fa_thr : float
        FA threshold
    fa_callable : callable
        A callable that defines an operation that compares FA with the
        fa_thr. The operator should have two positional arguments
        (e.g., `fa_operator(FA, fa_thr)`) and it should return a bool array.
    return_number_of_voxels : bool
        If True, returns the number of voxels used for estimating the
        response function.

    Returns
    -------
    response : tuple, (2,)
        (`evals`, `S0`)
    ratio : float
        The ratio between smallest versus largest eigenvalue of the response.
    number of voxels : int (optional)
        The number of voxels used for estimating the response function.

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. In order to do this we look for voxels with very
    anisotropic configurations. For example we can use an ROI (20x20x20) at
    the center of the volume and store the signal values for the voxels with
    FA values higher than 0.7. Of course, if we haven't precalculated FA we
    need to fit a Tensor model to the datasets. Which is what we do in this
    function.

    For the response we also need to find the average S0 in the ROI. This is
    possible using `gtab.b0s_mask()` we can find all the S0 volumes (which
    correspond to b-values equal 0) in the dataset.

    The `response` consists always of a prolate tensor created by averaging
    the highest and second highest eigenvalues in the ROI with FA higher than
    threshold. We also include the average S0s.

    We also return the `ratio` which is used for the SDT models. If
    requested, the number of voxels used for estimating the response function
    is also returned, which can be used to judge the fidelity of the response
    function. As a rule of thumb, at least 300 voxels should be used to
    estimate a good response function (see [1]_).

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2004. Direct estimation of the
    fiber orientation density function from diffusion-weighted MRI data using
    spherical deconvolution
    """
    ten = TensorModel(gtab)
    if roi_center is None:
        ci, cj, ck = np.array(data.shape[:3]) // 2
    else:
        ci, cj, ck = roi_center
    w = roi_radius
    roi = data[int(ci - w): int(ci + w),
               int(cj - w): int(cj + w),
               int(ck - w): int(ck + w)]
    tenfit = ten.fit(roi)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    indices = np.where(fa_callable(FA, fa_thr))

    if indices[0].size == 0:
        msg = "No voxel with a FA higher than " + str(fa_thr) + " were found."
        msg += " Try a larger roi or a lower threshold."
        warnings.warn(msg, UserWarning)

    lambdas = tenfit.evals[indices][:, :2]
    S0s = roi[indices][:, np.nonzero(gtab.b0s_mask)[0]]

    response, ratio = _get_response(S0s, lambdas)

    if return_number_of_voxels:
        return response, ratio, indices[0].size

    return response, ratio
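A brief, hedged usage sketch for this variant of `auto_response`, assuming `gtab` and `data` are already in memory; the voxel count is checked against the roughly 300-voxel rule of thumb mentioned in the Notes.

# Sketch only: `gtab` and `data` are assumed to be loaded elsewhere.
response, ratio, nvoxels = auto_response(gtab, data, roi_radius=10,
                                         fa_thr=0.7,
                                         return_number_of_voxels=True)
if nvoxels < 300:
    # Per the rule of thumb above, consider a larger ROI or a lower fa_thr
    print("Only %d voxels used for the response; result may be unreliable."
          % nvoxels)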
# Correct b0 threshold
gtab.b0_threshold = min(bvals)

# Correct b0s_mask
gtab.b0s_mask = gtab.bvals == gtab.b0_threshold

# Get B0 indices
B0s_list = np.where(gtab.bvals == gtab.b0_threshold)[0]

print('Computing brain mask...')
b0_mask, mask = median_otsu(data)

print('Computing tensors...')
tenmodel = TensorModel(gtab)
tensorfit = tenmodel.fit(data, mask=mask)

print('Computing worst-case/best-case SNR using the corpus callosum...')
threshold = (0.5, 1, 0, 0.1, 0, 0.1)
CC_box = np.zeros_like(data[..., 0])

mins, maxs = bounding_box(mask)
mins = np.array(mins)
maxs = np.array(maxs)
diff = (maxs - mins) // 4
bounds_min = mins + diff
bounds_max = maxs - diff

CC_box[bounds_min[0]:bounds_max[0],
       bounds_min[1]:bounds_max[1],
       bounds_min[2]:bounds_max[2]] = 1
def auto_response(gtab, data, roi_center=None, roi_radius=10, fa_thr=0.7):
    """ Automatic estimation of response function using FA

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data
    roi_center : tuple, (3,)
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]`.
    roi_radius : int
        radius of cubic ROI
    fa_thr : float
        FA threshold

    Returns
    -------
    response : tuple, (2,)
        (`evals`, `S0`)
    ratio : float
        the ratio between smallest versus largest eigenvalue of the response

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. In order to do this we look for voxels with very
    anisotropic configurations. For example we can use an ROI (20x20x20) at
    the center of the volume and store the signal values for the voxels with
    FA values higher than 0.7. Of course, if we haven't precalculated FA we
    need to fit a Tensor model to the datasets. Which is what we do in this
    function.

    For the response we also need to find the average S0 in the ROI. This is
    possible using `gtab.b0s_mask()` we can find all the S0 volumes (which
    correspond to b-values equal 0) in the dataset.

    The `response` consists always of a prolate tensor created by averaging
    the highest and second highest eigenvalues in the ROI with FA higher than
    threshold. We also include the average S0s.

    Finally, we also return the `ratio` which is used for the SDT models.
    """
    ten = TensorModel(gtab)
    if roi_center is None:
        # Integer division so the center can be used as an array index
        ci, cj, ck = np.array(data.shape[:3]) // 2
    else:
        ci, cj, ck = roi_center
    w = roi_radius
    roi = data[ci - w: ci + w, cj - w: cj + w, ck - w: ck + w]
    tenfit = ten.fit(roi)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    indices = np.where(FA > fa_thr)
    lambdas = tenfit.evals[indices][:, :2]
    S0s = roi[indices][:, np.nonzero(gtab.b0s_mask)[0]]
    S0 = np.mean(S0s)
    l01 = np.mean(lambdas, axis=0)
    evals = np.array([l01[0], l01[1], l01[1]])
    response = (evals, S0)
    ratio = evals[1] / evals[0]
    return response, ratio
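A minimal sketch, assuming `gtab` and `data` are already loaded, of the typical next step: passing the estimated response to constrained spherical deconvolution. The sh_order of 8 is illustrative.

# Sketch only: `gtab` and `data` are assumed to be loaded elsewhere.
from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel

response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
csd_fit = csd_model.fit(data)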
def run(self, data_file, data_bvals, data_bvecs, mask=None,
        bbox_threshold=(0.6, 1, 0, 0.1, 0, 0.1), out_dir='',
        out_file='product.json', out_mask_cc='cc.nii.gz',
        out_mask_noise='mask_noise.nii.gz'):
    """ Workflow for computing the signal-to-noise ratio in the corpus
    callosum.

    Parameters
    ----------
    data_file : string
        Path to the dwi.nii.gz file. This path may contain wildcards to
        process multiple inputs at once.
    data_bvals : string
        Path of bvals.
    data_bvecs : string
        Path of bvecs.
    mask : string, optional
        Path of mask if desired. (default None)
    bbox_threshold : string, optional
        Threshold for bounding box, values separated with commas for ex.
        [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
    out_dir : string, optional
        Where the resulting file will be saved. (default '')
    out_file : string, optional
        Name of the result file to be saved. (default 'product.json')
    out_mask_cc : string, optional
        Name of the CC mask volume to be saved (default 'cc.nii.gz')
    out_mask_noise : string, optional
        Name of the mask noise volume to be saved
        (default 'mask_noise.nii.gz')
    """
    if not isinstance(bbox_threshold, tuple):
        b = bbox_threshold.replace("[", "")
        b = b.replace("]", "")
        b = b.replace("(", "")
        b = b.replace(")", "")
        b = b.replace(" ", "")
        b = b.split(",")
        for i in range(len(b)):
            b[i] = float(b[i])
        bbox_threshold = tuple(b)

    io_it = self.get_io_iterator()

    for data_path, data_bvals_path, data_bvecs_path, out_path, \
            cc_mask_path, mask_noise_path in io_it:

        img = nib.load('{0}'.format(data_path))
        bvals, bvecs = read_bvals_bvecs('{0}'.format(data_bvals_path),
                                        '{0}'.format(data_bvecs_path))
        gtab = gradient_table(bvals, bvecs)

        data = img.get_data()
        affine = img.affine

        logging.info('Computing brain mask...')
        b0_mask, calc_mask = median_otsu(data)

        if mask is None:
            mask = calc_mask
        else:
            mask = nib.load(mask).get_data().astype(bool)
            mask = np.array(calc_mask == mask).astype(int)

        logging.info('Computing tensors...')
        tenmodel = TensorModel(gtab)
        tensorfit = tenmodel.fit(data, mask=mask)

        logging.info('Computing worst-case/best-case SNR using the CC...')
        threshold = bbox_threshold

        if np.ndim(data) == 4:
            CC_box = np.zeros_like(data[..., 0])
        elif np.ndim(data) == 3:
            CC_box = np.zeros_like(data)
        else:
            raise IOError('DWI data has invalid dimensions')

        mins, maxs = bounding_box(mask)
        mins = np.array(mins)
        maxs = np.array(maxs)
        diff = (maxs - mins) // 4
        bounds_min = mins + diff
        bounds_max = maxs - diff

        CC_box[bounds_min[0]:bounds_max[0],
               bounds_min[1]:bounds_max[1],
               bounds_min[2]:bounds_max[2]] = 1

        mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box, threshold,
                                             return_cfa=True)

        cfa_img = nib.Nifti1Image((cfa * 255).astype(np.uint8), affine)
        mask_cc_part_img = nib.Nifti1Image(mask_cc_part.astype(np.uint8),
                                           affine)
        nib.save(mask_cc_part_img, cc_mask_path)
        logging.info('CC mask saved as {0}'.format(cc_mask_path))

        mean_signal = np.mean(data[mask_cc_part], axis=0)
        mask_noise = binary_dilation(mask, iterations=10)
        mask_noise[..., :mask_noise.shape[-1] // 2] = 1
        mask_noise = ~mask_noise
        mask_noise_img = nib.Nifti1Image(mask_noise.astype(np.uint8), affine)
        nib.save(mask_noise_img, mask_noise_path)
        logging.info('Mask noise saved as {0}'.format(mask_noise_path))

        noise_std = np.std(data[mask_noise, :])
        logging.info('Noise standard deviation sigma= ' + str(noise_std))

        idx = np.sum(gtab.bvecs, axis=-1) == 0
        gtab.bvecs[idx] = np.inf
        axis_X = np.argmin(
            np.sum((gtab.bvecs - np.array([1, 0, 0]))**2, axis=-1))
        axis_Y = np.argmin(
            np.sum((gtab.bvecs - np.array([0, 1, 0]))**2, axis=-1))
        axis_Z = np.argmin(
            np.sum((gtab.bvecs - np.array([0, 0, 1]))**2, axis=-1))

        SNR_output = []
        SNR_directions = []
        for direction in ['b0', axis_X, axis_Y, axis_Z]:
            if direction == 'b0':
                SNR = mean_signal[0] / noise_std
                logging.info("SNR for the b=0 image is :" + str(SNR))
            else:
                # Compute the SNR before logging so the reported value
                # belongs to this direction
                SNR = mean_signal[direction] / noise_std
                logging.info("SNR for direction " + str(direction) + " " +
                             str(gtab.bvecs[direction]) + " is :" + str(SNR))
                SNR_directions.append(direction)
            SNR_output.append(SNR)

        data = []
        data.append({
            'data': str(SNR_output[0]) + ' ' + str(SNR_output[1]) + ' ' +
                    str(SNR_output[2]) + ' ' + str(SNR_output[3]),
            'directions': 'b0' + ' ' + str(SNR_directions[0]) + ' ' +
                          str(SNR_directions[1]) + ' ' +
                          str(SNR_directions[2])
        })

        with open(os.path.join(out_dir, out_file), 'w') as myfile:
            json.dump(data, myfile)
def test_wls_and_ls_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the
    correct eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.
    """
    # Defining Test Voxel (avoid nibabel dependency) ###
    # Recall: D = [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz, log(S_0)] and D ~ 10^-4 mm^2/s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    gtab = grad.gradient_table(bval, bvec)
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    npt.assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Testing WLS Fit on Single Voxel
    # If you do something wonky (passing min_signal<0), you should get an
    # error:
    npt.assert_raises(ValueError, TensorModel, gtab, fit_method='WLS',
                      min_signal=-1)

    # Estimate tensor from test signals
    model = TensorModel(gtab, fit_method='WLS', return_S0_hat=True)
    tensor_est = model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                  err_msg="Calculation of tensor from Y does "
                                          "not compare to analytical solution")
    npt.assert_almost_equal(tensor_est.md[0], md)
    npt.assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, fit_method='LS')
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    npt.assert_array_almost_equal(tensor_est.linearity, linearity(evals))
    npt.assert_array_almost_equal(tensor_est.planarity, planarity(evals))
    npt.assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
def run(self, data_files, bvals_files, bvecs_files, mask_files,
        bbox_threshold=[0.6, 1, 0, 0.1, 0, 0.1], out_dir='',
        out_file='product.json', out_mask_cc='cc.nii.gz',
        out_mask_noise='mask_noise.nii.gz'):
    """Compute the signal-to-noise ratio in the corpus callosum.

    Parameters
    ----------
    data_files : string
        Path to the dwi.nii.gz file. This path may contain wildcards to
        process multiple inputs at once.
    bvals_files : string
        Path of bvals.
    bvecs_files : string
        Path of bvecs.
    mask_files : string
        Path of brain mask
    bbox_threshold : variable float, optional
        Threshold for bounding box, values separated with commas for ex.
        [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
    out_dir : string, optional
        Where the resulting file will be saved. (default '')
    out_file : string, optional
        Name of the result file to be saved. (default 'product.json')
    out_mask_cc : string, optional
        Name of the CC mask volume to be saved (default 'cc.nii.gz')
    out_mask_noise : string, optional
        Name of the mask noise volume to be saved
        (default 'mask_noise.nii.gz')
    """
    io_it = self.get_io_iterator()

    for dwi_path, bvals_path, bvecs_path, mask_path, out_path, \
            cc_mask_path, mask_noise_path in io_it:
        data, affine = load_nifti(dwi_path)
        bvals, bvecs = read_bvals_bvecs(bvals_path, bvecs_path)
        gtab = gradient_table(bvals=bvals, bvecs=bvecs)

        logging.info('Computing brain mask...')
        _, calc_mask = median_otsu(data)

        mask, affine = load_nifti(mask_path)
        mask = np.array(calc_mask == mask.astype(bool)).astype(int)

        logging.info('Computing tensors...')
        tenmodel = TensorModel(gtab)
        tensorfit = tenmodel.fit(data, mask=mask)

        logging.info('Computing worst-case/best-case SNR using the CC...')

        if np.ndim(data) == 4:
            CC_box = np.zeros_like(data[..., 0])
        elif np.ndim(data) == 3:
            CC_box = np.zeros_like(data)
        else:
            raise IOError('DWI data has invalid dimensions')

        mins, maxs = bounding_box(mask)
        mins = np.array(mins)
        maxs = np.array(maxs)
        diff = (maxs - mins) // 4
        bounds_min = mins + diff
        bounds_max = maxs - diff

        CC_box[bounds_min[0]:bounds_max[0],
               bounds_min[1]:bounds_max[1],
               bounds_min[2]:bounds_max[2]] = 1

        if len(bbox_threshold) != 6:
            raise IOError('bbox_threshold should have 6 float values')

        mask_cc_part, cfa = segment_from_cfa(tensorfit, CC_box,
                                             bbox_threshold,
                                             return_cfa=True)

        save_nifti(cc_mask_path, mask_cc_part.astype(np.uint8), affine)
        logging.info('CC mask saved as {0}'.format(cc_mask_path))

        mean_signal = np.mean(data[mask_cc_part], axis=0)
        mask_noise = binary_dilation(mask, iterations=10)
        mask_noise[..., :mask_noise.shape[-1] // 2] = 1
        mask_noise = ~mask_noise

        save_nifti(mask_noise_path, mask_noise.astype(np.uint8), affine)
        logging.info('Mask noise saved as {0}'.format(mask_noise_path))

        noise_std = np.std(data[mask_noise, :])
        logging.info('Noise standard deviation sigma= ' + str(noise_std))

        idx = np.sum(gtab.bvecs, axis=-1) == 0
        gtab.bvecs[idx] = np.inf
        axis_X = np.argmin(
            np.sum((gtab.bvecs - np.array([1, 0, 0]))**2, axis=-1))
        axis_Y = np.argmin(
            np.sum((gtab.bvecs - np.array([0, 1, 0]))**2, axis=-1))
        axis_Z = np.argmin(
            np.sum((gtab.bvecs - np.array([0, 0, 1]))**2, axis=-1))

        SNR_output = []
        SNR_directions = []
        for direction in ['b0', axis_X, axis_Y, axis_Z]:
            if direction == 'b0':
                SNR = mean_signal[0] / noise_std
                logging.info("SNR for the b=0 image is :" + str(SNR))
            else:
                # Compute the SNR before logging so the reported value
                # belongs to this direction
                SNR = mean_signal[direction] / noise_std
                logging.info("SNR for direction " + str(direction) + " " +
                             str(gtab.bvecs[direction]) + " is :" + str(SNR))
                SNR_directions.append(direction)
            SNR_output.append(SNR)

        data = []
        data.append({
            'data': str(SNR_output[0]) + ' ' + str(SNR_output[1]) + ' ' +
                    str(SNR_output[2]) + ' ' + str(SNR_output[3]),
            'directions': 'b0' + ' ' + str(SNR_directions[0]) + ' ' +
                          str(SNR_directions[1]) + ' ' +
                          str(SNR_directions[2])
        })

        with open(os.path.join(out_dir, out_path), 'w') as myfile:
            json.dump(data, myfile)
csd_peaks = peaks_from_model(model=csd_model,
                             data=data,
                             sphere=sphere,
                             mask=mask,
                             relative_peak_threshold=.5,
                             min_separation_angle=25,
                             parallel=True)

"""
For the tracking part, we will use the fiber directions from the
``csd_model`` but stop tracking in areas where fractional anisotropy is low
(< 0.1). To derive the FA, used here as a stopping criterion, we would need
to fit a tensor model first. Here, we fit the tensor using weighted least
squares (WLS).
"""

tensor_model = TensorModel(gtab, fit_method='WLS')
tensor_fit = tensor_model.fit(data, mask)

fa = tensor_fit.fa

"""
In this simple example we can use FA to stop tracking. Here we stop tracking
when FA < 0.1.
"""

tissue_classifier = ThresholdTissueClassifier(fa, 0.1)

"""
Now, we need to set starting points for propagating each track. We call those
seeds. Using ``random_seeds_from_mask`` we can select a specific number of
seeds (``seeds_count``) in each voxel where the mask ``fa > 0.3`` is true.
"""

seeds = random_seeds_from_mask(fa > 0.3, seeds_count=1)
def dwi_dipy_run(dwi_dir, node_size, dir_path, conn_model, parc,
                 atlas_select, network, wm_mask=None):
    import os
    import glob
    import re
    import nipype.interfaces.fsl as fsl
    from dipy.reconst.dti import TensorModel, quantize_evecs
    from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                       recursive_response)
    from dipy.tracking.local import LocalTracking, ThresholdTissueClassifier
    from dipy.tracking import utils
    from dipy.direction import peaks_from_model
    from dipy.tracking.eudx import EuDX
    from dipy.data import get_sphere
    from dipy.core.gradients import gradient_table
    from dipy.io import read_bvals_bvecs

    def atoi(text):
        return int(text) if text.isdigit() else text

    def natural_keys(text):
        return [atoi(c) for c in re.split(r'(\d+)', text)]

    dwi_img = "%s%s" % (dwi_dir, '/dwi.nii.gz')
    nodif_brain_mask_path = "%s%s" % (dwi_dir, '/nodif_brain_mask.nii.gz')
    bvals = "%s%s" % (dwi_dir, '/bval')
    bvecs = "%s%s" % (dwi_dir, '/bvec')

    img = nib.load(dwi_img)
    data = img.get_data()

    # Loads mask and ensures it's a true binary mask
    img = nib.load(nodif_brain_mask_path)
    mask = img.get_data()
    mask = mask > 0

    [bvals, bvecs] = read_bvals_bvecs(bvals, bvecs)
    gtab = gradient_table(bvals, bvecs)

    # Estimates some tensors
    model = TensorModel(gtab)
    ten = model.fit(data, mask)
    sphere = get_sphere('symmetric724')
    ind = quantize_evecs(ten.evecs, sphere.vertices)

    # Tractography
    if conn_model == 'csd':
        trac_mod = 'csd'
    else:
        conn_model = 'tensor'
        trac_mod = ten.fa

    affine = img.affine
    print('Tracking with tensor model...')
    if wm_mask is None:
        # The brain mask was already loaded above; just exclude the borders
        mask[0, :, :] = False
        mask[:, 0, :] = False
        mask[:, :, 0] = False
        seeds = utils.seeds_from_mask(mask, density=2)
    else:
        wm_mask_data = nib.load(wm_mask).get_data()
        wm_mask_data[0, :, :] = False
        wm_mask_data[:, 0, :] = False
        wm_mask_data[:, :, 0] = False
        seeds = utils.seeds_from_mask(wm_mask_data, density=2)
    # seeds = random_seeds_from_mask(ten.fa > 0.3, seeds_count=num_total_samples)

    if conn_model == 'tensor':
        eu = EuDX(a=trac_mod, ind=ind, seeds=seeds,
                  odf_vertices=sphere.vertices, a_low=0.05, step_sz=.5)
        tracks = [e for e in eu]
    elif conn_model == 'csd':
        print('Tracking with CSD model...')
        if wm_mask is None:
            response = recursive_response(gtab, data,
                                          mask=mask.astype('bool'),
                                          sh_order=8, peak_thr=0.01,
                                          init_fa=0.08, init_trace=0.0021,
                                          iter=8, convergence=0.001,
                                          parallel=True)
        else:
            response = recursive_response(gtab, data,
                                          mask=wm_mask_data.astype('bool'),
                                          sh_order=8, peak_thr=0.01,
                                          init_fa=0.08, init_trace=0.0021,
                                          iter=8, convergence=0.001,
                                          parallel=True)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response)
        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     parallel=True)
        tissue_classifier = ThresholdTissueClassifier(ten.fa, 0.1)
        streamline_generator = LocalTracking(csd_peaks, tissue_classifier,
                                             seeds, affine=affine,
                                             step_size=0.5)
        tracks = [e for e in streamline_generator]

    if parc is True:
        node_size = 'parc'

    if network:
        seeds_dir = "%s%s%s%s%s%s%s" % (dir_path, '/seeds_', network, '_',
                                        atlas_select, '_', str(node_size))
    else:
        seeds_dir = "%s%s%s%s%s" % (dir_path, '/seeds_', atlas_select, '_',
                                    str(node_size))

    seed_files = glob.glob("%s%s" % (seeds_dir, '/*diff.nii.gz'))
    seed_files.sort(key=natural_keys)

    # Binarize ROIs
    print('\nBinarizing seed masks...')
    j = 1
    for i in seed_files:
        args = ' -bin '
        out_file = "%s%s" % (i.split('.nii.gz')[0], '_bin.nii.gz')
        maths = fsl.ImageMaths(in_file=i, op_string=args, out_file=out_file)
        os.system(maths.cmdline)
        args = ' -mul ' + str(j)
        maths = fsl.ImageMaths(in_file=out_file, op_string=args,
                               out_file=out_file)
        os.system(maths.cmdline)
        j = j + 1

    # Create atlas from ROIs
    seed_files = glob.glob("%s%s" % (seeds_dir, '/*diff_bin.nii.gz'))
    seed_files.sort(key=natural_keys)

    print('\nMerging seed masks into single labels image...')
    label_sum = "%s%s" % (seeds_dir, '/all_rois.nii.gz')
    args = ' -add ' + i
    maths = fsl.ImageMaths(in_file=seed_files[0], op_string=args,
                           out_file=label_sum)
    os.system(maths.cmdline)

    for i in seed_files:
        args = ' -add ' + i
        maths = fsl.ImageMaths(in_file=label_sum, op_string=args,
                               out_file=label_sum)
        os.system(maths.cmdline)

    labels_im = nib.load(label_sum)
    labels_data = labels_im.get_data().astype('int')
    conn_matrix, grouping = utils.connectivity_matrix(
        tracks, labels_data, affine=affine, return_mapping=True,
        mapping_as_streamlines=True)
    conn_matrix[:3, :] = 0
    conn_matrix[:, :3] = 0

    return conn_matrix
def dmri_recon(sid, data_dir, out_dir, resolution, recon='csd',
               num_threads=2):
    import tempfile
    # tempfile.tempdir = '/om/scratch/Fri/ksitek/'

    import os
    oldval = None
    if 'MKL_NUM_THREADS' in os.environ:
        oldval = os.environ['MKL_NUM_THREADS']
    os.environ['MKL_NUM_THREADS'] = '%d' % num_threads
    ompoldval = None
    if 'OMP_NUM_THREADS' in os.environ:
        ompoldval = os.environ['OMP_NUM_THREADS']
    os.environ['OMP_NUM_THREADS'] = '%d' % num_threads

    import nibabel as nib
    import numpy as np
    from glob import glob

    if resolution == '0.2mm':
        filename = 'Reg_S64550_nii4d.nii'
        fimg = os.path.abspath(glob(os.path.join(data_dir, filename))[0])
    else:
        filename = 'Reg_S64550_nii4d_resamp-%s.nii.gz' % (resolution)
        fimg = os.path.abspath(glob(os.path.join(data_dir, 'resample',
                                                 filename))[0])
    print("dwi file = %s" % fimg)

    fbvec = os.path.abspath(glob(os.path.join(data_dir, 'bvecs',
                                              'camino_120_RAS_flipped-xy.bvecs'))[0])
    print("bvec file = %s" % fbvec)
    fbval = os.path.abspath(glob(os.path.join(data_dir, 'bvecs',
                                              'camino_120_RAS.bvals'))[0])
    print("bval file = %s" % fbval)

    img = nib.load(fimg)
    data = img.get_data()
    affine = img.get_affine()

    prefix = sid

    from dipy.io import read_bvals_bvecs
    from dipy.core.gradients import vector_norm
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

    b0idx = []
    for idx, val in enumerate(bvals):
        if val < 1:
            pass
            # bvecs[idx] = [1, 0, 0]
        else:
            b0idx.append(idx)
            # print("b0idx=%d" % idx)
    # print("input bvecs:")
    # print(bvecs)

    bvecs[b0idx, :] = bvecs[b0idx, :] / vector_norm(bvecs[b0idx])[:, None]
    # print("bvecs after normalization:")
    # print(bvecs)

    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)
    gtab.bvecs.shape == bvecs.shape
    gtab.bvecs
    gtab.bvals.shape == bvals.shape
    gtab.bvals

    from dipy.reconst.csdeconv import auto_response
    response, ratio = auto_response(gtab, data, roi_radius=10,
                                    fa_thr=0.1)  # 0.7

    # from dipy.segment.mask import median_otsu
    # b0_mask, mask = median_otsu(data[:, :, :, b0idx].mean(axis=3).squeeze(),
    #                             4, 4)

    if resolution == '0.2mm':
        mask_name = 'Reg_S64550_nii_b0-slice_mask.nii.gz'
        fmask1 = os.path.join(data_dir, mask_name)
    else:
        mask_name = 'Reg_S64550_nii_b0-slice_mask_resamp-%s.nii.gz' % (resolution)
        fmask1 = os.path.join(data_dir, 'resample', mask_name)
    print("fmask file = %s" % fmask1)
    mask = nib.load(fmask1).get_data()

    useFA = True
    print("creating model")
    if recon == 'csd':
        from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
        model = ConstrainedSphericalDeconvModel(gtab, response)
        useFA = True
    elif recon == 'csa':
        from dipy.reconst.shm import CsaOdfModel, normalize_data
        model = CsaOdfModel(gtab, 4)
        useFA = False
    else:
        raise ValueError('only csd, csa supported currently')
        from dipy.reconst.dsi import (DiffusionSpectrumDeconvModel,
                                      DiffusionSpectrumModel)
        model = DiffusionSpectrumDeconvModel(gtab)

    fit = model.fit(data)

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    # odfs = fit.odf(sphere)

    from dipy.reconst.peaks import peaks_from_model
    print("running peaks_from_model")
    peaks = peaks_from_model(model=model,
                             data=data,
                             sphere=sphere,
                             mask=mask,
                             return_sh=True,
                             return_odf=False,
                             normalize_peaks=True,
                             npeaks=5,
                             relative_peak_threshold=.5,
                             min_separation_angle=25,
                             parallel=num_threads > 1,
                             nbr_processes=num_threads)

    from dipy.reconst.dti import TensorModel
    print("running tensor model")
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    from dipy.reconst.dti import fractional_anisotropy
    print("running FA")
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    tensor_fa_file = os.path.abspath('%s_tensor_fa.nii.gz' % (prefix))
    nib.save(fa_img, tensor_fa_file)

    from dipy.reconst.dti import axial_diffusivity
    print("running AD")
    AD = axial_diffusivity(tenfit.evals)
    AD[np.isnan(AD)] = 0
    ad_img = nib.Nifti1Image(AD, img.get_affine())
    tensor_ad_file = os.path.abspath('%s_tensor_ad.nii.gz' % (prefix))
    nib.save(ad_img, tensor_ad_file)

    from dipy.reconst.dti import radial_diffusivity
    print("running RD")
    RD = radial_diffusivity(tenfit.evals)
    RD[np.isnan(RD)] = 0
    rd_img = nib.Nifti1Image(RD, img.get_affine())
    tensor_rd_file = os.path.abspath('%s_tensor_rd.nii.gz' % (prefix))
    nib.save(rd_img, tensor_rd_file)

    from dipy.reconst.dti import mean_diffusivity
    print("running MD")
    MD = mean_diffusivity(tenfit.evals)
    MD[np.isnan(MD)] = 0
    md_img = nib.Nifti1Image(MD, img.get_affine())
    tensor_md_file = os.path.abspath('%s_tensor_md.nii.gz' % (prefix))
    nib.save(md_img, tensor_md_file)

    evecs = tenfit.evecs
    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    tensor_evec_file = os.path.abspath('%s_tensor_evec.nii.gz' % (prefix))
    nib.save(evec_img, tensor_evec_file)

    shm_coeff = fit.shm_coeff
    shm_coeff_file = os.path.abspath('%s_shm_coeff.nii.gz' % (prefix))
    nib.save(nib.Nifti1Image(shm_coeff, img.get_affine()), shm_coeff_file)

    # from dipy.reconst.dti import quantize_evecs
    # peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)
    # eu = EuDX(FA, peak_indices, odf_vertices=sphere.vertices,
    #           a_low=0.2, seeds=10**6, ang_thr=35)

    fa_img = nib.Nifti1Image(peaks.gfa, img.get_affine())
    model_gfa_file = os.path.abspath('%s_%s_gfa.nii.gz' % (prefix, recon))
    nib.save(fa_img, model_gfa_file)

    from dipy.tracking.eudx import EuDX
    print("reconstructing with EuDX")
    if useFA:
        eu = EuDX(FA, peaks.peak_indices[..., 0],
                  odf_vertices=sphere.vertices,
                  # a_low=0.1,
                  seeds=10**6,
                  ang_thr=45)
    else:
        eu = EuDX(peaks.gfa, peaks.peak_indices[..., 0],
                  odf_vertices=sphere.vertices,
                  # a_low=0.1,
                  seeds=10**6,
                  ang_thr=45)

    sl_fname = os.path.abspath('%s_%s_streamline.trk' % (prefix, recon))

    """
    # import dipy.tracking.metrics as dmetrics
    streamlines = ((sl, None, None) for sl in eu)  # if dmetrics.length(sl) > 15
    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = fa_img.get_header().get_zooms()[:3]
    hdr['voxel_order'] = 'RAS'  # LAS
    hdr['dim'] = FA.shape[:3]
    nib.trackvis.write(sl_fname, streamlines, hdr, points_space='voxel')
    """

    # trying new dipy.io.streamline module, per email to neuroimaging list
    # 2018.04.05
    from nibabel.streamlines import Field
    from nibabel.orientations import aff2axcodes
    affine = img.get_affine()
    vox_size = fa_img.get_header().get_zooms()[:3]
    fov_shape = FA.shape[:3]

    if vox_size is not None and fov_shape is not None:
        hdr = {}
        hdr[Field.VOXEL_TO_RASMM] = affine.copy()
        hdr[Field.VOXEL_SIZES] = vox_size
        hdr[Field.DIMENSIONS] = fov_shape
        hdr[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

    tractogram = nib.streamlines.Tractogram(eu)
    tractogram.affine_to_rasmm = affine
    trk_file = nib.streamlines.TrkFile(tractogram, header=hdr)
    nib.streamlines.save(trk_file, sl_fname)

    if oldval:
        os.environ['MKL_NUM_THREADS'] = oldval
    else:
        del os.environ['MKL_NUM_THREADS']
    if ompoldval:
        os.environ['OMP_NUM_THREADS'] = ompoldval
    else:
        del os.environ['OMP_NUM_THREADS']

    assert tensor_fa_file
    assert tensor_evec_file
    assert model_gfa_file
    assert tensor_ad_file
    assert tensor_rd_file
    assert tensor_md_file
    assert shm_coeff_file
    print('all output files created')

    return (tensor_fa_file, tensor_evec_file, model_gfa_file, sl_fname,
            affine, tensor_ad_file, tensor_rd_file, tensor_md_file,
            shm_coeff_file)
def mask_for_response_msmt(gtab, data, roi_center=None, roi_radii=10,
                           wm_fa_thr=0.7, gm_fa_thr=0.2, csf_fa_thr=0.1,
                           gm_md_thr=0.0007, csf_md_thr=0.002):
    """ Computation of masks for multi-shell multi-tissue (msmt) response
        function using FA and MD.

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data (4D)
    roi_center : array-like, (3,)
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]`.
    roi_radii : int or array-like, (3,)
        radii of cuboid ROI
    wm_fa_thr : float
        FA threshold for WM.
    gm_fa_thr : float
        FA threshold for GM.
    csf_fa_thr : float
        FA threshold for CSF.
    gm_md_thr : float
        MD threshold for GM.
    csf_md_thr : float
        MD threshold for CSF.

    Returns
    -------
    mask_wm : ndarray
        Mask of voxels within the ROI and with FA above the FA threshold
        for WM.
    mask_gm : ndarray
        Mask of voxels within the ROI and with FA below the FA threshold
        for GM and with MD above the MD threshold for GM.
    mask_csf : ndarray
        Mask of voxels within the ROI and with FA below the FA threshold
        for CSF and with MD above the MD threshold for CSF.

    Notes
    -----
    In msmt-CSD there is an important pre-processing step: the estimation of
    every tissue's response function. In order to do this, we look for voxels
    corresponding to WM, GM and CSF. This function aims to accomplish that by
    returning a mask of voxels within a ROI and who respect some threshold
    constraints, for each tissue. More precisely, the WM mask must have a FA
    value above a given threshold. The GM mask and CSF mask must have a FA
    below given thresholds and a MD below other thresholds. To get the FA and
    MD, we need to fit a Tensor model to the datasets.
    """
    if len(data.shape) < 4:
        msg = """Data must be 4D (3D image + directions). To use a 2D image,
        please reshape it into a (N, N, 1, ndirs) array."""
        raise ValueError(msg)

    if isinstance(roi_radii, numbers.Number):
        roi_radii = (roi_radii, roi_radii, roi_radii)

    if roi_center is None:
        roi_center = np.array(data.shape[:3]) // 2

    roi_radii = _roi_in_volume(data.shape, np.asarray(roi_center),
                               np.asarray(roi_radii))

    roi_mask = _mask_from_roi(data.shape[:3], roi_center, roi_radii)

    list_bvals = unique_bvals_tolerance(gtab.bvals)
    if not np.all(list_bvals <= 1200):
        msg_bvals = """Some b-values are higher than 1200.
        The DTI fit might be affected."""
        warnings.warn(msg_bvals, UserWarning)

    ten = TensorModel(gtab)
    tenfit = ten.fit(data, mask=roi_mask)
    fa = fractional_anisotropy(tenfit.evals)
    fa[np.isnan(fa)] = 0
    md = mean_diffusivity(tenfit.evals)
    md[np.isnan(md)] = 0

    mask_wm = np.zeros(fa.shape, dtype=np.int64)
    mask_wm[fa > wm_fa_thr] = 1
    mask_wm *= roi_mask

    md_mask_gm = np.zeros(md.shape, dtype=np.int64)
    md_mask_gm[(md < gm_md_thr)] = 1

    fa_mask_gm = np.zeros(fa.shape, dtype=np.int64)
    fa_mask_gm[(fa < gm_fa_thr) & (fa > 0)] = 1

    mask_gm = md_mask_gm * fa_mask_gm
    mask_gm *= roi_mask

    md_mask_csf = np.zeros(md.shape, dtype=np.int64)
    md_mask_csf[(md < csf_md_thr) & (md > 0)] = 1

    fa_mask_csf = np.zeros(fa.shape, dtype=np.int64)
    fa_mask_csf[(fa < csf_fa_thr) & (fa > 0)] = 1

    mask_csf = md_mask_csf * fa_mask_csf
    mask_csf *= roi_mask

    msg = """No voxel with a {0} than {1} were found.
    Try a larger roi or a {2} threshold for {3}."""

    if np.sum(mask_wm) == 0:
        msg_fa = msg.format('FA higher', str(wm_fa_thr), 'lower FA', 'WM')
        warnings.warn(msg_fa, UserWarning)

    if np.sum(mask_gm) == 0:
        msg_fa = msg.format('FA lower', str(gm_fa_thr), 'higher FA', 'GM')
        msg_md = msg.format('MD lower', str(gm_md_thr), 'higher MD', 'GM')
        warnings.warn(msg_fa, UserWarning)
        warnings.warn(msg_md, UserWarning)

    if np.sum(mask_csf) == 0:
        msg_fa = msg.format('FA lower', str(csf_fa_thr), 'higher FA', 'CSF')
        msg_md = msg.format('MD lower', str(csf_md_thr), 'higher MD', 'CSF')
        warnings.warn(msg_fa, UserWarning)
        warnings.warn(msg_md, UserWarning)

    return mask_wm, mask_gm, mask_csf
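A hedged usage sketch for the msmt masking helper above, assuming a multi-shell `gtab` and 4D `data` are already loaded; printing the voxel counts gives a concrete counterpart to the warnings emitted by the function.

# Sketch only: `gtab` and `data` are assumed to be multi-shell and preloaded.
import numpy as np

mask_wm, mask_gm, mask_csf = mask_for_response_msmt(gtab, data,
                                                    roi_radii=10,
                                                    wm_fa_thr=0.7,
                                                    gm_fa_thr=0.2,
                                                    csf_fa_thr=0.1)
print("WM/GM/CSF voxels in ROI:",
      int(np.sum(mask_wm)), int(np.sum(mask_gm)), int(np.sum(mask_csf)))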
def mask_for_response_ssst(gtab, data, roi_center=None, roi_radii=10,
                           fa_thr=0.7):
    """ Computation of mask for single-shell single-tissue (ssst) response
        function using FA.

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data (4D)
    roi_center : array-like, (3,)
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]`.
    roi_radii : int or array-like, (3,)
        radii of cuboid ROI
    fa_thr : float
        FA threshold

    Returns
    -------
    mask : ndarray
        Mask of voxels within the ROI and with FA above the FA threshold.

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. In order to do this, we look for voxels with
    very anisotropic configurations. This function aims to accomplish that
    by returning a mask of voxels within a ROI, that have a FA value above a
    given threshold. For example we can use a ROI (20x20x20) at the center of
    the volume and store the signal values for the voxels with FA values
    higher than 0.7 (see [1]_).

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2004. Direct estimation of the
    fiber orientation density function from diffusion-weighted MRI data using
    spherical deconvolution
    """
    if len(data.shape) < 4:
        msg = """Data must be 4D (3D image + directions). To use a 2D image,
        please reshape it into a (N, N, 1, ndirs) array."""
        raise ValueError(msg)

    if isinstance(roi_radii, numbers.Number):
        roi_radii = (roi_radii, roi_radii, roi_radii)

    if roi_center is None:
        roi_center = np.array(data.shape[:3]) // 2

    roi_radii = _roi_in_volume(data.shape, np.asarray(roi_center),
                               np.asarray(roi_radii))

    roi_mask = _mask_from_roi(data.shape[:3], roi_center, roi_radii)

    ten = TensorModel(gtab)
    tenfit = ten.fit(data, mask=roi_mask)
    fa = fractional_anisotropy(tenfit.evals)
    fa[np.isnan(fa)] = 0

    mask = np.zeros(fa.shape, dtype=np.int64)
    mask[fa > fa_thr] = 1

    if np.sum(mask) == 0:
        msg = """No voxel with a FA higher than {} were found.
        Try a larger roi or a lower threshold.""".format(str(fa_thr))
        warnings.warn(msg, UserWarning)

    return mask
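A brief, hedged sketch of the ssst workflow, assuming `gtab` and 4D `data` exist; `response_from_mask_ssst` is the companion function available in recent DIPY releases alongside this one.

# Sketch only: `gtab` and 4D `data` are assumed to be preloaded.
from dipy.reconst.csdeconv import response_from_mask_ssst

mask = mask_for_response_ssst(gtab, data, roi_radii=10, fa_thr=0.7)
response, ratio = response_from_mask_ssst(gtab, data, mask)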
**Stopping States** - 'ENDPOINT': stops at a position where metric_map < threshold; the streamline reached the target stopping area. - 'OUTSIDEIMAGE': stops at a position outside of metric_map; the streamline reached an area outside the image where no direction data is available. - 'TRACKPOINT': stops at a position because no direction is available; the streamline is stopping where metric_map >= threshold, but there is no valid direction to follow. - 'INVALIDPOINT': N/A. """ tensor_model = TensorModel(gtab) tenfit = tensor_model.fit(data, mask=labels > 0) FA = fractional_anisotropy(tenfit.evals) threshold_criterion = ThresholdStoppingCriterion(FA, .2) fig = plt.figure() mask_fa = FA.copy() mask_fa[mask_fa < 0.2] = 0 plt.xticks([]) plt.yticks([]) plt.imshow(mask_fa[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower', interpolation='nearest') fig.tight_layout() fig.savefig('threshold_fa.png') """
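To make the role of the stopping criterion concrete, here is a hedged sketch of feeding the FA-based criterion built above into a local tracker. csd_fit, seeds and affine are assumed to already exist (for example from a CSD fit and seeds_from_mask); the maximum angle and step size are illustrative, not prescribed by the original snippet.

from dipy.data import default_sphere
from dipy.direction import ProbabilisticDirectionGetter
from dipy.tracking.local_tracking import LocalTracking
from dipy.tracking.streamline import Streamlines

# Assumed available: csd_fit (spherical harmonic fit), seeds, affine.
dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                               max_angle=30.,
                                               sphere=default_sphere)
# Streamlines stop according to the FA-based threshold_criterion above.
streamlines = Streamlines(LocalTracking(dg, threshold_criterion, seeds,
                                        affine, step_size=0.5))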
del data, affine, zooms print(data2.shape) print(affine2) print(nib.aff2axcodes(affine2)) print('>>> Save resampled data, masks and S0...') # Save as nii (not nii.gz) to reduce saving and loading time fname2 = join(dname, 'dwi_1x1x1.nii') nib.save(nib.Nifti1Image(data2, affine2), fname2) fname2_mask = join(dname, 'dwi_mask_1x1x1.nii.gz') nib.save(nib.Nifti1Image(mask2.astype(np.uint8), affine2), fname2_mask) fname2_S0 = join(dname, 'dwi_S0_1x1x1.nii.gz') S0s = data2[..., b0_index] S0 = np.mean(S0s, axis=-1) nib.save(nib.Nifti1Image(S0, affine2), fname2_S0) print('>>> Calculate FA...') ten = TensorModel(gtab) tenfit = ten.fit(data2, mask2) fname2_fa = join(dname, 'dwi_fa_1x1x1.nii.gz') nib.save(nib.Nifti1Image(tenfit.fa, affine2), fname2_fa) del data2, mask2
lap = through_label_sl.label_position(labels, labelValue=1) dataslice = data[40:80, 20:80, lap[2][2] // 2] #print lap[2][2]//2 #get_csd_gfa(f_name,data,gtab,dataslice) maskdata, mask = median_otsu(data, 2, 1, False, vol_idx=range(10, 50), dilate=2) # do not strip the background """ get fa and tensor evecs and ODF""" from dipy.reconst.dti import TensorModel, mean_diffusivity tenmodel = TensorModel(gtab) tenfit = tenmodel.fit(data, mask) sphere = get_sphere('symmetric724') FA = fractional_anisotropy(tenfit.evals) FA[np.isnan(FA)] = 0 np.save(os.getcwd() + '/zhibiao/' + f_name + '_FA.npy', FA) fa_img = nib.Nifti1Image(FA.astype(np.float32), affine) print(FA.shape) nib.save(fa_img, os.getcwd() + '/zhibiao/' + f_name + '_FA.nii.gz') print('Saving "DTI_tensor_fa.nii.gz" successful.') evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), affine) nib.save( evecs_img, os.getcwd() + '/zhibiao/' + f_name + '_DTI_tensor_evecs.nii.gz')
def _run_interface(self, runtime): from scipy.special import gamma from dipy.reconst.dti import TensorModel import gc img = nb.load(self.inputs.in_file) hdr = img.get_header().copy() affine = img.get_affine() data = img.get_data() gtab = self._get_gradient_table() if isdefined(self.inputs.in_mask): msk = nb.load(self.inputs.in_mask).get_data().astype(np.uint8) else: msk = np.ones(data.shape[:3], dtype=np.uint8) try_b0 = True if isdefined(self.inputs.noise_mask): noise_msk = nb.load(self.inputs.noise_mask).get_data().reshape(-1) noise_msk[noise_msk > 0.5] = 1 noise_msk[noise_msk < 1.0] = 0 noise_msk = noise_msk.astype(np.uint8) try_b0 = False elif np.all(data[msk == 0, 0] == 0): IFLOGGER.info('Input data are masked.') noise_msk = msk.reshape(-1).astype(np.uint8) else: noise_msk = (1 - msk).reshape(-1).astype(np.uint8) nb0 = np.sum(gtab.b0s_mask) dsample = data.reshape(-1, data.shape[-1]) if try_b0 and (nb0 > 1): noise_data = dsample.take(np.where(gtab.b0s_mask), axis=-1)[noise_msk == 0, ...] n = nb0 else: nodiff = np.where(~gtab.b0s_mask) nodiffidx = nodiff[0].tolist() n = 20 if len(nodiffidx) >= 20 else len(nodiffidx) idxs = np.random.choice(nodiffidx, size=n, replace=False) noise_data = dsample.take(idxs, axis=-1)[noise_msk == 1, ...] # Estimate sigma required by RESTORE mean_std = np.median(noise_data.std(-1)) try: bias = (1. - np.sqrt(2. / (n - 1)) * (gamma(n / 2.) / gamma((n - 1) / 2.))) except: bias = .0 pass sigma = mean_std * (1 + bias) if sigma == 0: IFLOGGER.warn( ('Noise std is 0.0, looks like data was masked and noise' ' cannot be estimated correctly. Using default tensor ' 'model instead of RESTORE.')) dti = TensorModel(gtab) else: IFLOGGER.info(('Performing RESTORE with noise std=%.4f.') % sigma) dti = TensorModel(gtab, fit_method='RESTORE', sigma=sigma) try: fit_restore = dti.fit(data, msk) except TypeError: dti = TensorModel(gtab) fit_restore = dti.fit(data, msk) hdr.set_data_dtype(np.float32) hdr['data_type'] = 16 for k in self._outputs().get(): scalar = getattr(fit_restore, k) hdr.set_data_shape(np.shape(scalar)) nb.Nifti1Image(scalar.astype(np.float32), affine, hdr).to_filename(self._gen_filename(k)) return runtime
def main(): parser = buildArgsParser() args = parser.parse_args() if args.par is None: print('Need output name for parallel curve') return None if args.perp is None: print('Need output name for perpendicular curve') return None if args.diff is None: print('Need output name for perpendicular difference curve') return None # load and concatenate all the bval print('Loading bval') bvals = [np.genfromtxt(fname) for fname in args.bval] bval = np.concatenate(bvals, axis=0) print('{:} b-values'.format(bval.shape[0])) del bvals # load and concatenate all the bvec print('Loading bvec') bvecs = [] for fname in args.bvec: tmp = np.genfromtxt(fname) if tmp.shape[1] != 3: tmp = tmp.T bvecs.append(tmp) bvec = np.concatenate(bvecs, axis=0) print('{:} b-vectors'.format(bvec.shape[0])) del bvecs if bval.shape[0] != bvec.shape[0]: print('Mismatch of bval and bvec') return None # load and concatenate all the data print('Loading data') data_img = [nib.load(fname) for fname in args.data] affine = data_img[0].affine data_data = [] for img in data_img: tmp = img.get_fdata() print('data shape = {:}'.format(tmp.shape)) # need 4D data for the concatenate if tmp.ndim == 3: tmp = tmp[..., None] data_data.append(tmp) data = np.concatenate(data_data, axis=3) print('Full data shape = {:}'.format(data.shape)) del data_data if bval.shape[0] != data.shape[3]: print('Mismatch of bval/bvec and data') return None # load and multiply all the mask print('Loading Mask') mask = np.ones(data.shape[:3], dtype=bool) mask_data = [nib.load(fname).get_fdata().astype(bool) for fname in args.mask] for tmp in mask_data: mask = np.logical_and(mask, tmp) print('Final mask has {:} voxels ({:.1f} % of total)'.format(mask.sum(), 100*mask.sum()/np.prod(data.shape[:3]))) del mask_data b0_th = 50 # threshold below which a bval is considered a b0 round_th = 250 # threshold used to round the bvals into shells print('bval below {:} are rounded to 0'.format(b0_th)) print('rounding bval to nearest {:}'.format(round_th)) bval[bval<b0_th] = 0 bval = round_th*np.round(bval/round_th, decimals=0) bval_shell = sorted(list(set(bval))) print('#Vol | bval Shell') for b in bval_shell: print('{:} at b = {:.0f}'.format((bval==b).sum(), b)) # clean data data = np.clip(data, 0, np.inf) data[np.isinf(data)] = 0 data[np.isnan(data)] = 0 # Fit DTI with all data # keep eigenvectors print('Fit DTI on full data') gtab = gradient_table(bval, bvec) tenmodel = TensorModel(gtab, fit_method='WLS', min_signal=1e-16) start_time = time() tenfit = tenmodel.fit(data, mask) end_time = time() print('elapsed time = {:.0f} sec'.format(end_time-start_time)) eigenvectors1 = tenfit.evecs[..., :, 0] eigenvectors2 = tenfit.evecs[..., :, 1] eigenvectors3 = tenfit.evecs[..., :, 2] S_par_list = [] S_perp1_list = [] S_perp2_list = [] # Fit DTI on each shell for bshell in bval_shell: if bshell > 0: print('Fitting b = {:}'.format(bshell)) shell_mask = np.logical_or(bval==0, bval==bshell) shell_gtab = gradient_table(bval[shell_mask], bvec[shell_mask]) shell_tenmodel = TensorModel(shell_gtab, fit_method='WLS', min_signal=1e-16) start_time = time() shell_tenfit = shell_tenmodel.fit(data[..., shell_mask], mask) end_time = time() # project the per-shell tensor onto the eigenvectors of the full-data fit adc_par = np.einsum('...i,...i->...', np.einsum('...i,...ij->...j', eigenvectors1, shell_tenfit.quadratic_form), eigenvectors1) adc_perp1 = np.einsum('...i,...i->...', np.einsum('...i,...ij->...j', eigenvectors2, shell_tenfit.quadratic_form), eigenvectors2) adc_perp2 = np.einsum('...i,...i->...', np.einsum('...i,...ij->...j', eigenvectors3, shell_tenfit.quadratic_form), eigenvectors3)
S_par = np.exp(-bshell*adc_par) S_perp1 = np.exp(-bshell*adc_perp1) S_perp2 = np.exp(-bshell*adc_perp2) S_par_list.append(S_par[...,None]) S_perp1_list.append(S_perp1[...,None]) S_perp2_list.append(S_perp2[...,None]) data_par = np.concatenate(S_par_list, axis=3) data_perp1 = np.concatenate(S_perp1_list, axis=3) data_perp2 = np.concatenate(S_perp2_list, axis=3) data_perp = (data_perp1 + data_perp2) / 2.0 data_diff = np.abs(data_perp1 - data_perp2) nib.Nifti1Image(data_par*mask[...,None], affine).to_filename(args.par) nib.Nifti1Image(data_perp*mask[...,None], affine).to_filename(args.perp) nib.Nifti1Image(data_diff*mask[...,None], affine).to_filename(args.diff)
def main(): parser = _build_args_parser() args = parser.parse_args() img = nib.load(args.input) data = img.get_data() bmax = int(args.bmax) print('\ndata shape ({}, {}, {}, {})'.format(data.shape[0], data.shape[1], data.shape[2], data.shape[3])) print('total voxels {}'.format(np.prod(data.shape[:3]))) # remove negatives print('\nclipping negative ({} voxels, {:.2f} % of total)'.format( (data < 0).sum(), 100 * (data < 0).sum() / float(np.prod(data.shape[:3])))) data = np.clip(data, 0, np.inf) affine = img.affine if args.mask is None: mask = None masksum = np.prod(data.shape[:3]) else: mask = nib.load(args.mask).get_data().astype(bool) masksum = mask.sum() print('\nMask has {} voxels, {:.2f} % of total'.format( masksum, 100 * masksum / float(np.prod(data.shape[:3])))) # Validate bvals and bvecs bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs) if not is_normalized_bvecs(bvecs): print('Your b-vectors do not seem normalized...') bvecs = normalize_bvecs(bvecs) # Get tensors method = 'WLS' min_signal = 1e-16 print('\nUsing fitting method {}'.format(method)) # print('Using minimum signal = {}'.format(min_signal)) b0_thr = bvals.min() + 10 print('\nassuming existence of b0 (thr = {})\n'.format(b0_thr)) # restricted gtab gtab = gradient_table(bvals[bvals < bmax + 22], bvecs[bvals < bmax + 22], b0_threshold=b0_thr) tenmodel = TensorModel(gtab, fit_method=method, min_signal=min_signal) tenfit = tenmodel.fit(data[..., bvals < bmax + 22], mask) MD = tenfit.md FA = tenfit.fa evalmax = np.max(tenfit.evals, axis=3) invevalmax = evalmax**-1 invevalmax[np.isnan(invevalmax)] = 0 invevalmax[np.isinf(invevalmax)] = 0 evalmin = np.min(tenfit.evals, axis=3) weird_contrast = np.exp(-bmax * evalmin) - np.exp(-bmax * evalmax) invMD = MD**-1 invMD[np.isnan(invMD)] = 0 invMD[np.isinf(invMD)] = 0 nib.nifti1.Nifti1Image(MD, img.affine).to_filename('./MD_bmax_{}.nii.gz'.format(bmax)) nib.nifti1.Nifti1Image(invMD, img.affine).to_filename( './invMD_bmax_{}.nii.gz'.format(bmax)) nib.nifti1.Nifti1Image(FA, img.affine).to_filename('./FA_bmax_{}.nii.gz'.format(bmax)) nib.nifti1.Nifti1Image(invevalmax, img.affine).to_filename( './inv_e1_bmax_{}.nii.gz'.format(bmax)) nib.nifti1.Nifti1Image(weird_contrast, img.affine).to_filename( './minmax_contrast_bmax_{}.nii.gz'.format(bmax))
def compute_tensors(self, dti_vol, atlas_file, gtab): # WGR:TODO figure out how to organize tensor options and formats # WGR:TODO figure out how to deal with files on disk vs. in workspace """ Takes registered DTI image and produces tensors **Positional Arguments:** dti_vol: - Registered DTI volume, from workspace. atlas_file: - File containing an atlas (or brain mask). gtab: - Structure containing dipy formatted bval/bvec information """ labeldata = nib.load(atlas_file) label = labeldata.get_data() """ Create a brain mask. Here we just threshold labels. """ mask = (label > 0) gtab.info print(dti_vol.shape) """ For the constrained spherical deconvolution we need to estimate the response function (see :ref:`example_reconst_csd`) and create a model. """ response, ratio = auto_response(gtab, dti_vol, roi_radius=10, fa_thr=0.7) csd_model = ConstrainedSphericalDeconvModel(gtab, response) """ Next, we use ``peaks_from_model`` to fit the data and calculated the fiber directions in all voxels. """ sphere = get_sphere('symmetric724') csd_peaks = peaks_from_model(model=csd_model, data=dti_vol, sphere=sphere, mask=mask, relative_peak_threshold=.5, min_separation_angle=25, parallel=True) """ For the tracking part, we will use ``csd_model`` fiber directions but stop tracking where fractional anisotropy (FA) is low (< 0.1). To derive the FA, used as a stopping criterion, we need to fit a tensor model first. Here, we use weighted least squares (WLS). """ print('tensors...') tensor_model = TensorModel(gtab, fit_method='WLS') tensor_fit = tensor_model.fit(dti_vol, mask) FA = fractional_anisotropy(tensor_fit.evals) """ In order for the stopping values to be used with our tracking algorithm we need to have the same dimensions as the ``csd_peaks.peak_values``. For this reason, we can assign the same FA value to every peak direction in the same voxel in the following way. """ stopping_values = np.zeros(csd_peaks.peak_values.shape) stopping_values[:] = FA[..., None] print(datetime.now() - startTime) pass
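The snippet above stops right after building stopping_values; a hedged continuation, mirroring the EuDX usage that appears elsewhere in this collection, might look like the following (the seed count and a_low value are illustrative, not taken from the original).

from dipy.tracking.eudx import EuDX

# stopping_values, csd_peaks and sphere come from the code above;
# parameters below are illustrative only.
streamline_generator = EuDX(stopping_values, csd_peaks.peak_indices,
                            seeds=10**5, odf_vertices=sphere.vertices,
                            a_low=0.1)
streamlines = [s for s in streamline_generator]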
def test_wls_and_ls_fit(): """ Tests the WLS and LS fitting functions to see if they returns the correct eigenvalues and eigenvectors. Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii as the data. """ # Defining Test Voxel (avoid nibabel dependency) ### # Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s b0 = 1000. bvec, bval = read_bvec_file(get_fnames('55dir_grad.bvec')) B = bval[1] # Scale the eigenvalues and tensor by the B value so the units match D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B evals = np.array([2., 1., 0.]) / B md = evals.mean() tensor = from_lower_triangular(D) # Design Matrix gtab = grad.gradient_table(bval, bvec) X = dti.design_matrix(gtab) # Signals Y = np.exp(np.dot(X, D)) npt.assert_almost_equal(Y[0], b0) Y.shape = (-1, ) + Y.shape # Testing WLS Fit on Single Voxel # If you do something wonky (passing min_signal<0), you should get an # error: npt.assert_raises(ValueError, TensorModel, gtab, fit_method='WLS', min_signal=-1) # Estimate tensor from test signals model = TensorModel(gtab, fit_method='WLS', return_S0_hat=True) tensor_est = model.fit(Y) npt.assert_equal(tensor_est.shape, Y.shape[:-1]) npt.assert_array_almost_equal(tensor_est.evals[0], evals) npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor, err_msg="Calculation of tensor from Y does " "not compare to analytical solution") npt.assert_almost_equal(tensor_est.md[0], md) npt.assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3) # Test that we can fit a single voxel's worth of data (a 1d array) y = Y[0] tensor_est = model.fit(y) npt.assert_equal(tensor_est.shape, tuple()) npt.assert_array_almost_equal(tensor_est.evals, evals) npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor) npt.assert_almost_equal(tensor_est.md, md) npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D) # Test using fit_method='LS' model = TensorModel(gtab, fit_method='LS') tensor_est = model.fit(y) npt.assert_equal(tensor_est.shape, tuple()) npt.assert_array_almost_equal(tensor_est.evals, evals) npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor) npt.assert_almost_equal(tensor_est.md, md) npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D) npt.assert_array_almost_equal(tensor_est.linearity, linearity(evals)) npt.assert_array_almost_equal(tensor_est.planarity, planarity(evals)) npt.assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
def eudx_advanced(self, dti_file, mask_file, gtab, seed_num=100000, stop_val=0.1): """ Tracking with more complex tensors - experimental Initializes the graph with nodes corresponding to the number of ROIs **Positional Arguments:** dti_file: - File (registered) to use for tensor/fiber tracking mask_file: - Brain mask to keep tensors inside the brain gtab: - dipy formatted bval/bvec Structure **Optional Arguments:** seed_num: - Number of seeds to use for fiber tracking stop_val: - Value to cutoff fiber track """ img = nb.load(dti_file) data = img.get_data() img = nb.load(mask_file) mask = img.get_data() mask = mask > 0 # to ensure binary mask """ For the constrained spherical deconvolution we need to estimate the response function (see :ref:`example_reconst_csd`) and create a model. """ response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7) csd_model = ConstrainedSphericalDeconvModel(gtab, response) """ Next, we use ``peaks_from_model`` to fit the data and calculated the fiber directions in all voxels. """ sphere = get_sphere('symmetric724') csd_peaks = peaks_from_model(model=csd_model, data=data, sphere=sphere, mask=mask, relative_peak_threshold=.5, min_separation_angle=25, parallel=True) """ For the tracking part, we will use ``csd_model`` fiber directions but stop tracking where fractional anisotropy (FA) is low (< 0.1). To derive the FA, used as a stopping criterion, we need to fit a tensor model first. Here, we use weighted least squares (WLS). """ print 'tensors...' tensor_model = TensorModel(gtab, fit_method='WLS') tensor_fit = tensor_model.fit(data, mask) FA = fractional_anisotropy(tensor_fit.evals) """ In order for the stopping values to be used with our tracking algorithm we need to have the same dimensions as the ``csd_peaks.peak_values``. For this reason, we can assign the same FA value to every peak direction in the same voxel in the following way. """ stopping_values = np.zeros(csd_peaks.peak_values.shape) stopping_values[:] = FA[..., None] streamline_generator = EuDX(stopping_values, csd_peaks.peak_indices, seeds=seed_num, odf_vertices=sphere.vertices, a_low=stop_val) streamlines = [streamline for streamline in streamline_generator] return streamlines
def run(context): #################################################### # Get the path to input files and other parameter # #################################################### analysis_data = context.fetch_analysis_data() settings = analysis_data['settings'] postprocessing = settings['postprocessing'] hcpl_dwi_file_handle = context.get_files('input', modality='HARDI')[0] hcpl_dwi_file_path = hcpl_dwi_file_handle.download('/root/') hcpl_bvalues_file_handle = context.get_files( 'input', reg_expression='.*prep.bvalues.hcpl.txt')[0] hcpl_bvalues_file_path = hcpl_bvalues_file_handle.download('/root/') hcpl_bvecs_file_handle = context.get_files( 'input', reg_expression='.*prep.gradients.hcpl.txt')[0] hcpl_bvecs_file_path = hcpl_bvecs_file_handle.download('/root/') dwi_file_handle = context.get_files('input', modality='DSI')[0] dwi_file_path = dwi_file_handle.download('/root/') bvalues_file_handle = context.get_files( 'input', reg_expression='.*prep.bvalues.txt')[0] bvalues_file_path = bvalues_file_handle.download('/root/') bvecs_file_handle = context.get_files( 'input', reg_expression='.*prep.gradients.txt')[0] bvecs_file_path = bvecs_file_handle.download('/root/') inject_file_handle = context.get_files( 'input', reg_expression='.*prep.inject.nii.gz')[0] inject_file_path = inject_file_handle.download('/root/') VUMC_ROIs_file_handle = context.get_files( 'input', reg_expression='.*VUMC_ROIs.nii.gz')[0] VUMC_ROIs_file_path = VUMC_ROIs_file_handle.download('/root/') ############################### # _____ _____ _______ __ # # | __ \_ _| __ \ \ / / # # | | | || | | |__) \ \_/ / # # | | | || | | ___/ \ / # # | |__| || |_| | | | # # |_____/_____|_| |_| # # # # dipy.org/documentation # ############################### # IronTract Team # # TrackyMcTrackface # ############################### ################# # Load the data # ################# dwi_img = nib.load(hcpl_dwi_file_path) bvals, bvecs = read_bvals_bvecs(hcpl_bvalues_file_path, hcpl_bvecs_file_path) gtab = gradient_table(bvals, bvecs) ############################################ # Extract the brain mask from the b0 image # ############################################ _, brain_mask = median_otsu(dwi_img.get_data()[:, :, :, 0], median_radius=2, numpass=1) ################################################################## # Fit the tensor model and compute the fractional anisotropy map # ################################################################## context.set_progress(message='Processing voxel-wise DTI metrics.') tenmodel = TensorModel(gtab) tenfit = tenmodel.fit(dwi_img.get_data(), mask=brain_mask) FA = fractional_anisotropy(tenfit.evals) # fa_file_path = "/root/fa.nii.gz" # nib.Nifti1Image(FA,dwi_img.affine).to_filename(fa_file_path) ################################################ # Compute Fiber Orientation Distribution (CSD) # ################################################ context.set_progress(message='Processing voxel-wise FOD estimation.') response, _ = auto_response_ssst(gtab, dwi_img.get_data(), roi_radii=10, fa_thr=0.7) csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6) csd_fit = csd_model.fit(dwi_img.get_data(), mask=brain_mask) # fod_file_path = "/root/fod.nii.gz" # nib.Nifti1Image(csd_fit.shm_coeff,dwi_img.affine).to_filename(fod_file_path) ########################################### # Compute DIPY Probabilistic Tractography # ########################################### context.set_progress(message='Processing tractography.') sphere = get_sphere("repulsion724") seed_mask_img = nib.load(inject_file_path) 
affine = seed_mask_img.affine seeds = utils.seeds_from_mask(seed_mask_img.get_data(), affine, density=5) stopping_criterion = ThresholdStoppingCriterion(FA, 0.2) prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff, max_angle=20., sphere=sphere) streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds, affine, step_size=.2, max_cross=1) streamlines = Streamlines(streamline_generator) # sft = StatefulTractogram(streamlines, seed_mask_img, Space.RASMM) # streamlines_file_path = "/root/streamlines.trk" # save_trk(sft, streamlines_file_path) ########################################################################### # Compute 3D volumes for the IronTract Challenge. For 'EPFL', we only # # keep streamlines with length > 1mm. We compute the visitation count # # image and apply a small gaussian smoothing. The gaussian smoothing # # is especially usefull to increase voxel coverage of deterministic # # algorithms. The log of the smoothed visitation count map is then # # iteratively thresholded producing 200 volumes/operation points. # # For VUMC, additional streamline filtering is done using anatomical # # priors (keeping only streamlines that intersect with at least one ROI). # ########################################################################### if postprocessing in ["EPFL", "ALL"]: context.set_progress(message='Processing density map (EPFL)') volume_folder = "/root/vol_epfl" output_epfl_zip_file_path = "/root/TrackyMcTrackface_EPFL_example.zip" os.mkdir(volume_folder) lengths = length(streamlines) streamlines = streamlines[lengths > 1] density = utils.density_map(streamlines, affine, seed_mask_img.shape) density = scipy.ndimage.gaussian_filter(density.astype("float32"), 0.5) log_density = np.log10(density + 1) max_density = np.max(log_density) for i, t in enumerate(np.arange(0, max_density, max_density / 200)): nbr = str(i) nbr = nbr.zfill(3) mask = log_density >= t vol_filename = os.path.join( volume_folder, "vol" + nbr + "_t" + str(t) + ".nii.gz") nib.Nifti1Image(mask.astype("int32"), affine, seed_mask_img.header).to_filename(vol_filename) shutil.make_archive(output_epfl_zip_file_path[:-4], 'zip', volume_folder) if postprocessing in ["VUMC", "ALL"]: context.set_progress(message='Processing density map (VUMC)') ROIs_img = nib.load(VUMC_ROIs_file_path) volume_folder = "/root/vol_vumc" output_vumc_zip_file_path = "/root/TrackyMcTrackface_VUMC_example.zip" os.mkdir(volume_folder) lengths = length(streamlines) streamlines = streamlines[lengths > 1] rois = ROIs_img.get_fdata().astype(int) _, grouping = utils.connectivity_matrix(streamlines, affine, rois, inclusive=True, return_mapping=True, mapping_as_streamlines=False) streamlines = streamlines[grouping[(0, 1)]] density = utils.density_map(streamlines, affine, seed_mask_img.shape) density = scipy.ndimage.gaussian_filter(density.astype("float32"), 0.5) log_density = np.log10(density + 1) max_density = np.max(log_density) for i, t in enumerate(np.arange(0, max_density, max_density / 200)): nbr = str(i) nbr = nbr.zfill(3) mask = log_density >= t vol_filename = os.path.join( volume_folder, "vol" + nbr + "_t" + str(t) + ".nii.gz") nib.Nifti1Image(mask.astype("int32"), affine, seed_mask_img.header).to_filename(vol_filename) shutil.make_archive(output_vumc_zip_file_path[:-4], 'zip', volume_folder) ################### # Upload the data # ################### context.set_progress(message='Uploading results...') # context.upload_file(fa_file_path, 'fa.nii.gz') # context.upload_file(fod_file_path, 'fod.nii.gz') # 
# context.upload_file(streamlines_file_path, 'streamlines.trk') if postprocessing in ["EPFL", "ALL"]: context.upload_file(output_epfl_zip_file_path, 'TrackyMcTrackface_EPFL_example.zip') if postprocessing in ["VUMC", "ALL"]: context.upload_file(output_vumc_zip_file_path, 'TrackyMcTrackface_VUMC_example.zip')
import numpy as np import nibabel as nib img = nib.load('t0.nii.gz') data = img.get_data() print('data.shape (%d, %d, %d, %d)' % data.shape) mask = data[..., 0] > 50 from dipy.io import read_bvals_bvecs bvals, bvecs = read_bvals_bvecs(fbval, fbvec) from dipy.core.gradients import gradient_table gtab = gradient_table(bvals, bvecs) from dipy.reconst.dti import TensorModel ten = TensorModel(gtab) tenfit = ten.fit(data, mask) from dipy.reconst.dti import fractional_anisotropy fa = fractional_anisotropy(tenfit.evals) fa[np.isnan(fa)] = 0 fa = np.clip(fa, 0, 1) from dipy.reconst.dti import color_fa Rgbv = color_fa(fa, tenfit.evecs) # save the color FA (RGB) map to an image nib.save(nib.Nifti1Image(np.array(255 * Rgbv, 'uint8'), img.get_affine()), 'tensor_rgb.nii.gz')
# Separate the b-values and find the indices. bval_list, b_inds, unique_b, bvals_scaled = ozu.separate_bvals(bvals) all_b_idx = np.where(bvals_scaled != 0) ad_arr = np.zeros(3) rd_arr = np.zeros(3) for b_idx in np.arange(1, len(unique_b)): # Separate data by b-value and create a b0 mask. bnk_b0_inds = np.concatenate((b_inds[0], b_inds[b_idx])) bnk_data = data[..., bnk_b0_inds] b0_mask, mask = median_otsu(data[..., b_inds[0][0]], 4, 4) # Fit a tensor for generating a color FA map gtab = gradient_table(bvals[bnk_b0_inds], bvecs[:, bnk_b0_inds]) tenmodel = TensorModel(gtab) tensorfit = tenmodel.fit(bnk_data, mask=mask) # Now segment the corpus callosum threshold = (0.5, 1, 0, 0.2, 0, 0.2) CC_box = np.zeros_like(data[..., b_inds[0][0]]) # Create a bounding box in which to look for the corpus # callosum. mins, maxs = bounding_box(mask) mins = np.array(mins) maxs = np.array(maxs) diff = (maxs - mins) // 5 bounds_min = mins + diff bounds_max = maxs - diff CC_box[bounds_min[0]:bounds_max[0], bounds_min[1]:bounds_max[1],
def prepare_data_for_actors(dwi_filename, bvals_filename, bvecs_filename, target_template_filename, slices_choice, shells=None): # Load and prepare the data dwi_img = nib.load(dwi_filename) dwi_data = dwi_img.get_data() dwi_affine = dwi_img.get_affine() bvals, bvecs = read_bvals_bvecs(bvals_filename, bvecs_filename) target_template_img = nib.load(target_template_filename) target_template_data = target_template_img.get_data() target_template_affine = target_template_img.affine mask_data = np.zeros(target_template_data.shape) mask_data[target_template_data > 0] = 1 # Prepare mask for tensors fit x_slice, y_slice, z_slice = slices_choice mask_data = prepare_slices_mask(mask_data, x_slice, y_slice, z_slice) # Extract B0 gtab = gradient_table(bvals, normalize_bvecs(bvecs), b0_threshold=10) b0_idx = np.where(gtab.b0s_mask)[0] mean_b0 = np.mean(dwi_data[..., b0_idx], axis=3, dtype=dwi_data.dtype) if shells: indices = [get_shell_indices(bvals, shell) for shell in shells] indices = np.sort(np.hstack(indices)) if len(indices) < 1: raise ValueError( 'There are no volumes that have the supplied b-values.') shell_data = np.zeros((dwi_data.shape[:-1] + (len(indices), )), dtype=dwi_data.dtype) shell_bvecs = np.zeros((len(indices), 3)) shell_bvals = np.zeros((len(indices), )) for i, indice in enumerate(indices): shell_data[..., i] = dwi_data[..., indice] shell_bvals[i] = bvals[indice] shell_bvecs[i, :] = bvecs[indice, :] else: shell_data = dwi_data shell_bvals = bvals shell_bvecs = bvecs # Register the DWI data to the template transformed_dwi, transformation = register_image( target_template_data, target_template_affine, mean_b0, dwi_affine, transformation_type='rigid', dwi=shell_data) # Rotate gradients rotated_bvecs = np.dot(shell_bvecs, transformation[0:3, 0:3]) rotated_bvecs = normalize_bvecs(rotated_bvecs) rotated_gtab = gradient_table(shell_bvals, rotated_bvecs, b0_threshold=10) # Get tensors tensor_model = TensorModel(rotated_gtab, fit_method='LS') tensor_fit = tensor_model.fit(transformed_dwi, mask_data) # Get FA fa_map = np.clip(fractional_anisotropy(tensor_fit.evals), 0, 1) # Get eigen vals/vecs evals = np.zeros(target_template_data.shape + (1, )) evals[..., 0] = tensor_fit.evals[..., 0] / np.max(tensor_fit.evals[..., 0]) evecs = np.zeros(target_template_data.shape + (1, 3)) evecs[:, :, :, 0, :] = tensor_fit.evecs[..., 0] return fa_map, evals, evecs
data = zoom(data, zoom=scale, order=1, mode='constant') labels = zoom(labels, zoom=scale[:3], order=1, mode='constant') labels = labels.astype(int) # Build Brain Mask #bm = np.where(labels == 0, False, True) bm = (labels != 0) * 1 mask = bm #sphere = get_sphere('repulsion724') sphere = get_sphere('repulsion200') from dipy.reconst.dti import TensorModel tensor_model = TensorModel(gtab) t1 = time() tensor_fit = tensor_model.fit(data, bm) # save_nifti('bmfa.nii.gz', tensor_fit.fa, affine) # wenlin make this change-adress name to each animal affine = affine @ np.diag(scale) save_nifti(outpath + 'bmfa' + runno + '.nii.gz', tensor_fit.fa, affine) fa = tensor_fit.fa duration1 = time() - t1 #wenlin make this change-adress name to each animal # print('DTI duration %.3f' % (duration1,)) print(runno + ' DTI duration %.3f' % (duration1, )) #response : 3.96154132e-04, 9.23377324e-05, 9.23377324e-05 #replace CSA with CSD # Build Brain Mask
def nii2streamlines(imgfile, maskfile, bvals, bvecs): import numpy as np import nibabel as nib import os from dipy.reconst.dti import TensorModel img = nib.load(imgfile) bvals = np.genfromtxt(bvals) bvecs = np.genfromtxt(bvecs) if bvecs.shape[1] != 3: bvecs = bvecs.T from nipype.utils.filemanip import split_filename _, prefix, _ = split_filename(imgfile) from dipy.data import gradient_table gtab = gradient_table(bvals, bvecs) data = img.get_data() affine = img.get_affine() zooms = img.get_header().get_zooms()[:3] new_zooms = (2., 2., 2.) data2, affine2 = data, affine mask = nib.load(maskfile).get_data().astype(np.bool) tenmodel = TensorModel(gtab) tenfit = tenmodel.fit(data2, mask) from dipy.reconst.dti import fractional_anisotropy FA = fractional_anisotropy(tenfit.evals) FA[np.isnan(FA)] = 0 fa_img = nib.Nifti1Image(FA, img.get_affine()) nib.save(fa_img, '%s_tensor_fa.nii.gz' % prefix) evecs = tenfit.evecs evec_img = nib.Nifti1Image(evecs, img.get_affine()) nib.save(evec_img, '%s_tensor_evec.nii.gz' % prefix) from dipy.data import get_sphere sphere = get_sphere('symmetric724') from dipy.reconst.dti import quantize_evecs peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices) from dipy.tracking.eudx import EuDX eu = EuDX(FA, peak_indices, odf_vertices=sphere.vertices, a_low=0.2, seeds=10**6, ang_thr=35) tensor_streamlines = [streamline for streamline in eu] hdr = nib.trackvis.empty_header() hdr['voxel_size'] = new_zooms hdr['voxel_order'] = 'LPS' hdr['dim'] = data2.shape[:3] import dipy.tracking.metrics as dmetrics tensor_streamlines = ((sl, None, None) for sl in tensor_streamlines if dmetrics.length(sl) > 15) ten_sl_fname = '%s_streamline.trk' % prefix nib.trackvis.write(ten_sl_fname, tensor_streamlines, hdr, points_space='voxel') return ten_sl_fname
def dodata(f_name, data_path): dipy_home = pjoin(os.path.expanduser('~'), 'dipy_data') folder = pjoin(dipy_home, data_path) fraw = pjoin(folder, f_name + '.nii.gz') fbval = pjoin(folder, f_name + '.bval') fbvec = pjoin(folder, f_name + '.bvec') flabels = pjoin(folder, f_name + '.nii-label.nii.gz') bvals, bvecs = read_bvals_bvecs(fbval, fbvec) gtab = gradient_table(bvals, bvecs) img = nib.load(fraw) data = img.get_data() affine = img.get_affine() label_img = nib.load(flabels) labels = label_img.get_data() lap = through_label_sl.label_position(labels, labelValue=1) dataslice = data[40:80, 20:80, lap[2][2] // 2] #print lap[2][2]//2 #get_csd_gfa(f_name,data,gtab,dataslice) maskdata, mask = median_otsu(data, 2, 1, False, vol_idx=range(10, 50), dilate=2) # do not strip the background """ get fa and tensor evecs and ODF""" from dipy.reconst.dti import TensorModel, mean_diffusivity tenmodel = TensorModel(gtab) tenfit = tenmodel.fit(data, mask) sphere = get_sphere('symmetric724') FA = fractional_anisotropy(tenfit.evals) FA[np.isnan(FA)] = 0 np.save(os.getcwd() + '\zhibiao' + f_name + '_FA.npy', FA) fa_img = nib.Nifti1Image(FA.astype(np.float32), affine) nib.save(fa_img, os.getcwd() + '\zhibiao' + f_name + '_FA.nii.gz') print('Saving "DTI_tensor_fa.nii.gz" successful.') evecs_img = nib.Nifti1Image(tenfit.evecs.astype(np.float32), affine) nib.save(evecs_img, os.getcwd() + '\zhibiao' + f_name + '_DTI_tensor_evecs.nii.gz') print('Saving "DTI_tensor_evecs.nii.gz" successful.') MD1 = mean_diffusivity(tenfit.evals) nib.save(nib.Nifti1Image(MD1.astype(np.float32), img.get_affine()), os.getcwd() + '\zhibiao' + f_name + '_MD.nii.gz') #tensor_odfs = tenmodel.fit(data[20:50, 55:85, 38:39]).odf(sphere) #from dipy.reconst.odf import gfa #dti_gfa=gfa(tensor_odfs) wm_mask = (np.logical_or(FA >= 0.4, (np.logical_and(FA >= 0.15, MD1 >= 0.0011)))) response = recursive_response(gtab, data, mask=wm_mask, sh_order=8, peak_thr=0.01, init_fa=0.08, init_trace=0.0021, iter=8, convergence=0.001, parallel=False) from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel csd_model = ConstrainedSphericalDeconvModel(gtab, response) #csd_fit = csd_model.fit(data) from dipy.direction import peaks_from_model csd_peaks = peaks_from_model(model=csd_model, data=data, sphere=sphere, relative_peak_threshold=.5, min_separation_angle=25, parallel=False) GFA = csd_peaks.gfa nib.save(nib.Nifti1Image(GFA.astype(np.float32), affine), os.getcwd() + '\zhibiao' + f_name + '_GFA.nii.gz') print('Saving "GFA.nii.gz" successful.') from dipy.reconst.shore import ShoreModel asm = ShoreModel(gtab) print('Calculating...SHORE msd') asmfit = asm.fit(data, mask) msd = asmfit.msd() msd[np.isnan(msd)] = 0 #print GFA[:,:,slice].T print('Saving "MSD.nii.gz" successful.') nib.save(nib.Nifti1Image(msd.astype(np.float32), affine), os.getcwd() + '\zhibiao' + f_name + '_MSD.nii.gz')
data, affine, gtab = get_train_dti(30) elif datat == 1: print('fitting with hardi') data, affine, gtab = get_train_hardi(30) elif datat == 2: print('fitting with dsi') data, affine, gtab = get_train_dsi(30) mask, affine = get_train_mask() print(data.shape) print(mask.shape) model = TensorModel(gtab) fit = model.fit(data, mask) print('done!') fa = fit.fa slice_z = 25 Th = [0.05, 0.075, 0.1, 0.15] figure(2*datat+1) imshow(fa[:, :, slice_z], interpolation='nearest') colorbar() title(mask.sum()) figure(2*datat + 2) for i in range(4):
def test_WLS_and_LS_fit(): """ Tests the WLS and LS fitting functions to see if they returns the correct eigenvalues and eigenvectors. Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii as the data. """ ### Defining Test Voxel (avoid nibabel dependency) ### #Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s b0 = 1000. bvec, bval = read_bvec_file(get_data('55dir_grad.bvec')) B = bval[1] #Scale the eigenvalues and tensor by the B value so the units match D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B evals = np.array([2., 1., 0.]) / B md = evals.mean() tensor = from_lower_triangular(D) #Design Matrix X = dti.design_matrix(bvec, bval) #Signals Y = np.exp(np.dot(X, D)) assert_almost_equal(Y[0], b0) Y.shape = (-1,) + Y.shape gtab = grad.gradient_table(bval, bvec) ### Testing WLS Fit on Single Voxel ### #Estimate tensor from test signals model = TensorModel(gtab, min_signal=1e-8, fit_method='WLS') tensor_est = model.fit(Y) assert_equal(tensor_est.shape, Y.shape[:-1]) assert_array_almost_equal(tensor_est.evals[0], evals) assert_array_almost_equal(tensor_est.quadratic_form[0], tensor, err_msg="Calculation of tensor from Y does not " "compare to analytical solution") assert_almost_equal(tensor_est.md[0], md) # Test that we can fit a single voxel's worth of data (a 1d array) y = Y[0] tensor_est = model.fit(y) assert_equal(tensor_est.shape, tuple()) assert_array_almost_equal(tensor_est.evals, evals) assert_array_almost_equal(tensor_est.quadratic_form, tensor) assert_almost_equal(tensor_est.md, md) assert_array_almost_equal(tensor_est.lower_triangular(b0), D) # Test using fit_method='LS' model = TensorModel(gtab, min_signal=1e-8, fit_method='LS') tensor_est = model.fit(y) assert_equal(tensor_est.shape, tuple()) assert_array_almost_equal(tensor_est.evals, evals) assert_array_almost_equal(tensor_est.quadratic_form, tensor) assert_almost_equal(tensor_est.md, md) assert_array_almost_equal(tensor_est.lower_triangular(b0), D) assert_array_almost_equal(tensor_est.linearity, linearity(evals)) assert_array_almost_equal(tensor_est.planarity, planarity(evals)) assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
def test_eudx_further(): """ Cause we love testin.. ;-) """ fimg, fbvals, fbvecs = get_fnames('small_101D') img = ni.load(fimg) data = img.get_data() gtab = gradient_table(fbvals, fbvecs) tensor_model = TensorModel(gtab) ten = tensor_model.fit(data) x, y, z = data.shape[:3] seeds = np.zeros((10**4, 3)) for i in range(10**4): rx = (x - 1) * np.random.rand() ry = (y - 1) * np.random.rand() rz = (z - 1) * np.random.rand() seeds[i] = np.ascontiguousarray(np.array([rx, ry, rz]), dtype=np.float64) sphere = get_sphere('symmetric724') ind = quantize_evecs(ten.evecs) eu = EuDX(a=ten.fa, ind=ind, seeds=seeds, odf_vertices=sphere.vertices, a_low=.2) T = [e for e in eu] # check that there are no negative elements for t in T: assert_equal(np.sum(t.ravel() < 0), 0) # Test eudx with affine def random_affine(seeds): affine = np.eye(4) affine[:3, :] = np.random.random((3, 4)) seeds = np.dot(seeds, affine[:3, :3].T) seeds += affine[:3, 3] return affine, seeds # Make two random affines and move seeds affine1, seeds1 = random_affine(seeds) affine2, seeds2 = random_affine(seeds) # Make tracks using different affines eu1 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices, seeds=seeds1, a_low=.2, affine=affine1) eu2 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices, seeds=seeds2, a_low=.2, affine=affine2) # Move from eu2 affine2 to affine1 eu2_to_eu1 = utils.move_streamlines(eu2, output_space=affine1, input_space=affine2) # Check that the tracks are the same for sl1, sl2 in zip(eu1, eu2_to_eu1): assert_array_almost_equal(sl1, sl2)
def test_recursive_response_calibration(): """ Test the recursive response calibration method. """ SNR = 100 S0 = 1 _, fbvals, fbvecs = get_fnames('small_64D') bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs) sphere = default_sphere gtab = gradient_table(bvals, bvecs) evals = np.array([0.0015, 0.0003, 0.0003]) evecs = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).T mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003])) angles = [(0, 0), (90, 0)] where_dwi = lazy_index(~gtab.b0s_mask) S_cross, _ = multi_tensor(gtab, mevals, S0, angles=angles, fractions=[50, 50], snr=SNR) S_single = single_tensor(gtab, S0, evals, evecs, snr=SNR) data = np.concatenate((np.tile(S_cross, (8, 1)), np.tile(S_single, (2, 1))), axis=0) odf_gt_cross = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50]) odf_gt_single = single_tensor_odf(sphere.vertices, evals, evecs) response = recursive_response(gtab, data, mask=None, sh_order=8, peak_thr=0.01, init_fa=0.05, init_trace=0.0021, iter=8, convergence=0.001, parallel=False) csd = ConstrainedSphericalDeconvModel(gtab, response) csd_fit = csd.fit(data) assert_equal(np.all(csd_fit.shm_coeff[:, 0] >= 0), True) fodf = csd_fit.odf(sphere) directions_gt_single, _, _ = peak_directions(odf_gt_single, sphere) directions_gt_cross, _, _ = peak_directions(odf_gt_cross, sphere) directions_single, _, _ = peak_directions(fodf[8, :], sphere) directions_cross, _, _ = peak_directions(fodf[0, :], sphere) ang_sim = angular_similarity(directions_cross, directions_gt_cross) assert_equal(ang_sim > 1.9, True) assert_equal(directions_cross.shape[0], 2) assert_equal(directions_gt_cross.shape[0], 2) ang_sim = angular_similarity(directions_single, directions_gt_single) assert_equal(ang_sim > 0.9, True) assert_equal(directions_single.shape[0], 1) assert_equal(directions_gt_single.shape[0], 1) with warnings.catch_warnings(record=True) as w: sphere = Sphere(xyz=gtab.gradients[where_dwi]) npt.assert_equal(len(w), 1) npt.assert_(issubclass(w[0].category, UserWarning)) npt.assert_("Vertices are not on the unit sphere" in str(w[0].message)) sf = response.on_sphere(sphere) S = np.concatenate(([response.S0], sf)) tenmodel = TensorModel(gtab, min_signal=0.001) tenfit = tenmodel.fit(S) FA = fractional_anisotropy(tenfit.evals) FA_gt = fractional_anisotropy(evals) assert_almost_equal(FA, FA_gt, 1)
def main(): parser = _build_args_parser() args = parser.parse_args() if not args.not_all: args.fa = args.fa or 'fa.nii.gz' args.ga = args.ga or 'ga.nii.gz' args.rgb = args.rgb or 'rgb.nii.gz' args.md = args.md or 'md.nii.gz' args.ad = args.ad or 'ad.nii.gz' args.rd = args.rd or 'rd.nii.gz' args.mode = args.mode or 'mode.nii.gz' args.norm = args.norm or 'tensor_norm.nii.gz' args.tensor = args.tensor or 'tensor.nii.gz' args.evecs = args.evecs or 'tensor_evecs.nii.gz' args.evals = args.evals or 'tensor_evals.nii.gz' args.residual = args.residual or 'dti_residual.nii.gz' args.p_i_signal =\ args.p_i_signal or 'physically_implausible_signals_mask.nii.gz' args.pulsation = args.pulsation or 'pulsation_and_misalignment.nii.gz' outputs = [ args.fa, args.ga, args.rgb, args.md, args.ad, args.rd, args.mode, args.norm, args.tensor, args.evecs, args.evals, args.residual, args.p_i_signal, args.pulsation ] if args.not_all and not any(outputs): parser.error('When using --not_all, you need to specify at least ' + 'one metric to output.') assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs], [args.mask]) assert_outputs_exist(parser, args, outputs) img = nib.load(args.input) data = img.get_data() affine = img.get_affine() if args.mask is None: mask = None else: mask = nib.load(args.mask).get_data().astype(np.bool) # Validate bvals and bvecs logging.info('Tensor estimation with the %s method...', args.method) bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs) if not is_normalized_bvecs(bvecs): logging.warning('Your b-vectors do not seem normalized...') bvecs = normalize_bvecs(bvecs) check_b0_threshold(args, bvals.min()) gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min()) # Get tensors if args.method == 'restore': sigma = ne.estimate_sigma(data) tenmodel = TensorModel(gtab, fit_method=args.method, sigma=sigma, min_signal=_get_min_nonzero_signal(data)) else: tenmodel = TensorModel(gtab, fit_method=args.method, min_signal=_get_min_nonzero_signal(data)) tenfit = tenmodel.fit(data, mask) FA = fractional_anisotropy(tenfit.evals) FA[np.isnan(FA)] = 0 FA = np.clip(FA, 0, 1) if args.tensor: # Get the Tensor values and format them for visualisation # in the Fibernavigator. tensor_vals = lower_triangular(tenfit.quadratic_form) correct_order = [0, 1, 3, 2, 4, 5] tensor_vals_reordered = tensor_vals[..., correct_order] fiber_tensors = nib.Nifti1Image( tensor_vals_reordered.astype(np.float32), affine) nib.save(fiber_tensors, args.tensor) if args.fa: fa_img = nib.Nifti1Image(FA.astype(np.float32), affine) nib.save(fa_img, args.fa) if args.ga: GA = geodesic_anisotropy(tenfit.evals) GA[np.isnan(GA)] = 0 ga_img = nib.Nifti1Image(GA.astype(np.float32), affine) nib.save(ga_img, args.ga) if args.rgb: RGB = color_fa(FA, tenfit.evecs) rgb_img = nib.Nifti1Image(np.array(255 * RGB, 'uint8'), affine) nib.save(rgb_img, args.rgb) if args.md: MD = mean_diffusivity(tenfit.evals) md_img = nib.Nifti1Image(MD.astype(np.float32), affine) nib.save(md_img, args.md) if args.ad: AD = axial_diffusivity(tenfit.evals) ad_img = nib.Nifti1Image(AD.astype(np.float32), affine) nib.save(ad_img, args.ad) if args.rd: RD = radial_diffusivity(tenfit.evals) rd_img = nib.Nifti1Image(RD.astype(np.float32), affine) nib.save(rd_img, args.rd) if args.mode: # Compute tensor mode inter_mode = dipy_mode(tenfit.quadratic_form) # Since the mode computation can generate NANs when not masked, # we need to remove them. 
non_nan_indices = np.isfinite(inter_mode) mode = np.zeros(inter_mode.shape) mode[non_nan_indices] = inter_mode[non_nan_indices] mode_img = nib.Nifti1Image(mode.astype(np.float32), affine) nib.save(mode_img, args.mode) if args.norm: NORM = norm(tenfit.quadratic_form) norm_img = nib.Nifti1Image(NORM.astype(np.float32), affine) nib.save(norm_img, args.norm) if args.evecs: evecs = tenfit.evecs.astype(np.float32) evecs_img = nib.Nifti1Image(evecs, affine) nib.save(evecs_img, args.evecs) # save individual e-vectors also e1_img = nib.Nifti1Image(evecs[..., 0], affine) e2_img = nib.Nifti1Image(evecs[..., 1], affine) e3_img = nib.Nifti1Image(evecs[..., 2], affine) nib.save(e1_img, add_filename_suffix(args.evecs, '_v1')) nib.save(e2_img, add_filename_suffix(args.evecs, '_v2')) nib.save(e3_img, add_filename_suffix(args.evecs, '_v3')) if args.evals: evals = tenfit.evals.astype(np.float32) evals_img = nib.Nifti1Image(evals, affine) nib.save(evals_img, args.evals) # save individual e-values also e1_img = nib.Nifti1Image(evals[..., 0], affine) e2_img = nib.Nifti1Image(evals[..., 1], affine) e3_img = nib.Nifti1Image(evals[..., 2], affine) nib.save(e1_img, add_filename_suffix(args.evals, '_e1')) nib.save(e2_img, add_filename_suffix(args.evals, '_e2')) nib.save(e3_img, add_filename_suffix(args.evals, '_e3')) if args.p_i_signal: S0 = np.mean(data[..., gtab.b0s_mask], axis=-1, keepdims=True) DWI = data[..., ~gtab.b0s_mask] pis_mask = np.max(S0 < DWI, axis=-1) if args.mask is not None: pis_mask *= mask pis_img = nib.Nifti1Image(pis_mask.astype(np.int16), affine) nib.save(pis_img, args.p_i_signal) if args.pulsation: STD = np.std(data[..., ~gtab.b0s_mask], axis=-1) if args.mask is not None: STD *= mask std_img = nib.Nifti1Image(STD.astype(np.float32), affine) nib.save(std_img, add_filename_suffix(args.pulsation, '_std_dwi')) if np.sum(gtab.b0s_mask) <= 1: logger.info('Not enough b=0 images to output standard ' 'deviation map') else: if len(np.where(gtab.b0s_mask)) == 2: logger.info('Only two b=0 images. Be careful with the ' 'interpretation of this std map') STD = np.std(data[..., gtab.b0s_mask], axis=-1) if args.mask is not None: STD *= mask std_img = nib.Nifti1Image(STD.astype(np.float32), affine) nib.save(std_img, add_filename_suffix(args.pulsation, '_std_b0')) if args.residual: if args.mask is None: logger.info("Outlier detection will not be performed, since no " "mask was provided.") S0 = np.mean(data[..., gtab.b0s_mask], axis=-1) data_p = tenfit.predict(gtab, S0) R = np.mean(np.abs(data_p[..., ~gtab.b0s_mask] - data[..., ~gtab.b0s_mask]), axis=-1) if args.mask is not None: R *= mask R_img = nib.Nifti1Image(R.astype(np.float32), affine) nib.save(R_img, args.residual) R_k = np.zeros(data.shape[-1]) # mean residual per DWI std = np.zeros(data.shape[-1]) # std residual per DWI q1 = np.zeros(data.shape[-1]) # first quartile q3 = np.zeros(data.shape[-1]) # third quartile iqr = np.zeros(data.shape[-1]) # interquartile for i in range(data.shape[-1]): x = np.abs(data_p[..., i] - data[..., i])[mask] R_k[i] = np.mean(x) std[i] = np.std(x) q3[i], q1[i] = np.percentile(x, [75, 25]) iqr[i] = q3[i] - q1[i] # Outliers are observations that fall below Q1 - 1.5(IQR) or # above Q3 + 1.5(IQR) We check if a volume is an outlier only if # we have a mask, else we are biased. 
if args.mask is not None and (R_k[i] < (q1[i] - 1.5 * iqr[i]) or R_k[i] > (q3[i] + 1.5 * iqr[i])): logger.warning( 'WARNING: Diffusion-Weighted Image i=%s is an ' 'outlier', i) residual_basename, _ = split_name_with_nii(args.residual) res_stats_basename = residual_basename + ".npy" np.save(add_filename_suffix(res_stats_basename, "_mean_residuals"), R_k) np.save(add_filename_suffix(res_stats_basename, "_q1_residuals"), q1) np.save(add_filename_suffix(res_stats_basename, "_q3_residuals"), q3) np.save(add_filename_suffix(res_stats_basename, "_iqr_residuals"), iqr) np.save(add_filename_suffix(res_stats_basename, "_std_residuals"), std) # To do: I would like to have an error bar with q1 and q3. # Now, q1 acts as a std dwi = np.arange(R_k[~gtab.b0s_mask].shape[0]) plt.bar(dwi, R_k[~gtab.b0s_mask], 0.75, color='y', yerr=q1[~gtab.b0s_mask]) plt.xlabel('DW image') plt.ylabel('Mean residuals +- q1') plt.title('Residuals') plt.savefig(residual_basename + '_residuals_stats.png')
def tens_mod_fa_est(gtab_file, dwi_file, B0_mask): """ Estimate a tensor FA image to use for registrations. Parameters ---------- gtab_file : str File path to pickled DiPy gradient table object. dwi_file : str File path to diffusion weighted image. B0_mask : str File path to B0 brain mask. Returns ------- fa_path : str File path to FA Nifti1Image. B0_mask : str File path to B0 brain mask Nifti1Image. gtab_file : str File path to pickled DiPy gradient table object. dwi_file : str File path to diffusion weighted Nifti1Image. fa_md_path : str File path to FA/MD mask Nifti1Image. """ import os from dipy.io import load_pickle from dipy.reconst.dti import TensorModel from dipy.reconst.dti import fractional_anisotropy, mean_diffusivity gtab = load_pickle(gtab_file) data = nib.load(dwi_file, mmap=False).get_fdata() print("Generating tensor FA image to use for registrations...") nodif_B0_img = nib.load(B0_mask, mmap=False) nodif_B0_mask_data = nodif_B0_img.get_fdata().astype("bool") model = TensorModel(gtab) mod = model.fit(data, nodif_B0_mask_data) FA = fractional_anisotropy(mod.evals) # MD = mean_diffusivity(mod.evals) # FA_MD = np.logical_or( # FA >= 0.2, (np.logical_and( # FA >= 0.08, MD >= 0.0011))) # FA_MD[np.isnan(FA_MD)] = 0 FA = np.nan_to_num(np.asarray(FA.astype('float32'))) fa_path = f"{os.path.dirname(B0_mask)}{'/tensor_fa.nii.gz'}" nib.save( nib.Nifti1Image( FA, nodif_B0_img.affine), fa_path) # md_path = f"{os.path.dirname(B0_mask)}{'/tensor_md.nii.gz'}" # nib.save( # nib.Nifti1Image( # MD.astype( # np.float32), # nodif_B0_img.affine), # md_path) nodif_B0_img.uncache() del FA return fa_path, B0_mask, gtab_file, dwi_file
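A hedged usage sketch for the helper above; the three inputs are file paths produced earlier in a pipeline (the paths shown are placeholders), and the FA path comes back first so it can be chained into a registration step.

# Illustrative call with hypothetical paths.
fa_path, B0_mask, gtab_file, dwi_file = tens_mod_fa_est(
    '/tmp/gtab.pkl',                 # hypothetical pickled GradientTable
    '/tmp/dwi_preprocessed.nii.gz',  # hypothetical diffusion-weighted image
    '/tmp/B0_brain_mask.nii.gz')     # hypothetical B0 brain mask
print('FA image written to', fa_path)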
def dwi_dipy_run(dwi_dir, node_size, dir_path, conn_model, parc, atlas_select, network, wm_mask=None): from dipy.reconst.dti import TensorModel, quantize_evecs from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, recursive_response from dipy.tracking.local import LocalTracking, ActTissueClassifier from dipy.tracking import utils from dipy.direction import peaks_from_model from dipy.tracking.eudx import EuDX from dipy.data import get_sphere, default_sphere from dipy.core.gradients import gradient_table from dipy.io import read_bvals_bvecs from dipy.tracking.streamline import Streamlines from dipy.direction import ProbabilisticDirectionGetter, ClosestPeakDirectionGetter, BootDirectionGetter from nibabel.streamlines import save as save_trk from nibabel.streamlines import Tractogram ## dwi_dir = '/Users/PSYC-dap3463/Downloads/bedpostx_s002' img_pve_csf = nib.load( '/Users/PSYC-dap3463/Downloads/002_all/tmp/reg_a/t1w_vent_csf_diff_dwi.nii.gz' ) img_pve_wm = nib.load( '/Users/PSYC-dap3463/Downloads/002_all/tmp/reg_a/t1w_wm_in_dwi_bin.nii.gz' ) img_pve_gm = nib.load( '/Users/PSYC-dap3463/Downloads/002_all/tmp/reg_a/t1w_gm_mask_dwi.nii.gz' ) labels_img = nib.load( '/Users/PSYC-dap3463/Downloads/002_all/tmp/reg_a/dwi_aligned_atlas.nii.gz' ) num_total_samples = 10000 tracking_method = 'boot' # Options are 'boot', 'prob', 'peaks', 'closest' procmem = [2, 4] ## if parc is True: node_size = 'parc' dwi_img = "%s%s" % (dwi_dir, '/dwi.nii.gz') nodif_brain_mask_path = "%s%s" % (dwi_dir, '/nodif_brain_mask.nii.gz') bvals = "%s%s" % (dwi_dir, '/bval') bvecs = "%s%s" % (dwi_dir, '/bvec') dwi_img = nib.load(dwi_img) data = dwi_img.get_data() [bvals, bvecs] = read_bvals_bvecs(bvals, bvecs) gtab = gradient_table(bvals, bvecs) gtab.b0_threshold = min(bvals) sphere = get_sphere('symmetric724') # Loads mask and ensures it's a true binary mask mask_img = nib.load(nodif_brain_mask_path) mask = mask_img.get_data() mask = mask > 0 # Fit a basic tensor model first model = TensorModel(gtab) ten = model.fit(data, mask) fa = ten.fa # Tractography if conn_model == 'csd': print('Tracking with csd model...') elif conn_model == 'tensor': print('Tracking with tensor model...') else: raise RuntimeError("%s%s" % (conn_model, ' is not a valid model.')) # Combine seed counts from voxel with seed counts total wm_mask_data = img_pve_wm.get_data() wm_mask_data[0, :, :] = False wm_mask_data[:, 0, :] = False wm_mask_data[:, :, 0] = False seeds = utils.seeds_from_mask(wm_mask_data, density=1, affine=dwi_img.get_affine()) seeds_rnd = utils.random_seeds_from_mask(ten.fa > 0.02, seeds_count=num_total_samples, seed_count_per_voxel=True) seeds_all = np.vstack([seeds, seeds_rnd]) # Load tissue maps and prepare tissue classifier (Anatomically-Constrained Tractography (ACT)) background = np.ones(img_pve_gm.shape) background[(img_pve_gm.get_data() + img_pve_wm.get_data() + img_pve_csf.get_data()) > 0] = 0 include_map = img_pve_gm.get_data() include_map[background > 0] = 1 exclude_map = img_pve_csf.get_data() act_classifier = ActTissueClassifier(include_map, exclude_map) if conn_model == 'tensor': ind = quantize_evecs(ten.evecs, sphere.vertices) streamline_generator = EuDX(a=fa, ind=ind, seeds=seeds_all, odf_vertices=sphere.vertices, a_low=0.05, step_sz=.5) elif conn_model == 'csd': print('Tracking with CSD model...') response = recursive_response( gtab, data, mask=img_pve_wm.get_data().astype('bool'), sh_order=8, peak_thr=0.01, init_fa=0.05, init_trace=0.0021, iter=8, convergence=0.001, parallel=True) csd_model = 
ConstrainedSphericalDeconvModel(gtab, response) if tracking_method == 'boot': dg = BootDirectionGetter.from_data(data, csd_model, max_angle=30., sphere=default_sphere) elif tracking_method == 'prob': try: print( 'First attempting to build the direction getter directly from the spherical harmonic representation of the FOD...' ) csd_fit = csd_model.fit( data, mask=img_pve_wm.get_data().astype('bool')) dg = ProbabilisticDirectionGetter.from_shcoeff( csd_fit.shm_coeff, max_angle=30., sphere=default_sphere) except: print( 'Sphereical harmonic not available for this model. Using peaks_from_model to represent the ODF of the model on a spherical harmonic basis instead...' ) peaks = peaks_from_model( csd_model, data, default_sphere, .5, 25, mask=img_pve_wm.get_data().astype('bool'), return_sh=True, parallel=True, nbr_processes=procmem[0]) dg = ProbabilisticDirectionGetter.from_shcoeff( peaks.shm_coeff, max_angle=30., sphere=default_sphere) elif tracking_method == 'peaks': dg = peaks_from_model(model=csd_model, data=data, sphere=default_sphere, relative_peak_threshold=.5, min_separation_angle=25, mask=img_pve_wm.get_data().astype('bool'), parallel=True, nbr_processes=procmem[0]) elif tracking_method == 'closest': csd_fit = csd_model.fit(data, mask=img_pve_wm.get_data().astype('bool')) pmf = csd_fit.odf(default_sphere).clip(min=0) dg = ClosestPeakDirectionGetter.from_pmf(pmf, max_angle=30., sphere=default_sphere) streamline_generator = LocalTracking(dg, act_classifier, seeds_all, affine=dwi_img.affine, step_size=0.5) del dg try: del csd_fit except: pass try: del response except: pass try: del csd_model except: pass streamlines = Streamlines(streamline_generator, buffer_size=512) save_trk(Tractogram(streamlines, affine_to_rasmm=dwi_img.affine), 'prob_streamlines.trk') tracks = [sl for sl in streamlines if len(sl) > 1] labels_data = labels_img.get_data().astype('int') labels_affine = labels_img.affine conn_matrix, grouping = utils.connectivity_matrix( tracks, labels_data, affine=labels_affine, return_mapping=True, mapping_as_streamlines=True, symmetric=True) conn_matrix[:3, :] = 0 conn_matrix[:, :3] = 0 return conn_matrix
import numpy as np
import nibabel as nib

from dipy.reconst.dti import TensorModel, fractional_anisotropy
from dipy.data import get_sphere
from dipy.io.pickles import save_pickle, load_pickle
from conn_mat import connectivity_matrix
from time import time

threshold = 0.75
sphere = get_sphere('symmetric724')
dname = 'SNR20/'

if __name__ == '__main__':
    data, affine, gtab = get_test_hardi(snr=20, denoised=0)
    mask = get_test_mask()

    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    nib.save(nib.Nifti1Image(FA.astype('float32'), affine), 'FA.nii.gz')

    for i in range(27):
        print('White matter bundle: ', i)
        wm_mask = get_test_wm_mask(i)
        print(FA[wm_mask].max())

        indicesAniso = np.where(np.logical_and(FA > threshold, wm_mask))

        print(' Response function')
        S0s = data[indicesAniso][:, np.nonzero(gtab.b0s_mask)[0]]
        S0 = np.mean(S0s)
        if S0 == 0:
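# --- Illustrative sketch (not part of the original script) -------------------
# The truncated loop above is gathering what a single-fibre response function
# needs: the mean b=0 signal and the tensor eigenvalues of high-FA voxels in a
# white-matter mask. A minimal, hypothetical helper capturing that idea
# (names and the symmetrisation convention are assumptions, not the author's
# code):
import numpy as np

def estimate_response(evals, FA, wm_mask, S0_vol, fa_threshold=0.75):
    """Average eigenvalues and b0 signal over high-FA voxels of a WM mask."""
    idx = np.where(np.logical_and(FA > fa_threshold, wm_mask))
    lambdas = evals[idx].mean(axis=0)
    # enforce an axially symmetric profile, as is usual for CSD responses
    response_evals = np.array([lambdas[0],
                               (lambdas[1] + lambdas[2]) / 2.,
                               (lambdas[1] + lambdas[2]) / 2.])
    return response_evals, S0_vol[idx].mean()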
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    img = nib.load(args.input)
    data = img.get_fdata()
    print('\ndata shape ({}, {}, {}, {})'.format(data.shape[0], data.shape[1],
                                                 data.shape[2], data.shape[3]))
    print('total voxels {}'.format(np.prod(data.shape[:3])))

    # remove negatives
    print('\nclipping negatives ({} voxels, {:.2f} % of total)'.format(
        (data < 0).sum(),
        100 * (data < 0).sum() / float(np.prod(data.shape[:3]))))
    data = np.clip(data, 0, np.inf)

    affine = img.affine

    if args.mask is None:
        mask = None
        masksum = np.prod(data.shape[:3])
    else:
        mask = nib.load(args.mask).get_data().astype(bool)
        masksum = mask.sum()
    print('\nMask has {} voxels, {:.2f} % of total'.format(
        masksum, 100 * masksum / float(np.prod(data.shape[:3]))))

    # Validate bvals and bvecs
    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)
    if not is_normalized_bvecs(bvecs):
        print('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    # Detect unique b-shells and assign a shell id to each volume.
    # Sort bvals first to get a monotonically increasing sequence.
    bvals_argsort = np.argsort(bvals)
    bvals_sorted = bvals[bvals_argsort]

    b_shell_threshold = 25.
    unique_bvalues = []
    shell_idx = []
    unique_bvalues.append(bvals_sorted[0])
    shell_idx.append(0)
    for newb in bvals_sorted[1:]:
        # check if the volume belongs to an existing shell
        done = False
        for i, b in enumerate(unique_bvalues):
            if (newb - b_shell_threshold < b) and (newb + b_shell_threshold > b):
                shell_idx.append(i)
                done = True
                break  # avoid assigning the same volume to two shells
        if not done:
            unique_bvalues.append(newb)
            shell_idx.append(i + 1)

    unique_bvalues = np.array(unique_bvalues)

    # un-sort shells back to the original volume ordering
    shells = np.zeros_like(bvals)
    shells[bvals_argsort] = shell_idx

    print('\nWe have {} shells'.format(len(unique_bvalues)))
    print('with b-values {}\n'.format(unique_bvalues))
    for i in range(len(unique_bvalues)):
        shell_b = bvals[shells == i]
        print('shell {}: n = {}, min/max {} {}'.format(i, len(shell_b),
                                                       shell_b.min(),
                                                       shell_b.max()))

    # Get tensors
    method = 'WLS'
    min_signal = 1e-16
    print('\nUsing fitting method {}'.format(method))
    # print('Using minimum signal = {}'.format(min_signal))

    b0_thr = bvals.min() + 10
    print('\nassuming existence of b0 (thr = {})\n'.format(b0_thr))

    fas = []
    mds = []
    lams_max = []
    lams_min = []
    delta_S = []
    raw_signal = []

    for i in range(len(unique_bvalues) - 1):
        print('fitting shell {} plus b0 (bmax = {})'.format(
            i + 1, bvals[shells == i + 1].max()))

        # restricted gtab: the b0 shell together with shell i+1
        # gtab = gradient_table(bvals[shells <= i+1], bvecs[shells <= i+1],
        #                       b0_threshold=b0_thr)
        keep = np.logical_or(shells == i + 1, shells == 0)
        gtab = gradient_table(bvals[keep], bvecs[keep], b0_threshold=b0_thr)

        tenmodel = TensorModel(gtab, fit_method=method, min_signal=min_signal)
        tenfit = tenmodel.fit(data[..., keep], mask)

        raw_signal.append(data[..., keep][mask].mean(axis=1))

        evalmax = np.max(tenfit.evals, axis=3)
        evalmin = np.min(tenfit.evals, axis=3)
        evalmax[np.isnan(evalmax)] = 0
        evalmin[np.isnan(evalmin)] = 0
        evalmax[np.isinf(evalmax)] = 0
        evalmin[np.isinf(evalmin)] = 0

        weird_contrast = (np.exp(-unique_bvalues[i + 1] * evalmin) -
                          np.exp(-unique_bvalues[i + 1] * evalmax))

        mds.append(tenfit.md[mask])
        fas.append(tenfit.fa[mask])
        lams_max.append(evalmax[mask])
        lams_min.append(evalmin[mask])
        delta_S.append(weird_contrast[mask])

    bmaxs = np.array([bvals[shells == i + 1].max()
                      for i in range(len(unique_bvalues) - 1)])

    names = ['FA', 'MD', 'eval_max', 'eval_min', 'delta_S',
             'eval_max_minus_eval_min', 'raw_signal']
    units = ['a.u.', 'mm^2/s', 'mm^2/s', 'mm^2/s', 'contrast (a.u.)',
             'mm^2/s', 'raw signal (a.u.)']
    datas = [np.array(fas).mean(axis=1),
             np.array(mds).mean(axis=1),
             np.array(lams_max).mean(axis=1),
             np.array(lams_min).mean(axis=1),
             np.array(delta_S).mean(axis=1),
             (np.array(lams_max) - np.array(lams_min)).mean(axis=1),
             np.array(raw_signal).mean(axis=1)]

    for i in range(len(names)):
        plt.figure()
        plt.plot(bmaxs, datas[i])
        plt.title(names[i])
        plt.xlabel('bval (s/mm^2)')
        plt.ylabel(units[i])
        plt.savefig('./roi_plot_' + names[i] + '.png', dpi=150)
        plt.show()
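# --- Illustrative sketch (not part of the original script) -------------------
# The shell-detection loop in main() groups b-values lying within
# b_shell_threshold of an existing shell centre and opens a new shell
# otherwise. The same logic as a small, hypothetical stand-alone helper:
import numpy as np

def group_shells(bvals, threshold=25.):
    """Return shell centres and a per-volume shell index."""
    order = np.argsort(bvals)
    centres = []
    shell_idx = np.zeros(len(bvals), dtype=int)
    for pos in order:
        b = bvals[pos]
        for i, c in enumerate(centres):
            if abs(b - c) < threshold:
                shell_idx[pos] = i
                break
        else:
            centres.append(b)
            shell_idx[pos] = len(centres) - 1
    return np.array(centres), shell_idx

# e.g. three shells at b ~ 0, 1000 and 2000 s/mm^2
print(group_shells(np.array([0., 5., 995., 1005., 1990., 2000.])))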