def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the
    correct eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.
    """
    ### Defining Test Voxel (avoid nibabel dependency) ###
    # Recall: D = [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz, log(S_0)] and
    # D ~ 10^-4 mm^2 / s
    b0 = 1000.
    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design matrix
    X = dti.design_matrix(gtab, bval)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    ### Testing WLS Fit on Single Voxel ###
    # Estimate tensor from test signals
    tensor_est = dti.Tensor(Y, bval, gtab.T, min_signal=1e-8)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(
        tensor_est.D[0], tensor,
        err_msg="Calculation of tensor from Y does not compare to "
                "analytical solution")
    assert_almost_equal(tensor_est.md()[0], md)

    # Test 0d tensor
    y = Y[0]
    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.D, tensor)
    assert_almost_equal(tensor_est.md(), md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8,
                            fit_method='LS')
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.D, tensor)
    assert_almost_equal(tensor_est.md(), md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
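
# For reference, the closed-form log-linear step underlying the 'LS' fit
# method can be reproduced directly with numpy. This is a minimal sketch,
# not the library's implementation: it assumes `X` is the design matrix
# returned by dti.design_matrix and `Y` is a single voxel's signal vector.
def _ls_fit_sketch(X, Y, min_signal=1e-8):
    # Ordinary least squares on the log-signals: solve X * D = log(Y)
    log_Y = np.log(np.maximum(Y, min_signal))
    D, _, _, _ = np.linalg.lstsq(X, log_Y)
    # D holds [Dxx, Dyy, Dzz, Dxy, Dxz, Dyz, log(S_0)]
    return D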
def test_passing_maskedview():
    data = np.ones((2, 4, 56))
    mask = np.array([[True, False, False, True],
                     [True, False, True, False]])
    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))

    data = data[mask]
    mv = MaskedView(mask, data)

    tensor = dti.Tensor(mv, bval, gtab.T, min_signal=1e-9)
    assert_equal(tensor.shape, (2, 4))
    assert_equal(tensor.fa().shape, (2, 4))
    assert_equal(tensor.evals.shape, (2, 4, 3))
    assert_equal(tensor.evecs.shape, (2, 4, 3, 3))
    assert_equal(type(tensor.model_params), MaskedView)
    assert_array_equal(tensor.mask, mask)

    tensor = tensor[0]
    assert_equal(tensor.shape, (4,))
    assert_equal(tensor.fa().shape, (4,))
    assert_equal(tensor.evals.shape, (4, 3))
    assert_equal(tensor.evecs.shape, (4, 3, 3))
    assert_equal(type(tensor.model_params), MaskedView)
    assert_array_equal(tensor.mask, mask[0])

    tensor = tensor[0]
    assert_equal(tensor.shape, tuple())
    assert_equal(tensor.fa().shape, tuple())
    assert_equal(tensor.evals.shape, (3,))
    assert_equal(tensor.evecs.shape, (3, 3))
    assert_equal(type(tensor.model_params), np.ndarray)
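
# As the shape assertions above suggest, a MaskedView pairs a boolean mask
# with the flat array of values from the voxels where the mask is True, so
# results keep the full volume shape without storing masked-out voxels.
# A minimal sketch of that idea (hypothetical `_embed_sketch` helper, not
# the MaskedView API):
def _embed_sketch(mask, flat_values, fill=0):
    # Scatter per-voxel values back into an array with the mask's shape
    full = np.empty(mask.shape + flat_values.shape[1:],
                    dtype=flat_values.dtype)
    full[...] = fill
    full[mask] = flat_values
    return full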
def test_fa_of_zero():
    dummy_gtab = np.zeros((10, 3))
    dummy_bval = np.zeros((10,))
    ten = dti.Tensor(np.zeros((0, 56)), dummy_bval, dummy_gtab)
    ten.model_params = np.zeros(12)
    assert_equal(ten.fa(), 0)
    assert_true(np.isnan(ten.fa(nonans=False)))
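
# With all eigenvalues zero, the FA formula divides zero by zero and the
# raw value is nan; fa() maps that nan to 0 unless nonans=False. A minimal
# sketch of that convention (not the library code), assuming `evals` has
# shape (..., 3):
def _fa_sketch(evals, nonans=True):
    ev1, ev2, ev3 = evals[..., 0], evals[..., 1], evals[..., 2]
    with np.errstate(invalid='ignore', divide='ignore'):
        fa = np.sqrt(0.5 * ((ev1 - ev2) ** 2 + (ev2 - ev3) ** 2 +
                            (ev1 - ev3) ** 2) /
                     (ev1 ** 2 + ev2 ** 2 + ev3 ** 2))
    return np.nan_to_num(fa) if nonans else fa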
def test_init():
    data = np.ones((2, 4, 56))
    mask = np.ones((2, 4), 'bool')
    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    tensor = dti.Tensor(data, bval, gtab.T, mask, thresh=0)
    mask[:] = False
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T, mask)
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T,
                  min_signal=-1)
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T, thresh=1)
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T,
                  fit_method='s')
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T,
                  fit_method=0)
def gq_tn_calc_save():
    for simfile in simdata:
        dataname = simfile
        print(dataname)

        sim_data = np.loadtxt(simdir + dataname)

        marta_table_fname = '/home/ian/Data/SimData/Dir_and_bvals_DSI_marta.txt'
        b_vals_dirs = np.loadtxt(marta_table_fname)
        bvals = b_vals_dirs[:, 0] * 1000
        gradients = b_vals_dirs[:, 1:]

        gq = dgqs.GeneralizedQSampling(sim_data, bvals, gradients)
        gqfile = simdir + 'gq/' + dataname + '.pkl'
        pkl.save_pickle(gqfile, gq)

        '''
        gq.IN          gq.__doc__      gq.glob_norm_param
        gq.QA          gq.__init__     gq.odf
        gq.__class__   gq.__module__   gq.q2odf_params
        '''

        tn = ddti.Tensor(sim_data, bvals, gradients)
        tnfile = simdir + 'tn/' + dataname + '.pkl'
        pkl.save_pickle(tnfile, tn)

        '''
        tn.ADC               tn.__init__          tn._getevals
        tn.B                 tn.__module__        tn._getevecs
        tn.D                 tn.__new__           tn._getndim
        tn.FA                tn.__reduce__        tn._getshape
        tn.IN                tn.__reduce_ex__     tn._setevals
        tn.MD                tn.__repr__          tn._setevecs
        tn.__class__         tn.__setattr__       tn.adc
        tn.__delattr__       tn.__sizeof__        tn.evals
        tn.__dict__          tn.__str__           tn.evecs
        tn.__doc__           tn.__subclasshook__  tn.fa
        tn.__format__        tn.__weakref__       tn.md
        tn.__getattribute__  tn._evals            tn.ndim
        tn.__getitem__       tn._evecs            tn.shape
        tn.__hash__          tn._getD
        '''

    '''
    file has one row for every voxel, every voxel is repeating 1000
    '''
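
# The pickled results can be loaded back for later inspection. A minimal
# sketch using dipy's pickle helpers, assuming the same `simdir` layout
# as above (the `load_gq_tn` name is introduced here for illustration):
def load_gq_tn(dataname):
    # Reload the GeneralizedQSampling and Tensor fits saved by
    # gq_tn_calc_save for one simulation file
    gq = pkl.load_pickle(simdir + 'gq/' + dataname + '.pkl')
    tn = pkl.load_pickle(simdir + 'tn/' + dataname + '.pkl')
    return gq, tn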
def test_tensor_scalar_attributes():
    """
    Tests that the tensor class scalar attributes (FA, ADC, etc...) are
    calculated properly.
    """
    ### DEFINING ANALYTICAL VALUES ###
    evals = np.array([2., 1., 0.])
    a = 1. / np.sqrt(2)
    # evecs[:, j] is the eigenvector paired with evals[j]
    evecs = np.array([[a, 0, -a], [a, 0, a], [0, 1., 0]])
    D = np.array([[1., 1., 0], [1., 1., 0], [0, 0, 1.]])
    FA = np.sqrt(1. / 2 * (1 + 4 + 1) / (1 + 4 + 0))  # 0.7745966692414834
    MD = 1.

    ### CALCULATE ESTIMATED VALUES ###
    dummy_data = np.ones((1, 10))  # single voxel
    dummy_gtab = np.zeros((10, 3))
    dummy_bval = np.zeros((10,))
    tensor = dti.Tensor(dummy_data, dummy_bval, dummy_gtab)
    tensor.model_params = np.r_['-1,2', evals, evecs.ravel()]

    ### TESTS ###
    assert_almost_equal(
        np.abs(np.dot(evecs[:, 2], tensor[0].evecs[:, 2].T)), 1.,
        msg="Calculation of third eigenvector is not right")
    assert_array_almost_equal(
        D, tensor[0].D,
        err_msg="Recovery of self diffusion tensor from eig not adequate")
    assert_almost_equal(
        FA, tensor.fa(),
        msg="Calculation of FA of self diffusion tensor is not adequate")
    assert_almost_equal(
        MD, tensor.md(),
        msg="Calculation of MD of self diffusion tensor is not adequate")
    assert_equal(True, tensor.mask.all())
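
# Worked arithmetic behind the analytical FA above: for evals (2, 1, 0)
# the squared eigenvalue differences are (2-1)^2 + (1-0)^2 + (2-0)^2 = 6
# and the squared eigenvalues sum to 2^2 + 1^2 + 0^2 = 5, so
# FA = sqrt(1/2 * 6/5) = sqrt(0.6) ~= 0.7745966692414834, the same value
# as the (1 + 4 + 1) / (1 + 4 + 0) expression in the test. Added here as
# a small standalone check:
def test_fa_worked_example():
    assert_almost_equal(np.sqrt(0.5 * 6. / 5.), 0.7745966692414834)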
def run_small_data():
    smalldir = '/home/eg309/Devel/dipy/dipy/data/'
    bvals = np.load(smalldir + 'small_64D.bvals.npy')
    gradients = np.load(smalldir + 'small_64D.gradients.npy')
    img = nibabel.load(smalldir + 'small_64D.nii')
    small_data = img.get_data()
    print('real_data', small_data.shape)

    gqsmall = dgqs.GeneralizedQSampling(small_data, bvals, gradients)
    tnsmall = ddti.Tensor(small_data, bvals, gradients)

    x, y, z, a, b = tnsmall.evecs.shape
    evecs = tnsmall.evecs
    xyz = x * y * z
    evecs = evecs.reshape(xyz, 3, 3)
    evals = tnsmall.evals
    evals = evals.reshape(xyz, 3)

    """
    eds = np.load(opj(os.path.dirname(__file__),
                      '..', 'matrices',
                      'evenly_distributed_sphere_362.npz'))
    """
    from dipy.data import get_sphere
    odf_vertices, odf_faces = get_sphere('symmetric362')

    # 0.01506 = 6 * D where D is the free water diffusion coefficient;
    # l_values = sqrt(6 D tau) with D the free water diffusion
    # coefficient and tau included in the b-value
    scaling = np.sqrt(bvals * 0.01506)
    tmp = np.tile(scaling, (3, 1))
    b_vector = gradients.T * tmp
    Lambda = 1.2  # smoothing parameter - diffusion sampling length

    # Implements equation no. 9 from Yeh et al.
    q2odf_params = np.sinc(np.dot(b_vector.T, odf_vertices.T) *
                           Lambda / np.pi)

    S = small_data.copy()
    x, y, z, g = S.shape
    S = S.reshape(x * y * z, g)
    # QA = np.zeros((x * y * z, 5))
    IN = np.zeros((x * y * z, 5))
    FA = tnsmall.fa().reshape(x * y * z)
    fwd = 0

    # Calculate Quantitative Anisotropy and find the peaks and the
    # indices for every voxel
    summary = {}
    summary['vertices'] = odf_vertices  # v = odf_vertices.shape[0]
    summary['faces'] = odf_faces        # f = odf_faces.shape[0]

    for (i, s) in enumerate(S):
        istr = str(i)
        summary[istr] = {}
        t0, t1, t2, npa = gqsmall.npa(s, width=5)
        summary[istr]['triple'] = (t0, t1, t2)
        summary[istr]['npa'] = npa
        odf = Q2odf(s, q2odf_params)
        peaks, inds = rp.peak_finding(odf, odf_faces)
        fwd = max(np.max(odf), fwd)
        n_peaks = min(len(peaks), 5)
        peak_heights = [odf[i] for i in inds[:n_peaks]]
        IN[i][:n_peaks] = inds[:n_peaks]
        summary[istr]['odf'] = odf
        summary[istr]['peaks'] = peaks
        summary[istr]['inds'] = inds
        summary[istr]['evecs'] = evecs[i, :, :]
        summary[istr]['evals'] = evals[i, :]
        summary[istr]['n_peaks'] = n_peaks
        summary[istr]['peak_heights'] = peak_heights
        summary[istr]['fa'] = FA[i]

    """
    QA /= fwd
    QA = QA.reshape(x, y, z, 5)
    IN = IN.reshape(x, y, z, 5)
    """

    peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 1]
    peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 2]
    peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 3]
    print('#voxels with 1, 2, 3 peaks',
          len(peaks_1), len(peaks_2), len(peaks_3))

    return FA, summary
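
# Q2odf is called above but not defined in this snippet. Under the
# sinc-kernel formulation it reduces to projecting the signal onto the
# precomputed q2odf_params matrix; a minimal sketch under that assumption
# (`s` a 1-d signal of length g, `q2odf_params` of shape (g, n_vertices)):
def Q2odf(s, q2odf_params):
    # ODF value at each sphere vertex = signal dotted with the
    # corresponding sinc-kernel column
    return np.dot(s, q2odf_params)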
bvals = np.loadtxt(fbvals)

"""
**Read the b-vectors**, the unit gradient directions.
"""

gradients = np.loadtxt(fbvecs).T

"""
Calculating models and parameters of directionality
---------------------------------------------------

We are now set up with all the data and parameters to start calculating
directional models for voxels and their associated parameters, e.g.
anisotropy.

**Calculate the Single Tensor Model (STM).**
"""

ten = dti.Tensor(data, bvals, gradients, thresh=50)

"""
**Calculate Fractional Anisotropy (FA) from STM.**
"""

FA = ten.fa()
print('FA.shape (%d,%d,%d)' % FA.shape)

"""
As expected, the FA is a 3-d array with one value per voxel::

    FA.shape (6,10,10)

Generate a tractography
-----------------------

Here we use the Euler Delta Crossings (EuDX) algorithm. The main input
parameters of ``EuDX`` are