def test_eudx():
    # read bvals, gradients and data
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    img = ni.load(fimg)
    data = img.get_data()
    print(data.shape)
    gqs = GeneralizedQSampling(data, bvals, gradients)
    ten = Tensor(data, bvals, gradients, thresh=50)
    seed_list = np.dot(np.diag(np.arange(10)), np.ones((10, 3)))
    iT = iter(EuDX(gqs.qa(), gqs.ind(), seed_list=seed_list))
    T = []
    for t in iT:
        T.append(t)
    iT2 = iter(EuDX(ten.fa(), ten.ind(), seed_list=seed_list))
    T2 = []
    for t in iT2:
        T2.append(t)
    print('length T ', sum([length(t) for t in T]))
    print('length T2', sum([length(t) for t in T2]))
    print(gqs.QA[1, 4, 8, 0])
    print(gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]),
                                        np.array(gqs.QA.strides), 4, 8)])
    assert_equal(gqs.QA[1, 4, 8, 0],
                 gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]),
                                               np.array(gqs.QA.strides), 4, 8)])
    # assert_equal, sum([length(t) for t in T ]) , 77.999996662139893
    # assert_equal, sum([length(t) for t in T2]) , 63.499998092651367
    assert_equal(sum([length(t) for t in T]), 75.214988201856613)
    assert_equal(sum([length(t) for t in T2]), 60.202986091375351)

def test_eudx():
    # read bvals, gradients and data
    fimg, fbvals, fbvecs = get_data("small_64D")
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    img = ni.load(fimg)
    data = img.get_data()
    print(data.shape)
    gqs = GeneralizedQSampling(data, bvals, gradients)
    ten = Tensor(data, bvals, gradients, thresh=50)
    seed_list = np.dot(np.diag(np.arange(10)), np.ones((10, 3)))
    iT = iter(EuDX(gqs.qa(), gqs.ind(), seeds=seed_list))
    T = []
    for t in iT:
        T.append(t)
    iT2 = iter(EuDX(ten.fa(), ten.ind(), seeds=seed_list))
    T2 = []
    for t in iT2:
        T2.append(t)
    print("length T ", sum([length(t) for t in T]))
    print("length T2", sum([length(t) for t in T2]))
    print(gqs.QA[1, 4, 8, 0])
    print(gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]),
                                        np.array(gqs.QA.strides), 4, 8)])
    assert_almost_equal(
        gqs.QA[1, 4, 8, 0],
        gqs.QA.ravel()[ndarray_offset(np.array([1, 4, 8, 0]),
                                      np.array(gqs.QA.strides), 4, 8)])
    assert_almost_equal(sum([length(t) for t in T]), 70.999996185302734, places=3)
    assert_almost_equal(sum([length(t) for t in T2]), 56.999997615814209, places=3)

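# A minimal NumPy-only sketch (added for illustration, not part of the original
# test) of the relation the assertion above checks: for a C-contiguous float
# array, the stride arithmetic done by ndarray_offset reduces to the flat
# C-order index, which NumPy also exposes as np.ravel_multi_index.
def _flat_offset_sketch():
    import numpy as np
    A = np.random.rand(2, 5, 10, 3)                # stands in for gqs.QA
    idx = np.array([1, 4, 8, 0])
    flat = int(np.sum(idx * np.array(A.strides)) // A.itemsize)
    assert flat == np.ravel_multi_index(tuple(idx), A.shape)
    assert A.ravel()[flat] == A[1, 4, 8, 0]
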
def best_smoother():
    # data, bvals, bvecs, odf_sphere and direct_odf are free variables here,
    # taken from the enclosing (module-level) scope.
    for smoo in np.linspace(3, 5, 10):
        gqs = GeneralizedQSampling(data, bvals, bvecs, smoo,
                                   odf_sphere=odf_sphere,
                                   mask=None,
                                   squared=True,
                                   auto=False,
                                   save_odfs=True)
        gqs.peak_thr = 0.5
        gqs.fit()
        gqs.ODF[gqs.ODF < 0] = 0.
        odf = gqs.ODF[0, 0, 0]
        print(smoo, np.sum((direct_odf / direct_odf.max() - odf / odf.max()) ** 2))

def uniform_seed_grid():
    # read bvals, gradients and data
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    img = ni.load(fimg)
    data = img.get_data()

    x, y, z, g = data.shape
    M = np.mgrid[.5:x - .5:np.complex(0, x),
                 .5:y - .5:np.complex(0, y),
                 .5:z - .5:np.complex(0, z)]
    M = M.reshape(3, x * y * z).T
    print(M.shape)
    print(M.dtype)
    for m in M:
        print(m)

    gqs = GeneralizedQSampling(data, bvals, gradients)
    iT = iter(EuDX(gqs.QA, gqs.IN, seeds=M))
    T = []
    for t in iT:
        T.append(t)
    print('lenT', len(T))
    assert_equal(len(T), 1221)

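# Illustration only (not in the original module): a complex step in an mgrid
# slice asks for that many evenly spaced samples, endpoints included, so the
# grid above lands on voxel centres .5, 1.5, ..., dim - .5. complex(0, n) is
# equivalent to the np.complex(0, n) spelling used in uniform_seed_grid.
def _mgrid_seed_sketch():
    import numpy as np
    x, y, z = 4, 3, 2
    M = np.mgrid[.5:x - .5:complex(0, x),
                 .5:y - .5:complex(0, y),
                 .5:z - .5:complex(0, z)]
    seeds = M.reshape(3, x * y * z).T              # one (i, j, k) centre per row
    assert seeds.shape == (x * y * z, 3)
    assert seeds[0].tolist() == [0.5, 0.5, 0.5]
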
def generate_random_tracks(rand_no):
    # fbet, fbvals, fbvecs, seeds and outs are module-level names defined
    # elsewhere in the script.
    img = nib.load(fbet)
    data = img.get_data()
    affine = img.get_affine()
    bvals = np.loadtxt(fbvals)
    bvecs = np.loadtxt(fbvecs).T
    t = time()
    gqs = GeneralizedQSampling(data, bvals, bvecs)
    print('gqs time', time() - t, 's')
    for (i, sds) in enumerate(seeds):
        print(i, sds)
        t = time()
        eu = EuDX(gqs.qa(), gqs.ind(), seeds=sds, a_low=0.0239)
        T = [downsample(e, 12) for e in eu]
        np.save('/tmp/random_T.npy', np.array(T, dtype=np.object))
        ###################
        print(time() - t)
        del T
    print(outs)

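# A small assumed companion (not in the original script) for reading back the
# object array written above; recent NumPy releases refuse to unpickle object
# arrays unless allow_pickle=True is passed explicitly.
def _load_random_tracks_sketch(path='/tmp/random_T.npy'):
    import numpy as np
    T = np.load(path, allow_pickle=True)
    print(len(T), 'tracks; first track has shape', T[0].shape)
    return T
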
def generate_skeletons():
    # fbet, fbvals, fbvecs, seeds, dout and outs are module-level names defined
    # elsewhere in the script.
    img = nib.load(fbet)
    data = img.get_data()
    affine = img.get_affine()
    bvals = np.loadtxt(fbvals)
    bvecs = np.loadtxt(fbvecs).T
    t = time()
    gqs = GeneralizedQSampling(data, bvals, bvecs)
    print('gqs time', time() - t, 's')
    for (i, sds) in enumerate(seeds):
        print(i, sds)
        t = time()
        eu = EuDX(gqs.qa(), gqs.ind(), seeds=sds, a_low=0.0239)
        T = [downsample(e, 12) for e in eu]
        # np.save(dout + outs[i] + '.npy', np.array(T, dtype=np.object))
        C = local_skeleton_clustering(T, 4.)
        save_pickle(dout + outs[i] + '.skl', C)
        print(time() - t)
        del T
    print(outs)

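# Assumed companion (not in the original): clusters saved with save_pickle can
# be read back with dipy's matching load_pickle helper; in the dipy versions of
# this era local_skeleton_clustering returns a dict of clusters, each with an
# 'indices' entry listing the member tracks.
def _load_skeleton_sketch(fskl):
    from dipy.io.pickles import load_pickle
    C = load_pickle(fskl)
    print(len(C), 'clusters; sizes:', [len(C[c]['indices']) for c in C])
    return C
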
def test_dandelion():
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    data = nib.load(fimg).get_data()
    print(bvals.shape, gradients.shape, data.shape)
    sd = SphericalDandelion(data, bvals, gradients)
    sdf = sd.spherical_diffusivity(data[5, 5, 5])
    print(sdf.shape)
    gq = GeneralizedQSampling(data, bvals, gradients)
    sodf = gq.odf(data[5, 5, 5])
    eds = np.load(get_sphere('symmetric362'))
    vertices = eds['vertices']
    faces = eds['faces']
    print(faces.shape)
    peaks, inds = peak_finding(np.squeeze(sdf), faces)
    print(peaks, inds)
    peaks2, inds2 = peak_finding(np.squeeze(sodf), faces)
    print(peaks2, inds2)
    '''
    for fib in fibs:
        dix = get_sim_voxels(fib)
        data = dix['data']
        bvals = dix['bvals']
        gradients = dix['gradients']
        no = 10
        print(bvals.shape, gradients.shape, data.shape)
        print(dix['fibres'])
        np.set_printoptions(2)
        for no in range(len(data)):
            sd = SphericalDandelion(data, bvals, gradients)
            sdf = sd.spherical_diffusivity(data[no])
            gq = GeneralizedQSampling(data, bvals, gradients)
            sodf = gq.odf(data[no])
            # print(faces.shape)
            peaks, inds = peak_finding(np.squeeze(sdf), faces)
            # print(peaks, inds)
            peaks2, inds2 = peak_finding(np.squeeze(sodf), faces)
            # print(peaks2, inds2)
            print('sdi', inds, 'sodf', inds2,
                  vertices[inds[0]] - vertices[inds2[0]])
            # print(data[no])
    '''

def simulations_marta():
    # gq_tn_calc_save()
    # a_few_phantoms()
    # sd = ['fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
    # sd = ['fibres_2_SNR_60_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
    # sd = ['fibres_2_SNR_100_angle_60_l1_1.4_l2_0.35_l3_0.35_iso_1_diso_0.7']
    sd = ['fibres_2_SNR_100_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
    # sd = ['fibres_1_SNR_100_angle_00_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
    # sd = ['fibres_2_SNR_20_angle_90_l1_1.4_l2_0.35_l3_0.35_iso_0_diso_00']
    # for simfile in simdata:
    np.set_printoptions(2)

    dotpow = 6
    width = 6
    sincpow = 2
    sampling_length = 1.2
    print(dotpow, width, sincpow)
    print(sampling_length)

    verts, faces = get_sphere('symmetric362')

    for simfile in sd:
        data = np.loadtxt(simdir + simfile)
        sf = simfile.split('_')
        b_vals_dirs = np.loadtxt(simdir + 'Dir_and_bvals_DSI_marta.txt')
        bvals = b_vals_dirs[:, 0] * 1000
        gradients = b_vals_dirs[:, 1:]
        data2 = data[::1000, :]
        table = {'fibres': sf[1], 'snr': sf[3], 'angle': sf[5],
                 'l1': sf[7], 'l2': sf[9], 'l3': sf[11],
                 'iso': sf[13], 'diso': sf[15],
                 'data': data2, 'bvals': bvals, 'gradients': gradients}
        print(table['data'].shape)
        pdi = ProjectiveDiffusivity(table['data'], table['bvals'],
                                    table['gradients'], dotpow, width, sincpow)
        gqs = GeneralizedQSampling(table['data'], table['bvals'],
                                   table['gradients'], sampling_length)
        ten = Tensor(table['data'], table['bvals'], table['gradients'])
        r = fvtk.ren()
        for i in range(10):  # range(len(sdi.xa())):
            print('No:', i)
            print('simulation fibres ', table['fibres'],
                  ' snr ', table['snr'], ' angle ', table['angle'])
            pdiind = pdi.ind()[i]
            gqsind = gqs.ind()[i]
            print('indices', pdiind, gqsind,
                  np.rad2deg(np.arccos(np.dot(verts[pdiind[0]], verts[pdiind[1]]))),
                  np.rad2deg(np.arccos(np.dot(verts[gqsind[0]], verts[gqsind[1]]))))
            # ten.ind()[i],
            print('peaks', pdi.xa()[i] * 10 ** 3, gqs.qa()[i])
            pd = pdi.spherical_diffusivity(table['data'][i])  # * 10 ** 3
            # print('pd stat', pd.min(), pd.max(), pd.mean(), pd.std())
            # colors = fvtk.colors(sdf, 'jet')
            sdfcol = np.interp(pd, [pd.mean() - 4 * pd.std(),
                                    pd.mean() + 4 * pd.std()], [0, 1])
            colors = fvtk.colors(sdfcol, 'jet', False)
            fvtk.add(r, fvtk.point(5 * pdi.odf_vertices + np.array([12 * i, 0, 0]),
                                   colors, point_radius=.6, theta=10, phi=10))
            odf = gqs.odf(table['data'][i])
            colors = fvtk.colors(odf, 'jet')
            fvtk.add(r, fvtk.point(5 * gqs.odf_vertices + np.array([12 * i, -12, 0]),
                                   colors, point_radius=.6, theta=10, phi=10))
        fvtk.show(r)

def show_simulated_2fiber_crossings():
    btable = np.loadtxt(get_data('dsi515btable'))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    data = create_data_2fibers(bvals, bvecs, d=0.0015, S0=100,
                               angles=np.arange(0, 47, 5), snr=None)
    # data = create_data_2fibers(bvals, bvecs, d=0.0015, S0=100,
    #                            angles=np.arange(0, 92, 5), snr=None)
    print(data.shape)
    # stop

    # """
    dn = DiffusionNabla(data, bvals, bvecs, odf_sphere='symmetric642',
                        auto=False, save_odfs=True, fast=True)
    dn.peak_thr = .4
    dn.iso_thr = .05
    dn.radius = np.arange(0, 5, 0.1)
    dn.radiusn = len(dn.radius)
    dn.create_qspace(bvals, bvecs, 16, 8)
    dn.radon_params(64)
    dn.precompute_interp_coords()
    dn.precompute_fast_coords()
    dn.precompute_equator_indices(5.)
    dn.precompute_angular(None)  # 0.01
    dn.fit()

    dn2 = DiffusionNabla(data, bvals, bvecs, odf_sphere='symmetric642',
                         auto=False, save_odfs=True, fast=False)
    dn2.peak_thr = .4
    dn2.iso_thr = .05
    dn2.radius = np.arange(0, 5, 0.1)
    dn2.radiusn = len(dn.radius)
    dn2.create_qspace(bvals, bvecs, 16, 8)
    dn2.radon_params(64)
    dn2.precompute_interp_coords()
    dn2.precompute_fast_coords()
    dn2.precompute_equator_indices(5.)
    dn2.precompute_angular(None)  # 0.01
    dn2.fit()
    # """

    eis = EquatorialInversion(data, bvals, bvecs, odf_sphere='symmetric642',
                              auto=False, save_odfs=True, fast=True)
    # eis.radius = np.arange(0, 6, 0.2)
    eis.radius = np.arange(0, 5, 0.1)
    eis.gaussian_weight = None
    eis.set_operator('signal')  # 'laplap'
    eis.update()
    eis.fit()

    eil = EquatorialInversion(data, bvals, bvecs, odf_sphere='symmetric642',
                              auto=False, save_odfs=True, fast=True)
    # eil.radius = np.arange(0, 6, 0.2)
    eil.radius = np.arange(0, 5, 0.1)
    eil.gaussian_weight = None
    eil.set_operator('laplacian')  # 'laplap'
    eil.update()
    eil.fit()

    eil2 = EquatorialInversion(data, bvals, bvecs, odf_sphere='symmetric642',
                               auto=False, save_odfs=True, fast=True)
    # eil2.radius = np.arange(0, 6, 0.2)
    eil2.radius = np.arange(0, 5, 0.1)
    eil2.gaussian_weight = None
    eil2.set_operator('laplap')
    eil2.update()
    eil2.fit()

    ds = DiffusionSpectrum(data, bvals, bvecs, odf_sphere='symmetric642',
                           auto=True, save_odfs=True)
    gq = GeneralizedQSampling(data, bvals, bvecs, 1.2, odf_sphere='symmetric642',
                              squared=False, auto=False, save_odfs=True)
    gq.fit()
    gq2 = GeneralizedQSampling(data, bvals, bvecs, 3., odf_sphere='symmetric642',
                               squared=True, auto=False, save_odfs=True)
    gq2.fit()

    # blobs = np.zeros((19, 1, 8, dn.odfn))
    blobs = np.zeros((10, 1, 6, dn.odfn))
    """
    blobs[:, 0, 0] = dn.odfs()
    blobs[:, 0, 1] = dn2.odfs()
    blobs[:, 0, 2] = eis.odfs()
    blobs[:, 0, 3] = eil.odfs()
    blobs[:, 0, 4] = eil2.odfs()
    blobs[:, 0, 5] = ds.odfs()
    blobs[:, 0, 6] = gq.odfs()
    blobs[:, 0, 7] = gq2.odfs()
    """
    blobs[:, 0, 0] = dn2.odfs()
    blobs[:, 0, 1] = eil.odfs()
    eo = eil2.odfs()
    # eo[eo < 0.05 * eo.max()] = 0
    blobs[:, 0, 2] = eo
    blobs[:, 0, 3] = ds.odfs()
    blobs[:, 0, 4] = gq.odfs()
    blobs[:, 0, 5] = gq2.odfs()
    show_blobs(blobs, dn.odf_vertices, dn.odf_faces, scale=0.5)

for typ in types:
    for (i, snr) in enumerate(SNRs):
        data, bvals, bvecs, odf_sphere = load_data(test, typ, snr)  # '3D_SF'
        # data = data[4, 4, 0]
        # mf, mevals, mevecs = example('1b')
        # signal = MultiTensor(bvals, bvecs, S0=1., mf=mf, mevals=mevals, mevecs=mevecs)
        # data = signal
        # data = data[None, None, None, :]
        data = data[:, 4:40, :, :]
        # ten
        ten = Tensor(100 * data, bvals, bvecs)
        FA = ten.fa()
        # GQI
        gqs = GeneralizedQSampling(data, bvals, bvecs, smooth[i],
                                   odf_sphere=odf_sphere,
                                   mask=None,
                                   squared=True,
                                   auto=False,
                                   save_odfs=True)
        gqs.peak_thr = 0.5
        gqs.fit()
        gqs.ODF[gqs.ODF < 0] = 0.
        # manipulate
        qg = gqs
        # pack_results
        M, R = analyze_peaks(data, ten, qg)
        if test == 'train':
            K = np.load('trainSF.npy')
            print('SNR', snr, 'smooth', smooth[i],
                  'Missed', np.sum(np.abs(M - K) > 0),
                  'Success', 100 * (np.float(np.prod(M.shape)) -
                                    np.sum(np.abs(M - K) > 0)) / np.float(np.prod(M.shape)), '%')
        if save == True:

fvtk.add(r, fvtk.line(all, colors))
fvtk.show(r)

ten = Tensor(data, bvals, bvecs, mask)
FA = ten.fa()
FA[np.isnan(FA)] = 0

eu = EuDX(FA, ten.ind(), seeds=10 ** 4, a_low=fa_thr, length_thr=length_thr)
T = [e for e in eu]
# show(T, FA, ten.ind(), eu.odf_vertices, scale=1)
# r = fvtk.ren()
# fvtk.add(r, fvtk.point(eu.odf_vertices, cm.orient2rgb(eu.odf_vertices),
#                        point_radius=.5, theta=30, phi=30))
# fvtk.show(r)

gqs = GeneralizedQSampling(data, bvals, bvecs, Lambda=1., mask=mask, squared=False)
eu2 = EuDX(gqs.qa(), gqs.ind(), seeds=10 ** 4, a_low=0, length_thr=length_thr)
T2 = [e for e in eu2]
show(T2, gqs.qa(), gqs.ind(), eu2.odf_vertices, scale=1)

ds = DiffusionSpectrum(data, bvals, bvecs, mask=mask)
eu3 = EuDX(ds.gfa(), ds.ind()[..., 0], seeds=10 ** 4, a_low=0, length_thr=length_thr)
T3 = [e for e in eu3]
# show(T3, ds.gfa(), ds.ind()[..., 0], eu3.odf_vertices, scale=1)

eu4 = EuDX(ds.nfa(), ds.ind(), seeds=10 ** 4, a_low=0, length_thr=length_thr)
T4 = [e for e in eu4]
# show(T4, ds.nfa(), ds.ind(), eu4.odf_vertices, scale=1)

eu5 = EuDX(ds.qa(), ds.ind(), seeds=10 ** 4, a_low=0, length_thr=length_thr)
T5 = [e for e in eu5]

def generate_gqi_tracks_and_warp_in_MNI_space(fname, dname):
    fbvals = fname + '.bval'
    fbvecs = fname + '.bvec'
    fdata = fname + '.nii.gz'
    if os.path.isfile(fdata):
        pass
    else:
        fdata = fname + '.nii'
        if os.path.isfile(fdata) == False:
            print('Data do not exist')
            return

    dti_dname = os.path.join(dname, 'DTI')
    if os.path.isdir(dti_dname):
        pass
    else:
        os.mkdir(dti_dname)
    print(dti_dname)

    gqi_dname = os.path.join(dname, 'GQI')
    if os.path.isdir(gqi_dname):
        pass
    else:
        os.mkdir(gqi_dname)
    print(gqi_dname)

    fdatabet = fname + '_bet.nii.gz'
    if os.path.isfile(fdatabet):
        pass
    else:
        print('fdatabet does not exist')

    img = nib.load(fdatabet)
    data = img.get_data()
    affine = img.get_affine()
    bvals = np.loadtxt(fbvals)
    bvecs = np.loadtxt(fbvecs).T

    gqs = GeneralizedQSampling(data, bvals, bvecs)
    eu = EuDX(gqs.qa(), gqs.ind(), seeds=10 ** 6, a_low=0.0239)

    fdpy = pjoin(gqi_dname, 'lsc_QA.dpy')
    dpw = Dpy(fdpy, 'w', compression=1)
    for track in eu:
        dpw.write_track(track.astype(np.float32))
    dpw.close()

    local = dti_dname
    fmat = pjoin(local, 'flirt.mat')
    fnon = pjoin(local, 'fnirt.nii.gz')
    finv = pjoin(local, 'invw.nii.gz')
    fdis = pjoin(local, 'dis.nii.gz')
    fdisa = pjoin(local, 'disa.nii.gz')
    ffa = pjoin(local, 'FA_bet.nii.gz')
    fdpyw = pjoin(gqi_dname, 'lsc_QA_ref.dpy')
    # print(fdatabet)
    # print(fmat)
    # print(fnon)
    print(fdpy)
    print(fdpyw)
    read_warp_save_tracks(fdpy, ffa, fmat, finv, fdis, fdisa, fref, fdpyw)

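# Assumed read-side counterpart (not in the original function): the .dpy file
# written above can be streamed back with the same Dpy class from dipy.io.dpy,
# opened in 'r' mode; read_tracks() here follows the dipy API of that era.
def _read_dpy_sketch(fdpy):
    from dipy.io.dpy import Dpy
    dpr = Dpy(fdpy, 'r')
    tracks = dpr.read_tracks()
    dpr.close()
    print(len(tracks), 'tracks loaded from', fdpy)
    return tracks
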
# load data
fraw = "data/subj_" + subject + "/101_32/rawbet.nii.gz"
fbval = "data/subj_" + subject + "/101_32/raw.bval"
fbvec = "data/subj_" + subject + "/101_32/raw.bvec"

img = nib.load(fraw)
data = img.get_data()
affine = img.get_affine()
bvals = np.loadtxt(fbval)
gradients = np.loadtxt(fbvec).T

# calculate FA
tensors = Tensor(data, bvals, gradients, thresh=50)
FA = tensors.fa()
famask = FA >= 0.2

# GQI
gqs = GeneralizedQSampling(data, bvals, gradients, 1.2,
                           odf_sphere="symmetric642",
                           mask=famask,
                           squared=False,
                           save_odfs=False)

"""
# EIT
ei = EquatorialInversion(data, bvals, gradients, odf_sphere='symmetric642',
                         mask=famask,
                         half_sphere_grads=True,
                         auto=False,
                         save_odfs=False,
                         fast=True)
ei.radius = np.arange(0, 5, 0.4)
ei.gaussian_weight = 0.05
ei.set_operator('laplacian')
ei.update()
ei.fit()
"""