def test_peak_finding():
    vertices, faces = get_sphere('symmetric724')
    odf = np.abs(vertices.sum(-1))
    odf[1] = 10.
    odf[505] = 505.
    odf[143] = 143.
    peaks, inds = peak_finding(odf.astype('f8'), faces.astype('uint16'))
    print(peaks, inds)
    edges = unique_edges(faces)
    peaks, inds = local_maxima(odf, edges)
    print(peaks, inds)
    vertices_half, edges_half, faces_half = reduce_antipodal(vertices, faces)
    n = len(vertices_half)
    peaks, inds = local_maxima(odf[:n], edges_half)
    print(peaks, inds)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    e0 = np.array([1, 0, 0.])
    e1 = np.array([0., 1, 0])
    mevecs = [all_tensor_evecs(e0), all_tensor_evecs(e1)]
    odf = multi_tensor_odf(vertices, [0.5, 0.5], mevals, mevecs)
    peaks, inds = peak_finding(odf, faces)
    print(peaks, inds)
    peaks2, inds2 = local_maxima(odf[:n], edges_half)
    print(peaks2, inds2)
    assert_equal(len(peaks), 2)
    assert_equal(len(peaks2), 2)

def standard_dsi_algorithm(S, bvals, bvecs):
    #volume size
    sz = 16
    #shifting
    origin = 8
    #hanning width
    filter_width = 32.
    #number of signal sampling points
    n = 515
    #odf radius
    #radius = np.arange(2.1, 30, .1)
    radius = np.arange(2.1, 6, .2)
    #radius = np.arange(.1, 6, .1)
    bv = bvals
    bmin = np.sort(bv)[1]
    bv = np.sqrt(bv / bmin)
    qtable = np.vstack((bv, bv, bv)).T * bvecs
    qtable = np.floor(qtable + .5)
    #calculate radius for the hanning filter
    r = np.sqrt(qtable[:, 0] ** 2 + qtable[:, 1] ** 2 + qtable[:, 2] ** 2)
    #setting hanning filter width and hanning
    hanning = .5 * np.cos(2 * np.pi * r / filter_width)
    #center and index in q space volume
    q = qtable + origin
    q = q.astype('i8')
    #apply the hanning filter
    values = S * hanning
    #create the signal volume
    Sq = np.zeros((sz, sz, sz))
    for i in range(n):
        Sq[q[i][0], q[i][1], q[i][2]] += values[i]
    #apply fourier transform
    Pr = fftshift(np.abs(np.real(fftn(fftshift(Sq), (sz, sz, sz)))))
    #vertices, edges, faces = create_unit_sphere(5)
    #vertices, faces = sphere_vf_from('symmetric362')
    vertices, faces = sphere_vf_from('symmetric724')
    odf = np.zeros(len(vertices))
    for m in range(len(vertices)):
        xi = origin + radius * vertices[m, 0]
        yi = origin + radius * vertices[m, 1]
        zi = origin + radius * vertices[m, 2]
        PrI = map_coordinates(Pr, np.vstack((xi, yi, zi)), order=1)
        for i in range(len(radius)):
            odf[m] = odf[m] + PrI[i] * radius[i] ** 2
    peaks, inds = peak_finding(odf.astype('f8'), faces.astype('uint16'))
    return Pr, odf, peaks

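# Usage sketch (not in the original source): drive standard_dsi_algorithm
# with the 515-point DSI b-table that test() below loads via get_data.
# The flat placeholder signal is illustrative only; substitute a real or
# simulated voxel signal of length 515.
import numpy as np
from dipy.data import get_data

btable = np.loadtxt(get_data('dsi515btable'))
bvals, bvecs = btable[:, 0], btable[:, 1:]
S = 100 * np.ones(len(bvals))  # placeholder signal
Pr, odf, peaks = standard_dsi_algorithm(S, bvals, bvecs)
print(Pr.shape, odf.shape, peaks[:3])
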
def revised_peak_no(odf, odf_faces, peak_thr):
    peaks, inds = peak_finding(odf, odf_faces)
    ibigp = np.where(peaks > peak_thr * peaks[0])[0]
    l = len(ibigp)
    if l > 3:
        l = 3
    if l == 0:
        return np.sum(peaks[l] / np.float(peaks[0]) > 0)
    if l > 0:
        return np.sum(peaks[:l] / np.float(peaks[0]) > 0)

def test_performance():
    # test this implementation against Frank Yeh implementation
    vertices, faces = SPHERE_DATA
    n_vertices = vertices.shape[0]
    vert_inds = sym_hemisphere(vertices)
    adj = vertinds_to_neighbors(vert_inds, faces)
    np.random.seed(42)
    vert_vals = np.random.uniform(size=(n_vertices,))
    maxinds = argmax_from_adj(vert_vals, vert_inds, adj)
    maxes, pfmaxinds = dcr.peak_finding(vert_vals, faces)
    assert_array_equal(maxinds, pfmaxinds[::-1])

def simple_peaks(ODF, faces, thr):
    x, g = ODF.shape
    PK = np.zeros((x, 5))
    IN = np.zeros((x, 5))
    for (i, odf) in enumerate(ODF):
        peaks, inds = peak_finding(odf, faces)
        ibigp = np.where(peaks > thr * peaks[0])[0]
        l = len(ibigp)
        if l > 3:
            l = 3
        PK[i, :l] = peaks[:l]
        IN[i, :l] = inds[:l]
    return PK, IN

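# Usage sketch (not in the original source): run simple_peaks over a small
# stack of ODFs sampled on the symmetric362 sphere, loaded the same way as
# in test_dandelion below. The random ODFs are placeholders only.
import numpy as np
from dipy.data import get_sphere

eds = np.load(get_sphere('symmetric362'))
vertices, faces = eds['vertices'], eds['faces']
ODF = np.random.uniform(size=(10, len(vertices)))  # 10 placeholder ODFs
PK, IN = simple_peaks(ODF, faces, thr=0.5)
print(PK.shape, IN.shape)  # -> (10, 5) (10, 5)
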
def test_dandelion():
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    data = nib.load(fimg).get_data()
    print(bvals.shape, gradients.shape, data.shape)
    sd = SphericalDandelion(data, bvals, gradients)
    sdf = sd.spherical_diffusivity(data[5, 5, 5])
    print(sdf.shape)
    gq = GeneralizedQSampling(data, bvals, gradients)
    sodf = gq.odf(data[5, 5, 5])
    eds = np.load(get_sphere('symmetric362'))
    vertices = eds['vertices']
    faces = eds['faces']
    print(faces.shape)
    peaks, inds = peak_finding(np.squeeze(sdf), faces)
    print(peaks, inds)
    peaks2, inds2 = peak_finding(np.squeeze(sodf), faces)
    print(peaks2, inds2)

def extended_peak_filtering(odfs, odf_faces, thr=0.3):
    new_peaks = []
    for (i, odf) in enumerate(odfs):
        peaks, inds = peak_finding(odf, odf_faces)
        ismallp = np.where(peaks / peaks[0] < thr)
        if len(ismallp[0]) > 0:
            l = ismallp[0][0]
        else:
            l = len(peaks)
        # relies on a module-level `peaknos` holding the expected peak counts
        print('#', i, peaknos[i])
        if l == 0:
            print(peaks[0] / peaks[0])
        else:
            print(peaks[:l] / peaks[0])

def simple_peaks(ODF, faces, thr, low):
    x, y, z, g = ODF.shape
    S = ODF.reshape(x * y * z, g)
    f, g = S.shape
    PK = np.zeros((f, 5))
    IN = np.zeros((f, 5))
    for (i, odf) in enumerate(S):
        if odf.max() > low:
            peaks, inds = peak_finding(odf, faces)
            ibigp = np.where(peaks > thr * peaks[0])[0]
            l = len(ibigp)
            if l > 3:
                l = 3
            PK[i, :l] = peaks[:l] / np.float(peaks[0])
            IN[i, :l] = inds[:l]
    PK = PK.reshape(x, y, z, 5)
    IN = IN.reshape(x, y, z, 5)
    return PK, IN

def super_reduced_peaks(odfs, odf_vertices, odf_faces, angle):
    final = []
    for (i, odf) in enumerate(odfs):
        pks, ins = peak_finding(odf, odf_faces)
        peaks = pks[:3]
        inds = ins[:3]
        print('#', peaks)
        del_peaks = []
        for (j, ind) in enumerate(inds):
            pts = radial_points_on_sphere(ind, odf_vertices, angle)
            for p in pts:
                if peaks[j] < odf[p]:
                    del_peaks.append(j)
        peaks = np.delete(peaks, del_peaks)
        print('@', peaks)
        print(' ', len(peaks))
        final.append(len(peaks))
    print(final)

def extended_peak_filtering(odfs, odf_faces):
    new_peaks = []
    for (i, odf) in enumerate(odfs):
        peaks, inds = peak_finding(odf, odf_faces)
        dpeaks = np.abs(np.diff(peaks[:3]))
        # relies on a module-level `peaknos` holding the expected peak counts
        print('#', i, peaknos[i])
        print(peaks[:3])
        print(dpeaks)
        print(odf.min())
        print(peaks[:3] / peaks[0])
        print(peaks[1:3] / peaks[1])
        """
        ismallp = np.where(dpeaks < 2)
        if len(ismallp[0]) > 0:
            l = ismallp[0][0]
        else:
            l = len(peaks)
        """

def fit(self):
    #memory allocations for 4D volumes
    if len(self.datashape) == 4:
        x, y, z, g = self.datashape
        S = self.data.reshape(x * y * z, g)
        GFA = np.zeros((x * y * z))
        IN = np.zeros((x * y * z, 5))
        NFA = np.zeros((x * y * z, 5))
        QA = np.zeros((x * y * z, 5))
        PK = np.zeros((x * y * z, 5))
        if self.save_odfs:
            ODF = np.zeros((x * y * z, self.odfn))
        if self.mask is not None:
            if self.mask.shape[:3] == self.datashape[:3]:
                msk = self.mask.ravel().copy()
        if self.mask is None:
            self.mask = np.ones(self.datashape[:3])
            msk = self.mask.ravel().copy()
    #memory allocations for a series of voxels
    if len(self.datashape) == 2:
        x, g = self.datashape
        S = self.data
        GFA = np.zeros(x)
        IN = np.zeros((x, 5))
        NFA = np.zeros((x, 5))
        QA = np.zeros((x, 5))
        PK = np.zeros((x, 5))
        if self.save_odfs:
            ODF = np.zeros((x, self.odfn))
        if self.mask is not None:
            if self.mask.shape[0] == self.datashape[0]:
                msk = self.mask.ravel().copy()
        if self.mask is None:
            self.mask = np.ones(self.datashape[:1])
            msk = self.mask.ravel().copy()
    #find the global normalization parameter
    #useful for quantitative anisotropy
    glob_norm_param = 0.
    #loop over all voxels
    for (i, s) in enumerate(S):
        if msk[i] > 0:
            #calculate the diffusion propagator or spectrum
            Pr = self.pdf(s)
            #calculate the orientation distribution function
            odf = self.odf(Pr)
            if self.save_odfs:
                ODF[i] = odf
            #normalization for QA
            glob_norm_param = max(np.max(odf), glob_norm_param)
            #calculate the generalized fractional anisotropy
            GFA[i] = self.std_over_rms(odf)
            #find peaks
            peaks, inds = peak_finding(odf, self.odf_faces)
            #remove small peaks
            l = self.reduce_peaks(peaks, odf.min())
            if l == 0:
                IN[i][l] = inds[l]
                NFA[i][l] = GFA[i]
                QA[i][l] = peaks[l] - np.min(odf)
                PK[i][l] = peaks[l]
            if l > 0 and l < 5:
                IN[i][:l] = inds[:l]
                NFA[i][:l] = GFA[i]
                QA[i][:l] = peaks[:l] - np.min(odf)
                PK[i][:l] = peaks[:l]
    if len(self.datashape) == 4:
        self.GFA = GFA.reshape(x, y, z)
        self.NFA = NFA.reshape(x, y, z, 5)
        self.QA = QA.reshape(x, y, z, 5) / glob_norm_param
        self.IN = IN.reshape(x, y, z, 5)
        self.PK = PK.reshape(x, y, z, 5)
        if self.save_odfs:
            self.ODF = ODF.reshape(x, y, z, ODF.shape[-1])
        self.QA_norm = glob_norm_param
    if len(self.datashape) == 2:
        self.GFA = GFA
        self.NFA = NFA
        self.QA = QA
        self.IN = IN
        self.PK = PK
        if self.save_odfs:
            self.ODF = ODF
        self.QA_norm = None

def test_gqiodf():
    #read bvals, gradients and data
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    data = nib.load(fimg).get_data()
    t1 = time.clock()
    gqs = gq.GeneralizedQSampling(data, bvals, gradients)
    ten = dt.Tensor(data, bvals, gradients, thresh=50)
    fa = ten.fa()
    x, y, z, a, b = ten.evecs.shape
    evecs = ten.evecs
    xyz = x * y * z
    evecs = evecs.reshape(xyz, 3, 3)
    evals = ten.evals
    evals = evals.reshape(xyz, 3)
    t2 = time.clock()
    #print('GQS in %d' % (t2 - t1))
    eds = np.load(get_sphere('symmetric362'))
    odf_vertices = eds['vertices']
    odf_faces = eds['faces']
    #Yeh et al., IEEE TMI, 2010
    #calculate the odf using GQI
    # 0.01506 = 6*D where D is the free water diffusion coefficient;
    # l_values are sqrt(6 D tau), with tau included in the b-value
    scaling = np.sqrt(bvals * 0.01506)
    tmp = np.tile(scaling, (3, 1))
    b_vector = gradients.T * tmp
    Lambda = 1.2  # smoothing parameter - diffusion sampling length
    #implements equation no. 9 from Yeh et al.
    q2odf_params = np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda / np.pi)
    S = data.copy()
    x, y, z, g = S.shape
    S = S.reshape(x * y * z, g)
    QA = np.zeros((x * y * z, 5))
    IN = np.zeros((x * y * z, 5))
    fwd = 0
    #calculate quantitative anisotropy and find the peaks and the indices
    #for every voxel
    summary = {}
    summary['vertices'] = odf_vertices
    v = odf_vertices.shape[0]
    summary['faces'] = odf_faces
    f = odf_faces.shape[0]
    '''
    If e = number_of_edges, the Euler formula says f - e + v = 2 for a mesh
    on a sphere. Here, assuming we have a healthy triangulation, every face
    is a triangle, all 3 of whose edges should belong to exactly two faces,
    so 2*e = 3*f. To avoid division we test whether 2*f - 3*f + 2*v == 4,
    or equivalently 2*v - f == 4.
    '''
    assert_equal(2 * v - f, 4, 'Euler test fails')
    for (i, s) in enumerate(S):
        istr = str(i)
        summary[istr] = {}
        odf = Q2odf(s, q2odf_params)
        peaks, inds = rp.peak_finding(odf, odf_faces)
        fwd = max(np.max(odf), fwd)
        peaks = peaks - np.min(odf)
        l = min(len(peaks), 5)
        QA[i][:l] = peaks[:l]
        IN[i][:l] = inds[:l]
        summary[istr]['odf'] = odf
        summary[istr]['peaks'] = peaks
        summary[istr]['inds'] = inds
        summary[istr]['evecs'] = evecs[i, :, :]
        summary[istr]['evals'] = evals[i, :]
    QA /= fwd
    QA = QA.reshape(x, y, z, 5)
    IN = IN.reshape(x, y, z, 5)
    peaks_1 = [i for i in range(1000) if len(summary[str(i)]['inds']) == 1]
    peaks_2 = [i for i in range(1000) if len(summary[str(i)]['inds']) == 2]
    peaks_3 = [i for i in range(1000) if len(summary[str(i)]['inds']) == 3]
    # correct numbers of voxels with respectively 1, 2, 3 ODF/QA peaks
    assert_array_equal((len(peaks_1), len(peaks_2), len(peaks_3)),
                       (790, 196, 14),
                       'error in numbers of QA/ODF peaks')
    # correct indices of odf directions for voxels 0, 10, 44
    # with respectively 1, 2, 3 ODF/QA peaks
    assert_array_equal(summary['0']['inds'], [116],
                       'wrong peak indices for voxel 0')
    assert_array_equal(summary['10']['inds'], [105, 78],
                       'wrong peak indices for voxel 10')
    assert_array_equal(summary['44']['inds'], [95, 84, 108],
                       'wrong peak indices for voxel 44')
    assert_equal(np.argmax(summary['0']['odf']), 116)
    assert_equal(np.argmax(summary['10']['odf']), 105)

def test():
    btable = np.loadtxt(get_data("dsi515btable"))
    # volume size
    sz = 16
    # shifting
    origin = 8
    # hanning width
    filter_width = 32.0
    # number of signal sampling points
    n = 515
    # odf radius
    radius = np.arange(2.1, 6, 0.2)
    # create q-table
    bv = btable[:, 0]
    bmin = np.sort(bv)[1]
    bv = np.sqrt(bv / bmin)
    qtable = np.vstack((bv, bv, bv)).T * btable[:, 1:]
    qtable = np.floor(qtable + 0.5)
    # copy bvals and bvecs
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    S, stics = SticksAndBall(bvals, bvecs, d=0.0015, S0=100,
                             angles=[(0, 0), (60, 0), (90, 90)],
                             fractions=[0, 0, 0], snr=None)
    S2 = S.copy()
    S2 = S2.reshape(1, len(S))
    dn = DiffusionNabla(S2, bvals, bvecs, auto=False)
    pR = dn.equators
    odf = dn.odf(S)
    peaks, inds = peak_finding(odf.astype("f8"), dn.odf_faces.astype("uint16"))
    print(peaks)
    print(peaks / peaks.min())
    dn.fit()
    print(dn.PK)
    ren = fvtk.ren()
    colors = fvtk.colors(odf, "jet")
    fvtk.add(ren, fvtk.point(dn.odf_vertices, colors,
                             point_radius=0.05, theta=8, phi=8))
    fvtk.show(ren)
    stop  # intentionally undefined name: halts the exploratory script here
    """
    #show projected signal
    Bvecs = np.concatenate([bvecs[1:], -bvecs[1:]])
    X0 = np.dot(np.diag(np.concatenate([S[1:], S[1:]])), Bvecs)
    ren = fvtk.ren()
    fvtk.add(ren, fvtk.point(X0, fvtk.yellow, 1, 2, 16, 16))
    fvtk.show(ren)
    """
    # calculate radius for the hanning filter
    r = np.sqrt(qtable[:, 0] ** 2 + qtable[:, 1] ** 2 + qtable[:, 2] ** 2)
    # setting hanning filter width and hanning
    hanning = 0.5 * np.cos(2 * np.pi * r / filter_width)
    # center and index in q space volume
    q = qtable + origin
    q = q.astype("i8")
    # apply the hanning filter
    values = S * hanning
    # create the signal volume
    Sq = np.zeros((sz, sz, sz))
    for i in range(n):
        Sq[q[i][0], q[i][1], q[i][2]] += values[i]
    # apply fourier transform
    Pr = fftshift(np.abs(np.real(fftn(fftshift(Sq), (sz, sz, sz)))))
    ren = fvtk.ren()
    vol = fvtk.volume(Pr)
    fvtk.add(ren, vol)
    fvtk.show(ren)
    #vertices, edges, faces = create_unit_sphere(5)
    vertices, faces = sphere_vf_from("symmetric362")
    odf = np.zeros(len(vertices))
    for m in range(len(vertices)):
        xi = origin + radius * vertices[m, 0]
        yi = origin + radius * vertices[m, 1]
        zi = origin + radius * vertices[m, 2]
        PrI = map_coordinates(Pr, np.vstack((xi, yi, zi)), order=1)
        for i in range(len(radius)):
            odf[m] = odf[m] + PrI[i] * radius[i] ** 2
    peaks, inds = peak_finding(odf.astype("f8"), faces.astype("uint16"))
    Eq = np.zeros((sz, sz, sz))
    for i in range(n):
        Eq[q[i][0], q[i][1], q[i][2]] += S[i] / S[0]
    LEq = laplace(Eq)
    ren = fvtk.ren()
    fvtk.add(ren, fvtk.volume(Eq))
    fvtk.show(ren)
    phis = np.linspace(0, 2 * np.pi, 100)
    planars = []
    for phi in phis:
        planars.append(sphere2cart(1, np.pi / 2, phi))
    planars = np.array(planars)
    planarsR = []
    for v in vertices:
        R = vec2vec_rotmat(np.array([0, 0, 1]), v)
        planarsR.append(np.dot(R, planars.T).T)
    azimsums = []
    for disk in planarsR:
        diskshift = 4 * disk + origin
        #Sq0 = map_coordinates(Sq, diskshift.T, order=1)
        #azimsums.append(np.sum(Sq0))
        #Eq0 = map_coordinates(Eq, diskshift.T, order=1)
        #azimsums.append(np.sum(Eq0))
        LEq0 = map_coordinates(LEq, diskshift.T, order=1)
        azimsums.append(np.sum(LEq0))
    azimsums = np.array(azimsums)
    ren = fvtk.ren()
    colors = fvtk.colors(azimsums, "jet")
    fvtk.add(ren, fvtk.point(vertices, colors,
                             point_radius=0.05, theta=8, phi=8))
    fvtk.show(ren)

def overlaps_faces(arrays):
    # note: relies on a module-level `faces` array, not on `arrays` alone
    n = len(arrays)
    m = set([])
    for i in range(n - 1):
        for j in range(i + 1, n):
            m = set.union(m,
                          set.intersection(set(faces[arrays[i]].ravel()),
                                           set(faces[arrays[j]].ravel())))
    return m

import pickle

f = open("dn.dump", "rb")  # pickles should be opened in binary mode
print("pickled dn reloading ...\n")
[faces, vertices, odf] = pickle.load(f)
print("pickled dn reloaded ...\n")
print("old peak hunting ...\n")
peaks, peakinds = peak_finding(odf, faces)
print("new peak hunting ...\n")
newpeaks = dominant(faces, odf)
# oldpeaks = [305, 317, 171, 170, 172, 169, 40, 45, 2]
# hard-coded result below overrides the dominant() call above
newpeaks = [2, 40, 45, 169, 170, 171, 172, 305, 317, 323, 361, 366,
            490, 491, 492, 493, 626, 638]
print("starting green paint job ...\n")
print("all the new peaks:", newpeaks, "\n")
white = 0.0
red = 1.0

def __init__(self, data, bvals, gradients, Lambda=1.2,
             odf_sphere='symmetric362', mask=None):
    """ Generates a model-free description for every voxel that can be
    used from simple to very complicated configurations like quintuple
    crossings, if your datasets support them.

    You can use this class for every kind of DWI image, but it will
    perform much better when you have a balanced sampling scheme.

    Implements equation [9] from Generalized Q-Sampling as described in
    Fang-Cheng Yeh, Van J. Wedeen, Wen-Yih Isaac Tseng. Generalized
    Q-Sampling Imaging. IEEE TMI, 2010.

    Parameters
    ----------
    data : array, shape (X, Y, Z, D)
    bvals : array, shape (N,)
    gradients : array, shape (N, 3)
        also known as bvecs
    Lambda : float, optional
        smoothing parameter - diffusion sampling length
    odf_sphere : None or str or tuple, optional
        input that will result in vertex, face arrays for a sphere.
    mask : None or ndarray, optional

    Key Properties
    --------------
    QA : array, shape (X, Y, Z, 5), quantitative anisotropy
    IN : array, shape (X, Y, Z, 5), indices of QA, qa unit directions
    fwd : float, normalization parameter

    Notes
    -----
    In order to reconstruct the spin distribution function, a nice,
    symmetric, evenly distributed sphere is provided using 362 points.
    This is usually sufficient for most datasets.

    See also
    --------
    dipy.tracking.propagation.EuDX, dipy.reconst.dti.Tensor,
    dipy.data.get_sphere
    """
    odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
    self.odf_vertices = odf_vertices
    # 0.01506 = 6*D where D is the free water diffusion coefficient;
    # l_values are sqrt(6 D tau), with D the free water diffusion
    # coefficient and tau included in the b-value
    scaling = np.sqrt(bvals * 0.01506)
    tmp = np.tile(scaling, (3, 1))
    # the b vectors might have nan values where they correspond to b
    # value equal to 0
    gradients[np.isnan(gradients)] = 0.
    gradsT = gradients.T
    b_vector = gradsT * tmp  # element-wise, also known as the Hadamard product
    q2odf_params = np.real(np.sinc(np.dot(b_vector.T, odf_vertices.T) *
                                   Lambda / np.pi))
    S = data
    datashape = S.shape  # initial shape
    msk = None  # tmp mask
    if len(datashape) == 4:
        x, y, z, g = S.shape
        S = S.reshape(x * y * z, g)
        QA = np.zeros((x * y * z, 5))
        IN = np.zeros((x * y * z, 5))
        if mask is not None:
            if mask.shape[:3] == datashape[:3]:
                msk = mask.ravel().copy()
    if len(datashape) == 2:
        x, g = S.shape
        QA = np.zeros((x, 5))
        IN = np.zeros((x, 5))
    glob_norm_param = 0
    self.q2odf_params = q2odf_params
    #calculate quantitative anisotropy and find the peaks and the
    #indices for every voxel
    if mask is not None:
        for (i, s) in enumerate(S):
            if msk[i] > 0:
                #Q to ODF
                odf = np.dot(s, q2odf_params)
                peaks, inds = rp.peak_finding(odf, odf_faces)
                glob_norm_param = max(np.max(odf), glob_norm_param)
                #remove the isotropic part
                peaks = peaks - np.min(odf)
                l = min(len(peaks), 5)
                QA[i][:l] = peaks[:l]
                IN[i][:l] = inds[:l]
    if mask is None:
        for (i, s) in enumerate(S):
            #Q to ODF
            odf = np.dot(s, q2odf_params)
            peaks, inds = rp.peak_finding(odf, odf_faces)
            glob_norm_param = max(np.max(odf), glob_norm_param)
            #remove the isotropic part
            peaks = peaks - np.min(odf)
            l = min(len(peaks), 5)
            QA[i][:l] = peaks[:l]
            IN[i][:l] = inds[:l]
    #normalize
    QA /= glob_norm_param
    if len(datashape) == 4:
        self.QA = QA.reshape(x, y, z, 5)
        self.IN = IN.reshape(x, y, z, 5)
    if len(datashape) == 2:
        self.QA = QA
        self.IN = IN
    self.glob_norm_param = glob_norm_param

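# Usage sketch (not in the original source): construct the model on the
# small_64D test dataset exactly as the tests above do; QA and IN then
# hold up to five peak magnitudes and peak direction indices per voxel.
import numpy as np
import nibabel as nib
from dipy.data import get_data

fimg, fbvals, fbvecs = get_data('small_64D')
bvals = np.load(fbvals)
gradients = np.load(fbvecs)
data = nib.load(fimg).get_data()
gqs = GeneralizedQSampling(data, bvals, gradients, Lambda=1.2)
print(gqs.QA.shape, gqs.IN.shape)  # -> (X, Y, Z, 5) each
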
def fit(self): """ process all voxels """ S = self.data datashape = S.shape #initial shape #memory allocations for 4D volumes if len(datashape) == 4: x, y, z, g = S.shape S = S.reshape(x * y * z, g) QA = np.zeros((x * y * z, 5)) IN = np.zeros((x * y * z, 5)) if self.save_odfs: ODF = np.zeros((x * y * z, self.odfn)) if self.mask != None: if self.mask.shape[:3] == datashape[:3]: msk = self.mask.ravel().copy() if self.mask == None: self.mask = np.ones(datashape[:3]) msk = self.mask.ravel().copy() #memory allocations for a series of voxels if len(datashape) == 2: x, g = S.shape QA = np.zeros((x, 5)) IN = np.zeros((x, 5)) if self.save_odfs: ODF = np.zeros((x, self.odfn)) if self.mask != None: if self.mask.shape[0] == datashape[0]: msk = self.mask.ravel().copy() if self.mask == None: self.mask = np.ones(datashape[:1]) msk = self.mask.ravel().copy() glob_norm_param = 0 #Calculate Quantitative Anisotropy and #find the peaks and the indices #for every voxel for (i, s) in enumerate(S): if msk[i] > 0: #Q to ODF odf = np.dot(s, self.q2odf_params) min_odf = np.min(odf) if self.save_odfs: ODF[i] = odf #-min_odf peaks, inds = rp.peak_finding(odf, self.odf_faces) glob_norm_param = max(np.max(odf), glob_norm_param) #print peaks,min_odf #remove the isotropic part l = self.reduce_peaks(peaks, min_odf) if l == 0: QA[i][0] = peaks[0] - min_odf IN[i][0] = inds[0] if l > 0 and l < 5: QA[i][:l] = peaks[:l] - min_odf IN[i][:l] = inds[:l] #normalize QA QA /= glob_norm_param if len(datashape) == 4: self.QA = QA.reshape(x, y, z, 5) self.IN = IN.reshape(x, y, z, 5) if self.save_odfs: self.ODF = ODF.reshape(x, y, z, ODF.shape[-1]) self.QA_norm = glob_norm_param if len(datashape) == 2: self.QA = QA self.IN = IN if self.save_odfs: self.ODF = ODF self.QA_norm = None self.glob_norm_param = glob_norm_param
def fit(self): """ process all voxels """ S=self.data datashape=S.shape #initial shape #memory allocations for 4D volumes if len(datashape)==4: x,y,z,g=S.shape S=S.reshape(x*y*z,g) QA = np.zeros((x*y*z,5)) IN = np.zeros((x*y*z,5)) if self.save_odfs: ODF=np.zeros((x*y*z,self.odfn)) if self.mask != None: if self.mask.shape[:3]==datashape[:3]: msk=self.mask.ravel().copy() if self.mask == None: self.mask=np.ones(datashape[:3]) msk=self.mask.ravel().copy() #memory allocations for a series of voxels if len(datashape)==2: x,g= S.shape QA = np.zeros((x,5)) IN = np.zeros((x,5)) if self.save_odfs: ODF=np.zeros((x,self.odfn)) if self.mask != None: if self.mask.shape[0]==datashape[0]: msk=self.mask.ravel().copy() if self.mask == None: self.mask=np.ones(datashape[:1]) msk=self.mask.ravel().copy() glob_norm_param = 0 #Calculate Quantitative Anisotropy and #find the peaks and the indices #for every voxel for (i,s) in enumerate(S): if msk[i]>0: #Q to ODF odf=np.dot(s,self.q2odf_params) min_odf=np.min(odf) if self.save_odfs: ODF[i]=odf#-min_odf peaks,inds=rp.peak_finding(odf,self.odf_faces) glob_norm_param=max(np.max(odf),glob_norm_param) #print peaks,min_odf #remove the isotropic part l=self.reduce_peaks(peaks,min_odf) if l==0: QA[i][0] = peaks[0]-min_odf IN[i][0] = inds[0] if l>0 and l<5: QA[i][:l] = peaks[:l]-min_odf IN[i][:l] = inds[:l] #normalize QA QA/=glob_norm_param if len(datashape) == 4: self.QA=QA.reshape(x,y,z,5) self.IN=IN.reshape(x,y,z,5) if self.save_odfs: self.ODF=ODF.reshape(x,y,z,ODF.shape[-1]) self.QA_norm= glob_norm_param if len(datashape) == 2: self.QA=QA self.IN=IN if self.save_odfs: self.ODF=ODF self.QA_norm=None self.glob_norm_param = glob_norm_param
def test_gqi_small():
    #read bvals, gradients and data
    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    gradients = np.load(fbvecs)
    data = nib.load(fimg).get_data()
    print(bvals.shape)
    print(gradients.shape)
    print(data.shape)
    t1 = time.clock()
    gqs = gq.GeneralizedQSampling(data, bvals, gradients)
    t2 = time.clock()
    print('GQS in %d' % (t2 - t1))
    eds = np.load(get_sphere('symmetric362'))
    odf_vertices = eds['vertices']
    odf_faces = eds['faces']
    #Yeh et al., IEEE TMI, 2010
    #calculate the odf using GQI
    # 0.01506 = 6*D where D is the free water diffusion coefficient;
    # l_values are sqrt(6 D tau), with tau included in the b-value
    scaling = np.sqrt(bvals * 0.01506)
    tmp = np.tile(scaling, (3, 1))
    b_vector = gradients.T * tmp
    Lambda = 1.2  # smoothing parameter - diffusion sampling length
    #implements equation no. 9 from Yeh et al.
    q2odf_params = np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda / np.pi)
    S = data.copy()
    x, y, z, g = S.shape
    S = S.reshape(x * y * z, g)
    QA = np.zeros((x * y * z, 5))
    IN = np.zeros((x * y * z, 5))
    fwd = 0
    #calculate quantitative anisotropy and find the peaks and the indices
    #for every voxel
    for (i, s) in enumerate(S):
        odf = Q2odf(s, q2odf_params)
        peaks, inds = rp.peak_finding(odf, odf_faces)
        fwd = max(np.max(odf), fwd)
        peaks = peaks - np.min(odf)
        l = min(len(peaks), 5)
        QA[i][:l] = peaks[:l]
        IN[i][:l] = inds[:l]
    QA /= fwd
    QA = QA.reshape(x, y, z, 5)
    IN = IN.reshape(x, y, z, 5)
    print('Old %d secs' % (time.clock() - t2))
    assert_equal((gqs.QA - QA).max(), 0., 'Frank QA different than dipy QA')
    assert_equal(gqs.QA.shape, QA.shape, 'Frank QA shape is different')

def test_gqiodf():
    #read bvals, gradients and data
    bvals = np.load(opj(os.path.dirname(__file__),
                        'data', 'small_64D.bvals.npy'))
    gradients = np.load(opj(os.path.dirname(__file__),
                            'data', 'small_64D.gradients.npy'))
    img = ni.load(os.path.join(os.path.dirname(__file__),
                               'data', 'small_64D.nii'))
    data = img.get_data()
    gq.GeneralizedQSampling(data, bvals, gradients)
    ten = dt.Tensor(data, bvals, gradients, thresh=50)
    ten.fa()
    x, y, z, a, b = ten.evecs.shape
    evecs = ten.evecs
    xyz = x * y * z
    evecs = evecs.reshape(xyz, 3, 3)
    evals = ten.evals
    evals = evals.reshape(xyz, 3)
    eds = np.load(opj(os.path.dirname(__file__), '..', 'matrices',
                      'evenly_distributed_sphere_362.npz'))
    odf_vertices = eds['vertices']
    odf_faces = eds['faces']
    #Yeh et al., IEEE TMI, 2010
    #calculate the odf using GQI
    # 0.01506 = 6*D where D is the free water diffusion coefficient;
    # l_values are sqrt(6 D tau), with tau included in the b-value
    scaling = np.sqrt(bvals * 0.01506)
    tmp = np.tile(scaling, (3, 1))
    b_vector = gradients.T * tmp
    Lambda = 1.2  # smoothing parameter - diffusion sampling length
    #implements equation no. 9 from Yeh et al.
    q2odf_params = np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda / np.pi)
    S = data.copy()
    x, y, z, g = S.shape
    S = S.reshape(x * y * z, g)
    QA = np.zeros((x * y * z, 5))
    IN = np.zeros((x * y * z, 5))
    fwd = 0
    #calculate quantitative anisotropy and find the peaks and the indices
    #for every voxel
    summary = {}
    summary['vertices'] = odf_vertices
    v = odf_vertices.shape[0]
    summary['faces'] = odf_faces
    f = odf_faces.shape[0]
    '''
    If e = number_of_edges, the Euler formula says f - e + v = 2 for a mesh
    on a sphere. Here, assuming we have a healthy triangulation, every face
    is a triangle, all 3 of whose edges should belong to exactly two faces,
    so 2*e = 3*f. To avoid division we test whether 2*f - 3*f + 2*v == 4,
    or equivalently 2*v - f == 4.
    '''
    assert_equal(2 * v - f, 4, 'Direct Euler test fails')
    assert_true(meshes.euler_characteristic_check(odf_vertices, odf_faces,
                                                  chi=2),
                'euler_characteristic_check fails')
    coarse = meshes.coarseness(odf_faces)
    print('coarseness: ', coarse)
    for (i, s) in enumerate(S):
        istr = str(i)
        summary[istr] = {}
        odf = Q2odf(s, q2odf_params)
        peaks, inds = rp.peak_finding(odf, odf_faces)
        fwd = max(np.max(odf), fwd)
        peaks = peaks - np.min(odf)
        l = min(len(peaks), 5)
        QA[i][:l] = peaks[:l]
        IN[i][:l] = inds[:l]
        summary[istr]['odf'] = odf
        summary[istr]['peaks'] = peaks
        summary[istr]['inds'] = inds
        summary[istr]['evecs'] = evecs[i, :, :]
        summary[istr]['evals'] = evals[i, :]
    QA /= fwd
    peaks_1 = [i for i in range(1000) if len(summary[str(i)]['inds']) == 1]
    peaks_2 = [i for i in range(1000) if len(summary[str(i)]['inds']) == 2]
    peaks_3 = [i for i in range(1000) if len(summary[str(i)]['inds']) == 3]
    # correct numbers of voxels with respectively 1, 2, 3 ODF/QA peaks
    assert_array_equal((len(peaks_1), len(peaks_2), len(peaks_3)),
                       (790, 196, 14),
                       'error in numbers of QA/ODF peaks')
    # correct indices of odf directions for voxels 0, 10, 44
    # with respectively 1, 2, 3 ODF/QA peaks
    assert_array_equal(summary['0']['inds'], [116],
                       'wrong peak indices for voxel 0')
    assert_array_equal(summary['10']['inds'], [105, 78],
                       'wrong peak indices for voxel 10')
    assert_array_equal(summary['44']['inds'], [95, 84, 108],
                       'wrong peak indices for voxel 44')
    assert_equal(np.argmax(summary['0']['odf']), 116)
    assert_equal(np.argmax(summary['10']['odf']), 105)
    assert_equal(np.argmax(summary['44']['odf']), 95)
    vertices = summary['vertices']
    width = 0.02
    triple = triple_odf_maxima(vertices, summary['10']['odf'], width)
    indmax1, odfmax1 = triple[0]
    indmax2, odfmax2 = triple[1]
    indmax3, odfmax3 = triple[2]
    mat = np.vstack([vertices[indmax1], vertices[indmax2],
                     vertices[indmax3]])
    # this is to assess how orthogonal the triple of maxima is
    print(np.dot(mat, np.transpose(mat)))
    print(np.dot(summary['0']['evecs'], np.transpose(mat)))

def fit(self):
    #memory allocations for 4D volumes
    if len(self.datashape) == 4:
        x, y, z, g = self.datashape
        S = self.data.reshape(x * y * z, g)
        GFA = np.zeros((x * y * z))
        IN = np.zeros((x * y * z, 5))
        NFA = np.zeros((x * y * z, 5))
        QA = np.zeros((x * y * z, 5))
        PK = np.zeros((x * y * z, 5))
        if self.save_odfs:
            ODF = np.zeros((x * y * z, self.odfn))
        if self.mask is not None:
            if self.mask.shape[:3] == self.datashape[:3]:
                msk = self.mask.ravel().copy()
        if self.mask is None:
            self.mask = np.ones(self.datashape[:3])
            msk = self.mask.ravel().copy()
    #memory allocations for a series of voxels
    if len(self.datashape) == 2:
        x, g = self.datashape
        S = self.data
        GFA = np.zeros(x)
        IN = np.zeros((x, 5))
        NFA = np.zeros((x, 5))
        QA = np.zeros((x, 5))
        PK = np.zeros((x, 5))
        if self.save_odfs:
            ODF = np.zeros((x, self.odfn))
        if self.mask is not None:
            if self.mask.shape[0] == self.datashape[0]:
                msk = self.mask.ravel().copy()
        if self.mask is None:
            self.mask = np.ones(self.datashape[:1])
            msk = self.mask.ravel().copy()
    #find the global normalization parameter
    #useful for quantitative anisotropy
    glob_norm_param = 0.
    #loop over all voxels
    for (i, s) in enumerate(S):
        if msk[i] > 0:
            #calculate the orientation distribution function
            odf = self.odf(s)
            odf = self.angular_weighting(odf)
            if self.save_odfs:
                ODF[i] = odf
            #normalization for QA
            glob_norm_param = max(np.max(odf), glob_norm_param)
            #calculate the generalized fractional anisotropy
            GFA[i] = self.std_over_rms(odf)
            odf_max = odf.max()
            #if not in the isotropic case
            if np.std(odf) / np.mean(odf) > self.iso_thr:
                #find peaks
                peaks, inds = peak_finding(odf, self.odf_faces)
                ismallp = np.where(peaks / peaks[0] < self.peak_thr)
                if len(ismallp[0]) > 0:
                    l = ismallp[0][0]
                    #do not allow more than three peaks
                    if l > 3:
                        l = 3
                else:
                    l = len(peaks)
                if l == 0:
                    IN[i][l] = inds[l]
                    NFA[i][l] = GFA[i]
                    QA[i][l] = peaks[l] - np.min(odf)
                    PK[i][l] = peaks[l]
                if l > 0 and l <= 3:
                    IN[i][:l] = inds[:l]
                    NFA[i][:l] = GFA[i]
                    QA[i][:l] = peaks[:l] - np.min(odf)
                    PK[i][:l] = peaks[:l]
    if len(self.datashape) == 4:
        self.GFA = GFA.reshape(x, y, z)
        self.NFA = NFA.reshape(x, y, z, 5)
        self.QA = QA.reshape(x, y, z, 5) / glob_norm_param
        self.PK = PK.reshape(x, y, z, 5)
        self.IN = IN.reshape(x, y, z, 5)
        if self.save_odfs:
            self.ODF = ODF.reshape(x, y, z, ODF.shape[-1])
        self.QA_norm = glob_norm_param
    if len(self.datashape) == 2:
        self.GFA = GFA
        self.NFA = NFA
        self.QA = QA
        self.PK = PK
        self.IN = IN
        if self.save_odfs:
            self.ODF = ODF
        self.QA_norm = None

for fib in fibs:
    dix = get_sim_voxels(fib)
    data = dix['data']
    bvals = dix['bvals']
    gradients = dix['gradients']
    print(bvals.shape, gradients.shape, data.shape)
    print(dix['fibres'])
    np.set_printoptions(2)
    for no in range(len(data)):
        sd = SphericalDandelion(data, bvals, gradients)
        sdf = sd.spherical_diffusivity(data[no])
        gq = GeneralizedQSampling(data, bvals, gradients)
        sodf = gq.odf(data[no])
        # `vertices` and `faces` are assumed to be defined at module level
        peaks, inds = peak_finding(np.squeeze(sdf), faces)
        peaks2, inds2 = peak_finding(np.squeeze(sodf), faces)
        print('sdi', inds, 'sodf', inds2,
              vertices[inds[0]] - vertices[inds2[0]])

def __init__(self, data, bvals, gradients, smoothing=1.,
             odf_sphere='symmetric362', mask=None):
    ''' Parameters
        ----------
        data : array, shape (X, Y, Z, D)
        bvals : array, shape (N,)
        gradients : array, shape (N, 3)
            also known as bvecs
        smoothing : float
            smoothing parameter
        odf_sphere : str or tuple, optional
            If str, then load sphere of given name using ``get_sphere``.
            If tuple, gives (vertices, faces) for sphere.

        See also
        --------
        dipy.reconst.dti.Tensor, dipy.reconst.gqi.GeneralizedQSampling
    '''
    odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
    self.odf_vertices = odf_vertices
    self.bvals = bvals
    gradients[np.isnan(gradients)] = 0.
    self.gradients = gradients
    self.weighting = np.abs(np.dot(gradients, self.odf_vertices.T))
    #self.weighting = self.weighting / np.sum(self.weighting, axis=0)
    S = data
    datashape = S.shape  # initial shape
    msk = None  # tmp mask
    if len(datashape) == 4:
        x, y, z, g = S.shape
        S = S.reshape(x * y * z, g)
        XA = np.zeros((x * y * z, 5))
        IN = np.zeros((x * y * z, 5))
        if mask is not None:
            if mask.shape[:3] == datashape[:3]:
                msk = mask.ravel().copy()
    if len(datashape) == 2:
        x, g = S.shape
        XA = np.zeros((x, 5))
        IN = np.zeros((x, 5))
    if mask is not None:
        for (i, s) in enumerate(S):
            if msk[i] > 0:
                odf = self.spherical_diffusivity(s)
                peaks, inds = peak_finding(odf, odf_faces)
                l = min(len(peaks), 5)
                XA[i][:l] = peaks[:l]
                IN[i][:l] = inds[:l]
    if mask is None:
        for (i, s) in enumerate(S):
            odf = self.spherical_diffusivity(s)
            peaks, inds = peak_finding(odf, odf_faces)
            l = min(len(peaks), 5)
            XA[i][:l] = peaks[:l]
            IN[i][:l] = inds[:l]
    if len(datashape) == 4:
        self.XA = XA.reshape(x, y, z, 5)
        self.IN = IN.reshape(x, y, z, 5)
    if len(datashape) == 2:
        self.XA = XA
        self.IN = IN

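# Usage sketch (not in the original source), mirroring test_dandelion above;
# the voxel index [5, 5, 5] is illustrative only.
import numpy as np
import nibabel as nib
from dipy.data import get_data

fimg, fbvals, fbvecs = get_data('small_64D')
bvals = np.load(fbvals)
gradients = np.load(fbvecs)
data = nib.load(fimg).get_data()
sd = SphericalDandelion(data, bvals, gradients)
sdf = sd.spherical_diffusivity(data[5, 5, 5])
print(sdf.shape, sd.XA.shape, sd.IN.shape)
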
    S, stics = SticksAndBall(bvals, bvecs, d, S0,
                             angles=[(30, 0), (60, 0), (90, 90)],
                             fractions=[0, 0, 0], snr=snr)
    data[13] = S.copy()
    return data


if __name__ == '__main__':
    #def test_dni():
    btable = np.loadtxt(get_data('dsi515btable'))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    data = sim_data(bvals, bvecs)
    dn = DiffusionNabla(data, bvals, bvecs, save_odfs=True)
    pks = dn.pk()
    #assert_array_equal(np.sum(pks > 0, axis=1),
    #                   np.array([0, 1, 2, 3, 3, 3, 3, 3, 3, 2, 2, 1, 1, 0]))
    odfs = dn.odfs()
    peaks, inds = peak_finding(odfs[10], dn.odf_faces)

def test_gqi_small():
    #read bvals, gradients and data
    bvals = np.load(opj(os.path.dirname(__file__),
                        'data', 'small_64D.bvals.npy'))
    gradients = np.load(opj(os.path.dirname(__file__),
                            'data', 'small_64D.gradients.npy'))
    img = ni.load(os.path.join(os.path.dirname(__file__),
                               'data', 'small_64D.nii'))
    data = img.get_data()
    print(bvals.shape)
    print(gradients.shape)
    print(data.shape)
    t1 = time.clock()
    gqs = gq.GeneralizedQSampling(data, bvals, gradients)
    t2 = time.clock()
    print('GQS in %d' % (t2 - t1))
    eds = np.load(opj(os.path.dirname(__file__), '..', 'matrices',
                      'evenly_distributed_sphere_362.npz'))
    odf_vertices = eds['vertices']
    odf_faces = eds['faces']
    #Yeh et al., IEEE TMI, 2010
    #calculate the odf using GQI
    # 0.01506 = 6*D where D is the free water diffusion coefficient;
    # l_values are sqrt(6 D tau), with tau included in the b-value
    scaling = np.sqrt(bvals * 0.01506)
    tmp = np.tile(scaling, (3, 1))
    b_vector = gradients.T * tmp
    Lambda = 1.2  # smoothing parameter - diffusion sampling length
    #implements equation no. 9 from Yeh et al.
    q2odf_params = np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda / np.pi)
    S = data.copy()
    x, y, z, g = S.shape
    S = S.reshape(x * y * z, g)
    QA = np.zeros((x * y * z, 5))
    IN = np.zeros((x * y * z, 5))
    fwd = 0
    #calculate quantitative anisotropy and find the peaks and the indices
    #for every voxel
    for (i, s) in enumerate(S):
        odf = Q2odf(s, q2odf_params)
        peaks, inds = rp.peak_finding(odf, odf_faces)
        fwd = max(np.max(odf), fwd)
        peaks = peaks - np.min(odf)
        l = min(len(peaks), 5)
        QA[i][:l] = peaks[:l]
        IN[i][:l] = inds[:l]
    QA /= fwd
    QA = QA.reshape(x, y, z, 5)
    IN = IN.reshape(x, y, z, 5)
    print('Old %d secs' % (time.clock() - t2))
    assert_equal((gqs.QA - QA).max(), 0., 'Frank QA different than dipy QA')
    assert_equal(gqs.QA.shape, QA.shape, 'Frank QA shape is different')
    assert_equal(len(tp.FACT_Delta(QA, IN, seeds_no=100).tracks), 100,
                 'FACT_Delta is not generating the right number of '
                 'tracks for this dataset')

def run_small_data():
    smalldir = '/home/eg309/Devel/dipy/dipy/data/'
    bvals = np.load(smalldir + 'small_64D.bvals.npy')
    gradients = np.load(smalldir + 'small_64D.gradients.npy')
    img = nibabel.load(smalldir + 'small_64D.nii')
    small_data = img.get_data()
    print('real_data', small_data.shape)
    gqsmall = dgqs.GeneralizedQSampling(small_data, bvals, gradients)
    tnsmall = ddti.Tensor(small_data, bvals, gradients)
    x, y, z, a, b = tnsmall.evecs.shape
    evecs = tnsmall.evecs
    xyz = x * y * z
    evecs = evecs.reshape(xyz, 3, 3)
    evals = tnsmall.evals
    evals = evals.reshape(xyz, 3)
    from dipy.data import get_sphere
    odf_vertices, odf_faces = get_sphere('symmetric362')
    # 0.01506 = 6*D where D is the free water diffusion coefficient;
    # l_values are sqrt(6 D tau), with tau included in the b-value
    scaling = np.sqrt(bvals * 0.01506)
    tmp = np.tile(scaling, (3, 1))
    b_vector = gradients.T * tmp
    Lambda = 1.2  # smoothing parameter - diffusion sampling length
    #implements equation no. 9 from Yeh et al.
    q2odf_params = np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda / np.pi)
    S = small_data.copy()
    x, y, z, g = S.shape
    S = S.reshape(x * y * z, g)
    IN = np.zeros((x * y * z, 5))
    FA = tnsmall.fa().reshape(x * y * z)
    fwd = 0
    #calculate quantitative anisotropy and find the peaks and the indices
    #for every voxel
    summary = {}
    summary['vertices'] = odf_vertices
    summary['faces'] = odf_faces
    for (i, s) in enumerate(S):
        istr = str(i)
        summary[istr] = {}
        t0, t1, t2, npa = gqsmall.npa(s, width=5)
        summary[istr]['triple'] = (t0, t1, t2)
        summary[istr]['npa'] = npa
        odf = Q2odf(s, q2odf_params)
        peaks, inds = rp.peak_finding(odf, odf_faces)
        fwd = max(np.max(odf), fwd)
        n_peaks = min(len(peaks), 5)
        # use a separate name for the comprehension variable so it does
        # not clobber the voxel index i (a bug in the original)
        peak_heights = [odf[ind] for ind in inds[:n_peaks]]
        IN[i][:n_peaks] = inds[:n_peaks]
        summary[istr]['odf'] = odf
        summary[istr]['peaks'] = peaks
        summary[istr]['inds'] = inds
        summary[istr]['evecs'] = evecs[i, :, :]
        summary[istr]['evals'] = evals[i, :]
        summary[istr]['n_peaks'] = n_peaks
        summary[istr]['peak_heights'] = peak_heights
        summary[istr]['fa'] = FA[i]
    peaks_1 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 1]
    peaks_2 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 2]
    peaks_3 = [i for i in range(1000) if summary[str(i)]['n_peaks'] == 3]
    print('#voxels with 1, 2, 3 peaks',
          len(peaks_1), len(peaks_2), len(peaks_3))
    return FA, summary