Code example #1
File: test_eit.py Project: AndrewLawrence/dipy
def test_dni_eit():
 
    btable=np.loadtxt(get_data('dsi515btable'))    
    bvals=btable[:,0]
    bvecs=btable[:,1:]
    data,descr=sim_data(bvals,bvecs)   
    #load odf sphere
    vertices,faces = sphere_vf_from('symmetric724')
    edges = unique_edges(faces)
    #create the sphere
    odf_sphere=(vertices,faces)
    dn=DiffusionNablaModel(bvals,bvecs,odf_sphere)
    dn.relative_peak_threshold = 0.5
    dn.angular_distance_threshold = 20
    dnfit=dn.fit(data)
    print('DiffusionNablaModel')
    for i,d in enumerate(data):
        print(descr[i], np.sum(dnfit.peak_values[i]>0))
    ei=EquatorialInversionModel(bvals,bvecs,odf_sphere)
    ei.relative_peak_threshold = 0.3
    ei.angular_distance_threshold = 15
    ei.set_operator('laplacian')
    eifit = ei.fit(data,return_odf=True)
    print('EquatorialInversionModel')
    for i,d in enumerate(data):
        print(descr[i], np.sum(eifit.peak_values[i]>0))
        assert_equal(descr[i][1], np.sum(eifit.peak_values[i]>0))
Code example #2
File: test_eit.py Project: Vincent-Methot/dipy
def test_dni_eit():

    btable = np.loadtxt(get_data('dsi515btable'))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    data, descr = sim_data(bvals, bvecs)
    #load odf sphere
    vertices, faces = sphere_vf_from('symmetric724')
    edges = unique_edges(faces)
    #create the sphere
    odf_sphere = (vertices, faces)
    dn = DiffusionNablaModel(bvals, bvecs, odf_sphere)
    dn.relative_peak_threshold = 0.5
    dn.angular_distance_threshold = 20
    dnfit = dn.fit(data)
    print('DiffusionNablaModel')
    for i, d in enumerate(data):
        print(descr[i], np.sum(dnfit.peak_values[i] > 0))
    ei = EquatorialInversionModel(bvals, bvecs, odf_sphere)
    ei.relative_peak_threshold = 0.3
    ei.angular_distance_threshold = 15
    ei.set_operator('laplacian')
    eifit = ei.fit(data, return_odf=True)
    print('EquatorialInversionModel')
    for i, d in enumerate(data):
        print(descr[i], np.sum(eifit.peak_values[i] > 0))
        assert_equal(descr[i][1], np.sum(eifit.peak_values[i] > 0))
Code example #3
File: dsi.py Project: iannimmosmith/dipy
 def __init__(self, bvals, gradients, odf_sphere='symmetric642',
              deconv=False, half_sphere_grads=False):
     '''
     Parameters
     -----------
     bvals : array, shape (N,)
     gradients : array, shape (N,3) also known as bvecs        
     odf_sphere : tuple, (verts, faces, edges)
     deconv : bool, use deconvolution
     half_sphere_grads : boolean, default False
         In order to create the q-space we use the bvals and gradients.
         If the gradients cover only one hemisphere, set this to True so
         that they can be mirrored to cover the full sphere.
     See also
     ----------
     dipy.reconst.dti.Tensor, dipy.reconst.gqi.GeneralizedQSampling
     '''
     b0 = 0
     self.bvals=bvals
     self.gradients=gradients
     #3d volume for Sq
     self.sz=16
     #necessary shifting for centering
     self.origin=8
     #hanning filter width
     self.filter_width=32.                     
     #odf collecting radius
     self.radius=np.arange(2.1,6,.2)
     #odf sphere
     odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
     self.set_odf_vertices(odf_vertices,None,odf_faces)
     self.odfn=len(self._odf_vertices)
     #number of single sampling points
     self.dn = (bvals > b0).sum()
     self.num_b0 = len(bvals) - self.dn
     self.create_qspace()
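A minimal usage sketch for a constructor like the one above, following the pattern of the DSI tests later in this listing (Code examples #9 and #14); get_data, sphere_vf_from and DiffusionSpectrumModel are the names those tests use, and the required imports are assumed to match theirs.

# Sketch only: build a DSI-style model from a b-table and fit one signal S.
btable = np.loadtxt(get_data('dsi515btable'))
bvals, bvecs = btable[:, 0], btable[:, 1:]
vertices, faces = sphere_vf_from('symmetric724')
ds = DiffusionSpectrumModel(bvals, bvecs, (vertices, faces))
dsfit = ds.fit(S)                      # S: diffusion signal, shape (N,)
print((dsfit.peak_values > 0).sum())   # number of peaks detected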
Code example #4
File: test_dsi.py Project: iannimmosmith/dipy
def standard_dsi_algorithm(S,bvals,bvecs):
    #volume size
    sz=16
    #shifting
    origin=8
    #hanning width
    filter_width=32.
    #number of signal sampling points
    n=515

    #odf radius
    #radius=np.arange(2.1,30,.1)
    radius=np.arange(2.1,6,.2)
    #radius=np.arange(.1,6,.1)   
    
    bv=bvals
    bmin=np.sort(bv)[1]
    bv=np.sqrt(bv/bmin)
    qtable=np.vstack((bv,bv,bv)).T*bvecs
    qtable=np.floor(qtable+.5)
   
    #calculate radius for the hanning filter
    r = np.sqrt(qtable[:,0]**2+qtable[:,1]**2+qtable[:,2]**2)
        
    #setting hanning filter width and hanning
    hanning=.5*np.cos(2*np.pi*r/filter_width)
    
    #center and index in q space volume
    q=qtable+origin
    q=q.astype('i8')
    
    #apply the hanning filter
    values=S*hanning
    
    #create the signal volume    
    Sq=np.zeros((sz,sz,sz))
    for i in range(n):        
        Sq[q[i][0],q[i][1],q[i][2]]+=values[i]
    
    #apply fourier transform
    Pr=fftshift(np.abs(np.real(fftn(fftshift(Sq),(sz,sz,sz)))))

    #vertices, edges, faces  = create_unit_sphere(5)    
    #vertices, faces = sphere_vf_from('symmetric362')           
    vertices, faces = sphere_vf_from('symmetric724')           
    odf = np.zeros(len(vertices))
        
    for m in range(len(vertices)):
        
        xi=origin+radius*vertices[m,0]
        yi=origin+radius*vertices[m,1]
        zi=origin+radius*vertices[m,2]
        
        PrI=map_coordinates(Pr,np.vstack((xi,yi,zi)),order=1)
        for i in range(len(radius)):
            odf[m]=odf[m]+PrI[i]*radius[i]**2
   
    peaks,inds=peak_finding(odf.astype('f8'),faces.astype('uint16'))

    return Pr,odf,peaks
Code example #5
def standard_dsi_algorithm(S, bvals, bvecs):
    #volume size
    sz = 16
    #shifting
    origin = 8
    #hanning width
    filter_width = 32.
    #number of signal sampling points
    n = 515

    #odf radius
    #radius=np.arange(2.1,30,.1)
    radius = np.arange(2.1, 6, .2)
    #radius=np.arange(.1,6,.1)

    bv = bvals
    bmin = np.sort(bv)[1]
    bv = np.sqrt(bv / bmin)
    qtable = np.vstack((bv, bv, bv)).T * bvecs
    qtable = np.floor(qtable + .5)

    #calculate radius for the hanning filter
    r = np.sqrt(qtable[:, 0]**2 + qtable[:, 1]**2 + qtable[:, 2]**2)

    #setting hanning filter width and hanning
    hanning = .5 * np.cos(2 * np.pi * r / filter_width)

    #center and index in q space volume
    q = qtable + origin
    q = q.astype('i8')

    #apply the hanning filter
    values = S * hanning

    #create the signal volume
    Sq = np.zeros((sz, sz, sz))
    for i in range(n):
        Sq[q[i][0], q[i][1], q[i][2]] += values[i]

    #apply fourier transform
    Pr = fftshift(np.abs(np.real(fftn(fftshift(Sq), (sz, sz, sz)))))

    #vertices, edges, faces  = create_unit_sphere(5)
    #vertices, faces = sphere_vf_from('symmetric362')
    vertices, faces = sphere_vf_from('symmetric724')
    odf = np.zeros(len(vertices))

    for m in range(len(vertices)):

        xi = origin + radius * vertices[m, 0]
        yi = origin + radius * vertices[m, 1]
        zi = origin + radius * vertices[m, 2]

        PrI = map_coordinates(Pr, np.vstack((xi, yi, zi)), order=1)
        for i in range(len(radius)):
            odf[m] = odf[m] + PrI[i] * radius[i]**2

    peaks, inds = peak_finding(odf.astype('f8'), faces.astype('uint16'))

    return Pr, odf, peaks
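The DSI test in Code example #9 keeps a commented-out call to this reference implementation (#pdf0,odf0,peaks0=standard_dsi_algorithm(S,bvals,bvecs)). A minimal sketch of how it is driven, assuming the same SticksAndBall simulation helper used by that test:

# Sketch only: run the reference DSI pipeline on a simulated two-fibre signal.
btable = np.loadtxt(get_data('dsi515btable'))
bvals, bvecs = btable[:, 0], btable[:, 1:]
S, stics = SticksAndBall(bvals, bvecs, d=0.0015, S0=100,
                         angles=[(0, 0), (90, 0), (90, 90)],
                         fractions=[50, 50, 0], snr=None)
Pr, odf, peaks = standard_dsi_algorithm(S, bvals, bvecs)
print(peaks[:3])                       # the strongest ODF peaks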
Code example #6
File: eit.py Project: endolith/dipy
    def __init__(self, bvals, gradients, odf_sphere='symmetric362',
                 half_sphere_grads=False, fast=True):
        ''' Reconstruct the signal using Diffusion Nabla Imaging

        As described in E. Garyfallidis, "Towards an accurate brain
        tractography", PhD thesis, 2011.

        Parameters
        -----------
        bvals : array, shape (N,)
        gradients : array, shape (N,3) also known as bvecs
        odf_sphere : str or tuple, optional
            If str, then load sphere of given name using ``get_sphere``.
            If tuple, gives (vertices, faces) for sphere.
        filter : array, shape(len(vertices),)
            default is None (using standard hanning filter for DSI)
        half_sphere_grads : boolean, default False
            In order to create the q-space we use the bvals and gradients.
            If the gradients cover only one hemisphere, set this to True so
            that they can be mirrored to cover the full sphere.

        See also
        ----------
        dipy.reconst.eit.EquatorialInversionModel, dipy.reconst.dti.TensorModel, dipy.reconst.dsi.DiffusionSpectrumModel
        '''

        #check if bvectors are provided only on a hemisphere
        if half_sphere_grads==True:
            pass
            #bvals=np.append(bvals.copy(),bvals[1:].copy())
            #gradients=np.append(gradients.copy(),-gradients[1:].copy(),axis=0)
            #data=np.append(data.copy(),data[...,1:].copy(),axis=-1)

        #load bvals and bvecs
        self.bvals=bvals
        gradients[np.isnan(gradients)] = 0.
        self.gradients=gradients
        #save number of total diffusion volumes
        self.dn=self.gradients.shape[0] #data.shape[-1]
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.set_odf_vertices(odf_vertices,None,odf_faces)
        self.odfn=odf_vertices.shape[0]

        #odf sampling radius
        self.radius=np.arange(0,5,.2)
        #self.radiusn=len(self.radius)
        #self.create_qspace(bvals,gradients,16,8)
        #peak threshold
        #self.peak_thr=.7
        #equatorial zone
        self.zone=5.
        self.gaussian_weight=0.05
        self.fast=fast
        if fast==True:
            self.evaluate_odf=self.fast_odf
        else:
            self.evaluate_odf=self.slow_odf
        self.precompute()
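A minimal usage sketch for these models, mirroring the EIT test at the top of this listing (Code example #1); bvals, bvecs, odf_sphere and data are built there from the dsi515btable and the sim_data helper.

# Sketch only: fit the equatorial inversion model with the Laplacian operator.
ei = EquatorialInversionModel(bvals, bvecs, odf_sphere)
ei.relative_peak_threshold = 0.3
ei.angular_distance_threshold = 15
ei.set_operator('laplacian')
eifit = ei.fit(data, return_odf=True)
print(np.sum(eifit.peak_values[0] > 0))   # peaks found in the first simulated voxel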
Code example #7
 def __init__(self,
              bvals,
              gradients,
              odf_sphere='symmetric642',
              deconv=False,
              half_sphere_grads=False):
     '''
     Parameters
     -----------
     bvals : array, shape (N,)
     gradients : array, shape (N,3) also known as bvecs        
     odf_sphere : tuple, (verts, faces, edges)
     deconv : bool, use deconvolution
     half_sphere_grads : boolean, default False
         In order to create the q-space we use the bvals and gradients.
         If the gradients cover only one hemisphere, set this to True so
         that they can be mirrored to cover the full sphere.
     See also
     ----------
     dipy.reconst.dti.Tensor, dipy.reconst.gqi.GeneralizedQSampling
     '''
     b0 = 0
     self.bvals = bvals
     self.gradients = gradients
     #3d volume for Sq
     self.sz = 16
     #necessary shifting for centering
     self.origin = 8
     #hanning filter width
     self.filter_width = 32.
     #odf collecting radius
     self.radius = np.arange(2.1, 6, .2)
     #odf sphere
     odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
     self.set_odf_vertices(odf_vertices, None, odf_faces)
     self.odfn = len(self._odf_vertices)
     #number of single sampling points
     self.dn = (bvals > b0).sum()
     self.num_b0 = len(bvals) - self.dn
     self.create_qspace()
Code example #8
    def __init__(self,
                 data,
                 bvals,
                 gradients,
                 odf_sphere='symmetric362',
                 mask=None,
                 half_sphere_grads=False,
                 auto=True,
                 save_odfs=False):
        '''
        Parameters
        -----------
        data : array, shape(X,Y,Z,D), or (X,D)
        bvals : array, shape (N,)
        gradients : array, shape (N,3) also known as bvecs        
        odf_sphere : str or tuple, optional
            If str, then load sphere of given name using ``get_sphere``.
            If tuple, gives (vertices, faces) for sphere.
        filter : array, shape(len(vertices),) 
            default is None (using standard hanning filter for DSI)
        half_sphere_grads : boolean, default False
            In order to create the q-space we use the bvals and gradients.
            If the gradients cover only one hemisphere, set this to True so
            that they (and the data) can be mirrored to cover the full sphere.
        auto : boolean, default True
            If True then the processing of all voxels starts automatically
            with the class constructor; if False you will have to call .fit()
            to do the heavy-duty processing for every voxel.
        save_odfs : boolean, default False
            save odfs, which is memory expensive  

        See also
        ----------
        dipy.reconst.dti.Tensor, dipy.reconst.gqi.GeneralizedQSampling
        '''

        #read the vertices and faces for the odf sphere
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.odf_vertices = odf_vertices
        self.odf_faces = odf_faces
        self.odfn = len(self.odf_vertices)
        self.save_odfs = save_odfs

        #check if bvectors are provided only on a hemisphere
        if half_sphere_grads == True:
            bvals = np.append(bvals.copy(), bvals[1:].copy())
            gradients = np.append(gradients.copy(),
                                  -gradients[1:].copy(),
                                  axis=0)
            data = np.append(data.copy(), data[..., 1:].copy(), axis=-1)

        #load bvals and bvecs
        self.bvals = bvals
        gradients[np.isnan(gradients)] = 0.
        self.gradients = gradients
        #save number of total diffusion volumes
        self.dn = data.shape[-1]
        self.data = data
        self.datashape = data.shape  #initial shape
        self.mask = mask
        #3d volume for Sq
        self.sz = 16
        #necessary shifting for centering
        self.origin = 8
        #hanning filter width
        self.filter_width = 32.

        #odf collecting radius
        self.radius = np.arange(2.1, 6, .2)
        self.update()

        if auto:
            self.fit()
Code example #9
File: test_dsi.py Project: iannimmosmith/dipy
def test_dsi():

    #load odf sphere
    vertices,faces = sphere_vf_from('symmetric724')
    edges = unique_edges(faces)
    half_vertices,half_edges,half_faces=reduce_antipodal(vertices,faces)

    #load bvals and gradients
    btable=np.loadtxt(get_data('dsi515btable'))    
    bvals=btable[:,0]
    bvecs=btable[:,1:]        
    S,stics=SticksAndBall(bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0),(90,0),(90,90)], fractions=[50,50,0], snr=None)    
    #pdf0,odf0,peaks0=standard_dsi_algorithm(S,bvals,bvecs)    
    S2=S.copy()
    S2=S2.reshape(1,len(S)) 
    
    odf_sphere=(vertices,faces)
    ds=DiffusionSpectrumModel( bvals, bvecs, odf_sphere)    
    dsfit=ds.fit(S)
    assert_equal((dsfit.peak_values>0).sum(),3)

    #change thresholds
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 30
    dsfit = ds.fit(S)
    assert_equal((dsfit.peak_values>0).sum(),2)

    #assert_almost_equal(np.sum(ds.pdf(S)-pdf0),0)
    #assert_almost_equal(np.sum(ds.odf(ds.pdf(S))-odf0),0)

    assert_almost_equal(dsfit.gfa,np.array([0.5749720469955439]))
    
    #1 fiber
    S,stics=SticksAndBall(bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0),(90,0),(90,90)], fractions=[100,0,0], snr=None)   
    ds=DiffusionSpectrumModel(bvals,bvecs,odf_sphere)
    dsfit=ds.fit(S)
    QA=dsfit.qa
    assert_equal(np.sum(QA>0),1)
    
    #2 fibers
    S,stics=SticksAndBall(bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0),(90,0),(90,90)], fractions=[50,50,0], snr=None)   
    ds=DiffusionSpectrumModel(bvals,bvecs,odf_sphere)
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 20
    dsfit=ds.fit(S)
    QA=dsfit.qa
    assert_equal(np.sum(QA>0),2)
     
    #Give me 2 directions
    assert_equal(len(dsfit.get_directions()),2)
   
    #3 fibers
    S,stics=SticksAndBall(bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0),(90,0),(90,90)], fractions=[33,33,33], snr=None)   
    ds=DiffusionSpectrumModel(bvals,bvecs,odf_sphere)
    ds.relative_peak_threshold = 0.5
    dsfit=ds.fit(S,return_odf=True)
    QA=dsfit.qa
    assert_equal(np.sum(QA>0),3)

    #Give me 3 directions
    assert_equal(len(dsfit.get_directions()),3)

    #Recalculate the odf with a different sphere.

    vertices, faces = sphere_vf_from('symmetric724') 
    
    odf1=dsfit.odf()
    print(len(odf1))
    
    odf2=dsfit.odf((vertices,faces))
    print(len(odf2))

    assert_array_almost_equal(odf1,odf2)

    #isotropic
    S,stics=SticksAndBall(bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0),(90,0),(90,90)], fractions=[0,0,0], snr=None)   
    ds=DiffusionSpectrumModel(bvals,bvecs,odf_sphere)
    dsfit=ds.fit(S)
    QA=dsfit.qa
    assert_equal(np.sum(QA>0),0)
Code example #10
File: test_gqi.py Project: Garyfallidis/dipy
def test_gqi():

    #load odf sphere
    vertices, faces = sphere_vf_from('symmetric724')
    edges = unique_edges(faces)
    half_vertices, half_edges, half_faces = reduce_antipodal(vertices, faces)

    #load bvals and gradients
    btable = np.loadtxt(get_data('dsi515btable'))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[50, 50, 0],
                             snr=None)
    #pdf0,odf0,peaks0=standard_dsi_algorithm(S,bvals,bvecs)
    S2 = S.copy()
    S2 = S2.reshape(1, len(S))

    odf_sphere = (vertices, faces)
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    dsfit = ds.fit(S)
    assert_equal((dsfit.peak_values > 0).sum(), 3)

    #change thresholds
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 30
    dsfit = ds.fit(S)
    assert_equal((dsfit.peak_values > 0).sum(), 2)

    #1 fiber
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[100, 0, 0],
                             snr=None)
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 20
    dsfit = ds.fit(S)
    QA = dsfit.qa
    #1/0
    assert_equal(np.sum(QA > 0), 1)

    #2 fibers
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[50, 50, 0],
                             snr=None)
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 20
    dsfit = ds.fit(S)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 2)

    #3 fibers
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[33, 33, 33],
                             snr=None)
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    ds.relative_peak_threshold = 0.5
    dsfit = ds.fit(S)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 3)

    #isotropic
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[0, 0, 0],
                             snr=None)
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    dsfit = ds.fit(S)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 0)

    #3 fibers DSI2
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[33, 33, 33],
                             snr=None)
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere, squared=True)
    ds.relative_peak_threshold = 0.5
    dsfit = ds.fit(S, gfa_thr=0.05)
    QA = dsfit.qa

    #3 fibers DSI2 with a 3D volume
    data = np.zeros((3, 3, 3, len(S)))
    data[..., :] = S.copy()
    dsfit = ds.fit(data, gfa_thr=0.05)
    #1/0
    assert_array_almost_equal(np.sum(dsfit.peak_values > 0, axis=-1),
                              3 * np.ones((3, 3, 3)))
Code example #11
File: green_paint.py Project: Garyfallidis/trn
def test():

    # img=nib.load('/home/eg309/Data/project01_dsi/connectome_0001/tp1/RAWDATA/OUT/mr000001.nii.gz')
    btable = np.loadtxt(get_data("dsi515btable"))
    # volume size
    sz = 16
    # shifting
    origin = 8
    # hanning width
    filter_width = 32.0
    # number of signal sampling points
    n = 515
    # odf radius
    radius = np.arange(2.1, 6, 0.2)
    # create q-table
    bv = btable[:, 0]
    bmin = np.sort(bv)[1]
    bv = np.sqrt(bv / bmin)
    qtable = np.vstack((bv, bv, bv)).T * btable[:, 1:]
    qtable = np.floor(qtable + 0.5)
    # copy bvals and bvecs
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    # S=img.get_data()[38,50,20]#[96/2,96/2,20]
    S, stics = SticksAndBall(
        bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0), (60, 0), (90, 90)], fractions=[0, 0, 0], snr=None
    )

    S2 = S.copy()
    S2 = S2.reshape(1, len(S))
    dn = DiffusionNabla(S2, bvals, bvecs, auto=False)
    pR = dn.equators
    odf = dn.odf(S)
    # Xs=dn.precompute_interp_coords()
    peaks, inds = peak_finding(odf.astype("f8"), dn.odf_faces.astype("uint16"))
    print(peaks)
    print(peaks / peaks.min())
    # print dn.PK
    dn.fit()
    print(dn.PK)

    # """
    ren = fvtk.ren()
    colors = fvtk.colors(odf, "jet")
    fvtk.add(ren, fvtk.point(dn.odf_vertices, colors, point_radius=0.05, theta=8, phi=8))
    fvtk.show(ren)
    # """

    stop  # deliberately undefined name: raises NameError to halt the script here

    # ds=DiffusionSpectrum(S2,bvals,bvecs)
    # tpr=ds.pdf(S)
    # todf=ds.odf(tpr)

    """
    #show projected signal
    Bvecs=np.concatenate([bvecs[1:],-bvecs[1:]])
    X0=np.dot(np.diag(np.concatenate([S[1:],S[1:]])),Bvecs)    
    ren=fvtk.ren()
    fvtk.add(ren,fvtk.point(X0,fvtk.yellow,1,2,16,16))    
    fvtk.show(ren)
    """
    # qtable=5*matrix[:,1:]

    # calculate radius for the hanning filter
    r = np.sqrt(qtable[:, 0] ** 2 + qtable[:, 1] ** 2 + qtable[:, 2] ** 2)

    # setting hanning filter width and hanning
    hanning = 0.5 * np.cos(2 * np.pi * r / filter_width)

    # center and index in q space volume
    q = qtable + origin
    q = q.astype("i8")

    # apply the hanning filter
    values = S * hanning

    """
    #plot q-table
    ren=fvtk.ren()
    colors=fvtk.colors(values,'jet')
    fvtk.add(ren,fvtk.point(q,colors,1,0.1,6,6))
    fvtk.show(ren)
    """

    # create the signal volume
    Sq = np.zeros((sz, sz, sz))
    for i in range(n):
        Sq[q[i][0], q[i][1], q[i][2]] += values[i]

    # apply fourier transform
    Pr = fftshift(np.abs(np.real(fftn(fftshift(Sq), (sz, sz, sz)))))

    # """
    ren = fvtk.ren()
    vol = fvtk.volume(Pr)
    fvtk.add(ren, vol)
    fvtk.show(ren)
    # """

    """
    from enthought.mayavi import mlab
    mlab.pipeline.volume(mlab.pipeline.scalar_field(Sq))
    mlab.show()
    """

    # vertices, edges, faces  = create_unit_sphere(5)
    vertices, faces = sphere_vf_from("symmetric362")
    odf = np.zeros(len(vertices))

    for m in range(len(vertices)):

        xi = origin + radius * vertices[m, 0]
        yi = origin + radius * vertices[m, 1]
        zi = origin + radius * vertices[m, 2]
        PrI = map_coordinates(Pr, np.vstack((xi, yi, zi)), order=1)
        for i in range(len(radius)):
            odf[m] = odf[m] + PrI[i] * radius[i] ** 2

    """
    ren=fvtk.ren()
    colors=fvtk.colors(odf,'jet')
    fvtk.add(ren,fvtk.point(vertices,colors,point_radius=.05,theta=8,phi=8))
    fvtk.show(ren)
    """

    """
    #Pr[Pr<500]=0    
    ren=fvtk.ren()
    #ren.SetBackground(1,1,1)
    fvtk.add(ren,fvtk.volume(Pr))
    fvtk.show(ren)
    """

    peaks, inds = peak_finding(odf.astype("f8"), faces.astype("uint16"))

    Eq = np.zeros((sz, sz, sz))
    for i in range(n):
        Eq[q[i][0], q[i][1], q[i][2]] += S[i] / S[0]

    LEq = laplace(Eq)

    # Pr[Pr<500]=0
    ren = fvtk.ren()
    # ren.SetBackground(1,1,1)
    fvtk.add(ren, fvtk.volume(Eq))
    fvtk.show(ren)

    phis = np.linspace(0, 2 * np.pi, 100)

    planars = []
    for phi in phis:
        planars.append(sphere2cart(1, np.pi / 2, phi))
    planars = np.array(planars)

    planarsR = []
    for v in vertices:
        R = vec2vec_rotmat(np.array([0, 0, 1]), v)
        planarsR.append(np.dot(R, planars.T).T)

    """
    ren=fvtk.ren()
    fvtk.add(ren,fvtk.point(planarsR[0],fvtk.green,1,0.1,8,8))
    fvtk.add(ren,fvtk.point(2*planarsR[1],fvtk.red,1,0.1,8,8))
    fvtk.show(ren)
    """

    azimsums = []
    for disk in planarsR:
        diskshift = 4 * disk + origin
        # Sq0=map_coordinates(Sq,diskshift.T,order=1)
        # azimsums.append(np.sum(Sq0))
        # Eq0=map_coordinates(Eq,diskshift.T,order=1)
        # azimsums.append(np.sum(Eq0))
        LEq0 = map_coordinates(LEq, diskshift.T, order=1)
        azimsums.append(np.sum(LEq0))

    azimsums = np.array(azimsums)

    # """
    ren = fvtk.ren()
    colors = fvtk.colors(azimsums, "jet")
    fvtk.add(ren, fvtk.point(vertices, colors, point_radius=0.05, theta=8, phi=8))
    fvtk.show(ren)
    # """

    # for p in planarsR[0]:
    """
Code example #12
File: _show_odfs.py Project: endolith/dipy
def show_odfs(odfs, vertices_faces, image=None, colormap='jet',
              scale=2.2, norm=True, radial_scale=True):
    """
    Display a grid of ODFs.

    Parameters
    ----------
    odfs : (X, Y, Z, M) ndarray
        A 3-D arrangement of orientation distribution functions (ODFs).  At
        each ``(x, y, z)`` position, it contains the values of the
        corresponding ODF evaluated on the M vertices.
    vertices_faces : str or tuple of (vertices, faces)
        A named sphere from `dipy.data.get_sphere`, or a combination of
        `(vertices, faces)`.
    image : (X, Y) ndarray
        Background image (e.g., fractional anisotropy) to display behind the
        ODFs.
    colormap : str
        Color mapping.
    scale : float
        Increasing the scale spaces ODFs further apart.
    norm : bool
        Whether or not to normalize each individual ODF (divide by its maximum
        absolute value).
    radial_scale : bool
        Whether or not to change the radial shape of the ODF according to its
        scalar value.  If set to False, the ODF is displayed as a sphere.

    Notes
    -----
    Mayavi gets really slow when `triangular_mesh` is called too many times,
    so this function stacks ODF data and calls `triangular_mesh` once.

    Examples
    --------
    >>> from dipy.data import get_sphere
    >>> verts, faces = get_sphere('symmetric724')

    >>> angle = np.linspace(0, 2*np.pi, len(verts))
    >>> odf1 = np.sin(angle)
    >>> odf2 = np.cos(angle)
    >>> odf3 = odf1**2 * odf2
    >>> odf4 = odf1 + odf2**2

    >>> odfs = [[[odf1, odf2],
    ...          [odf3, odf4]]]

    >>> show_odfs(odfs, (verts, faces), scale=5)

    """
    vertices, faces = sphere_vf_from(vertices_faces)

    odfs = np.asarray(odfs)
    if odfs.ndim != 4:
        raise ValueError("ODFs must by an (X,Y,Z,M) array. " +
                         "Has shape " + str(odfs.shape))

    grid_shape = np.array(odfs.shape[:3])
    faces = np.asarray(faces, dtype=int)

    xx, yy, zz, ff, mm = [], [], [], [], []
    count = 0

    for ijk in np.ndindex(*grid_shape):
        m = odfs[ijk]

        if norm:
            m /= abs(m).max()

        if radial_scale:
            xyz = vertices.T * m
        else:
            xyz = vertices.T.copy()

        xyz += scale * (ijk - grid_shape / 2.)[:, None]

        x, y, z = xyz
        ff.append(count + faces)
        xx.append(x)
        yy.append(y)
        zz.append(z)
        mm.append(m)

        count += len(x)

    ff, xx, yy, zz, mm = (np.concatenate(arrs) for arrs in (ff, xx, yy, zz, mm))
    mlab.triangular_mesh(xx, yy, zz, ff, scalars=mm, colormap=colormap)

    if image is not None:
        mlab.imshow(image, colormap='gray', interpolate=False)

    mlab.colorbar()
    mlab.show()
Code example #13
File: crosstalk.py Project: Garyfallidis/trn
def get_data(name='101_32'):    
    bvals=np.loadtxt(parameters[name][0]+'/'+parameters[name][1]+parameters[name][2])
    bvecs=np.loadtxt(parameters[name][0]+'/'+parameters[name][1]+parameters[name][3]).T
    img=nib.load(parameters[name][0]+'/'+parameters[name][1]+parameters[name][4])     
    return img.get_data(),bvals,bvecs


#siem64 =  nipy.load_image('/home/ian/Devel/dipy/dipy/core/tests/data/small_64D.gradients.npy')

data102,affine102,bvals102,dsi102=dcm.read_mosaic_dir('/home/ian/Data/Frank_Eleftherios/frank/20100511_m030y_cbu100624/08_ep2d_advdiff_101dir_DSI')

bvals102=bvals102.real
dsi102=dsi102.real

v362,f362 = sphere_vf_from('symmetric362')
v642,f642 = sphere_vf_from('symmetric642')

d = 0.0015
S0 = 100
f = [.33,.33,.33]
#f = [1.,0.]

b = 1200

#needles = np.array([-np.pi/4,np.pi/4])
needles2d = np.array([0,np.pi/4.,np.pi/2.])

angles2d = np.linspace(-np.pi, np.pi,100)

def signal2d(S0,f,b,d,needles2d,angles2d):
Code example #14
def test_dsi():

    #load odf sphere
    vertices, faces = sphere_vf_from('symmetric724')
    edges = unique_edges(faces)
    half_vertices, half_edges, half_faces = reduce_antipodal(vertices, faces)

    #load bvals and gradients
    btable = np.loadtxt(get_data('dsi515btable'))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[50, 50, 0],
                             snr=None)
    #pdf0,odf0,peaks0=standard_dsi_algorithm(S,bvals,bvecs)
    S2 = S.copy()
    S2 = S2.reshape(1, len(S))

    odf_sphere = (vertices, faces)
    ds = DiffusionSpectrumModel(bvals, bvecs, odf_sphere)
    dsfit = ds.fit(S)
    assert_equal((dsfit.peak_values > 0).sum(), 3)

    #change thresholds
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 30
    dsfit = ds.fit(S)
    assert_equal((dsfit.peak_values > 0).sum(), 2)

    #assert_almost_equal(np.sum(ds.pdf(S)-pdf0),0)
    #assert_almost_equal(np.sum(ds.odf(ds.pdf(S))-odf0),0)

    assert_almost_equal(dsfit.gfa, np.array([0.5749720469955439]))

    #1 fiber
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[100, 0, 0],
                             snr=None)
    ds = DiffusionSpectrumModel(bvals, bvecs, odf_sphere)
    dsfit = ds.fit(S)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 1)

    #2 fibers
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[50, 50, 0],
                             snr=None)
    ds = DiffusionSpectrumModel(bvals, bvecs, odf_sphere)
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 20
    dsfit = ds.fit(S)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 2)

    #Give me 2 directions
    assert_equal(len(dsfit.get_directions()), 2)

    #3 fibers
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[33, 33, 33],
                             snr=None)
    ds = DiffusionSpectrumModel(bvals, bvecs, odf_sphere)
    ds.relative_peak_threshold = 0.5
    dsfit = ds.fit(S, return_odf=True)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 3)

    #Give me 3 directions
    assert_equal(len(dsfit.get_directions()), 3)

    #Recalculate the odf with a different sphere.

    vertices, faces = sphere_vf_from('symmetric724')

    odf1 = dsfit.odf()
    print(len(odf1))

    odf2 = dsfit.odf((vertices, faces))
    print(len(odf2))

    assert_array_almost_equal(odf1, odf2)

    #isotropic
    S, stics = SticksAndBall(bvals,
                             bvecs,
                             d=0.0015,
                             S0=100,
                             angles=[(0, 0), (90, 0), (90, 90)],
                             fractions=[0, 0, 0],
                             snr=None)
    ds = DiffusionSpectrumModel(bvals, bvecs, odf_sphere)
    dsfit = ds.fit(S)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 0)
Code example #15
File: dni.py Project: franzmelchiori/dipy
    def __init__(self, data, bvals, gradients,odf_sphere='symmetric362', 
                 mask=None,
                 half_sphere_grads=False,
                 auto=True,
                 save_odfs=False,
                 fast=True):
        '''
        Parameters
        -----------
        data : array, shape(X,Y,Z,D), or (X,D)
        bvals : array, shape (N,)
        gradients : array, shape (N,3) also known as bvecs        
        odf_sphere : str or tuple, optional
            If str, then load sphere of given name using ``get_sphere``.
            If tuple, gives (vertices, faces) for sphere.
        filter : array, shape(len(vertices),) 
            default is None (using standard hanning filter for DSI)
        half_sphere_grads : boolean, default False
            In order to create the q-space we use the bvals and gradients.
            If the gradients cover only one hemisphere, set this to True so
            that they (and the data) can be mirrored to cover the full sphere.
        auto : boolean, default True
            If True then the processing of all voxels starts automatically
            with the class constructor; if False you will have to call .fit()
            to do the heavy-duty processing for every voxel.
        save_odfs : boolean, default False
            save odfs, which is memory expensive

        See also
        ----------
        dipy.reconst.dti.Tensor, dipy.reconst.dsi.DiffusionSpectrum
        '''
        
        #read the vertices and faces for the odf sphere
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.odf_vertices=odf_vertices
        self.odf_faces=odf_faces
        self.odfn=len(self.odf_vertices)
        self.save_odfs=save_odfs
        
        #check if bvectors are provided only on a hemisphere
        if half_sphere_grads==True:
            bvals=np.append(bvals.copy(),bvals[1:].copy())
            gradients=np.append(gradients.copy(),-gradients[1:].copy(),axis=0)
            data=np.append(data.copy(),data[...,1:].copy(),axis=-1)
        
        #load bvals and bvecs
        self.bvals=bvals
        gradients[np.isnan(gradients)] = 0.
        self.gradients=gradients
        #save number of total diffusion volumes
        self.dn=data.shape[-1]        
        self.data=data
        self.datashape=data.shape #initial shape  
        self.mask=mask
        #odf sampling radius  
        self.radius=np.arange(0,6,.2)
        #self.radiusn=len(self.radius)
        #self.create_qspace(bvals,gradients,16,8)
        #peak threshold
        self.peak_thr=.4
        self.iso_thr=.7
        #calculate coordinates of equators
        #self.radon_params()
        #precompute coordinates for pdf interpolation
        #self.precompute_interp_coords()        
        #self.precompute_fast_coords()
        self.zone=5.
        #self.precompute_equator_indices(self.zone)
        #precompute botox weighting
        #self.precompute_botox(0.05,.3)
        self.gaussian_weight=0.1
        #self.precompute_angular(self.gaussian_weight)
               
        self.fast=fast        
        if fast==True:            
            self.odf=self.fast_odf
        else:
            self.odf=self.slow_odf
            
        self.update()
                    
        if auto:
            self.fit()
Code example #16
File: dandelion.py Project: cournape/dipy
    def __init__(self, data, bvals, gradients, smoothing=1.,
                 odf_sphere='symmetric362', mask=None):
        '''
        Parameters
        -----------
        data : array, shape(X,Y,Z,D)
        bvals : array, shape (N,)
        gradients : array, shape (N,3) also known as bvecs
        smoothing : float, smoothing parameter
        odf_sphere : str or tuple, optional
            If str, then load sphere of given name using ``get_sphere``.
            If tuple, gives (vertices, faces) for sphere.

        See also
        ----------
        dipy.reconst.dti.Tensor, dipy.reconst.gqi.GeneralizedQSampling
        '''
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.odf_vertices=odf_vertices
        self.bvals=bvals

        gradients[np.isnan(gradients)] = 0.
        self.gradients=gradients
        self.weighting=np.abs(np.dot(gradients,self.odf_vertices.T))     
        #self.weighting=self.weighting/np.sum(self.weighting,axis=0)

        S=data
        datashape=S.shape #initial shape
        msk=None #tmp mask

        if len(datashape)==4:
            x,y,z,g=S.shape        
            S=S.reshape(x*y*z,g)
            XA = np.zeros((x*y*z,5))
            IN = np.zeros((x*y*z,5))
            if mask != None:
                if mask.shape[:3]==datashape[:3]:
                    msk=mask.ravel().copy()
                    
        if len(datashape)==2:
            x,g= S.shape
            XA = np.zeros((x,5))
            IN = np.zeros((x,5))
        
        if mask !=None:
            for (i,s) in enumerate(S):                            
                if msk[i]>0:               
                    
                    odf=self.spherical_diffusivity(s)                    
                    peaks,inds=peak_finding(odf,odf_faces)            
                    l=min(len(peaks),5)
                    XA[i][:l] = peaks[:l]
                    IN[i][:l] = inds[:l]

        if mask==None:
            for (i,s) in enumerate(S):                            

                odf=self.spherical_diffusivity(s)
                peaks,inds=peak_finding(odf,odf_faces)            
                l=min(len(peaks),5)
                XA[i][:l] = peaks[:l]
                IN[i][:l] = inds[:l]
                
        if len(datashape) == 4:
            self.XA=XA.reshape(x,y,z,5)    
            self.IN=IN.reshape(x,y,z,5)
                    
        if len(datashape) == 2:
            self.XA=XA
            self.IN=IN            
Code example #17
File: test_gqi.py Project: iannimmosmith/dipy
def test_gqi():

    # load odf sphere
    vertices, faces = sphere_vf_from("symmetric724")
    edges = unique_edges(faces)
    half_vertices, half_edges, half_faces = reduce_antipodal(vertices, faces)

    # load bvals and gradients
    btable = np.loadtxt(get_data("dsi515btable"))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    S, stics = SticksAndBall(
        bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0), (90, 0), (90, 90)], fractions=[50, 50, 0], snr=None
    )
    # pdf0,odf0,peaks0=standard_dsi_algorithm(S,bvals,bvecs)
    S2 = S.copy()
    S2 = S2.reshape(1, len(S))

    odf_sphere = (vertices, faces)
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    dsfit = ds.fit(S)
    assert_equal((dsfit.peak_values > 0).sum(), 3)

    # change thresholds
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 30
    dsfit = ds.fit(S)
    assert_equal((dsfit.peak_values > 0).sum(), 2)

    # 1 fiber
    S, stics = SticksAndBall(
        bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0), (90, 0), (90, 90)], fractions=[100, 0, 0], snr=None
    )
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 20
    dsfit = ds.fit(S)
    QA = dsfit.qa
    # 1/0
    assert_equal(np.sum(QA > 0), 1)

    # 2 fibers
    S, stics = SticksAndBall(
        bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0), (90, 0), (90, 90)], fractions=[50, 50, 0], snr=None
    )
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    ds.relative_peak_threshold = 0.5
    ds.angular_distance_threshold = 20
    dsfit = ds.fit(S)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 2)

    # 3 fibers
    S, stics = SticksAndBall(
        bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0), (90, 0), (90, 90)], fractions=[33, 33, 33], snr=None
    )
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    ds.relative_peak_threshold = 0.5
    dsfit = ds.fit(S)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 3)

    # isotropic
    S, stics = SticksAndBall(
        bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0), (90, 0), (90, 90)], fractions=[0, 0, 0], snr=None
    )
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere)
    dsfit = ds.fit(S)
    QA = dsfit.qa
    assert_equal(np.sum(QA > 0), 0)

    # 3 fibers DSI2
    S, stics = SticksAndBall(
        bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0), (90, 0), (90, 90)], fractions=[33, 33, 33], snr=None
    )
    ds = GeneralizedQSamplingModel(bvals, bvecs, odf_sphere, squared=True)
    ds.relative_peak_threshold = 0.5
    dsfit = ds.fit(S, gfa_thr=0.05)
    QA = dsfit.qa

    # 3 fibers DSI2 with a 3D volume
    data = np.zeros((3, 3, 3, len(S)))
    data[..., :] = S.copy()
    dsfit = ds.fit(data, gfa_thr=0.05)
    # 1/0
    assert_array_almost_equal(np.sum(dsfit.peak_values > 0, axis=-1), 3 * np.ones((3, 3, 3)))
Code example #18
File: gqi.py Project: cournape/dipy
    def __init__(self, data, bvals, gradients,
                 Lambda=1.2, odf_sphere='symmetric362', mask=None):
        """ Generates a model-free description for every voxel that can
        be used from simple to very complicated configurations like
        quintuple crossings if your datasets support them.

        You can use this class for every kind of DWI image but it will
        perform much better when you have a balanced sampling scheme.

        Implements equation [9] from Generalized Q-Sampling as
        described in Fang-Cheng Yeh, Van J. Wedeen, Wen-Yih Isaac Tseng.
        Generalized Q-Sampling Imaging. IEEE TMI, 2010.

        Parameters
        -----------
        data: array, shape(X,Y,Z,D)
        bvals: array, shape (N,)
        gradients: array, shape (N,3) also known as bvecs
        Lambda: float, optional
            smoothing parameter - diffusion sampling length
        odf_sphere : None or str or tuple, optional
            input that will result in vertex, face arrays for a sphere.
        mask : None or ndarray, optional

        Key Properties
        ---------------
        QA : array, shape(X,Y,Z,5), quantitative anisotropy
        IN : array, shape(X,Y,Z,5), indices of QA, qa unit directions
        fwd : float, normalization parameter

        Notes
        -------
        In order to reconstruct the spin distribution function  a nice symmetric
        evenly distributed sphere is provided using 362 points. This is usually
        sufficient for most of the datasets.

        See also
        --------
        dipy.tracking.propagation.EuDX, dipy.reconst.dti.Tensor,
        dipy.data.__init__.get_sphere
        """
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.odf_vertices=odf_vertices

        # 0.01506 = 6*D where D is the free water diffusion coefficient 
        # l_values sqrt(6 D tau) D free water diffusion coefficient and
        # tau included in the b-value
        scaling = np.sqrt(bvals*0.01506)
        tmp=np.tile(scaling, (3,1))

        #the b vectors might have nan values where they correspond to b
        #value equals with 0
        gradients[np.isnan(gradients)]= 0.
        gradsT = gradients.T
        b_vector=gradsT*tmp # element-wise also known as the Hadamard product

        #q2odf_params=np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi)              

        q2odf_params=np.real(np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi))
        
        #q2odf_params[np.isnan(q2odf_params)]= 1.

        #define total mask 
        #tot_mask = (mask > 0) & (data[...,0] > thresh)
        
        S=data

        datashape=S.shape #initial shape
        msk=None #tmp mask

        if len(datashape)==4:

            x,y,z,g=S.shape        
            S=S.reshape(x*y*z,g)
            QA = np.zeros((x*y*z,5))
            IN = np.zeros((x*y*z,5))

            if mask != None:
                if mask.shape[:3]==datashape[:3]:
                    msk=mask.ravel().copy()
                    #print 'msk.shape',msk.shape

        if len(datashape)==2:

            x,g= S.shape
            QA = np.zeros((x,5))
            IN = np.zeros((x,5))  
            
        glob_norm_param = 0

        self.q2odf_params=q2odf_params

        #Calculate Quantitative Anisotropy and 
        #find the peaks and the indices
        #for every voxel
        
        if mask !=None:
            for (i,s) in enumerate(S):                            
                if msk[i]>0:
                    #Q to ODF
                    odf=np.dot(s,q2odf_params)            
                    peaks,inds=rp.peak_finding(odf,odf_faces)            
                    glob_norm_param=max(np.max(odf),glob_norm_param)
                    #remove the isotropic part
                    peaks = peaks - np.min(odf)
                    l=min(len(peaks),5)
                    QA[i][:l] = peaks[:l]
                    IN[i][:l] = inds[:l]

        if mask==None:
            for (i,s) in enumerate(S):                            
                #Q to ODF
                odf=np.dot(s,q2odf_params)            
                peaks,inds=rp.peak_finding(odf,odf_faces)            
                glob_norm_param=max(np.max(odf),glob_norm_param)
                #remove the isotropic part
                peaks = peaks - np.min(odf)
                l=min(len(peaks),5)
                QA[i][:l] = peaks[:l]
                IN[i][:l] = inds[:l]

        #normalize
        QA/=glob_norm_param
       
        if len(datashape) == 4:
            self.QA=QA.reshape(x,y,z,5)    
            self.IN=IN.reshape(x,y,z,5)
            
        if len(datashape) == 2:
            self.QA=QA
            self.IN=IN
            
        self.glob_norm_param = glob_norm_param
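For a single voxel the reconstruction above reduces to a few array operations. A minimal sketch of that core step, reusing the names from the constructor (bvals, gradients, odf_vertices, odf_faces and the rp.peak_finding helper); s is one voxel's signal of shape (N,) and Lambda keeps its default of 1.2.

# Sketch only: GQI equation [9] for one voxel.
scaling = np.sqrt(bvals * 0.01506)                     # sqrt(6*D*tau) per volume
b_vector = gradients.T * np.tile(scaling, (3, 1))      # shape (3, N)
q2odf_params = np.real(np.sinc(np.dot(b_vector.T, odf_vertices.T) * 1.2 / np.pi))
odf = np.dot(s, q2odf_params)                          # ODF sampled on the sphere vertices
peaks, inds = rp.peak_finding(odf, odf_faces)
qa = (peaks - np.min(odf))[:5]                         # quantitative anisotropy (up to 5 peaks)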
Code example #19
File: eit.py Project: Garyfallidis/dipy
    def __init__(self,
                 bvals,
                 gradients,
                 odf_sphere='symmetric362',
                 half_sphere_grads=False,
                 fast=True):
        ''' Reconstruct the signal using Diffusion Nabla Imaging  
    
        As described in E. Garyfallidis, "Towards an accurate brain
        tractography", PhD thesis, 2011.
        
        Parameters
        -----------
        bvals : array, shape (N,)
        gradients : array, shape (N,3) also known as bvecs        
        odf_sphere : str or tuple, optional
            If str, then load sphere of given name using ``get_sphere``.
            If tuple, gives (vertices, faces) for sphere.
        filter : array, shape(len(vertices),) 
            default is None (using standard hanning filter for DSI)
        half_sphere_grads : boolean, default False
            In order to create the q-space we use the bvals and gradients.
            If the gradients cover only one hemisphere, set this to True so
            that they can be mirrored to cover the full sphere.

        See also
        ----------
        dipy.reconst.eit.EquatorialInversionModel, dipy.reconst.dti.TensorModel, dipy.reconst.dsi.DiffusionSpectrumModel
        '''

        #check if bvectors are provided only on a hemisphere
        if half_sphere_grads == True:
            pass
            #bvals=np.append(bvals.copy(),bvals[1:].copy())
            #gradients=np.append(gradients.copy(),-gradients[1:].copy(),axis=0)
            #data=np.append(data.copy(),data[...,1:].copy(),axis=-1)

        #load bvals and bvecs
        self.bvals = bvals
        gradients[np.isnan(gradients)] = 0.
        self.gradients = gradients
        #save number of total diffusion volumes
        self.dn = self.gradients.shape[0]  #data.shape[-1]
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.set_odf_vertices(odf_vertices, None, odf_faces)
        self.odfn = odf_vertices.shape[0]

        #odf sampling radius
        self.radius = np.arange(0, 5, .2)
        #self.radiusn=len(self.radius)
        #self.create_qspace(bvals,gradients,16,8)
        #peak threshold
        #self.peak_thr=.7
        #equatorial zone
        self.zone = 5.
        self.gaussian_weight = 0.05
        self.fast = fast
        if fast == True:
            self.evaluate_odf = self.fast_odf
        else:
            self.evaluate_odf = self.slow_odf
        self.precompute()
Code example #20
    def __init__(self,
                 data,
                 bvals,
                 gradients,
                 odf_sphere='symmetric362',
                 mask=None,
                 half_sphere_grads=False,
                 auto=True,
                 save_odfs=False,
                 fast=True):
        '''
        Parameters
        -----------
        data : array, shape(X,Y,Z,D), or (X,D)
        bvals : array, shape (N,)
        gradients : array, shape (N,3) also known as bvecs        
        odf_sphere : str or tuple, optional
            If str, then load sphere of given name using ``get_sphere``.
            If tuple, gives (vertices, faces) for sphere.
        filter : array, shape(len(vertices),) 
            default is None (using standard hanning filter for DSI)
        half_sphere_grads : boolean, default False
            In order to create the q-space we use the bvals and gradients.
            If the gradients cover only one hemisphere, set this to True so
            that they (and the data) can be mirrored to cover the full sphere.
        auto : boolean, default True
            If True then the processing of all voxels starts automatically
            with the class constructor; if False you will have to call .fit()
            to do the heavy-duty processing for every voxel.
        save_odfs : boolean, default False
            save odfs, which is memory expensive

        See also
        ----------
        dipy.reconst.dti.Tensor, dipy.reconst.dsi.DiffusionSpectrum
        '''

        #read the vertices and faces for the odf sphere
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.odf_vertices = odf_vertices
        self.odf_faces = odf_faces
        self.odfn = len(self.odf_vertices)
        self.save_odfs = save_odfs

        #check if bvectors are provided only on a hemisphere
        if half_sphere_grads == True:
            bvals = np.append(bvals.copy(), bvals[1:].copy())
            gradients = np.append(gradients.copy(),
                                  -gradients[1:].copy(),
                                  axis=0)
            data = np.append(data.copy(), data[..., 1:].copy(), axis=-1)

        #load bvals and bvecs
        self.bvals = bvals
        gradients[np.isnan(gradients)] = 0.
        self.gradients = gradients
        #save number of total diffusion volumes
        self.dn = data.shape[-1]
        self.data = data
        self.datashape = data.shape  #initial shape
        self.mask = mask
        #odf sampling radius
        self.radius = np.arange(0, 6, .2)
        #self.radiusn=len(self.radius)
        #self.create_qspace(bvals,gradients,16,8)
        #peak threshold
        self.peak_thr = .4
        self.iso_thr = .7
        #calculate coordinates of equators
        #self.radon_params()
        #precompute coordinates for pdf interpolation
        #self.precompute_interp_coords()
        #self.precompute_fast_coords()
        self.zone = 5.
        #self.precompute_equator_indices(self.zone)
        #precompute botox weighting
        #self.precompute_botox(0.05,.3)
        self.gaussian_weight = 0.1
        #self.precompute_angular(self.gaussian_weight)

        self.fast = fast
        if fast == True:
            self.odf = self.fast_odf
        else:
            self.odf = self.slow_odf

        self.update()

        if auto:
            self.fit()
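A minimal usage sketch for a voxel-based class like this one, following the green_paint.py script in Code example #11, where DiffusionNabla is constructed with auto=False and fitted explicitly.

# Sketch only: reconstruct one voxel's ODF, then run the full fit.
S2 = S.reshape(1, len(S))            # a single voxel as a (1, D) array
dn = DiffusionNabla(S2, bvals, bvecs, auto=False)
odf = dn.odf(S)                      # ODF of the single signal
dn.fit()                             # heavy-duty processing for every voxel
print(dn.PK)                         # peak values per voxel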
Code example #21
File: _show_odfs.py Project: Garyfallidis/dipy
def show_odfs(odfs,
              vertices_faces,
              image=None,
              colormap='jet',
              scale=2.2,
              norm=True,
              radial_scale=True):
    """
    Display a grid of ODFs.

    Parameters
    ----------
    odfs : (X, Y, Z, M) ndarray
        A 3-D arrangement of orientation distribution functions (ODFs).  At
        each ``(x, y, z)`` position, it contains the values of the
        corresponding ODF evaluated on the M vertices.
    vertices_faces : str or tuple of (vertices, faces)
        A named sphere from `dipy.data.get_sphere`, or a combination of
        `(vertices, faces)`.
    image : (X, Y) ndarray
        Background image (e.g., fractional anisotropy) to display behind the
        ODFs.
    colormap : str
        Color mapping.
    scale : float
        Increasing the scale spaces ODFs further apart.
    norm : bool
        Whether or not to normalize each individual ODF (divide by its maximum
        absolute value).
    radial_scale : bool
        Whether or not to change the radial shape of the ODF according to its
        scalar value.  If set to False, the ODF is displayed as a sphere.

    Notes
    -----
    Mayavi gets really slow when `triangular_mesh` is called too many times,
    so this function stacks ODF data and calls `triangular_mesh` once.

    Examples
    --------
    >>> from dipy.data import get_sphere
    >>> verts, faces = get_sphere('symmetric724')

    >>> angle = np.linspace(0, 2*np.pi, len(verts))
    >>> odf1 = np.sin(angle)
    >>> odf2 = np.cos(angle)
    >>> odf3 = odf1**2 * odf2
    >>> odf4 = odf1 + odf2**2

    >>> odfs = [[[odf1, odf2],
    ...          [odf3, odf4]]]

    >>> show_odfs(odfs, (verts, faces), scale=5)

    """
    vertices, faces = sphere_vf_from(vertices_faces)

    odfs = np.asarray(odfs)
    if odfs.ndim != 4:
        raise ValueError("ODFs must by an (X,Y,Z,M) array. " + "Has shape " +
                         str(odfs.shape))

    grid_shape = np.array(odfs.shape[:3])
    faces = np.asarray(faces, dtype=int)

    xx, yy, zz, ff, mm = [], [], [], [], []
    count = 0

    for ijk in np.ndindex(*grid_shape):
        m = odfs[ijk]

        if norm:
            m /= abs(m).max()

        if radial_scale:
            xyz = vertices.T * m
        else:
            xyz = vertices.T.copy()

        xyz += scale * (ijk - grid_shape / 2.)[:, None]

        x, y, z = xyz
        ff.append(count + faces)
        xx.append(x)
        yy.append(y)
        zz.append(z)
        mm.append(m)

        count += len(x)

    ff, xx, yy, zz, mm = (np.concatenate(arrs)
                          for arrs in (ff, xx, yy, zz, mm))
    mlab.triangular_mesh(xx, yy, zz, ff, scalars=mm, colormap=colormap)

    if image is not None:
        mlab.imshow(image, colormap='gray', interpolate=False)

    mlab.colorbar()
    mlab.show()
Code example #22
File: gqi.py Project: Garyfallidis/dipy
    def __init__(self, bvals, gradients, 
                 odf_sphere='symmetric642', Lambda=1.2, squared=False):
        r""" Generates a model-free description for every voxel that can
        be used from simple to very complicated configurations like
        quintuple crossings if your datasets support them.

        You can use this class for every kind of DWI image but it will
        perform much better when you have a balanced sampling scheme.

        Implements equation [9] from Generalized Q-Sampling as
        described in Fang-Cheng Yeh, Van J. Wedeen, Wen-Yih Isaac Tseng.
        Generalized Q-Sampling Imaging. IEEE TMI, 2010.

        It also implements the radially squared version known as GQI2 as
        described in Garyfallidis et al. "Towards an accurate brain
        tractography", PhD thesis, Cambridge University, 2012.

        Parameters
        -----------
        bvals: array, shape (N,)
        gradients: array, shape (N,3) also known as bvecs
        Lambda: float, optional
            smoothing parameter - diffusion sampling length
        odf_sphere : None or str or tuple, optional
            input that will result in vertex, face arrays for a sphere.
        squared : boolean, True or False
            If True it will calculate the odf using the $L^2$ weighting, which
            provides higher angular accuracy.

        Key Properties
        ---------------
        QA : array, shape(X,Y,Z,5), quantitative anisotropy
        IN : array, shape(X,Y,Z,5), indices of QA, qa unit directions
        fwd : float, normalization parameter

        Notes
        -------
        In order to reconstruct the spin distribution function  a nice symmetric
        evenly distributed sphere is provided using 642+ points. This is usually
        sufficient for most of the datasets. 

        See also
        --------
        dipy.reconst.dsi.DiffusionSpectrumModel, dipy.data.get_sphere

        """
        
        '''
        self.odf_vertices=np.ascontiguousarray(odf_vertices)
        self.odf_faces=np.ascontiguousarray(odf_faces)
        self.odfn=len(self.odf_vertices)
        self.mask=mask
        self.data=data
        self.save_odfs=save_odfs
        '''
        
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.set_odf_vertices(odf_vertices,None,odf_faces)
        self.squared=squared
        
        # 0.01506 = 6*D where D is the free water diffusion coefficient 
        # l_values sqrt(6 D tau) D free water diffusion coefficient and
        # tau included in the b-value
        scaling = np.sqrt(bvals*0.01506)
        tmp=np.tile(scaling,(3,1))
        #the b vectors might have nan values where they correspond to b
        #value equals with 0
        gradients[np.isnan(gradients)]= 0.
        gradsT = gradients.T
        b_vector=gradsT*tmp # element-wise also known as the Hadamard product
                
        if squared==True:
            vf=np.vectorize(self.squared_radial_component)
            #which implements
            #def H(x):
            #    res=(2*x*np.cos(x) + (x**2-2)*np.sin(x))/x**3
            #    res[np.isnan(res)]=1/3.
            #    return res            
            self.input=np.dot(b_vector.T, self.odf_vertices.T) * Lambda/np.pi
            self.q2odf_params=np.real(vf(np.dot(b_vector.T, self.odf_vertices.T) * Lambda/np.pi))
        else:
            self.q2odf_params=np.real(np.sinc(np.dot(b_vector.T, self.odf_vertices.T) * Lambda/np.pi))
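Equation [9] of GQI, as set up in this constructor, reduces to one matrix: q2odf_params has shape (N gradients, M sphere directions), with entry sinc((lambda/pi) * sqrt(6*D*b_i) * g_i . v_j), and projecting a voxel's signal vector through it gives the ODF samples (presumably what the model's fit does per voxel). A rough standalone sketch with made-up shapes and random placeholder gradients, not the class's actual fit machinery:

import numpy as np

# hypothetical inputs: N measurements, M sphere directions
N, M = 64, 362
bvals = np.full(N, 1000.0)                       # s/mm^2
bvecs = np.random.randn(N, 3)
bvecs /= np.linalg.norm(bvecs, axis=1, keepdims=True)
verts = np.random.randn(M, 3)
verts /= np.linalg.norm(verts, axis=1, keepdims=True)
Lambda = 1.2

# b_vector[i] = sqrt(6 * D * b_i) * g_i, exactly as in the constructor
b_vector = bvecs * np.sqrt(bvals * 0.01506)[:, None]
# q2odf_params[i, j] = sinc(b_vector[i] . v_j * Lambda / pi)
q2odf_params = np.real(np.sinc(b_vector.dot(verts.T) * Lambda / np.pi))

signal = np.random.rand(N)                       # one voxel's DWI signal
odf = signal.dot(q2odf_params)                   # ODF sampled on the M directions
print(odf.shape)                                 # (362,)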
Code example #23
File: gqi.py  Project: jgors/dipy
    def __init__(self, data, bvals, gradients,
                 Lambda=1.2, odf_sphere='symmetric362', mask=None,squared=False,auto=True,save_odfs=False):
        r""" Generates a model-free description for every voxel that can
        be used from simple to very complicated configurations like
        quintuple crossings if your datasets support them.

        You can use this class for every kind of DWI image but it will
        perform much better when you have a balanced sampling scheme.

        Implements equation [9] from Generalized Q-Sampling as
        described in Fang-Cheng Yeh, Van J. Wedeen, Wen-Yih Isaac Tseng.
        Generalized Q-Sampling Imaging. IEEE TMI, 2010.

        Parameters
        -----------
        data: array, shape(X,Y,Z,D)
        bvals: array, shape (N,)
        gradients: array, shape (N,3) also known as bvecs
        Lambda: float, optional
            smoothing parameter - diffusion sampling length
        odf_sphere : None or str or tuple, optional
            input that will result in vertex, face arrays for a sphere.
        mask : None or ndarray, optional
        squared : boolean, True or False
            If True it will calculate the odf using the $L^2$ weighting.
        auto : boolean, default True
            If True, the processing of all voxels starts automatically in the
            class constructor; if False, you will have to call .fit() to do
            the heavy-duty processing for every voxel.
        save_odfs : boolean, default False
            save odfs, which is memory expensive

        Key Properties
        ---------------
        QA : array, shape(X,Y,Z,5), quantitative anisotropy
        IN : array, shape(X,Y,Z,5), indices of QA, qa unit directions
        fwd : float, normalization parameter

        Notes
        -------
        To reconstruct the spin distribution function, a symmetric, evenly
        distributed sphere with 362 points is provided. This is usually
        sufficient for most datasets.

        See also
        --------
        dipy.tracking.propagation.EuDX, dipy.reconst.dti.Tensor,
        dipy.data.__init__.get_sphere
        """
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.odf_vertices=odf_vertices
        self.odf_faces=odf_faces
        self.odfn=len(self.odf_vertices)
        self.mask=mask
        self.data=data
        self.save_odfs=save_odfs
        self.squared=squared
        
        # 0.01506 = 6*D where D is the free water diffusion coefficient 
        # l_values sqrt(6 D tau) D free water diffusion coefficient and
        # tau included in the b-value
        scaling = np.sqrt(bvals*0.01506)
        tmp=np.tile(scaling,(3,1))
        #the b vectors might have nan values where they correspond to b
        #value equals with 0
        gradients[np.isnan(gradients)]= 0.
        gradsT = gradients.T
        b_vector=gradsT*tmp # element-wise also known as the Hadamard product
        #q2odf_params=np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi)
        
        
        if squared==True:
            vf=np.vectorize(self.squared_radial_component)
            #def H(x):
            #    res=(2*x*np.cos(x) + (x**2-2)*np.sin(x))/x**3
            #    res[np.isnan(res)]=1/3.
            #    return res
            
            self.input=np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi
            self.q2odf_params=np.real(vf(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi))
            #self.q2odf_params=np.real(H(1*np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi))
        else:
            self.q2odf_params=np.real(np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi))
                
        #q2odf_params[np.isnan(q2odf_params)]= 1.
        #define total mask 
        #tot_mask = (mask > 0) & (data[...,0] > thresh)
        self.peak_thr=.3
        self.iso_thr=.9        
        
        if auto:
            self.fit()
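When squared=True, the constructor vectorises self.squared_radial_component, which the inline comments describe as H(x) = (2x*cos(x) + (x**2 - 2)*sin(x)) / x**3 with the indeterminate value at x = 0 replaced by its limit 1/3. A standalone sketch of that helper follows; it is an approximation of what the method presumably does, and the real dipy implementation may handle the singularity differently:

import numpy as np

def squared_radial_component(x, tol=1e-8):
    """H(x) = (2x*cos(x) + (x**2 - 2)*sin(x)) / x**3, with H(0) = 1/3."""
    x = np.asarray(x, dtype=float)
    with np.errstate(divide='ignore', invalid='ignore'):
        result = (2 * x * np.cos(x) + (x ** 2 - 2) * np.sin(x)) / x ** 3
    # near x = 0 the expression is 0/0; its Taylor expansion gives 1/3
    return np.where(np.abs(x) < tol, 1 / 3., result)

print(squared_radial_component(np.array([0.0, 0.5, 2.0])))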
Code example #24
File: gqi.py  Project: iannimmosmith/dipy
    def __init__(self, bvals, gradients, odf_sphere="symmetric642", Lambda=1.2, squared=False):
        r""" Generates a model-free description for every voxel that can
        be used from simple to very complicated configurations like
        quintuple crossings if your datasets support them.

        You can use this class for every kind of DWI image but it will
        perform much better when you have a balanced sampling scheme.

        Implements equation [9] from Generalized Q-Sampling as
        described in Fang-Cheng Yeh, Van J. Wedeen, Wen-Yih Isaac Tseng.
        Generalized Q-Sampling Imaging. IEEE TMI, 2010.

        It also implements the radially squared version, known as GQI2, as
        described in Garyfallidis et al. "Towards an accurate brain
        tractography", PhD thesis, Cambridge University, 2012.

        Parameters
        -----------
        bvals: array, shape (N,)
        gradients: array, shape (N,3) also known as bvecs
        Lambda: float, optional
            smoothing parameter - diffusion sampling length
        odf_sphere : None or str or tuple, optional
            input that will result in vertex, face arrays for a sphere.
        squared : boolean, True or False
            If True, calculate the ODF using the $L^2$ (radially squared)
            weighting, which provides higher angular accuracy.

        Key Properties
        ---------------
        QA : array, shape(X,Y,Z,5), quantitative anisotropy
        IN : array, shape(X,Y,Z,5), indices of QA, qa unit directions
        fwd : float, normalization parameter

        Notes
        -------
        To reconstruct the spin distribution function, a symmetric, evenly
        distributed sphere with 642+ points is provided. This is usually
        sufficient for most datasets.

        See also
        --------
        dipy.reconst.dsi.DiffusionSpectrumModel, dipy.data.get_sphere

        """

        """
        self.odf_vertices=np.ascontiguousarray(odf_vertices)
        self.odf_faces=np.ascontiguousarray(odf_faces)
        self.odfn=len(self.odf_vertices)
        self.mask=mask
        self.data=data
        self.save_odfs=save_odfs
        """

        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.set_odf_vertices(odf_vertices, None, odf_faces)
        self.squared = squared

        # 0.01506 = 6*D where D is the free water diffusion coefficient
        # l_values sqrt(6 D tau) D free water diffusion coefficient and
        # tau included in the b-value
        scaling = np.sqrt(bvals * 0.01506)
        tmp = np.tile(scaling, (3, 1))
        # the b vectors might have nan values where they correspond to b
        # value equals with 0
        gradients[np.isnan(gradients)] = 0.0
        gradsT = gradients.T
        b_vector = gradsT * tmp  # element-wise also known as the Hadamard product

        if squared == True:
            vf = np.vectorize(self.squared_radial_component)
            # which implements
            # def H(x):
            #    res=(2*x*np.cos(x) + (x**2-2)*np.sin(x))/x**3
            #    res[np.isnan(res)]=1/3.
            #    return res
            self.input = np.dot(b_vector.T, self.odf_vertices.T) * Lambda / np.pi
            self.q2odf_params = np.real(vf(np.dot(b_vector.T, self.odf_vertices.T) * Lambda / np.pi))
        else:
            self.q2odf_params = np.real(np.sinc(np.dot(b_vector.T, self.odf_vertices.T) * Lambda / np.pi))
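The constant 0.01506 that appears in each of these constructors is 6*D for free water (D is roughly 2.51e-3 mm^2/s, i.e. 0.01506/6); since the diffusion time tau is already folded into the b-value, the q-space sampling length per measurement is simply sqrt(6*D*b). A one-line worked example with a hypothetical b-value:

import numpy as np

b = 1000.0                        # s/mm^2, hypothetical shell
l_value = np.sqrt(0.01506 * b)    # sqrt(6 * D * b), roughly 3.88
print(l_value)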
Code example #25
File: dsi.py  Project: jgors/dipy
    def __init__(self, data, bvals, gradients,odf_sphere='symmetric362',
                mask=None,
                half_sphere_grads=False,
                auto=True,
                save_odfs=False):
        '''
        Parameters
        -----------
        data : array, shape(X,Y,Z,D), or (X,D)
        bvals : array, shape (N,)
        gradients : array, shape (N,3) also known as bvecs        
        odf_sphere : str or tuple, optional
            If str, then load sphere of given name using ``get_sphere``.
            If tuple, gives (vertices, faces) for sphere.
        filter : array, shape (len(vertices),)
            default is None (use the standard Hanning filter for DSI)
        half_sphere_grads : boolean, default False
            The q-space is built from the bvals and gradients. If the
            gradients cover only one hemisphere, set this to True and the
            missing hemisphere is created by mirroring the bvals, gradients
            and data.
        auto : boolean, default True
            If True, the processing of all voxels starts automatically in the
            class constructor; if False, you will have to call .fit() to do
            the heavy-duty processing for every voxel.
        save_odfs : boolean, default False
            save odfs, which is memory expensive  

        See also
        ----------
        dipy.reconst.dti.Tensor, dipy.reconst.gqi.GeneralizedQSampling
        '''
        
        #read the vertices and faces for the odf sphere
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.odf_vertices=odf_vertices
        self.odf_faces=odf_faces
        self.odfn=len(self.odf_vertices)
        self.save_odfs=save_odfs
        
        #check if bvectors are provided only on a hemisphere
        if half_sphere_grads==True:
            bvals=np.append(bvals.copy(),bvals[1:].copy())
            gradients=np.append(gradients.copy(),-gradients[1:].copy(),axis=0)
            data=np.append(data.copy(),data[...,1:].copy(),axis=-1)
        
        #load bvals and bvecs
        self.bvals=bvals
        gradients[np.isnan(gradients)] = 0.
        self.gradients=gradients
        #save number of total diffusion volumes
        self.dn=data.shape[-1]        
        self.data=data
        self.datashape=data.shape #initial shape  
        self.mask=mask                     
        #3d volume for Sq
        self.sz=16
        #necessary shifting for centering
        self.origin=8
        #hanning filter width
        self.filter_width=32.
                     
        #odf collecting radius
        self.radius=np.arange(2.1,6,.2)
        self.update()
            
        if auto:
            self.fit()        
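The half_sphere_grads branch in this constructor completes a hemispheric acquisition by appending the antipodal b-vectors and duplicating the matching bvals and data volumes. A minimal standalone sketch of that mirroring, with placeholder data and assuming index 0 is the single b=0 measurement, as the [1:] slicing implies:

import numpy as np

# hypothetical hemispheric scheme: one b=0 plus three DWI directions
bvals = np.array([0., 1000., 1000., 1000.])
bvecs = np.array([[0., 0., 0.],
                  [1., 0., 0.],
                  [0., 1., 0.],
                  [0., 0., 1.]])
data = np.random.rand(5, 5, 5, 4)                 # (X, Y, Z, N) volume

# mirror everything except the b=0 entry, as in the constructor
full_bvals = np.append(bvals, bvals[1:])
full_bvecs = np.append(bvecs, -bvecs[1:], axis=0)
full_data = np.append(data, data[..., 1:], axis=-1)

print(full_bvals.shape, full_bvecs.shape, full_data.shape)  # (7,) (7, 3) (5, 5, 5, 7)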
Code example #26
File: gqi.py  Project: jgors/dipy
    def __init__(self,
                 data,
                 bvals,
                 gradients,
                 Lambda=1.2,
                 odf_sphere='symmetric362',
                 mask=None,
                 squared=False,
                 auto=True,
                 save_odfs=False):
        r""" Generates a model-free description for every voxel that can
        be used from simple to very complicated configurations like
        quintuple crossings if your datasets support them.

        You can use this class for every kind of DWI image but it will
        perform much better when you have a balanced sampling scheme.

        Implements equation [9] from Generalized Q-Sampling as
        described in Fang-Cheng Yeh, Van J. Wedeen, Wen-Yih Isaac Tseng.
        Generalized Q-Sampling Imaging. IEEE TMI, 2010.

        Parameters
        -----------
        data: array, shape(X,Y,Z,D)
        bvals: array, shape (N,)
        gradients: array, shape (N,3) also known as bvecs
        Lambda: float, optional
            smoothing parameter - diffusion sampling length
        odf_sphere : None or str or tuple, optional
            input that will result in vertex, face arrays for a sphere.
        mask : None or ndarray, optional
        squared : boolean, True or False
            If True it will calculate the odf using the $L^2$ weighting.
        auto : boolean, default True
            If True, the processing of all voxels starts automatically in the
            class constructor; if False, you will have to call .fit() to do
            the heavy-duty processing for every voxel.
        save_odfs : boolean, default False
            save odfs, which is memory expensive

        Key Properties
        ---------------
        QA : array, shape(X,Y,Z,5), quantitative anisotropy
        IN : array, shape(X,Y,Z,5), indices of QA, qa unit directions
        fwd : float, normalization parameter

        Notes
        -------
        To reconstruct the spin distribution function, a symmetric, evenly
        distributed sphere with 362 points is provided. This is usually
        sufficient for most datasets.

        See also
        --------
        dipy.tracking.propagation.EuDX, dipy.reconst.dti.Tensor,
        dipy.data.__init__.get_sphere
        """
        odf_vertices, odf_faces = sphere_vf_from(odf_sphere)
        self.odf_vertices = odf_vertices
        self.odf_faces = odf_faces
        self.odfn = len(self.odf_vertices)
        self.mask = mask
        self.data = data
        self.save_odfs = save_odfs
        self.squared = squared

        # 0.01506 = 6*D where D is the free water diffusion coefficient
        # l_values sqrt(6 D tau) D free water diffusion coefficient and
        # tau included in the b-value
        scaling = np.sqrt(bvals * 0.01506)
        tmp = np.tile(scaling, (3, 1))
        #the b vectors might have nan values where they correspond to b
        #value equals with 0
        gradients[np.isnan(gradients)] = 0.
        gradsT = gradients.T
        b_vector = gradsT * tmp  # element-wise also known as the Hadamard product
        #q2odf_params=np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi)

        if squared == True:
            vf = np.vectorize(self.squared_radial_component)
            #def H(x):
            #    res=(2*x*np.cos(x) + (x**2-2)*np.sin(x))/x**3
            #    res[np.isnan(res)]=1/3.
            #    return res

            self.input = np.dot(b_vector.T, odf_vertices.T) * Lambda / np.pi
            self.q2odf_params = np.real(
                vf(np.dot(b_vector.T, odf_vertices.T) * Lambda / np.pi))
            #self.q2odf_params=np.real(H(1*np.dot(b_vector.T, odf_vertices.T) * Lambda/np.pi))
        else:
            self.q2odf_params = np.real(
                np.sinc(np.dot(b_vector.T, odf_vertices.T) * Lambda / np.pi))

        #q2odf_params[np.isnan(q2odf_params)]= 1.
        #define total mask
        #tot_mask = (mask > 0) & (data[...,0] > thresh)
        self.peak_thr = .3
        self.iso_thr = .9

        if auto:
            self.fit()
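Taken together, the constructor signature and the Key Properties section of the docstring suggest a usage pattern along the following lines. This is only a sketch: it assumes the snippet belongs to dipy's old GeneralizedQSampling class in dipy.reconst.gqi and that QA and IN are exposed as plain attributes exactly as the docstring lists them; the data, bvals and gradients below are random placeholders.

import numpy as np
# assumed import path for the class the snippet above comes from
from dipy.reconst.gqi import GeneralizedQSampling

data = np.random.rand(5, 5, 5, 64)       # placeholder DWI volume (X, Y, Z, N)
bvals = np.r_[0., np.full(63, 1000.)]    # placeholder b-values
bvecs = np.random.randn(64, 3)
bvecs[0] = 0.
bvecs[1:] /= np.linalg.norm(bvecs[1:], axis=1, keepdims=True)

gqs = GeneralizedQSampling(data, bvals, bvecs,
                           odf_sphere='symmetric362', auto=False)
gqs.fit()                                 # heavy per-voxel processing
qa = gqs.QA                               # (X, Y, Z, 5) quantitative anisotropy
peaks = gqs.IN                            # (X, Y, Z, 5) peak direction indices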