Example #1
def test_sphere_cart():
    # test arrays of points
    rs, thetas, phis = cart2sphere(*(sphere_points.T))
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal, xyz, sphere_points.T
    # test radius estimation
    big_sph_pts = sphere_points * 10.4
    rs, thetas, phis = cart2sphere(*big_sph_pts.T)
    yield assert_array_almost_equal, rs, 10.4
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal, xyz, big_sph_pts.T, 6
    # test that result shapes match
    x, y, z = big_sph_pts.T
    r, theta, phi = cart2sphere(x[:1], y[:1], z)
    yield assert_equal, r.shape, theta.shape
    yield assert_equal, r.shape, phi.shape
    x, y, z = sphere2cart(r[:1], theta[:1], phi)
    yield assert_equal, x.shape, y.shape
    yield assert_equal, x.shape, z.shape
    # test a scalar point
    pt = sphere_points[3]
    r, theta, phi = cart2sphere(*pt)
    xyz = sphere2cart(r, theta, phi)
    yield assert_array_almost_equal, xyz, pt

    # Test full circle on x=0, y=0, z=0
    x, y, z = sphere2cart(*cart2sphere(0., 0., 0.))
    yield assert_array_equal, (x, y, z), (0., 0., 0.)
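
A minimal standalone sketch (plain NumPy, not dipy's implementation) of the coordinate convention the round-trip test above exercises: r is the radius, theta the inclination from +z in [0, pi], and phi the azimuth returned by arctan2.

import numpy as np

def cart2sphere_ref(x, y, z):
    # reference convention only; assumes r > 0 (dipy handles the origin specially)
    r = np.sqrt(x ** 2 + y ** 2 + z ** 2)
    return r, np.arccos(z / r), np.arctan2(y, x)

def sphere2cart_ref(r, theta, phi):
    st = np.sin(theta)
    return r * np.cos(phi) * st, r * np.sin(phi) * st, r * np.cos(theta)

pts = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [1.0, 1.0, 1.0]])
assert np.allclose(sphere2cart_ref(*cart2sphere_ref(*pts.T)), pts.T)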
Example #2
File: shm.py Project: endolith/dipy
 def sampling_matrix(self, sphere):
     """Returns a matrix that can be used to sample the function from
     coefficients"""
     x, y, z = sphere.vertices.T
     r, pol, azi = cart2sphere(x, y, z)
     S = real_sph_harm(self._m, self._n, azi[:, None], pol[:, None])
     return S
Example #3
def quadrature_points(N=72):
    """Load quadrature points on the sphere.

    Parameters
    ----------
    N : int, {72, 132, 192, 492}
        A quadrature set with N points is loaded.

    Returns
    -------
    theta, phi : (N,) ndarray
        Quadrature point coordinates (inclination and azimuth).
    w : (N,) ndarray
        Quadrature weights.

    """
    import os
    basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), '../data')

    quad_file = {72: 'qsph1-14-72DP.dat',
                 132: 'qsph1-16-132DP.dat',
                 192: 'qsph1-23-192DP.dat',
                 492: 'qsph1-37-492DP.dat'}

    q_pts = np.loadtxt(os.path.join(basedir, quad_file[N]))
    q_theta, q_phi = cart2sphere(*q_pts[:, :3].T)[1:]
    q_w = q_pts[:, 3]

    return q_theta, q_phi, q_w
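
A hedged usage sketch for the loader above: approximate a spherical average with the returned points and weights. The weight normalization depends on the .dat files, so the weights are renormalized explicitly here; the average of cos^2(theta) over the sphere should come out close to 1/3.

import numpy as np

theta, phi, w = quadrature_points(N=72)
w = w / w.sum()                      # force the weights to sum to 1
approx = np.sum(w * np.cos(theta) ** 2)
print(approx)                        # expect roughly 0.333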
Example #4
def sh_smooth(data, bvals, bvecs, sh_order=4, similarity_threshold=50, regul=0.006):
    """Smooth the raw diffusion signal with spherical harmonics.

    Parameters
    ----------
    data : ndarray
        The diffusion data to smooth.
    bvals : ndarray
        b-values corresponding to the data volumes.
    bvecs : ndarray
        b-vectors corresponding to the data volumes (one column per volume).
    sh_order : int, default 4
        Order of the spherical harmonics to fit.
    similarity_threshold : int, default 50
        All b-values such that |b_1 - b_2| < similarity_threshold
        will be considered as identical for smoothing purposes.
        Must be lower than 200.
    regul : float, default 0.006
        Amount of regularization to apply to the sh coefficients computation.

    Returns
    -------
    pred_sig : ndarray
        The smoothed diffusion data, fitted through spherical harmonics.
    """

    if similarity_threshold > 200:
        raise ValueError("similarity_threshold = {}, which is higher than 200,"
                         " please use a lower value".format(similarity_threshold))

    m, n = sph_harm_ind_list(sh_order)
    L = -n * (n + 1)
    where_b0s = bvals == 0
    pred_sig = np.zeros_like(data, dtype=np.float32)

    # Round similar bvals together for identifying similar shells
    rounded_bvals = np.zeros_like(bvals)

    for unique_bval in np.unique(bvals):
        idx = np.abs(unique_bval - bvals) < similarity_threshold
        rounded_bvals[idx] = unique_bval

    # process each b-value separately
    for unique_bval in np.unique(rounded_bvals):
        idx = rounded_bvals == unique_bval

        # Just give back the signal for the b0s since we can't really do anything about it
        if np.all(idx == where_b0s):
            if np.sum(where_b0s) > 1:
                pred_sig[..., idx] = np.mean(data[..., idx], axis=-1, keepdims=True)
            else:
                pred_sig[..., idx] = data[..., idx]
            continue

        x, y, z = bvecs[:, idx]
        r, theta, phi = cart2sphere(x, y, z)

        # Find the sh coefficients to smooth the signal
        B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])
        invB = smooth_pinv(B_dwi, np.sqrt(regul) * L)
        sh_coeff = np.dot(data[..., idx], invB.T)

        # Find the smoothed signal from the sh fit for the given gtab
        pred_sig[..., idx] = np.dot(sh_coeff, B_dwi.T)

    return pred_sig
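
For reference, the smooth_pinv used above stands for a Laplace-Beltrami regularized pseudoinverse (the same identity that test_smooth_pinv checks further down). A minimal sketch, assuming B is the SH design matrix and L = sqrt(regul) * (-n * (n + 1)):

import numpy as np

def smooth_pinv_sketch(B, L):
    # (B^T B + diag(L)^2)^{-1} B^T
    LtL = np.diag(L) ** 2
    return np.dot(np.linalg.inv(np.dot(B.T, B) + LtL), B.T)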
Example #5
def quadrature_points(N=72):
    """Load quadrature points on the sphere.

    Parameters
    ----------
    N : int, {72, 132, 289, 492}
        A quadrature set with N points is loaded.

    Returns
    -------
    theta, phi : (N,) ndarray
        Quadrature point coordinates (inclination and azimuth).
    w : (N,) ndarray
        Quadrature weights.

    """
    import os
    if N == 289:
        basedir = '/home/jnealy/projects/spheredwi/src/python/data'
    else:
        basedir = os.path.abspath(os.path.dirname(__file__))

    quad_file = {72: 'qsph1-14-72DP.dat',
                 132: 'qsph1-16-132DP.dat',
                 289: 'md016.00289.txt',
                 492: 'qsph1-37-492DP.dat'}

    q_pts = np.loadtxt(os.path.join(basedir, quad_file[N]))
    q_theta, q_phi = cart2sphere(*q_pts[:, :3].T)[1:]
    q_w = q_pts[:, 3]
    
    return q_theta, q_phi, q_w
Example #6
File: shm.py Project: iannimmosmith/dipy
 def set_odf_vertices(self, odf_vertices, odf_edges=None):
     """Also sets sampling_matrix"""
     OdfModel.set_odf_vertices(self, odf_vertices, odf_edges)
     x, y, z = odf_vertices.T
     r, pol, azi = cart2sphere(x, y, z)
     S = real_sph_harm(self._m, self._n, azi[:, None], pol[:, None])
     self._sampling_matrix = dot(S, self._fit_matrix)
Example #7
File: shm.py Project: cpresseau/dipy
    def __init__(self, gtab, sh_order, smooth=0, min_signal=1.,
                 assume_normed=False):
        """Creates a model that can be used to fit or sample diffusion data

        Arguments
        ---------
        gtab : GradientTable
            Diffusion gradients used to acquire data
        sh_order : even int >= 0
            the spherical harmonic order of the model
        smooth : float between 0 and 1
            The regularization parameter of the model
        assume_normed : bool
            If True, data will not be normalized before fitting to the model

        """
        m, n = sph_harm_ind_list(sh_order)
        self._where_b0s = lazy_index(gtab.b0s_mask)
        self._where_dwi = lazy_index(~gtab.b0s_mask)
        self.assume_normed = assume_normed
        self.min_signal = min_signal
        x, y, z = gtab.gradients[self._where_dwi].T
        r, theta, phi = cart2sphere(x, y, z)
        B = real_sph_harm(m, n, theta[:, None], phi[:, None])
        L = -n * (n + 1)
        legendre0 = lpn(sh_order, 0)[0]
        F = legendre0[n]
        self.B = B
        self.m = m
        self.n = n
        self._set_fit_matrix(B, L, F, smooth)
Example #8
File: shore.py Project: gsangui/dipy
def SHOREmatrix_odf(radial_order, zeta, sphere_vertices):
    """Compute the SHORE matrix"

    Parameters
    ----------
    radial_order : unsigned int,
        Radial Order
    zeta : unsigned int,
        scale factor
    sphere_vertices : array, shape (N,3)
        vertices of the odf sphere
    """

    r, theta, phi = cart2sphere(sphere_vertices[:, 0], sphere_vertices[:, 1], sphere_vertices[:, 2])
    theta[np.isnan(theta)] = 0
    counter = 0
    upsilon = np.zeros(
        (len(sphere_vertices),
         (radial_order + 1) * ((radial_order + 1) // 2) * (2 * radial_order + 1)))
    for n in range(radial_order + 1):
        for l in range(0, n + 1, 2):
            for m in range(-l, l + 1):
                upsilon[:, counter] = (-1) ** (n - l / 2.0) * __kappa_odf(zeta, n, l) * \
                    hyp2f1(l - n, l / 2.0 + 1.5, l + 1.5, 2.0) * \
                    real_sph_harm(m, l, theta, phi)
                counter += 1

    return upsilon[:, 0:counter]
Example #9
File: shore.py Project: gsangui/dipy
def SHOREmatrix_pdf(radial_order, zeta, rtab):
    """Compute the SHORE matrix"

    Parameters
    ----------
    radial_order : unsigned int,
        Radial Order
    zeta : unsigned int,
        scale factor
    rtab : array, shape (N,3)
        r-space points at which the pdf is calculated
    """

    r, theta, phi = cart2sphere(rtab[:, 0], rtab[:, 1], rtab[:, 2])
    theta[np.isnan(theta)] = 0

    psi = np.zeros(
        (r.shape[0],
         (radial_order + 1) * ((radial_order + 1) // 2) * (2 * radial_order + 1)))
    counter = 0
    for n in range(radial_order + 1):
        for l in range(0, n + 1, 2):
            for m in range(-l, l + 1):
                psi[:, counter] = real_sph_harm(m, l, theta, phi) * \
                    genlaguerre(n - l, l + 0.5)(4 * np.pi ** 2 * zeta * r ** 2 ) *\
                    np.exp(-2 * np.pi ** 2 * zeta * r ** 2) *\
                    __kappa_pdf(zeta, n, l) *\
                    (4 * np.pi ** 2 * zeta * r ** 2) ** (l / 2) * \
                    (-1) ** (n - l / 2)
                counter += 1
    return psi[:, 0:counter]
Example #10
File: shore.py Project: deflavio/dipy
def shore_matrix_odf(radial_order, zeta, sphere_vertices):
    r"""Compute the SHORE ODF matrix [1]_"

    Parameters
    ----------
    radial_order : unsigned int,
        an even integer that represents the order of the basis
    zeta : unsigned int,
        scale factor
    sphere_vertices : array, shape (N,3)
        vertices of the odf sphere

    References
    ----------
    .. [1] Merlet S. et. al, "Continuous diffusion signal, EAP and
    ODF estimation via Compressive Sensing in diffusion MRI", Medical
    Image Analysis, 2013.
    """

    r, theta, phi = cart2sphere(sphere_vertices[:, 0], sphere_vertices[:, 1],
                                sphere_vertices[:, 2])
    theta[np.isnan(theta)] = 0
    F = radial_order / 2
    n_c = int(np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3)))
    upsilon = np.zeros((len(sphere_vertices), n_c))
    counter = 0
    for l in range(0, radial_order + 1, 2):
        for n in range(l, int((radial_order + l) / 2) + 1):
            for m in range(-l, l + 1):
                upsilon[:, counter] = (-1) ** (n - l / 2.0) * _kappa_odf(zeta, n, l) * \
                    hyp2f1(l - n, l / 2.0 + 1.5, l + 1.5, 2.0) * \
                    real_sph_harm(m, l, theta, phi)
                counter += 1

    return upsilon
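
A quick arithmetic check of the coefficient count used above: with F = radial_order / 2, n_c = (F + 1)(F + 2)(4F + 3)/6 matches the number of (l, n, m) triples visited by the three nested loops, e.g. 50 basis functions for radial_order = 6.

radial_order = 6
F = radial_order // 2
n_c = (F + 1) * (F + 2) * (4 * F + 3) // 6
n_loop = sum(2 * l + 1
             for l in range(0, radial_order + 1, 2)
             for n in range(l, (radial_order + l) // 2 + 1))
print(n_c, n_loop)   # 50 50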
Example #11
    def odf(self, vertices=None, cache=None):
        """Predict the ODF at the given vertices.

        """
        if vertices is None:
            self._odf_kernel_matrix = cache._odf_kernel_matrix
        else:
            odf_theta, odf_phi = cart2sphere(*vertices.T)[1:]
            X = kernel_matrix(odf_theta, odf_phi,
                              self.model.kernel_theta,
                              self.model.kernel_phi,
                              kernel=even_kernel,
                              N=self.model.sh_order)

            # attempt to append iso col and spherical harmonic cols to X:
            append = sh(odf_theta, odf_phi, np.size(odf_theta))

            X = np.hstack((X, append))

            self._odf_kernel_matrix = X

        # return np.dot(self._odf_kernel_matrix, self.beta) + \
        #        self.intercept

        return self._odf_kernel_matrix, self.beta, self.intercept
Example #12
    def __init__(self, data, model, sphere, sh_order=None, tol=1e-2):
        if sh_order is None:
            if hasattr(model, "sh_order"):
                sh_order = model.sh_order
            else:
                sh_order = default_SH
            
        self.where_dwi = shm.lazy_index(~model.gtab.b0s_mask)
        if not isinstance(self.where_dwi, slice):
            msg = ("For optimal bootstrap tracking consider reordering the "
                   "diffusion volumes so that all the b0 volumes are at the "
                   "beginning")
            warn(msg)
        x, y, z = model.gtab.gradients[self.where_dwi].T
        r, theta, phi = cart2sphere(x, y, z)
        b_range = (r.max() - r.min()) / r.min()
        if b_range > tol:
            raise ValueError("BootOdfGen only supports single shell data")

        B, m, n = shm.real_sym_sh_basis(sh_order, theta, phi)
        H = shm.hat(B)
        R = shm.lcr_matrix(H)

        self.data = np.asarray(data, "float64")
        self.model = model
        self.sphere = sphere
        self.H = H
        self.R = R
Example #13
def plot_ellipsoid_mpl(Tensor, n=60):
    """
    Plot an ellipsoid from a tensor using matplotlib

    Parameters
    ----------
    Tensor : an ozt.Tensor class instance

    n : int, optional
        If an integer is provided, we will plot this for a sphere with
        n (grid) equi-sampled points. Otherwise, we will plot the
        originally provided Tensor's bvecs.
    """

    x,y,z = sphere(n=n)
    new_bvecs = np.vstack([x.ravel(), y.ravel(), z.ravel()])
    Tensor = ozt.Tensor(Tensor.Q, new_bvecs,
                        np.ones(new_bvecs.shape[-1]) * Tensor.bvals[0])

    v = Tensor.diffusion_distance.reshape(x.shape)
    
    r, phi, theta = geo.cart2sphere(x,y,z)
    x_plot, y_plot, z_plot = geo.sphere2cart(v, phi, theta)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    ax.plot_surface(x_plot, y_plot, z_plot,  rstride=2, cstride=2, shade=True)

    return fig
Example #14
File: lut.py Project: htygithub/AMICO
def aux_structures_resample( scheme, lmax = 12 ) :
    """Compute the auxiliary data structures to resample the kernels to the original acquisition scheme.

    Parameters
    ----------
    scheme : Scheme class
        Acquisition scheme of the acquired signal
    lmax : int
        Maximum SH order to use for the rotation phase (default : 12)

    Returns
    -------
    idx_OUT : numpy array
        Indices of the samples belonging to each shell
    Ylm_OUT : numpy array
        Operator to transform each shell from Spherical harmonics to original signal space
    """
    nSH = (lmax+1)*(lmax+2)/2
    idx_OUT = np.zeros( scheme.dwi_count, dtype=np.int32 )
    Ylm_OUT = np.zeros( (scheme.dwi_count,nSH*len(scheme.shells)), dtype=np.float32 ) # matrix from SH to real space
    idx = 0
    for s in xrange( len(scheme.shells) ) :
        nS = len( scheme.shells[s]['idx'] )
        idx_OUT[ idx:idx+nS ] = scheme.shells[s]['idx']
        _, theta, phi = cart2sphere( scheme.shells[s]['grad'][:,0], scheme.shells[s]['grad'][:,1], scheme.shells[s]['grad'][:,2] )
        tmp, _, _ = real_sym_sh_basis( lmax, theta, phi )
        Ylm_OUT[ idx:idx+nS, nSH*s:nSH*(s+1) ] = tmp
        idx += nS
    return ( idx_OUT, Ylm_OUT )
Example #15
def test_sphere_cart():
    # test arrays of points
    rs, thetas, phis = cart2sphere(*(sphere_points.T))
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal(xyz, sphere_points.T)
    # test radius estimation
    big_sph_pts = sphere_points * 10.4
    rs, thetas, phis = cart2sphere(*big_sph_pts.T)
    yield assert_array_almost_equal(rs, 10.4)
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal(xyz, big_sph_pts.T, 6)
    # test a scalar point
    pt = sphere_points[3]
    r, theta, phi = cart2sphere(*pt)
    xyz = sphere2cart(r, theta, phi)
    yield assert_array_almost_equal(xyz, pt)
Example #16
def mats_odfdeconv(sphere, basis=None, ratio=3 / 15., sh_order=8, lambda_=1., tau=0.1, r2=True):
    m, n = sph_harm_ind_list(sh_order)
    r, theta, phi = cart2sphere(sphere.x, sphere.y, sphere.z)
    real_sym_sh = sph_harm_lookup[basis]
    B_reg, m, n = real_sym_sh(sh_order, theta[:, None], phi[:, None])
    R, P = forward_sdt_deconv_mat(ratio, sh_order, r2_term=r2)
    lambda_ = lambda_ * R.shape[0] * R[0, 0] / B_reg.shape[0]
    return R, B_reg
Example #17
def test_invert_transform():
    n = 100.
    theta = np.arange(n)/n * np.pi  # Limited to 0,pi
    phi = (np.arange(n)/n - .5) * 2 * np.pi  # Limited to 0,2pi
    x, y, z = sphere2cart(1, theta, phi)  # Let's assume they're all unit vecs
    r, new_theta, new_phi = cart2sphere(x, y, z)  # Transform back

    yield assert_array_almost_equal, theta, new_theta
    yield assert_array_almost_equal, phi, new_phi
Example #18
File: csdeconv.py Project: swederik/dipy
def odf_sh_to_sharp(odfs_sh, sphere, basis=None, ratio=3 / 15., sh_order=8, lambda_=1., tau=0.1):
    r""" Sharpen odfs using the spherical deconvolution transform [1]_

    This function can be used to sharpen any smooth ODF spherical function. In theory, this should
    only be used to sharpen QballModel ODFs, but in practice, one can play with the deconvolution
    ratio and sharpen almost any ODF-like spherical function. The constrained-regularization is stable
    and will not only sharp the ODF peaks but also regularize the noisy peaks.

    Parameters
    ---------- 
    odfs_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``, )
        array of odfs expressed as spherical harmonics coefficients
    sphere : Sphere
        sphere used to build the regularization matrix    
    basis : {None, 'mrtrix', 'fibernav'}
        different spherical harmonic basis. None is the fibernav basis as well.
    ratio : float, 
        ratio of the smallest vs the largest eigenvalue of the single prolate tensor response function
        (:math:`\frac{\lambda_2}{\lambda_1}`)
    sh_order : int
        maximal SH order of the SH representation
    lambda_ : float
        lambda parameter (see odfdeconv) (default 1.0)
    tau : float
        tau parameter in the L matrix construction (see odfdeconv) (default 0.1)

    Returns
    -------
    fodf_sh : ndarray
        sharpened odf expressed as spherical harmonics coefficients

    References
    ----------
    .. [1] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and Probabilistic Tractography Based
           on Complex Fibre Orientation Distributions
    """
    m, n = sph_harm_ind_list(sh_order)
    r, theta, phi = cart2sphere(sphere.x, sphere.y, sphere.z)

    real_sym_sh = sph_harm_lookup[basis]

    B_reg, m, n = real_sym_sh(sh_order, theta[:, None], phi[:, None])
    
    R, P = forward_sdt_deconv_mat(ratio, sh_order)

    # scale lambda to account for differences in the number of
    # SH coefficients and number of mapped directions
    lambda_ = lambda_ * R.shape[0] * R[0, 0] / B_reg.shape[0]

    fodf_sh = np.zeros(odfs_sh.shape)

    for index in ndindex(odfs_sh.shape[:-1]):

        fodf_sh[index], num_it = odf_deconv(odfs_sh[index], sh_order, R, B_reg, lambda_=lambda_, tau=tau)

    return fodf_sh
Example #19
def test_lambert_equal_area_projection_cart():

    xyz = np.array([[1,0,0],[0,1,0],[0,0,1],[-1,0,0],[0,-1,0],[0,0,-1]])
    # points sit on +/-1 on all 3 axes
    
    r,theta,phi = cart2sphere(*xyz.T)

    leap = lambert_equal_area_projection_polar(theta,phi)
    r2 = np.sqrt(2)
    yield assert_array_almost_equal(np.sqrt(np.sum(leap**2,axis=1)),
                                    np.array([ r2,r2,0,r2,r2,2]))
Example #20
    def sph_harm_set(self):
        """
        Calculate the spherical harmonics, provided n parameters (corresponding
        to nc = (L+1) * (L+2)/2, with L being the maximal harmonic degree) for
        the set of bvecs of the object.

        Note
        ----

        1. This was written according to the documentation of mrtrix's
        'csdeconv'. The following is taken from there:  

          Note that this program makes use of implied symmetries in the
          diffusion profile. First, the fact the relative signal profile is
          real implies that it has conjugate symmetry, i.e. Y(l,-m) = Y(l,m)*
          (where * denotes the complex conjugate). Second, the diffusion
          profile should be antipodally symmetric (i.e. S(x) = S(-x)), implying
          that all odd l components should be zero. Therefore, this program
          only computes the even elements.

          Note that the spherical harmonics equations used here differ slightly
          from those conventionally used, in that the (-1)^m factor has been
          omitted. This should be taken into account in all subsequent
          calculations.

          Each volume in the output image corresponds to a different spherical
          harmonic component, according to the following convention: [0]    
          Y(0,0)  [1] Im {Y(2,2)} [2] Im {Y(2,1)} [3]     Y(2,0) [4] Re
          {Y(2,1)} [5] Re {Y(2,2)}  [6] Im {Y(4,4)} [7] Im {Y(4,3)} etc... 

          
        2. Take heed that it seems that scipy's sph_harm actually has the
        order/degree in reverse order than the convention used by mrtrix, so
        that needs to be taken into account in the calculation below

        """

        # Convert to spherical coordinates:
        r, theta, phi = geo.cart2sphere(self.bvecs[0, self.b_idx], self.bvecs[1, self.b_idx], self.bvecs[2, self.b_idx])

        # Preallocate:
        b = np.empty((self.model_coeffs.shape[-1], theta.shape[0]))

        i = 0
        # Only even order are taken:
        for order in np.arange(0, self.L + 1, 2):  # Go to L, inclusive!
            for degree in np.arange(-order, order + 1):
                # In negative degrees, take the imaginary part:
                if degree < 0:
                    b[i, :] = np.imag(sph_harm(-1 * degree, order, phi, theta))
                else:
                    b[i, :] = np.real(sph_harm(degree, order, phi, theta))
                i = i + 1
        return b
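
The (order, degree) traversal produced by the loop above, matching the mrtrix layout quoted in the docstring (negative degrees map to the imaginary part, the rest to the real part), shown here for L = 4:

L = 4
layout = [(order, degree)
          for order in range(0, L + 1, 2)
          for degree in range(-order, order + 1)]
print(layout)
# [(0, 0), (2, -2), (2, -1), (2, 0), (2, 1), (2, 2), (4, -4), ..., (4, 4)]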
Example #21
    def __init__(self, gtab, sh_order=8, qp=132,
                 loglog_tf=True, l1_ratio=None, alpha=None):
        """Sparse kernel model.

        Parameters
        ----------
        gtab : GradientTable
            B-values and gradient directions.
        sh_order : int
            Highest order of spherical harmonic fit.
        qp : {72, 132, 492}
            Number of kernels used to represent the signal.
        loglog_tf : bool
            Whether to perform ``log(-log(.))`` on the signal before fitting.
            In theory, this gives a better representation of the ODF (but does
            not predict back the original signal).  Also, it seems not to work well
            for low b-values (<= 1500).
        l1_ratio : float (optional)
            Argument passed to sklearn's ElasticNet to control L1 vs L2
            penalization.  Should be > 0.01.
        alpha : float (optional)
            Argument passed to sklearn's ElasticNet.  Controls the weight of
            both L1 and L2 penalties.

        See also
        --------
        sklearn.linear_model.ElasticNet

        """
        mask = gtab.bvals > 0
        bvecs = gtab.bvecs[mask]

        self.qp = qp
        self.sh_order = sh_order
        self.loglog_tf = loglog_tf
        self.gradient_theta, self.gradient_phi = \
                             cart2sphere(*bvecs.T)[1:]

        self.kernel_theta, self.kernel_phi, _ = quadrature_points(N=qp)

        self.X = np.asfortranarray(
            kernel_matrix(self.gradient_theta, self.gradient_phi,
                          self.kernel_theta, self.kernel_phi,
                          kernel=inv_funk_radon_even_kernel,
                          N=self.sh_order)
            )

        if l1_ratio is None:
            l1_ratio = 1
        if alpha is None:
            alpha = 1

        self.l1_ratio = l1_ratio
        self.alpha = alpha
Example #22
def gqi(training, category, snr, denoised, odeconv, tv, method, weight=0.1, sl=3.):

    data, affine, gtab, mask, evals, S0, prefix = prepare(training,
                                                          category,
                                                          snr,
                                                          denoised,
                                                          odeconv,
                                                          tv,
                                                          method)
    


    model = GeneralizedQSamplingModel(gtab,
                                      method='gqi2',
                                      sampling_length=sl,
                                      normalize_peaks=False)

    fit = model.fit(data, mask)

    sphere = get_sphere('symmetric724')   

    odf = fit.odf(sphere)

    if odeconv == True:

        odf_sh = sf_to_sh(odf, sphere, sh_order=8,
                          basis_type='mrtrix')

        # # nib.save(nib.Nifti1Image(odf_sh, affine), model_tag + 'odf_sh.nii.gz')

        reg_sphere = get_sphere('symmetric724')

        fodf_sh = odf_sh_to_sharp(odf_sh,
                                  reg_sphere, basis='mrtrix', ratio=3.8 / 16.6,
                                  sh_order=8, lambda_=1., tau=1.)

        # # nib.save(nib.Nifti1Image(odf_sh, affine), model_tag + 'fodf_sh.nii.gz')

        fodf_sh[np.isnan(fodf_sh)]=0

        r, theta, phi = cart2sphere(sphere.x, sphere.y, sphere.z)
        B_regul, m, n = real_sph_harm_mrtrix(8, theta[:, None], phi[:, None])

        fodf = np.dot(fodf_sh, B_regul.T)

        odf = fodf

    if tv == True:

        odf = tv_denoise_4d(odf, weight=weight)

    save_odfs_peaks(training, odf, affine, sphere, dres, prefix)
Example #23
File: maya.py Project: qytian/osmosis
def plot_tensor_3d(Tensor, cmap='jet', mode='ADC', file_name=None,
                   origin=[0,0,0], colorbar=False, figure=None, vmin=None,
                   vmax=None, offset=0, azimuth=60, elevation=90, roll=0,
                   scale_factor=1.0, rgb_pdd=False):

    """

    mode: either "ADC", "ellipse" or "pred_sig"

    """
    
    Q = Tensor.Q
    sphere = create_unit_sphere(5)
    vertices = sphere.vertices
    faces = sphere.faces
    x,y,z = vertices.T 

    new_bvecs = np.vstack([x.ravel(), y.ravel(), z.ravel()])
    Tensor = ozt.Tensor(Q, new_bvecs,
                        Tensor.bvals[0] * np.ones(new_bvecs.shape[-1]))

    if mode == 'ADC':
        v = Tensor.ADC * scale_factor
    elif mode == 'ellipse':
        v = Tensor.diffusion_distance * scale_factor
    elif mode == 'pred_sig':
        v = Tensor.predicted_signal(1) * scale_factor
    else:
        raise ValueError("Mode not recognized")
        
    r, phi, theta = geo.cart2sphere(x,y,z)
    x_plot, y_plot, z_plot = geo.sphere2cart(v, phi, theta)

    if rgb_pdd:
        evals, evecs = Tensor.decompose
        xyz = evecs[0]
        r = np.abs(xyz[0])/np.sum(np.abs(xyz))
        g = np.abs(xyz[1])/np.sum(np.abs(xyz))
        b = np.abs(xyz[2])/np.sum(np.abs(xyz))

        color = (r, g, b)
    else:
        color = None
    # Call and return straightaway:
    return _display_maya_voxel(x_plot, y_plot, z_plot, faces, v, origin,
                               cmap=cmap, colorbar=colorbar, color=color,
                               figure=figure,
                               vmin=vmin, vmax=vmax, file_name=file_name,
                               azimuth=azimuth, elevation=elevation)
Example #24
def rho_matrix(sh_order, vecs):
    r"""Compute the SH matrix $\rho$
    """

    r, theta, phi = cart2sphere(vecs[:, 0], vecs[:, 1], vecs[:, 2])
    theta[np.isnan(theta)] = 0

    n_c = int((sh_order + 1) * (sh_order + 2) / 2)
    rho = np.zeros((vecs.shape[0], n_c))
    counter = 0
    for l in range(0, sh_order + 1, 2):
        for m in range(-l, l + 1):
            rho[:, counter] = real_sph_harm(m, l, theta, phi)
            counter += 1
    return rho
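
Sanity check on n_c above: a symmetric (even-order-only) basis of maximum order 8 has (8 + 1)(8 + 2)/2 = 45 functions, i.e. 1 + 5 + 9 + 13 + 17 terms across l = 0, 2, 4, 6, 8.

sh_order = 8
print((sh_order + 1) * (sh_order + 2) // 2,
      sum(2 * l + 1 for l in range(0, sh_order + 1, 2)))   # 45 45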
Example #25
def scale_bvecs_by_sig(bvecs, sig):
    """
    Helper function to rescale your bvecs according to some signal, so that
    they don't fall on the unit sphere, but instead are represented in space as
    distance from the origin.
    """

    x,y,z = bvecs

    r, theta, phi = geo.cart2sphere(x, y, z)

    # Simply replace r with sig:
    x,y,z = geo.sphere2cart(sig, theta, phi)

    return x,y,z
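
A hypothetical usage sketch for the helper above (bvecs_demo and sig_demo are made-up inputs): after rescaling, each vector's length equals the corresponding signal value.

import numpy as np

bvecs_demo = np.eye(3)                      # three unit vectors, shape (3, 3)
sig_demo = np.array([0.5, 1.0, 2.0])
x, y, z = scale_bvecs_by_sig(bvecs_demo, sig_demo)
print(np.sqrt(x ** 2 + y ** 2 + z ** 2))    # [0.5, 1.0, 2.0]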
Example #26
def test_hemisphere_constructor():
    s0 = HemiSphere(xyz=verts)
    s1 = HemiSphere(theta=theta, phi=phi)
    s2 = HemiSphere(*verts.T)

    uniq_verts = verts[::2].T
    rU, thetaU, phiU = cart2sphere(*uniq_verts)

    nt.assert_array_almost_equal(s0.theta, s1.theta)
    nt.assert_array_almost_equal(s0.theta, s2.theta)
    nt.assert_array_almost_equal(s0.theta, thetaU)

    nt.assert_array_almost_equal(s0.phi, s1.phi)
    nt.assert_array_almost_equal(s0.phi, s2.phi)
    nt.assert_array_almost_equal(s0.phi, phiU)
Example #27
def rho_matrix(sh_order, vecs):
    r"""Compute the SH matrix $\rho$
    """

    r, theta, phi = cart2sphere(vecs[:, 0], vecs[:, 1], vecs[:, 2])
    theta[np.isnan(theta)] = 0

    n_c = int((sh_order + 1) * (sh_order + 2) / 2)
    rho = np.zeros((vecs.shape[0], n_c))
    counter = 0
    for l in range(0, sh_order + 1, 2):
        for m in range(-l, l + 1):
            rho[:, counter] = real_sph_harm(m, l, theta, phi)
            counter += 1
    return rho
Example #28
def test_sphere_cart():
    # test arrays of points
    rs, thetas, phis = cart2sphere(*(sphere_points.T))
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal, xyz, sphere_points.T
    # test radius estimation
    big_sph_pts = sphere_points * 10.4
    rs, thetas, phis = cart2sphere(*big_sph_pts.T)
    yield assert_array_almost_equal, rs, 10.4
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal, xyz, big_sph_pts.T, 6
    #test that result shapes match
    x, y, z = big_sph_pts.T
    r, theta, phi = cart2sphere(x[:1], y[:1], z)
    yield assert_equal, r.shape, theta.shape
    yield assert_equal, r.shape, phi.shape
    x, y, z = sphere2cart(r[:1], theta[:1], phi)
    yield assert_equal, x.shape, y.shape
    yield assert_equal, x.shape, z.shape
    # test a scalar point
    pt = sphere_points[3]
    r, theta, phi = cart2sphere(*pt)
    xyz = sphere2cart(r, theta, phi)
    yield assert_array_almost_equal, xyz, pt
Example #29
def test_hemisphere_constructor():
    s0 = HemiSphere(xyz=verts)
    s1 = HemiSphere(theta=theta, phi=phi)
    s2 = HemiSphere(*verts.T)

    uniq_verts = verts[::2].T
    rU, thetaU, phiU = cart2sphere(*uniq_verts)

    nt.assert_array_almost_equal(s0.theta, s1.theta)
    nt.assert_array_almost_equal(s0.theta, s2.theta)
    nt.assert_array_almost_equal(s0.theta, thetaU)

    nt.assert_array_almost_equal(s0.phi, s1.phi)
    nt.assert_array_almost_equal(s0.phi, s2.phi)
    nt.assert_array_almost_equal(s0.phi, phiU)
Example #30
def test_bdg_residual():
    """This tests the bootstrapping residual.
    """

    hsph_updated = HemiSphere.from_sphere(unit_icosahedron).subdivide(2)
    vertices = hsph_updated.vertices
    bvecs = vertices
    bvals = np.ones(len(vertices)) * 1000
    bvecs = np.insert(bvecs, 0, np.array([0, 0, 0]), axis=0)
    bvals = np.insert(bvals, 0, 0)
    gtab = gradient_table(bvals, bvecs)
    r, theta, phi = cart2sphere(*vertices.T)
    B, m, n = shm.real_sh_descoteaux(6, theta, phi)
    shm_coeff = np.random.random(B.shape[1])

    # sphere_func is sampled of the spherical function for each point of
    # the sphere
    sphere_func = np.dot(shm_coeff, B.T)

    voxel = np.concatenate((np.zeros(1), sphere_func))
    data = np.tile(voxel, (3, 3, 3, 1))

    csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
    boot_pmf_gen = BootPmfGen(data,
                              model=csd_model,
                              sphere=hsph_updated,
                              sh_order=6)

    # Two boot samples should be the same
    odf1 = boot_pmf_gen.get_pmf(np.array([1.5, 1.5, 1.5]))
    odf2 = boot_pmf_gen.get_pmf(np.array([1.5, 1.5, 1.5]))
    npt.assert_array_almost_equal(odf1, odf2)

    # A boot sample with less sh coeffs should have residuals, thus the two
    # should be different
    boot_pmf_gen2 = BootPmfGen(data,
                               model=csd_model,
                               sphere=hsph_updated,
                               sh_order=4)
    odf1 = boot_pmf_gen2.get_pmf(np.array([1.5, 1.5, 1.5]))
    odf2 = boot_pmf_gen2.get_pmf(np.array([1.5, 1.5, 1.5]))
    npt.assert_(np.any(odf1 != odf2))

    # test with a gtab with two shells and assert you get an error
    bvals[-1] = 2000
    gtab = gradient_table(bvals, bvecs)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
    npt.assert_raises(ValueError, BootPmfGen, data, csd_model, hsph_updated, 6)
Example #31
def precompute_rotation_matrices(lmax=12):
    """Precompute the rotation matrices to rotate the high-resolution kernels (500 directions/shell).

    Parameters
    ----------
    lmax : int
        Maximum SH order to use for the rotation phase (default : 12)
    """
    if not isdir(dipy_home):
        makedirs(dipy_home)
    filename = pjoin(dipy_home, 'AMICO_aux_matrices_lmax=%d.pickle' % lmax)
    if isfile(filename):
        return

    print('\n-> Precomputing rotation matrices for l_max=%d:' % lmax)
    AUX = {}
    AUX['lmax'] = lmax

    # matrix to fit the SH coefficients
    _, theta, phi = cart2sphere(grad[:, 0], grad[:, 1], grad[:, 2])
    tmp, _, _ = real_sym_sh_basis(lmax, theta, phi)
    AUX['fit'] = np.dot(np.linalg.pinv(np.dot(tmp.T, tmp)), tmp.T)

    # matrices to rotate the functions in SH space
    AUX['Ylm_rot'] = np.zeros((181, 181), dtype=np.object)
    for ox in range(181):
        for oy in range(181):
            tmp, _, _ = real_sym_sh_basis(lmax, ox / 180.0 * np.pi,
                                          oy / 180.0 * np.pi)
            AUX['Ylm_rot'][ox, oy] = tmp.reshape(-1)

    # auxiliary data to perform rotations
    AUX['const'] = np.zeros(AUX['fit'].shape[0], dtype=np.float64)
    AUX['idx_m0'] = np.zeros(AUX['fit'].shape[0], dtype=np.int32)
    i = 0
    for l in range(0, AUX['lmax'] + 1, 2):
        const = np.sqrt(4.0 * np.pi / (2.0 * l + 1.0))
        idx_m0 = (l * l + l + 2.0) / 2.0 - 1
        for m in range(-l, l + 1):
            AUX['const'][i] = const
            AUX['idx_m0'][i] = idx_m0
            i += 1

    with open(filename, 'wb+') as fid:
        pickle.dump(AUX, fid, protocol=2)

    print('   [ DONE ]')
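
A quick check of the idx_m0 bookkeeping above: in the even-order layout (l = 0, 2, 4, ...; m = -l..l) the m = 0 coefficient of band l sits at index (l*l + l + 2)/2 - 1, e.g. 0 for l = 0, 3 for l = 2, 10 for l = 4.

for l in range(0, 6, 2):
    print(l, (l * l + l + 2) // 2 - 1)   # 0 -> 0, 2 -> 3, 4 -> 10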
Example #32
    def __init__(self,
                 gtab,
                 sh_order,
                 smooth=0.006,
                 min_signal=1.,
                 assume_normed=False):
        """Creates a model that can be used to fit or sample diffusion data

        Arguments
        ---------
        gtab : GradientTable
            Diffusion gradients used to acquire data
        sh_order : even int >= 0
            the spherical harmonic order of the model
        smooth : float between 0 and 1, optional
            The regularization parameter of the model
        min_signal : float, > 0, optional
            During fitting, all signal values less than `min_signal` are
            clipped to `min_signal`. This is done primarily to avoid values
            less than or equal to zero when taking logs.
        assume_normed : bool, optional
            If True, clipping and normalization of the data with respect to the
            mean B0 signal are skipped during mode fitting. This is an advanced
            feature and should be used with care.

        See Also
        --------
        normalize_data

        """
        SphHarmModel.__init__(self, gtab)
        self._where_b0s = lazy_index(gtab.b0s_mask)
        self._where_dwi = lazy_index(~gtab.b0s_mask)
        self.assume_normed = assume_normed
        self.min_signal = min_signal
        x, y, z = gtab.gradients[self._where_dwi].T
        r, theta, phi = cart2sphere(x, y, z)
        B, m, n = real_sym_sh_basis(sh_order, theta[:, None], phi[:, None])
        L = -n * (n + 1)
        legendre0 = lpn(sh_order, 0)[0]
        F = legendre0[n]
        self.sh_order = sh_order
        self.B = B
        self.m = m
        self.n = n
        self._set_fit_matrix(B, L, F, smooth)
Example #33
    def odf(self, vertices=None, cache=None):
        """Predict the ODF at the given vertices.

        """
        if vertices is None:
            self._odf_kernel_matrix = cache._odf_kernel_matrix
        else:
            odf_theta, odf_phi = cart2sphere(*vertices.T)[1:]
            X = kernel_matrix(odf_theta, odf_phi,
                              self.model.kernel_theta,
                              self.model.kernel_phi,
                              kernel=even_kernel,
                              N=self.model.sh_order)
            self._odf_kernel_matrix = X

        return np.dot(self._odf_kernel_matrix, self.beta) + \
               self.intercept, self.beta
Example #34
File: lut.py Project: htygithub/AMICO
def precompute_rotation_matrices( lmax = 12 ) :
    """Precompute the rotation matrices to rotate the high-resolution kernels (500 directions/shell).

    Parameters
    ----------
    lmax : int
        Maximum SH order to use for the rotation phase (default : 12)
    """
    if not isdir(dipy_home) :
        makedirs(dipy_home)
    filename = pjoin( dipy_home, 'AMICO_aux_matrices_lmax=%d.pickle'%lmax )
    if isfile( filename ) :
        return

    print '\n-> Precomputing rotation matrices for l_max=%d:' % lmax
    AUX = {}
    AUX['lmax'] = lmax

    # matrix to fit the SH coefficients
    _, theta, phi = cart2sphere( grad[:,0], grad[:,1], grad[:,2] )
    tmp, _, _ = real_sym_sh_basis( lmax, theta, phi )
    AUX['fit'] = np.dot( np.linalg.pinv( np.dot(tmp.T,tmp) ), tmp.T )

    # matrices to rotate the functions in SH space
    AUX['Ylm_rot'] = np.zeros( (181,181), dtype=np.object )
    for ox in xrange(181) :
        for oy in xrange(181) :
            tmp, _, _ = real_sym_sh_basis( lmax, ox/180.0*np.pi, oy/180.0*np.pi )
            AUX['Ylm_rot'][ox,oy] = tmp.reshape(-1)

    # auxiliary data to perform rotations
    AUX['const'] = np.zeros( AUX['fit'].shape[0], dtype=np.float64 )
    AUX['idx_m0'] = np.zeros( AUX['fit'].shape[0], dtype=np.int32 )
    i = 0
    for l in xrange(0,AUX['lmax']+1,2) :
        const  = np.sqrt(4.0*np.pi/(2.0*l+1.0))
        idx_m0 = (l*l + l + 2.0)/2.0 - 1
        for m in xrange(-l,l+1) :
            AUX['const'][i]  = const
            AUX['idx_m0'][i] = idx_m0
            i += 1

    with open( filename, 'wb+' ) as fid :
        cPickle.dump( AUX, fid, protocol=2 )

    print '   [ DONE ]'
Example #35
    def __init__(self, bvals, gradients, sh_order=8, qp=132,
                 loglog_tf=True):
        """Sparse kernel model.

        Parameters
        ----------
        bvals : 1-D ndarray
            B-values.
        gradients : (N, 3) ndarray
            Gradient directions, xyz.
        sh_order : int
            Highest order of spherical harmonic fit.
        qp : {72, 132, 492}
            Number of kernels used to represent the signal.
        loglog_tf : bool
            Whether to perform ``log(-log(.))`` on the signal before fitting.
            In theory, this gives a better representation of the ODF (but does
            not predict back the original signal).  Also, it seems not to work well
            for low b-values (<= 1500).

        """
        where_dwi = bvals > 0

        self.qp = qp
        self.sh_order = sh_order
        self.loglog_tf = loglog_tf
        self.gradient_theta, self.gradient_phi = \
                             cart2sphere(*gradients[where_dwi].T)[1:]

        self.kernel_theta, self.kernel_phi, _ = quadrature_points(N=qp)

        self.X = np.asfortranarray(
            kernel_matrix(self.gradient_theta, self.gradient_phi,
                          self.kernel_theta, self.kernel_phi,
                          kernel=inv_funk_radon_even_kernel,
                          N=self.sh_order))

        # attempt to append iso col and spherical harmonic cols to X:
        P2 = sp.special.legendre(2)
        append = sh(self.gradient_theta, self.gradient_phi,
                    np.size(self.gradient_theta))
        append[:, 1:] = append[:, 1:] / (-12 * np.pi * P2(0))

        # print append, "append"

        self.X = np.hstack((self.X, append))
Example #36
File: smoothing.py Project: BIG-S2/PSC
def sh_smooth(data, gtab, sh_order=4):
    """Smooth the raw diffusion signal with spherical harmonics

    Parameters
    ----------
    data : ndarray
        The diffusion data to smooth.

    gtab : gradient table object
        Corresponding gradients table object to data.

    sh_order : int, default 4
        Order of the spherical harmonics to fit.

    Returns
    -------
    pred_sig : ndarray
        The smoothed diffusion data, fitted through spherical harmonics.
    """

    m, n = sph_harm_ind_list(sh_order)
    where_b0s = lazy_index(gtab.b0s_mask)
    where_dwi = lazy_index(~gtab.b0s_mask)

    x, y, z = gtab.gradients[where_dwi].T
    r, theta, phi = cart2sphere(x, y, z)

    # Find the sh coefficients to smooth the signal
    B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])
    sh_shape = (np.prod(data.shape[:-1]), -1)
    sh_coeff = np.linalg.lstsq(B_dwi, data[...,
                                           where_dwi].reshape(sh_shape).T)[0]

    # Find the smoothed signal from the sh fit for the given gtab
    smoothed_signal = np.dot(B_dwi,
                             sh_coeff).T.reshape(data.shape[:-1] + (-1, ))
    pred_sig = np.zeros(smoothed_signal.shape[:-1] + (gtab.bvals.shape[0], ))
    pred_sig[..., ~gtab.b0s_mask] = smoothed_signal

    # Just give back the signal for the b0s since we can't really do anything about it
    if np.sum(gtab.b0s_mask) > 1:
        pred_sig[..., where_b0s] = np.mean(data[..., where_b0s], axis=-1)
    else:
        pred_sig[..., where_b0s] = data[..., where_b0s]

    return pred_sig
Example #37
    def predict(self, sh_coeff, gtab=None, S0=1):
        """Compute a signal prediction given spherical harmonic coefficients
        and (optionally) a response function for the provided GradientTable
        class instance.

        Parameters
        ----------
        sh_coeff : ndarray
            The spherical harmonic representation of the FOD from which to make
            the signal prediction.
        gtab : GradientTable
            The gradients for which the signal will be predicted. Use the
            model's gradient table by default.
        S0 : ndarray or float
            The non diffusion-weighted signal value.

        Returns
        -------
        pred_sig : ndarray
            The predicted signal.

        """
        if gtab is None or gtab is self.gtab:
            SH_basis = self.B_dwi
            gtab = self.gtab
        else:
            x, y, z = gtab.gradients[~gtab.b0s_mask].T
            r, theta, phi = cart2sphere(x, y, z)
            SH_basis, m, n = real_sym_sh_basis(self.sh_order, theta, phi)

        # Because R is diagonal, the matrix multiply is written as a multiply
        predict_matrix = SH_basis * self.R.diagonal()
        S0 = np.asarray(S0)[..., None]
        scaling = S0 / self.response_scaling

        # This is the key operation: convolve and multiply by S0:
        pre_pred_sig = scaling * np.dot(predict_matrix, sh_coeff)

        # Now put everything in its right place:
        pred_sig = np.zeros(pre_pred_sig.shape[:-1] + (gtab.bvals.shape[0],))
        pred_sig[..., ~gtab.b0s_mask] = pre_pred_sig
        pred_sig[..., gtab.b0s_mask] = S0

        return pred_sig
Example #38
File: converters.py Project: dPys/qsiprep
def amplitudes_to_sh_mif(amplitudes_img, odf_dirs, output_file, working_dir):
    """Convert an image of ODF amplitudes to a MRtrix sh mif file.

    Parameters:
    ============

    amplitudes_img: nb.Nifti1Image
        4d NIfTI image that contains amplitudes for the ODFs
    odf_dirs: np.ndarray
        2*N x 3 array containing the directions corresponding to the
        amplitudes in ``amplitudes_img``. The values in
        ``amplitudes_img.get_data()[..., i]`` are for the
        direction in ``odf_dirs[i]``. Here the second half of the
        directions are the opposite of the first and therefore have the
        same amplitudes.
    output_file: str
        Path where the output ``.mif`` file will be written.
    working_dir: str
        Path where temp files will be written to

    Returns:
    ========

    None

    """
    temp_nii = op.join(working_dir, "odf_values.nii")
    amplitudes_img.to_filename(temp_nii)

    num_dirs, _ = odf_dirs.shape
    hemisphere = num_dirs // 2
    x, y, z = odf_dirs[:hemisphere].T
    _, theta, phi = cart2sphere(-x, -y, z)
    dirs_txt = op.join(working_dir, "ras+directions.txt")
    np.savetxt(dirs_txt, np.column_stack([phi, theta]))

    popen_run([
        "amp2sh", "-quiet", "-force", "-directions", dirs_txt,
        "odf_values.nii", output_file
    ])
    os.remove(temp_nii)
    os.remove(dirs_txt)
Example #39
    def __init__(self,
                 x=None,
                 y=None,
                 z=None,
                 theta=None,
                 phi=None,
                 xyz=None,
                 faces=None,
                 edges=None):

        all_specified = _all_specified(x, y, z) + _all_specified(xyz) + \
                        _all_specified(theta, phi)
        one_complete = (_some_specified(x, y, z) + _some_specified(xyz) +
                        _some_specified(theta, phi))

        if not (all_specified == 1 and one_complete == 1):
            raise ValueError("Sphere must be constructed using either "
                             "(x,y,z), (theta, phi) or xyz.")

        if edges is not None and faces is None:
            raise ValueError("Either specify both faces and "
                             "edges, only faces, or neither.")

        if edges is not None:
            self.edges = np.asarray(edges)
        if faces is not None:
            self.faces = np.asarray(faces)

        if theta is not None:
            self.theta = np.array(theta, copy=False, ndmin=1)
            self.phi = np.array(phi, copy=False, ndmin=1)
            return

        if xyz is not None:
            xyz = np.asarray(xyz)
            x, y, z = xyz.T

        x, y, z = (np.asarray(t) for t in (x, y, z))
        r, self.theta, self.phi = cart2sphere(x, y, z)

        if not np.allclose(r, 1):
            warnings.warn("Vertices are not on the unit sphere.")
Example #40
File: shm.py Project: Garyfallidis/dipy
    def __init__(self,
                 bval,
                 gradients,
                 sh_order,
                 smooth=0,
                 odf_vertices=None,
                 odf_edges=None):
        """Creates a model that can be used to fit or sample diffusion data

        Arguments
        ---------
        bval : ndarray (n,)
            the b values for the data, where n is the number of volumes in data
        gradients : ndarray (n, 3)
            the diffusion-weighting gradient directions for the data, n is the
            number of volumes in the data
        sh_order : even int >= 0
            the spherical harmonic order of the model
        smooth : float between 0 and 1
            The regularization parameter of the model
        odf_vertices : ndarray (v, 3), optional
            Points on a unit sphere, used to evaluate odf
        odf_edges : ndarray (e, 2), dtype=int16, optional
            A list of Neighboring vertices

        """
        m, n = sph_harm_ind_list(sh_order)
        where_dwi = bval > 0
        self._index = (Ellipsis, lazy_index(where_dwi))
        x, y, z = gradients[where_dwi].T
        r, pol, azi = cart2sphere(x, y, z)
        B = real_sph_harm(m, n, azi[:, None], pol[:, None])
        L = -n * (n + 1)
        legendre0 = lpn(sh_order, 0)[0]
        F = legendre0[n]
        self.B = B
        self._m = m
        self._n = n
        self._set_fit_matrix(B, L, F, smooth)
        if odf_vertices is not None:
            self.set_odf_vertices(odf_vertices, odf_edges)
Example #41
def brainsuite_shore_matrix_pdf(radial_order, zeta, rtab):
    r"""Compute the SHORE propagator matrix [1]_"

    Parameters
    ----------
    radial_order : unsigned int,
        an even integer that represents the order of the basis
    zeta : unsigned int,
        scale factor
    rtab : array, shape (N,3)
        real space points at which the pdf is calculated

    References
    ----------
    .. [1] Merlet S. et al., "Continuous diffusion signal, EAP and
    ODF estimation via Compressive Sensing in diffusion MRI", Medical
    Image Analysis, 2013.
    """

    r, theta, phi = cart2sphere(rtab[:, 0], rtab[:, 1], rtab[:, 2])
    theta[np.isnan(theta)] = 0
    F = radial_order / 2
    psi = []

    # Angular part of the basis - Spherical harmonics
    S, Z, L = real_sym_sh_brainsuite(radial_order, theta, phi)
    Snew = []

    for n in range(radial_order + 1):
        for l in range(0, n + 1, 2):
            Snew.append(S[:, L == l])
            for m in range(-l, l + 1):
                psi.append(
                    genlaguerre(n - l, l + 0.5)(4 * np.pi ** 2 *
                                                zeta * r ** 2) *\
                    np.exp(-2 * np.pi ** 2 * zeta * r ** 2) *\
                    _kappa_pdf(zeta, n, l) * \
                    (4 * np.pi ** 2 * zeta * r ** 2) ** (l / 2) * \
                    (-1) ** (n - l / 2))

    return np.column_stack(psi) * np.column_stack(Snew)
Example #42
def insert_graph(d2mask, graph_i, vic_rots, Panorama_Field):
    cts_shifted2d = np.dot(
        vic_rots,
        d2mask)  # apply the rotation in 2d; shape 3 x (240*320)
    # cts_shifted3d = d2mask.reshape(np.roll(d3mask.shape,1)).transpose(1,2,0) #transform back from 2d to 3d
    r, phi_s, lmd_s = cart2sphere(cts_shifted2d[0, :], cts_shifted2d[1, :],
                                  cts_shifted2d[2, :])  # final location in
    # sphereical coordinate at 2d
    new_row = phi_s / math.pi * 4 * 240
    new_column = (lmd_s + math.pi) / (2 * math.pi) * 6 * 320
    #print(new_row)
    new_location2d = np.array([new_row, new_column])
    new_location3d = new_location2d.reshape(np.roll(
        (240, 320, 2), 1)).transpose(1, 2, 0)  # transform back from 2d to 3d

    for i in range(new_location3d.shape[0]):  # first scanning row 240
        for k in range(new_location3d.shape[1]):  # scanning column
            row = int(new_location3d[i, k, 0])
            column = int(new_location3d[i, k, 1])
            Panorama_Field[row, column, :] = graph_i[i, k, :]
    return Panorama_Field
Example #43
def test_hat_and_lcr():
    v, e, f = create_half_unit_sphere(6)
    m, n = sph_harm_ind_list(8)
    r, pol, azi = cart2sphere(*v.T)
    B = real_sph_harm(m, n, azi[:, None], pol[:, None])
    H = hat(B)
    B_hat = np.dot(H, B)
    assert_array_almost_equal(B, B_hat)

    R = lcr_matrix(H)
    d = np.arange(len(azi))
    r = d - np.dot(H, d)
    lev = np.sqrt(1 - H.diagonal())
    r /= lev
    r -= r.mean()

    r2 = np.dot(R, d)
    assert_array_almost_equal(r, r2)

    r3 = np.dot(d, R.T)
    assert_array_almost_equal(r, r3)
Example #44
    def rotations(self):
        """
        Calculate the response function for alignment with each one of the
        b vectors
        """
        out = []
        for idx, bvec in enumerate(self.bvecs.T):
            rot = ozu.calculate_rotation(bvec, [1, 0, 0])
            bvecs = np.asarray(np.dot(rot, self.bvecs)).squeeze()
            r, theta, phi = geo.cart2sphere(bvecs[0], bvecs[1], bvecs[2])

            sph_harm_set = []
            degree = 0
            for order in np.arange(0, 2 * self.n_coeffs, 2):
                sph_harm_set.append(
                    np.real(sph_harm(degree, order, theta, phi)))

            sph_harm_set = np.array(sph_harm_set)
            out.append(np.dot(self.coeffs, sph_harm_set))

        return np.array(out)
Example #45
    def set_sampling_points(self, sampling_points, sampling_edges=None):
        """Sets the sampling points

        The sampling points are the points at which the model is sampled when
        the sample method is called.

        Parameters
        ----------
        sampling_points : ndarray (n, 3), dtype=float
            The x, y, z coordinates of n points on a unit sphere.
        sampling_edges : ndarray (m, 2), dtype=int, optional
            Indices to sampling_points so that every unique pair of neighbors
            in sampling_points is one of the m edges.

        """
        x, y, z = sampling_points.T
        r, pol, azi = cart2sphere(x, y, z)
        S = real_sph_harm(self._m, self._n, azi[:, None], pol[:, None])

        self._sampling_matrix = dot(S, self._fit_matrix)
        self._sampling_points = sampling_points
        self._sampling_edges = sampling_edges
Example #46
def esh_matrix(order, gtab):
    """ Matrix that evaluates SH coeffs in the given directions

    Parameters
    ----------
    order : int
        Maximum (even) SH order.
    gtab : GradientTable
        Gradient table whose b-vectors give the evaluation directions.

    Returns
    -------
    M : ndarray
        Matrix evaluating the SH coefficients in the given directions.
    """
    bvecs = gtab.bvecs
    r, theta, phi = cart2sphere(bvecs[:, 0], bvecs[:, 1], bvecs[:, 2])
    theta[np.isnan(theta)] = 0
    M = np.zeros((bvecs.shape[0], esh.LENGTH[order]))
    counter = 0
    for l in range(0, order + 1, 2):
        for m in range(-l, l + 1):
            M[:, counter] = real_sph_harm(m, l, theta, phi)
            counter += 1
    return M
Example #47
def flow_angle(source_vertex, target_vertex):
    """
    Compute normalized unit vector step direction between two surfaces.

    Parameters:
    - - - - -
    source_vertex: float, array
        source mesh vertices
    target_vertex: float, array
        target mesh vertices
    """

    mvmt = target_vertex - source_vertex
    mvmt = mvmt / np.linalg.norm(mvmt, axis=1)[:, None]

    x = mvmt[:, 0]
    y = mvmt[:, 1]
    z = mvmt[:, 2]

    [r, theta, phi] = geometry.cart2sphere(x, y, z)

    return r, theta, phi
Example #48
def shore_matrix_pdf(radial_order, zeta, rtab):
    r"""Compute the SHORE propagator matrix [1]_"

    Parameters
    ----------
    radial_order : unsigned int,
        an even integer that represents the order of the basis
    zeta : unsigned int,
        scale factor
    rtab : array, shape (N,3)
        real space points at which the pdf is calculated

    References
    ----------
    .. [1] Merlet S. et al., "Continuous diffusion signal, EAP and
    ODF estimation via Compressive Sensing in diffusion MRI", Medical
    Image Analysis, 2013.
    """

    r, theta, phi = cart2sphere(rtab[:, 0], rtab[:, 1], rtab[:, 2])
    theta[np.isnan(theta)] = 0
    F = radial_order / 2
    n_c = int(np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3)))
    psi = np.zeros((r.shape[0], n_c))
    counter = 0
    for l in range(0, radial_order + 1, 2):
        for n in range(l, int((radial_order + l) / 2) + 1):
            for m in range(-l, l + 1):
                psi[:, counter] = real_sh_descoteaux_from_index(
                    m, l, theta, phi) * \
                    genlaguerre(n - l, l + 0.5)(4 * np.pi ** 2 *
                                                zeta * r ** 2) *\
                    np.exp(-2 * np.pi ** 2 * zeta * r ** 2) *\
                    _kappa_pdf(zeta, n, l) *\
                    (4 * np.pi ** 2 * zeta * r ** 2) ** (l / 2) * \
                    (-1) ** (n - l / 2)
                counter += 1
    return psi
Example #49
def shore_K_mu_dependent(radial_order, mu, rgrad):
    '''Computes mu dependent part of K [2]. Same trick as with Q.
    '''
    r, theta, phi = cart2sphere(rgrad[:, 0], rgrad[:, 1], rgrad[:, 2])
    theta[np.isnan(theta)] = 0

    ind_mat = shore_index_matrix(radial_order)

    n_elem = ind_mat.shape[0]
    n_rgrad = rgrad.shape[0]
    K = np.zeros((n_rgrad, n_elem))

    counter = 0
    for n in range(0, radial_order + 1, 2):
        for j in range(1, 2 + n // 2):
            l = n + 2 - 2 * j
            const = (mu ** 3) ** (-1) * mu ** (-l) *\
                np.exp(-r ** 2 / (2 * mu ** 2)) *\
                genlaguerre(j - 1, l + 0.5)(r ** 2 / mu ** 2)
            for m in range(-l, l + 1):
                K[:, counter] = const
                counter += 1
    return K
Example #50
def test_smooth_pinv():
    v, e, f = create_half_unit_sphere(3)
    m, n = sph_harm_ind_list(4)
    r, pol, azi = cart2sphere(*v.T)
    B = real_sph_harm(m, n, azi[:, None], pol[:, None])

    L = np.zeros(len(m))
    C = smooth_pinv(B, L)
    D = np.dot(npl.inv(np.dot(B.T, B)), B.T)
    assert_array_almost_equal(C, D)

    L = n * (n + 1) * .05
    C = smooth_pinv(B, L)
    L = np.diag(L)
    D = np.dot(npl.inv(np.dot(B.T, B) + L * L), B.T)

    assert_array_almost_equal(C, D)

    L = np.arange(len(n)) * .05
    C = smooth_pinv(B, L)
    L = np.diag(L)
    D = np.dot(npl.inv(np.dot(B.T, B) + L * L), B.T)
    assert_array_almost_equal(C, D)
Example #51
def shore_K_mu_independent(radial_order, rgrad):
    '''Computes mu independent part of K [2]. Same trick as with Q.
    '''
    r, theta, phi = cart2sphere(rgrad[:, 0], rgrad[:, 1], rgrad[:, 2])
    theta[np.isnan(theta)] = 0

    ind_mat = shore_index_matrix(radial_order)

    n_elem = ind_mat.shape[0]
    n_rgrad = rgrad.shape[0]
    K = np.zeros((n_rgrad, n_elem))

    counter = 0
    for n in range(0, radial_order + 1, 2):
        for j in range(1, 2 + n // 2):
            l = n + 2 - 2 * j
            const = (-1) ** (j - 1) *\
                (np.sqrt(2) * np.pi) ** (-1) *\
                (r ** 2 / 2) ** (l / 2)
            for m in range(-l, l + 1):
                K[:, counter] = const * real_sph_harm(m, l, theta, phi)
                counter += 1
    return K
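
Going by the "same trick as with Q" comments, the full K matrix is presumably the element-wise product of the two parts, so the mu-independent factor can be computed once and reused while mu varies. A hedged sketch (radial_order, mu, rgrad and coef are illustrative):

import numpy as np

radial_order, mu = 6, 1e-3
rgrad = np.array([[0.0, 0.0, 0.0],
                  [0.01, 0.0, 0.0],
                  [0.0, 0.0, 0.01]])

K_indep = shore_K_mu_independent(radial_order, rgrad)        # reusable across mu
K = K_indep * shore_K_mu_dependent(radial_order, mu, rgrad)  # element-wise product
coef = np.random.randn(K.shape[1])
pdf = np.dot(K, coef)    # propagator values at rgrad for coefficients `coef`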
Example #52
def vecs2hemi(vecs):
    """
    Take vecs in x,y,z and make sure that they are all pointing towards the
    same hemisphere, rotating those that are not to their antipodal as necessary.

    Parameters
    ----------
    vecs : float array
        Vectors in 3 space


    Returns
    -------
    new_vecs : the vectors, with all vectors pointing towards positive y values
    
    """
    r, theta, phi = geo.cart2sphere(vecs[0], vecs[1], vecs[2])

    # Vectors with negative azimuth (phi < 0) point into the other hemisphere
    anti_idx = np.where(phi < 0)
    new_vecs = np.copy(vecs)
    new_vecs[:, anti_idx] *= -1  # Invert antipodally
    return new_vecs
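
A small hypothetical usage example (not from the source): the vectors are the columns of a (3, N) array, and the one pointing into negative y is flipped.

import numpy as np

vecs = np.array([[1.0,  0.0, 0.0],
                 [0.0, -1.0, 0.0],
                 [0.0,  0.0, 1.0]]).T   # columns are the vectors
hemi_vecs = vecs2hemi(vecs)
# The second column (0, -1, 0) comes back flipped to (0, 1, 0); the other
# two columns are unchanged.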
Example #53
    def __init__(self, sh_order, bval, bvec, smooth=0, sampling_points=None,
                 sampling_edges=None):
        """Creates a model that can be used to fit or sample diffusion data

        Arguments
        ---------
        sh_order : even int >= 0
            the spherical harmonic order of the model
        bval : ndarray (n,)
            the b values for the data, where n is the number of volumes in data
        bvec : ndarray (3, n)
            the diffusion weighting gradient directions for the data, n is the
            number of volumes in the data
        smooth : float between 0 and 1
            The regularization parameter of the model
        sampling_points : ndarray (3, m), optional
            points for sampling the model, these points are used when the
            sample method is called
        sampling_edges : ndarray (e, 2), dtype=int, optional
            Indices to sampling_points so that every unique pair of neighbors
            in sampling_points is one of the e edges.

        """
        bvec = bvec[:, bval > 0]
        m, n = sph_harm_ind_list(sh_order)
        x, y, z = bvec
        r, pol, azi = cart2sphere(x, y, z)
        B = real_sph_harm(m, n, azi[:, None], pol[:, None])
        L = -n*(n+1)
        legendre0 = lpn(sh_order, 0)[0]
        F = legendre0[n]
        self.B = B
        self._m = m
        self._n = n
        self._set_fit_matrix(B, L, F, smooth)
        if sampling_points is not None:
            self.set_sampling_points(sampling_points, sampling_edges)
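
The class this constructor belongs to is not shown in the snippet, so here is only a hedged, standalone sketch of the design-matrix setup it performs (the bval/bvec arrays are hypothetical):

import numpy as np
from scipy.special import lpn
from dipy.core.geometry import cart2sphere
from dipy.reconst.shm import sph_harm_ind_list, real_sph_harm

# Hypothetical acquisition: two b=0 volumes plus three gradient directions.
bval = np.array([0., 0., 1000., 1000., 1000.])
bvec = np.array([[0., 0., 1., 0., 0.],
                 [0., 0., 0., 1., 0.],
                 [0., 0., 0., 0., 1.]])

bvec = bvec[:, bval > 0]
m, n = sph_harm_ind_list(4)
r, pol, azi = cart2sphere(*bvec)
B = real_sph_harm(m, n, azi[:, None], pol[:, None])  # design matrix (n_dwi, n_coef)
L = -n * (n + 1)                                     # Laplace-Beltrami weights
F = lpn(4, 0)[0][n]                                  # Legendre P_n(0) values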
Example #54
def shore_matrix_odf(radial_order, zeta, sphere_vertices):
    r"""Compute the SHORE ODF matrix [1]_"

    Parameters
    ----------
    radial_order : unsigned int,
        an even integer that represent the order of the basis
    zeta : unsigned int,
        scale factor
    sphere_vertices : array, shape (N,3)
        vertices of the odf sphere

    References
    ----------
    .. [1] Merlet S. et al., "Continuous diffusion signal, EAP and
    ODF estimation via Compressive Sensing in diffusion MRI", Medical
    Image Analysis, 2013.
    """

    r, theta, phi = cart2sphere(sphere_vertices[:, 0], sphere_vertices[:, 1],
                                sphere_vertices[:, 2])
    theta[np.isnan(theta)] = 0
    upsilon = []

    # Angular part of the basis - Spherical harmonics
    S, Z, L = real_sym_sh_brainsuite(radial_order, theta, phi)
    Snew = []

    for n in range(radial_order + 1):
        for l in range(0, n + 1, 2):
            Snew.append(S[:, L == l])
            for m in range(-l, l + 1):
                upsilon.append( (-1) ** (n - l / 2.0) * \
                    _kappa_odf(zeta, n, l) * \
                    hyp2f1(l - n, l / 2.0 + 1.5, l + 1.5, 2.0))

    return np.column_stack(upsilon) * np.column_stack(Snew)
Example #55
    def top_axa(self):
        """
        Axis-angle representation of points with max overlap (all shells, real part)
        :return: axis-angles
        """
        for shell in range(0, self.shellN):
            temp = self.so3[:, :, :, 0, shell]
            max_indices = np.asarray(np.where(temp == 1))

            shape = max_indices.shape

            x = np.empty(shape[1])
            y = np.empty(shape[1])
            z = np.empty(shape[1])
            psi = np.empty(shape[1])
            for i in range(0, shape[1]):
                max_index = max_indices[:, i]
                beta = max_index[0] * (np.pi / (self.N - 1))
                alpha = max_index[1] * (2 * np.pi / (self.N - 1))
                gamma = max_index[2] * (2 * np.pi / (self.N - 1))
                axis_angle = somemath.euler_to_axis_angle_zyz(
                    alpha, beta, gamma)
                x[i] = axis_angle[0][0]
                y[i] = axis_angle[0][1]
                z[i] = axis_angle[0][2]
                psi[i] = axis_angle[1]
            r, theta, phi = cart2sphere(x, y, z)
            axa_top = np.row_stack((theta, phi, psi))
            self.max_indices.append(max_indices)
            self.axa_top.append(axa_top)

        # self.axa_top is a list of (3, n_points) arrays, one per shell;
        # average the psi angle of the top point over all shells but the first.
        avg = np.asarray([axa[2, 0] for axa in self.axa_top[1:]])
        avg = avg.sum() / (self.shellN - 1)
        self.top_average = avg
Example #56
def shore_matrix_odf(radial_order, zeta, sphere_vertices):
    r"""Compute the SHORE ODF matrix [1]_"

    Parameters
    ----------
    radial_order : unsigned int,
        an even integer that represent the order of the basis
    zeta : unsigned int,
        scale factor
    sphere_vertices : array, shape (N,3)
        vertices of the odf sphere

    References
    ----------
    .. [1] Merlet S. et al., "Continuous diffusion signal, EAP and
    ODF estimation via Compressive Sensing in diffusion MRI", Medical
    Image Analysis, 2013.
    """

    r, theta, phi = cart2sphere(sphere_vertices[:, 0], sphere_vertices[:, 1],
                                sphere_vertices[:, 2])
    theta[np.isnan(theta)] = 0
    F = radial_order / 2
    n_c = int(np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3)))
    upsilon = np.zeros((len(sphere_vertices), n_c))
    counter = 0
    for l in range(0, radial_order + 1, 2):
        for n in range(l, int((radial_order + l) / 2) + 1):
            for m in range(-l, l + 1):
                upsilon[:, counter] = (-1) ** (n - l / 2.0) * \
                    _kappa_odf(zeta, n, l) * \
                    hyp2f1(l - n, l / 2.0 + 1.5, l + 1.5, 2.0) * \
                    real_sh_descoteaux_from_index(m, l, theta, phi)
                counter += 1

    return upsilon
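
A hedged usage sketch (the sphere and the coefficients are illustrative): the matrix maps SHORE coefficients to ODF samples at the given sphere vertices.

import numpy as np
from dipy.data import default_sphere

upsilon = shore_matrix_odf(radial_order=6, zeta=700,
                           sphere_vertices=default_sphere.vertices)
# `coef` would come from fitting the signal basis; random here for shape only.
coef = np.random.randn(upsilon.shape[1])
odf = np.dot(upsilon, coef)   # ODF sampled at the sphere vertices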
Example #57
File: sphere_stats.py Project: jgors/dipy
def random_uniform_on_sphere(n=1, coords='xyz'):
    r''' Random unit vectors from a uniform distribution on the sphere

    Parameters
    ----------
    n : int, number of random vectors
    coords : str, 'xyz' in cartesian form
        'radians' for spherical form in rads
        'degrees' for spherical form in degrees

    Returns
    -------
    X : array, shape (n,3) if coords='xyz' or shape (n,2) otherwise

    Examples
    --------
    >>> from dipy.core.sphere_stats import random_uniform_on_sphere
    >>> X=random_uniform_on_sphere(4,'radians')
    >>> X.shape
    (4, 2)
    >>> X=random_uniform_on_sphere(4,'xyz')
    >>> X.shape
    (4, 3)
    '''
    u = np.random.normal(0, 1, (n, 3))
    u = u / np.sqrt(np.sum(u**2, axis=1)).reshape(n, 1)
    if coords == 'xyz':
        return u
    else:
        angles = np.zeros((n, 2))
        for (i, xyz) in enumerate(u):
            angles[i, :] = geometry.cart2sphere(*xyz)[1:]
        if coords == 'radians':
            return angles
        if coords == 'degrees':
            return (180. / np.pi) * angles
Example #58
def shore_matrix(radial_order, zeta, gtab, tau=1 / (4 * np.pi ** 2)):
    r"""Compute the SHORE matrix for modified Merlet's 3D-SHORE [1]_

    .. math::
            :nowrap:
                \begin{equation}
                    \textbf{E}(q\textbf{u})=\sum_{l=0, even}^{N_{max}}
                                            \sum_{n=l}^{(N_{max}+l)/2}
                                            \sum_{m=-l}^l c_{nlm}
                                            \phi_{nlm}(q\textbf{u})
                \end{equation}

    where $\phi_{nlm}$ is

    .. math::
            :nowrap:
                \begin{equation}
                    \phi_{nlm}^{SHORE}(q\textbf{u})=\Biggl[\dfrac{2(n-l)!}
                        {\zeta^{3/2} \Gamma(n+3/2)} \Biggr]^{1/2}
                        \Biggl(\dfrac{q^2}{\zeta}\Biggr)^{l/2}
                        \exp\Biggl(\dfrac{-q^2}{2\zeta}\Biggr)
                        L^{l+1/2}_{n-l} \Biggl(\dfrac{q^2}{\zeta}\Biggr)
                        Y_l^m(\textbf{u}).
                \end{equation}

    Parameters
    ----------
    radial_order : unsigned int
        an even integer that represents the order of the basis
    zeta : unsigned int
        scale factor
    gtab : GradientTable
        gradient directions and bvalues container class
    tau : float
        diffusion time. By default the value that makes q=sqrt(b).

    References
    ----------
    .. [1] Merlet S. et al., "Continuous diffusion signal, EAP and
    ODF estimation via Compressive Sensing in diffusion MRI", Medical
    Image Analysis, 2013.

    """

    qvals = np.sqrt(gtab.bvals / (4 * np.pi ** 2 * tau))
    qvals[gtab.b0s_mask] = 0
    bvecs = gtab.bvecs

    qgradients = qvals[:, None] * bvecs

    r, theta, phi = cart2sphere(qgradients[:, 0], qgradients[:, 1],
                                qgradients[:, 2])
    theta[np.isnan(theta)] = 0
    F = radial_order / 2
    n_c = int(np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3)))
    M = np.zeros((r.shape[0], n_c))

    counter = 0
    for l in range(0, radial_order + 1, 2):
        for n in range(l, int((radial_order + l) / 2) + 1):
            for m in range(-l, l + 1):
                M[:, counter] = real_sh_descoteaux_from_index(
                    m, l, theta, phi) * \
                    genlaguerre(n - l, l + 0.5)(r ** 2 / zeta) * \
                    np.exp(- r ** 2 / (2.0 * zeta)) * \
                    _kappa(zeta, n, l) * \
                    (r ** 2 / zeta) ** (l / 2)
                counter += 1
    return M
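
A hedged fitting sketch (gtab and the per-voxel signal are assumed to exist): since the matrix maps SHORE coefficients to signal values at the acquired q-space points, coefficients can be estimated by ordinary least squares.

import numpy as np

M = shore_matrix(radial_order=6, zeta=700, gtab=gtab)     # (n_volumes, n_c)
s = signal / signal[gtab.b0s_mask].mean()                 # b0-normalized signal
coef, *_ = np.linalg.lstsq(M, s, rcond=None)              # fit one voxel
recon = np.dot(M, coef)          # model-predicted signal at the acquired points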
Example #59
File: csdeconv.py Project: mbeyeler/dipy
def recursive_response(gtab,
                       data,
                       mask=None,
                       sh_order=8,
                       peak_thr=0.01,
                       init_fa=0.08,
                       init_trace=0.0021,
                       iter=8,
                       convergence=0.001,
                       parallel=True,
                       nbr_processes=None,
                       sphere=default_sphere):
    """ Recursive calibration of response function using peak threshold

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data
    mask : ndarray, optional
        mask for recursive calibration, for example a white matter mask. It has
        shape `data.shape[0:3]` and dtype=bool. Default: use the entire data
        array.
    sh_order : int, optional
        maximal spherical harmonics order. Default: 8
    peak_thr : float, optional
        peak threshold, how large the second peak can be relative to the first
        peak in order to call it a single fiber population [1]. Default: 0.01
    init_fa : float, optional
        FA of the initial 'fat' response function (tensor). Default: 0.08
    init_trace : float, optional
        trace of the initial 'fat' response function (tensor). Default: 0.0021
    iter : int, optional
        maximum number of iterations for calibration. Default: 8.
    convergence : float, optional
        convergence criterion, maximum relative change of SH
        coefficients. Default: 0.001.
    parallel : bool, optional
        Whether to use parallelization in peak-finding during the calibration
        procedure. Default: True
    nbr_processes: int
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).
    sphere : Sphere, optional.
        The sphere used for peak finding. Default: default_sphere.

    Returns
    -------
    response : ndarray
        response function in SH coefficients

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. Using an FA threshold is not a very robust method.
    It is dependent on the dataset (non-informed user subjectivity), and still
    depends on the diffusion tensor (FA and first eigenvector),
    which has low accuracy at high b-value. This function recursively
    calibrates the response function, for more information see [1].

    References
    ----------
    .. [1] Tax, C.M.W., et al. NeuroImage 2014. Recursive calibration of
           the fiber response function for spherical deconvolution of
           diffusion MRI data.
    """
    S0 = 1.
    evals = fa_trace_to_lambdas(init_fa, init_trace)
    res_obj = (evals, S0)

    if mask is None:
        data = data.reshape(-1, data.shape[-1])
    else:
        data = data[mask]

    n = np.arange(0, sh_order + 1, 2)
    where_dwi = lazy_index(~gtab.b0s_mask)
    response_p = np.ones(len(n))

    for _ in range(iter):
        r_sh_all = np.zeros(len(n))
        csd_model = ConstrainedSphericalDeconvModel(gtab,
                                                    res_obj,
                                                    sh_order=sh_order)

        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     relative_peak_threshold=peak_thr,
                                     min_separation_angle=25,
                                     parallel=parallel,
                                     nbr_processes=nbr_processes)

        dirs = csd_peaks.peak_dirs
        vals = csd_peaks.peak_values
        single_peak_mask = (vals[:, 1] / vals[:, 0]) < peak_thr
        data = data[single_peak_mask]
        dirs = dirs[single_peak_mask]

        for num_vox in range(data.shape[0]):
            rotmat = vec2vec_rotmat(dirs[num_vox, 0], np.array([0, 0, 1]))

            rot_gradients = np.dot(rotmat, gtab.gradients.T).T

            x, y, z = rot_gradients[where_dwi].T
            r, theta, phi = cart2sphere(x, y, z)
            # for the gradient sphere
            B_dwi = real_sph_harm(0, n, theta[:, None], phi[:, None])
            r_sh_all += np.linalg.lstsq(B_dwi,
                                        data[num_vox, where_dwi],
                                        rcond=-1)[0]

        response = r_sh_all / data.shape[0]
        res_obj = AxSymShResponse(data[:, gtab.b0s_mask].mean(), response)

        change = abs((response_p - response) / response_p)
        if all(change < convergence):
            break

        response_p = response

    return res_obj
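
A hedged end-to-end sketch (data, gtab and a white-matter mask wm_mask are assumed to exist): the calibrated response is then passed to constrained spherical deconvolution, which is how recursive calibration is normally used.

from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel

# Recursively calibrate the single-fiber response, then run CSD with it.
response = recursive_response(gtab, data, mask=wm_mask, sh_order=8,
                              peak_thr=0.01, init_fa=0.08,
                              init_trace=0.0021, iter=8,
                              convergence=0.001, parallel=False)
csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
csd_fit = csd_model.fit(data, mask=wm_mask)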