Example #1
def test_sphere_cart():
    # test arrays of points
    rs, thetas, phis = cart2sphere(*(sphere_points.T))
    xyz = sphere2cart(rs, thetas, phis)
    assert_array_almost_equal(xyz, sphere_points.T)
    # test radius estimation
    big_sph_pts = sphere_points * 10.4
    rs, thetas, phis = cart2sphere(*big_sph_pts.T)
    assert_array_almost_equal(rs, 10.4)
    xyz = sphere2cart(rs, thetas, phis)
    assert_array_almost_equal(xyz, big_sph_pts.T, decimal=6)
    # test that result shapes match
    x, y, z = big_sph_pts.T
    r, theta, phi = cart2sphere(x[:1], y[:1], z)
    assert_equal(r.shape, theta.shape)
    assert_equal(r.shape, phi.shape)
    x, y, z = sphere2cart(r[:1], theta[:1], phi)
    assert_equal(x.shape, y.shape)
    assert_equal(x.shape, z.shape)
    # test a scalar point
    pt = sphere_points[3]
    r, theta, phi = cart2sphere(*pt)
    xyz = sphere2cart(r, theta, phi)
    assert_array_almost_equal(xyz, pt)

    # Test full circle on x=1, y=1, z=1
    x, y, z = sphere2cart(*cart2sphere(1.0, 1.0, 1.0))
    assert_array_almost_equal((x, y, z), (1.0, 1.0, 1.0))
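All of the examples on this page use dipy's spherical convention: theta is the polar angle measured from the +z axis, phi is the azimuth in the x-y plane, and sphere2cart(r, theta, phi) / cart2sphere(x, y, z) are inverses of each other. The snippet below is a minimal NumPy sketch of that convention, useful for checking the round trip without dipy; the _np-suffixed helpers are illustrative stand-ins, not dipy's implementation.

import numpy as np

def sphere2cart_np(r, theta, phi):
    # theta: polar angle from +z, phi: azimuth in the x-y plane
    x = r * np.sin(theta) * np.cos(phi)
    y = r * np.sin(theta) * np.sin(phi)
    z = r * np.cos(theta)
    return x, y, z

def cart2sphere_np(x, y, z):
    # assumes the point is not exactly at the origin
    r = np.sqrt(x * x + y * y + z * z)
    theta = np.arccos(z / r)
    phi = np.arctan2(y, x)
    return r, theta, phi

# Round trip on the same point as in the test above:
r, theta, phi = cart2sphere_np(1.0, 1.0, 1.0)
print(np.allclose(sphere2cart_np(r, theta, phi), (1.0, 1.0, 1.0)))  # True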
Example #2
def test_sphere_cart():
    # test arrays of points
    rs, thetas, phis = cart2sphere(*(sphere_points.T))
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal, xyz, sphere_points.T
    # test radius estimation
    big_sph_pts = sphere_points * 10.4
    rs, thetas, phis = cart2sphere(*big_sph_pts.T)
    yield assert_array_almost_equal, rs, 10.4
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal, xyz, big_sph_pts.T, 6
    # test that result shapes match
    x, y, z = big_sph_pts.T
    r, theta, phi = cart2sphere(x[:1], y[:1], z)
    yield assert_equal, r.shape, theta.shape
    yield assert_equal, r.shape, phi.shape
    x, y, z = sphere2cart(r[:1], theta[:1], phi)
    yield assert_equal, x.shape, y.shape
    yield assert_equal, x.shape, z.shape
    # test a scalar point
    pt = sphere_points[3]
    r, theta, phi = cart2sphere(*pt)
    xyz = sphere2cart(r, theta, phi)
    yield assert_array_almost_equal, xyz, pt

    # Test full circle on x=0, y=0, z=0
    x, y, z = sphere2cart(*cart2sphere(0., 0., 0.))
    yield assert_array_equal, (x, y, z), (0., 0., 0.)
Example #3
def test_sphere_cart():
    # test arrays of points
    rs, thetas, phis = cart2sphere(*(sphere_points.T))
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal(xyz, sphere_points.T)
    # test radius estimation
    big_sph_pts = sphere_points * 10.4
    rs, thetas, phis = cart2sphere(*big_sph_pts.T)
    yield assert_array_almost_equal(rs, 10.4)
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal(xyz, big_sph_pts.T, 6)
    # test a scalar point
    pt = sphere_points[3]
    r, theta, phi = cart2sphere(*pt)
    xyz = sphere2cart(r, theta, phi)
    yield assert_array_almost_equal(xyz, pt)
Example #4
    def _pred_sig(self, theta, phi, beta, lambda1, lambda2):
        """
        The predicted signal for a particular setting of the parameters
        """

        Q = np.array([[lambda1, 0, 0], [0, lambda2, 0], [0, 0, lambda2]])

        # If for some reason this is not symmetrical, then something is wrong
        # (in all likelihood, this means that the optimization process is
        # trying out some crazy value, such as nan). In that case, abort and
        # return a nan:
        if not np.allclose(Q.T, Q):
            return np.nan

        response_function = ozt.Tensor(Q, self.bvecs[:, self.b_idx],
                                       self.bvals[:, self.b_idx])

        # Convert theta and phi to cartesian coordinates:
        x, y, z = geo.sphere2cart(1, theta, phi)
        bvec = [x, y, z]
        evals, evecs = response_function.decompose

        rot_tensor = ozt.tensor_from_eigs(
            evecs * ozu.calculate_rotation(bvec, evecs[0]), evals,
            self.bvecs[:, self.b_idx], self.bvals[:, self.b_idx])

        iso_sig = np.exp(-self.bvals[self.b_idx][0] * lambda1)
        tensor_sig = rot_tensor.predicted_signal(1)

        return beta * iso_sig + (1 - beta) * tensor_sig
Example #5
def test_sphere_cart():
    # test arrays of points
    rs, thetas, phis = cart2sphere(*(sphere_points.T))
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal(xyz, sphere_points.T)
    # test radius estimation
    big_sph_pts = sphere_points * 10.4
    rs, thetas, phis = cart2sphere(*big_sph_pts.T)
    yield assert_array_almost_equal(rs, 10.4)
    xyz = sphere2cart(rs, thetas, phis)
    yield assert_array_almost_equal(xyz, big_sph_pts.T, 6)
    # test a scalar point
    pt = sphere_points[3]
    r, theta, phi = cart2sphere(*pt)
    xyz = sphere2cart(r, theta, phi)
    yield assert_array_almost_equal(xyz, pt)
Example #6
def callnonlin(x0,Sreal,S0,bvals,gradients):
    
    d=x0[0]/10.**4
    if len(x0)==1:
        S=np.zeros(len(gradients))
        for (i,g) in enumerate(gradients[1:]):
            S[i+1]=S0*np.exp(-bvals[i+1]*d)
        S[0]=S0
        return Sreal-S
    if len(x0)==4:
        angles=[(x0[1],x0[2])]
        fractions=[x0[3]/100.]
    if len(x0)==7:
        angles=[(x0[1],x0[2]),(x0[3],x0[4])]
        fractions=[x0[5]/100.,x0[6]/100.]
    if len(x0)==10:
        angles=[(x0[1],x0[2]),(x0[3],x0[4]),(x0[5],x0[6])]
        fractions=[x0[7]/100.,x0[8]/100.,x0[9]/100.]    
    f0=1-np.sum(fractions)    
    S=np.zeros(len(gradients))        
    sticks=[ sphere2cart(1,np.deg2rad(pair[0]),np.deg2rad(pair[1]))  for pair in angles]
    sticks=np.array(sticks)    
    for (i,g) in enumerate(gradients[1:]):
        S[i+1]=f0*np.exp(-bvals[i+1]*d)+ np.sum([fractions[j]*np.exp(-bvals[i+1]*d*np.dot(s,g)**2) for (j,s) in enumerate(sticks)])
        S[i+1]=S0*S[i+1]    
    S[0]=S0
    return Sreal-S
Example #7
File: mpl.py Project: zhangerjun/osmosis
def plot_ellipsoid_mpl(Tensor, n=60):
    """
    Plot an ellipsoid from a tensor using matplotlib

    Parameters
    ----------

    Tensor: an ozt.Tensor class instance

    n: optional. If an integer is provided, we will plot this for a sphere with
    n (grid) equi-sampled points. Otherwise, we will plot the originally
    provided Tensor's bvecs.

    
    """

    x, y, z = sphere(n=n)
    new_bvecs = np.vstack([x.ravel(), y.ravel(), z.ravel()])
    Tensor = ozt.Tensor(Tensor.Q, new_bvecs,
                        np.ones(new_bvecs.shape[-1]) * Tensor.bvals[0])

    v = Tensor.diffusion_distance.reshape(x.shape)

    r, phi, theta = geo.cart2sphere(x, y, z)
    x_plot, y_plot, z_plot = geo.sphere2cart(v, phi, theta)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    ax.plot_surface(x_plot, y_plot, z_plot, rstride=2, cstride=2, shade=True)

    return fig
Example #8
def test_perpendicular_directions():
    num = 35

    vectors_v = np.zeros((4, 3))

    for v in range(4):
        theta = random.uniform(0, np.pi)
        phi = random.uniform(0, 2*np.pi)
        vectors_v[v] = sphere2cart(1., theta, phi)
    vectors_v[3] = [1, 0, 0]

    for vector_v in vectors_v:
        pd = perpendicular_directions(vector_v, num=num, half=False)

        # see if length of pd is equal to the number of intended samples
        assert_equal(num, len(pd))

        # check if all directions are perpendicular to vector v
        for d in pd:
            cos_angle = np.dot(d, vector_v)
            assert_almost_equal(cos_angle, 0)

        # check if directions are sampled by multiples of 2*pi / num
        delta_a = 2. * np.pi / num
        for d in pd[1:]:
            angle = np.arccos(np.dot(pd[0], d))
            rest = angle % delta_a
            if rest > delta_a * 0.99:  # To correct cases of negative error
                rest = rest - delta_a
            assert_almost_equal(rest, 0)
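The properties checked above (num evenly spaced unit vectors, all orthogonal to v) can be reproduced with a short NumPy construction. The sketch below only illustrates that idea and is not dipy's perpendicular_directions:

import numpy as np

def perpendicular_directions_sketch(v, num=30):
    # Build an orthonormal basis (e1, e2) of the plane orthogonal to the unit vector v
    v = np.asarray(v, dtype=float)
    v = v / np.linalg.norm(v)
    helper = np.eye(3)[np.argmin(np.abs(v))]  # coordinate axis least aligned with v
    e1 = np.cross(v, helper)
    e1 = e1 / np.linalg.norm(e1)
    e2 = np.cross(v, e1)
    # Sample the orthogonal plane at multiples of 2 * pi / num
    ang = 2.0 * np.pi * np.arange(num) / num
    return np.cos(ang)[:, None] * e1 + np.sin(ang)[:, None] * e2

pd = perpendicular_directions_sketch([1.0, 0.0, 0.0], num=35)
print(np.allclose(pd @ np.array([1.0, 0.0, 0.0]), 0.0))  # all perpendicular: True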
Example #9
    def _pred_sig(self, theta, phi, beta, lambda1, lambda2):
        """
        The predicted signal for a particular setting of the parameters
        """

        Q = np.array([[lambda1, 0, 0],
                      [0, lambda2, 0],
                      [0, 0, lambda2]])

        # If for some reason this is not symmetrical, then something is wrong
        # (in all likelihood, this means that the optimization process is
        # trying out some crazy value, such as nan). In that case, abort and
        # return a nan:
        if not np.allclose(Q.T, Q):
            return np.nan
        
        response_function = ozt.Tensor(Q,
                                        self.bvecs[:,self.b_idx],
                                        self.bvals[:,self.b_idx])
                                        
        # Convert theta and phi to cartesian coordinates:
        x,y,z = geo.sphere2cart(1, theta, phi)
        bvec = [x,y,z]
        evals, evecs = response_function.decompose

        rot_tensor = ozt.tensor_from_eigs(
            evecs * ozu.calculate_rotation(bvec, evecs[0]),
            evals, self.bvecs[:,self.b_idx], self.bvals[:,self.b_idx])

        iso_sig = np.exp(-self.bvals[self.b_idx][0] * lambda1)
        tensor_sig =  rot_tensor.predicted_signal(1)

        return beta * iso_sig + (1-beta) * tensor_sig
Example #10
def plot_ellipsoid_mpl(Tensor, n=60):
    """
    Plot an ellipsoid from a tensor using matplotlib

    Parameters
    ----------

    Tensor: an ozt.Tensor class instance

    n: optional. If an integer is provided, we will plot this for a sphere with
    n (grid) equi-sampled points. Otherwise, we will plot the originally
    provided Tensor's bvecs.

    
    """

    x,y,z = sphere(n=n)
    new_bvecs = np.vstack([x.ravel(), y.ravel(), z.ravel()])
    Tensor = ozt.Tensor(Tensor.Q, new_bvecs,
                        np.ones(new_bvecs.shape[-1]) * Tensor.bvals[0])

    v = Tensor.diffusion_distance.reshape(x.shape)
    
    r, phi, theta = geo.cart2sphere(x,y,z)
    x_plot, y_plot, z_plot = geo.sphere2cart(v, phi, theta)

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    ax.plot_surface(x_plot, y_plot, z_plot,  rstride=2, cstride=2, shade=True)

    return fig
Example #11
def _check_directions(angles):
    """
    Helper function to check if direction ground truth have the right format
    and are in cartesian coordinates

    Parameters
    ----------
    angles : array (K,2) or (K, 3)
        List of K polar angles (in degrees) for the sticks or array of K
        sticks as unit vectors.

    Returns
    -------
    sticks : (K,3)
        Sticks in cartesian coordinates.
    """
    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [
            sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
            for pair in angles
        ]
        sticks = np.array(sticks)

    return sticks
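A quick usage sketch for the helper above, assuming the same imports as in the example (numpy and dipy.core.geometry.sphere2cart): polar angles in degrees go in, unit sticks come out. Note that theta = 0 maps to +z and theta = 90, phi = 0 maps to +x; small floating-point residue (e.g. cos(pi/2) ~ 6e-17) is expected in the output.

sticks = _check_directions([(0, 0), (90, 0)])
# approximately [[0, 0, 1],
#                [1, 0, 0]]  -- one unit vector per (theta, phi) pair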
Example #12
File: voxel.py Project: hassemlal/dipy
def _check_directions(angles):
    """
    Helper function to check if direction ground truth have the right format
    and are in cartesian coordinates

    Parameters
    -----------
    angles : array (K,2) or (K, 3)
        List of K polar angles (in degrees) for the sticks or array of K
        sticks as unit vectors.

    Returns
    --------
    sticks : (K,3)
        Sticks in cartesian coordinates.
    """
    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
                  for pair in angles]
        sticks = np.array(sticks)

    return sticks
Example #13
def sticks_and_ball(gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
                    fractions=[35, 35], snr=20):
    """ Simulate the signal for a Sticks & Ball model.

    Parameters
    -----------
    gtab : GradientTable
        Signal measurement directions.
    d : float
        Diffusivity value.
    S0 : float
        Unweighted signal value.
    angles : array (K,2) or (K, 3)
        List of K polar angles (in degrees) for the sticks or array of K
        sticks as unit vectors.
    fractions : float
        Percentage of each stick.  Remainder to 100 specifies isotropic
        component.
    snr : float
        Signal to noise ratio, assuming Rician noise.  If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal.
    sticks : (M,3)
        Sticks in cartesian coordinates.

    References
    ----------
    .. [1] Behrens et al., "Probabilistic diffusion
           tractography with multiple fiber orientations:  what can we gain?",
           Neuroimage, 2007.

    """
    fractions = [f / 100. for f in fractions]
    f0 = 1 - np.sum(fractions)
    S = np.zeros(len(gtab.bvals))

    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
                  for pair in angles]
        sticks = np.array(sticks)

    for (i, g) in enumerate(gtab.bvecs[1:]):
        S[i + 1] = f0 * np.exp(-gtab.bvals[i + 1] * d) + \
            np.sum([fractions[j] * np.exp(-gtab.bvals[i + 1] * d * np.dot(s, g) ** 2)
                   for (j, s) in enumerate(sticks)])

        S[i + 1] = S0 * S[i + 1]

    S[gtab.b0s_mask] = S0
    S = add_noise(S, snr, S0)

    return S, sticks
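The per-gradient loop above evaluates S_i = S0 * (f0 * exp(-b_i * d) + sum_j f_j * exp(-b_i * d * (s_j . g_i)^2)). A vectorized, noise-free NumPy sketch of the same equation, taking plain arrays instead of a GradientTable (the function name and signature here are illustrative, not dipy API):

import numpy as np

def sticks_and_ball_signal(bvals, bvecs, sticks, fractions, d=0.0015, S0=100.0):
    # Noise-free Sticks & Ball signal; fractions are percentages, as above
    bvals = np.asarray(bvals, dtype=float)
    bvecs = np.asarray(bvecs, dtype=float)
    sticks = np.asarray(sticks, dtype=float)
    f = np.asarray(fractions, dtype=float) / 100.0
    f0 = 1.0 - f.sum()
    proj2 = (bvecs @ sticks.T) ** 2                       # (N, K) squared projections
    S = S0 * (f0 * np.exp(-bvals * d)                     # isotropic "ball"
              + np.exp(-bvals[:, None] * d * proj2) @ f)  # anisotropic "sticks"
    S[bvals == 0] = S0                                    # b=0 volumes keep the unweighted value
    return S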
Example #14
def plot_signal(bvecs,
                signal,
                origin=[0, 0, 0],
                maya=True,
                cmap='jet',
                file_name=None,
                colorbar=False,
                figure=None,
                vmin=None,
                vmax=None,
                offset=0,
                azimuth=60,
                elevation=90,
                roll=0,
                non_neg=False):
    """

    Interpolate a measured signal, using RBF interpolation.

    Parameters
    ----------
    signal:

    bvecs: array (3,n)
        the x,y,z locations where the signal was measured 

    offset : float
        where to place the plotted voxel (on the z axis)

    
    """

    s0 = Sphere(xyz=bvecs.T)
    vertices = s0.vertices
    faces = s0.faces
    x, y, z = vertices.T

    r, phi, theta = geo.cart2sphere(x, y, z)
    x_plot, y_plot, z_plot = geo.sphere2cart(signal, phi, theta)

    if non_neg:
        signal[np.where(signal < 0)] = 0

    # Call and return straightaway:
    return _display_maya_voxel(x_plot,
                               y_plot,
                               z_plot,
                               faces,
                               signal,
                               origin,
                               cmap=cmap,
                               colorbar=colorbar,
                               figure=figure,
                               vmin=vmin,
                               vmax=vmax,
                               file_name=file_name,
                               azimuth=azimuth,
                               elevation=elevation)
Example #15
File: voxel.py Project: nmabhi/dipy
def multi_tensor_odf(odf_verts, mevals, angles, fractions):
    r'''Simulate a Multi-Tensor ODF.

    Parameters
    ----------
    odf_verts : (N,3) ndarray
        Vertices of the reconstruction sphere.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor.
    angles : sequence of 2d tuples,
        Sequence of principal directions for each tensor in polar angles
        or cartesian unit coordinates.
    fractions : sequence of floats,
        Percentages of the fractions for each tensor.

    Returns
    -------
    ODF : (N,) ndarray
        Orientation distribution function.

    Examples
    --------
    Simulate a MultiTensor ODF with two peaks and calculate its exact ODF.

    >>> import numpy as np
    >>> from dipy.sims.voxel import multi_tensor_odf, all_tensor_evecs
    >>> from dipy.data import get_sphere
    >>> sphere = get_sphere('symmetric724')
    >>> vertices, faces = sphere.vertices, sphere.faces
    >>> mevals = np.array(([0.0015, 0.0003, 0.0003],[0.0015, 0.0003, 0.0003]))
    >>> angles = [(0, 0), (90, 0)]
    >>> odf = multi_tensor_odf(vertices, mevals, angles, [50, 50])

    '''

    mf = [f / 100. for f in fractions]

    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [
            sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
            for pair in angles
        ]
        sticks = np.array(sticks)

    odf = np.zeros(len(odf_verts))

    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    for (j, f) in enumerate(mf):
        odf += f * single_tensor_odf(
            odf_verts, evals=mevals[j], evecs=mevecs[j])
    return odf
Example #16
def test_invert_transform():
    n = 100.
    theta = np.arange(n)/n * np.pi  # Limited to 0,pi
    phi = (np.arange(n)/n - .5) * 2 * np.pi  # Limited to -pi,pi
    x, y, z = sphere2cart(1, theta, phi)  # Let's assume they're all unit vecs
    r, new_theta, new_phi = cart2sphere(x, y, z)  # Transform back

    yield assert_array_almost_equal, theta, new_theta
    yield assert_array_almost_equal, phi, new_phi
Example #17
File: voxel.py Project: jgors/dipy
def SticksAndBall(bvals,
                  gradients,
                  d=0.0015,
                  S0=100,
                  angles=[(0, 0), (90, 0)],
                  fractions=[35, 35],
                  snr=20):
    """ Simulating the signal for a Sticks & Ball model 
    
    Based on the paper by Tim Behrens, H.J. Berg, S. Jbabdi, "Probabilistic Diffusion Tractography with multiple fiber orientations
    what can we gain?", Neuroimage, 2007. 
    
    Parameters
    -----------
    bvals : array, shape (N,)
    gradients : array, shape (N,3) also known as bvecs
    d : diffusivity value 
    S0 : unweighted signal value
    angles : array (K,2) list of polar angles (in degrees) for the sticks
        or array (K,3) with sticks as Cartesian unit vectors and K the number of sticks
    fractions : percentage of each stick
    snr : signal to noise ratio assuming gaussian noise. Provide None for no noise.
    
    Returns
    --------
    S : simulated signal
    sticks : sticks in cartesian coordinates 
    
    """

    fractions = [f / 100. for f in fractions]
    f0 = 1 - np.sum(fractions)
    S = np.zeros(len(gradients))

    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    if angles.shape[-1] == 2:
        sticks = [
            sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
            for pair in angles
        ]
        sticks = np.array(sticks)

    for (i, g) in enumerate(gradients[1:]):
        S[i + 1] = f0 * np.exp(-bvals[i + 1] * d) + np.sum([
            fractions[j] * np.exp(-bvals[i + 1] * d * np.dot(s, g)**2)
            for (j, s) in enumerate(sticks)
        ])
        S[i + 1] = S0 * S[i + 1]
    S[0] = S0
    if snr is not None:
        std = S0 / snr
        S = S + np.random.randn(len(S)) * std

    return S, sticks
Example #18
File: voxel.py Project: nmabhi/dipy
def multi_tensor_pdf(pdf_points,
                     mevals,
                     angles,
                     fractions,
                     tau=1 / (4 * np.pi**2)):
    r'''Simulate a Multi-Tensor PDF.

    Parameters
    ----------
    pdf_points : (N, 3) ndarray
        Points to evaluate the PDF.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor.  By default, values typical for prolate
        white matter are used.
    angles : sequence,
        Sequence of principal directions for each tensor in polar angles
        or cartesian unit coordinates.
    fractions : sequence of floats,
        Percentages of the fractions for each tensor.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    pdf : (N,) ndarray,
        Probability density function of the water displacement.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and its Features in Diffusion MRI", PhD Thesis, 2012.

    '''
    mf = [f / 100. for f in fractions]

    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [
            sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
            for pair in angles
        ]
        sticks = np.array(sticks)

    pdf = np.zeros(len(pdf_points))

    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    for j, f in enumerate(mf):
        pdf += f * single_tensor_pdf(
            pdf_points, evals=mevals[j], evecs=mevecs[j], tau=tau)
    return pdf
Example #19
def multi_tensor_odf(odf_verts, mevals, angles, fractions):
    r'''Simulate a Multi-Tensor ODF.

    Parameters
    ----------
    odf_verts : (N,3) ndarray
        Vertices of the reconstruction sphere.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor.
    angles : sequence of 2d tuples,
        Sequence of principal directions for each tensor in polar angles
        or cartesian unit coordinates.
    fractions : sequence of floats,
        Percentages of the fractions for each tensor.

    Returns
    -------
    ODF : (N,) ndarray
        Orientation distribution function.

    Examples
    --------
    Simulate a MultiTensor ODF with two peaks and calculate its exact ODF.

    >>> import numpy as np
    >>> from dipy.sims.voxel import multi_tensor_odf, all_tensor_evecs
    >>> from dipy.data import get_sphere
    >>> sphere = get_sphere('symmetric724')
    >>> vertices, faces = sphere.vertices, sphere.faces
    >>> mevals = np.array(([0.0015, 0.0003, 0.0003],[0.0015, 0.0003, 0.0003]))
    >>> angles = [(0, 0), (90, 0)]
    >>> odf = multi_tensor_odf(vertices, mevals, angles, [50, 50])

    '''

    mf = [f / 100. for f in fractions]

    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
                  for pair in angles]
        sticks = np.array(sticks)

    odf = np.zeros(len(odf_verts))

    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    for (j, f) in enumerate(mf):
        odf += f * single_tensor_odf(odf_verts,
                                     evals=mevals[j], evecs=mevecs[j])
    return odf
Example #20
File: eit.py Project: Garyfallidis/dipy
 def radon_params(self, ang_res=64):
     #calculate radon integration parameters
     phis = np.linspace(0, 2 * np.pi, ang_res)[:-1]
     planars = []
     for phi in phis:
         planars.append(sphere2cart(1, np.pi / 2, phi))
     planars = np.array(planars)
     planarsR = []
     for v in self.odf_vertices:
         R = vec2vec_rotmat(np.array([0, 0, 1]), v)
         planarsR.append(np.dot(R, planars.T).T)
     self.equators = planarsR
     self.equatorn = len(phis)
Example #21
File: eit.py Project: endolith/dipy
 def radon_params(self,ang_res=64):
     #calculate radon integration parameters
     phis=np.linspace(0,2*np.pi,ang_res)[:-1]
     planars=[]
     for phi in phis:
         planars.append(sphere2cart(1,np.pi/2,phi))
     planars=np.array(planars)
     planarsR=[]
     for v in self.odf_vertices:
         R=vec2vec_rotmat(np.array([0,0,1]),v)
         planarsR.append(np.dot(R,planars.T).T)
     self.equators=planarsR
     self.equatorn=len(phis)
Example #22
def multi_tensor_pdf(pdf_points, mevals, angles, fractions,
                     tau=1 / (4 * np.pi ** 2)):
    r'''Simulate a Multi-Tensor PDF.

    Parameters
    ----------
    pdf_points : (N, 3) ndarray
        Points to evaluate the PDF.
    mevals : sequence of 1D arrays,
        Eigen-values for each tensor.  By default, values typical for prolate
        white matter are used.
    angles : sequence,
        Sequence of principal directions for each tensor in polar angles
        or cartesian unit coordinates.
    fractions : sequence of floats,
        Percentages of the fractions for each tensor.
    tau : float,
        diffusion time. By default the value that makes q=sqrt(b).

    Returns
    -------
    pdf : (N,) ndarray,
        Probability density function of the water displacement.

    References
    ----------
    .. [1] Cheng J., "Estimation and Processing of Ensemble Average Propagator
           and its Features in Diffusion MRI", PhD Thesis, 2012.

    '''
    mf = [f / 100. for f in fractions]

    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
                  for pair in angles]
        sticks = np.array(sticks)

    pdf = np.zeros(len(pdf_points))

    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    for j, f in enumerate(mf):
        pdf += f * single_tensor_pdf(pdf_points,
                                     evals=mevals[j], evecs=mevecs[j], tau=tau)
    return pdf
Example #23
File: maya.py Project: qytian/osmosis
def plot_tensor_3d(Tensor, cmap='jet', mode='ADC', file_name=None,
                   origin=[0,0,0], colorbar=False, figure=None, vmin=None,
                   vmax=None, offset=0, azimuth=60, elevation=90, roll=0,
                   scale_factor=1.0, rgb_pdd=False):

    """

    mode: either "ADC", "ellipse" or "pred_sig"

    """
    
    Q = Tensor.Q
    sphere = create_unit_sphere(5)
    vertices = sphere.vertices
    faces = sphere.faces
    x,y,z = vertices.T 

    new_bvecs = np.vstack([x.ravel(), y.ravel(), z.ravel()])
    Tensor = ozt.Tensor(Q, new_bvecs,
                        Tensor.bvals[0] * np.ones(new_bvecs.shape[-1]))

    if mode == 'ADC':
        v = Tensor.ADC * scale_factor
    elif mode == 'ellipse':
        v = Tensor.diffusion_distance * scale_factor
    elif mode == 'pred_sig':
        v = Tensor.predicted_signal(1) * scale_factor
    else:
        raise ValueError("Mode not recognized")
        
    r, phi, theta = geo.cart2sphere(x,y,z)
    x_plot, y_plot, z_plot = geo.sphere2cart(v, phi, theta)

    if rgb_pdd:
        evals, evecs = Tensor.decompose
        xyz = evecs[0]
        r = np.abs(xyz[0])/np.sum(np.abs(xyz))
        g = np.abs(xyz[1])/np.sum(np.abs(xyz))
        b = np.abs(xyz[2])/np.sum(np.abs(xyz))

        color = (r, g, b)
    else:
        color = None
    # Call and return straightaway:
    return _display_maya_voxel(x_plot, y_plot, z_plot, faces, v, origin,
                               cmap=cmap, colorbar=colorbar, color=color,
                               figure=figure,
                               vmin=vmin, vmax=vmax, file_name=file_name,
                               azimuth=azimuth, elevation=elevation)
Example #24
def get_mask(vert, hori):
    delta_v = 45 / 240  # vertical angular step per row (45 deg over 240 rows)
    delta_h = 60 / 320  # horizontal angular step per column (60 deg over 320 columns)
    ds = math.pi / 180  # degrees to radians
    mask = np.zeros((vert, hori, 3))
    for i in range(vert):  # scan rows
        for k in range(hori):  # scan columns
            # Convert each pixel into spherical coordinates:
            # the row maps to the latitude (polar angle phi, 0 to pi),
            # the column maps to the longitude lmd.
            phi = (67.5 + delta_v / 2 + delta_v * i) * ds
            lmd = (-30 + delta_h / 2 + delta_h * k) * ds
            v = sphere2cart(1, phi, lmd)
            mask[i, k, :] = [v[0].tolist(), v[1].tolist(), v[2].tolist()]
    return mask  #in cartesian
Example #25
File: mpl.py Project: zhangerjun/osmosis
def scale_bvecs_by_sig(bvecs, sig):
    """
    Helper function to rescale your bvecs according to some signal, so that
    they don't fall on the unit sphere, but instead are represented in space as
    distance from the origin.
    """

    x, y, z = bvecs

    r, theta, phi = geo.cart2sphere(x, y, z)

    # Simply replace r with sig:
    x, y, z = geo.sphere2cart(sig, theta, phi)

    return x, y, z
Example #26
def scale_bvecs_by_sig(bvecs, sig):
    """
    Helper function to rescale your bvecs according to some signal, so that
    they don't fall on the unit sphere, but instead are represented in space as
    distance from the origin.
    """

    x,y,z = bvecs

    r, theta, phi = geo.cart2sphere(x, y, z)

    # Simply replace r with sig:
    x,y,z = geo.sphere2cart(sig, theta, phi)

    return x,y,z
Example #27
def random_uniform_on_sphere(n=1, coords='xyz'):
    r'''Random unit vectors from a uniform distribution on the sphere.

    Parameters
    -----------
    n : int
        Number of random vectors
    coords : {'xyz', 'radians', 'degrees'}
        'xyz' for cartesian form
        'radians' for spherical form in rads
        'degrees' for spherical form in degrees

    Notes
    ------
    The uniform distribution on the sphere, parameterized by spherical
    coordinates $(\theta, \phi)$, should verify $\phi\sim U[0,2\pi]$, while
    $z=\cos(\theta)\sim U[-1,1]$.

    References
    -----------
    .. [1] http://mathworld.wolfram.com/SpherePointPicking.html.

    Returns
    --------
    X : array, shape (n,3) if coords='xyz' or shape (n,2) otherwise
        Uniformly distributed vectors on the unit sphere.

    Examples
    ---------
    >>> from dipy.core.sphere_stats import random_uniform_on_sphere
    >>> X = random_uniform_on_sphere(4, 'radians')
    >>> X.shape == (4, 2)
    True
    >>> X = random_uniform_on_sphere(4, 'xyz')
    >>> X.shape == (4, 3)
    True
    '''
    z = np.random.uniform(-1, 1, n)
    theta = np.arccos(z)
    phi = np.random.uniform(0, 2*np.pi, n)
    if coords == 'xyz':
        r = np.ones(n)
        return np.vstack(geometry.sphere2cart(r, theta, phi)).T
    angles = np.vstack((theta, phi)).T
    if coords == 'radians':
        return angles
    if coords == 'degrees':
        return np.rad2deg(angles)
Example #28
def random_uniform_on_sphere(n=1, coords='xyz'):
    r"""Random unit vectors from a uniform distribution on the sphere.

    Parameters
    -----------
    n : int
        Number of random vectors
    coords : {'xyz', 'radians', 'degrees'}
        'xyz' for cartesian form
        'radians' for spherical form in rads
        'degrees' for spherical form in degrees

    Notes
    ------
    The uniform distribution on the sphere, parameterized by spherical
    coordinates $(\theta, \phi)$, should verify $\phi\sim U[0,2\pi]$, while
    $z=\cos(\theta)\sim U[-1,1]$.

    References
    -----------
    .. [1] http://mathworld.wolfram.com/SpherePointPicking.html.

    Returns
    --------
    X : array, shape (n,3) if coords='xyz' or shape (n,2) otherwise
        Uniformly distributed vectors on the unit sphere.

    Examples
    ---------
    >>> from dipy.core.sphere_stats import random_uniform_on_sphere
    >>> X = random_uniform_on_sphere(4, 'radians')
    >>> X.shape == (4, 2)
    True
    >>> X = random_uniform_on_sphere(4, 'xyz')
    >>> X.shape == (4, 3)
    True
    """
    z = np.random.uniform(-1, 1, n)
    theta = np.arccos(z)
    phi = np.random.uniform(0, 2 * np.pi, n)
    if coords == 'xyz':
        r = np.ones(n)
        return np.vstack(geometry.sphere2cart(r, theta, phi)).T
    angles = np.vstack((theta, phi)).T
    if coords == 'radians':
        return angles
    if coords == 'degrees':
        return np.rad2deg(angles)
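The recipe in the Notes section (phi ~ U[0, 2*pi] and z = cos(theta) ~ U[-1, 1]) is enough to reproduce the 'xyz' branch without dipy; a minimal sketch, assuming only NumPy:

import numpy as np

def random_unit_vectors(n, seed=None):
    # Uniform on the sphere: z = cos(theta) uniform in [-1, 1], phi uniform in [0, 2*pi)
    rng = np.random.default_rng(seed)
    z = rng.uniform(-1.0, 1.0, n)
    phi = rng.uniform(0.0, 2.0 * np.pi, n)
    s = np.sqrt(1.0 - z ** 2)  # sin(theta)
    return np.column_stack((s * np.cos(phi), s * np.sin(phi), z))

X = random_unit_vectors(4)
print(X.shape, np.allclose(np.linalg.norm(X, axis=1), 1.0))  # (4, 3) True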
Example #29
    def _tensor_helper(self, theta, phi):
        """
        This code is used in all three error functions, so we write it out
        only once here
        """
        # Convert to cartesian coordinates:
        x, y, z = geo.sphere2cart(1, theta, phi)
        bvec = [x, y, z]
        # decompose is an auto-attr of the tensor object, so is only run once
        # and then cached:
        evals, evecs = self.response_function.decompose
        rot = ozu.calculate_rotation(bvec, evecs[0])
        rot_evecs = evecs * rot
        rot_tensor = ozt.tensor_from_eigs(rot_evecs, evals,
                                          self.bvecs[:, self.b_idx],
                                          self.bvals[self.b_idx])
        return rot_tensor
Example #30
def simulations_dipy(bvals,gradients,d=0.0015,S0=100,angles=[(0,0),(90,0)],fractions=[35,35],snr=20):
    
    fractions=[f/100. for f in fractions]    
    f0=1-np.sum(fractions)    
    S=np.zeros(len(gradients))        
    sticks=[ sphere2cart(1,np.deg2rad(pair[0]),np.deg2rad(pair[1]))  for pair in angles]
    sticks=np.array(sticks)    
    for (i,g) in enumerate(gradients[1:]):
        S[i+1]=f0*np.exp(-bvals[i+1]*d)+ np.sum([fractions[j]*np.exp(-bvals[i+1]*d*np.dot(s,g)**2) for (j,s) in enumerate(sticks)])
        S[i+1]=S0*S[i+1]    
    S[0]=S0
    
    if snr!=None:
        std=S0/snr
        S=S+np.random.randn(len(S))*std
    
    return S,sticks
Example #31
    def _tensor_helper(self, theta, phi):
        """
        This code is used in all three error functions, so we write it out
        only once here
        """
        # Convert to cartesian coordinates:
        x, y, z = geo.sphere2cart(1, theta, phi)
        bvec = [x, y, z]
        # decompose is an auto-attr of the tensor object, so is only run once
        # and then cached:
        evals, evecs = self.response_function.decompose
        rot = ozu.calculate_rotation(bvec, evecs[0])
        rot_evecs = evecs * rot
        rot_tensor = ozt.tensor_from_eigs(rot_evecs,
                                          evals,
                                          self.bvecs[:, self.b_idx],
                                          self.bvals[:, self.b_idx])
        return rot_tensor
Example #32
File: voxel.py Project: jgors/dipy
def SticksAndBall(bvals,gradients,d=0.0015,S0=100,angles=[(0,0),(90,0)],fractions=[35,35],snr=20):
    """ Simulating the signal for a Sticks & Ball model 
    
    Based on the paper by Tim Behrens, H.J. Berg, S. Jbabdi, "Probabilistic Diffusion Tractography with multiple fiber orientations
    what can we gain?", Neuroimage, 2007. 
    
    Parameters
    -----------
    bvals : array, shape (N,)
    gradients : array, shape (N,3) also known as bvecs
    d : diffusivity value 
    S0 : unweighted signal value
    angles : array (K,2) list of polar angles (in degrees) for the sticks
        or array (K,3) with sticks as Cartesian unit vectors and K the number of sticks
    fractions : percentage of each stick
    snr : signal to noise ratio assuming gaussian noise. Provide None for no noise.
    
    Returns
    --------
    S : simulated signal
    sticks : sticks in cartesian coordinates 
    
    """
    
    fractions=[f/100. for f in fractions]    
    f0=1-np.sum(fractions)    
    S=np.zeros(len(gradients))
    
    angles=np.array(angles)
    if angles.shape[-1]==3:
        sticks=angles
    if angles.shape[-1]==2:
        sticks=[ sphere2cart(1,np.deg2rad(pair[0]),np.deg2rad(pair[1]))  for pair in angles]    
        sticks=np.array(sticks)
    
    for (i,g) in enumerate(gradients[1:]):
        S[i+1]=f0*np.exp(-bvals[i+1]*d)+ np.sum([fractions[j]*np.exp(-bvals[i+1]*d*np.dot(s,g)**2) for (j,s) in enumerate(sticks)])
        S[i+1]=S0*S[i+1]    
    S[0]=S0    
    if snr is not None:
        std=S0/snr
        S=S+np.random.randn(len(S))*std
    
    return S,sticks
Example #33
def rotate_needles(needles=[(0,0),(90,0),(90,90)],angles=(90,0,90)):

    theta,phi,psi=angles
    
    rot_theta = np.deg2rad(theta)
    rot_phi = np.deg2rad(phi)
    rot_psi = np.deg2rad(psi)
    #ANGLES FOR ROTATION IN EULER 
    
    v = np.array([np.cos(rot_phi)*np.sin(rot_theta),np.sin(rot_phi)*np.sin(rot_theta),np.cos(rot_theta)])
    #print v
    k_cross = np.array([[0,-v[2],v[1]],[v[2],0,-v[0]],[-v[1],v[0],0]])
    #print k_cross
    #rot = rotation_vec2mat(r)
    rot = np.eye(3)+np.sin(rot_psi)*k_cross+(1-np.cos(rot_psi))*np.dot(k_cross,k_cross)
    #print rot
    
    #needles=[(0,0),(90,0),(90,90)]
    #INITIAL NEEDLES in (theta, phi)
    
    needle_rads = []
    for n in needles:
        needle_rads+=[[1]+list(np.deg2rad(n))]
    #print needle_rads 
    #initial needles in (r, theta, phi)
    
    rad_needles=[]
    for n in needle_rads:
        rad_needles+=[cart2sphere(*tuple(np.dot(rot,sphere2cart(*n))))[1:3]]
    #print rad_needles
    #ROTATED NEEDLES IN SPHERICAL COORDS (RADIANS)
    
    deg_angles = []
    for n in rad_needles:
        a = []
        for c in n:
            a+=[np.rad2deg(c)]
        deg_angles += [a]
    #print deg_angles
    #ROTATED NEEDLES IN SPHERICAL COORDS (DEGREES)
    
    return deg_angles
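The matrix rot built above, I + sin(psi)*K + (1 - cos(psi))*K^2 with K the cross-product matrix of the unit axis v, is Rodrigues' rotation formula. A small standalone helper that isolates that step (an illustrative sketch, not a function from dipy or the snippet's project):

import numpy as np

def rodrigues_rotation(axis, angle):
    # Rotation matrix for a rotation of `angle` radians about the unit vector `axis`
    axis = np.asarray(axis, dtype=float)
    axis = axis / np.linalg.norm(axis)
    K = np.array([[0.0, -axis[2], axis[1]],
                  [axis[2], 0.0, -axis[0]],
                  [-axis[1], axis[0], 0.0]])
    return np.eye(3) + np.sin(angle) * K + (1.0 - np.cos(angle)) * (K @ K)

# Rotating +x by 90 degrees about +z gives +y:
print(np.allclose(rodrigues_rotation([0, 0, 1], np.pi / 2) @ [1, 0, 0], [0, 1, 0]))  # True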
Example #34
File: maya.py Project: qytian/osmosis
def plot_signal(bvecs, signal, origin=[0,0,0],
                maya=True, cmap='jet', file_name=None,
                colorbar=False, figure=None, vmin=None, vmax=None,
                offset=0, azimuth=60, elevation=90, roll=0):

    """

    Interpolate a measured signal, using RBF interpolation.

    Parameters
    ----------
    signal:

    bvecs: array (3,n)
        the x,y,z locations where the signal was measured 

    offset : float
        where to place the plotted voxel (on the z axis)

    
    """

    s0 = Sphere(xyz=bvecs.T)
    vertices = s0.vertices    
    faces = s0.faces
    x,y,z = vertices.T 

    r, phi, theta = geo.cart2sphere(x,y,z)
    x_plot, y_plot, z_plot = geo.sphere2cart(signal, phi, theta)


    # Call and return straightaway:
    return _display_maya_voxel(x_plot, y_plot, z_plot, faces,
                               signal, origin,
                               cmap=cmap, colorbar=colorbar, figure=figure,
                               vmin=vmin, vmax=vmax, file_name=file_name,
                               azimuth=azimuth, elevation=elevation)
Example #35
def compare_dni_dsi_gqi_gqi2_eit():

    #stop
    
    btable=np.loadtxt(get_data('dsi515btable'))
    bvals=btable[:,0]
    bvecs=btable[:,1:]
    
    type=3
    axesn=200
    SNR=20
    
    data,gold,angs,anglesn,axesn,angles=generate_gold_data(bvals,bvecs,fibno=type,axesn=axesn,SNR=SNR)
    
    gq=GeneralizedQSampling(data,bvals,bvecs,1.2,odf_sphere='symmetric642',squared=False,save_odfs=True)    
    gq2=GeneralizedQSampling(data,bvals,bvecs,3.,odf_sphere='symmetric642',squared=True,save_odfs=True)
    
    ds=DiffusionSpectrum(data,bvals,bvecs,odf_sphere='symmetric642',auto=False,save_odfs=True)
    ds.filter_width=32.    
    ds.update()
    ds.fit()
    
    ei=EquatorialInversion(data,bvals,bvecs,odf_sphere='symmetric642',auto=False,save_odfs=True,fast=True)    
    ei.radius=np.arange(0,5,0.1)
    ei.gaussian_weight=None#0.01
    ei.set_operator('laplacian')
    ei.update()
    ei.fit()
    
    ei2=EquatorialInversion(data,bvals,bvecs,odf_sphere='symmetric642',auto=False,save_odfs=True,fast=True)    
    ei2.radius=np.arange(0,5,0.1)
    ei2.gaussian_weight=None
    ei2.set_operator('laplap')
    ei2.update()
    ei2.fit()
    
    ei3=EquatorialInversion(data,bvals,bvecs,odf_sphere='symmetric642',auto=False,save_odfs=True,fast=True)    
    ei3.radius=np.arange(0,5,0.1)
    ei3.gaussian_weight=None
    ei3.set_operator('signal')
    ei3.update()
    ei3.fit()    
    
    """
    blobs=np.zeros((2,4,642))
    
    no=200
    blobs[0,0,:]=gq.ODF[no]
    blobs[0,1,:]=gq2.ODF[no]
    blobs[0,2,:]=ds.ODF[no]
    blobs[0,3,:]=ei.ODF[no]
    no=399
    blobs[1,0,:]=gq.ODF[no]
    blobs[1,1,:]=gq2.ODF[no]
    blobs[1,2,:]=ds.ODF[no]
    blobs[1,3,:]=ei.ODF[no]       
    show_blobs(blobs[None,:,:,:],ei.odf_vertices,ei.odf_faces,1.2)
    """
    #stop
    
    vts=gq.odf_vertices
    
    def simple_peaks(ODF,faces,thr):
        x,g=ODF.shape
        PK=np.zeros((x,5))
        IN=np.zeros((x,5))
        for (i,odf) in enumerate(ODF):
            peaks,inds=peak_finding(odf,faces)
            ibigp=np.where(peaks>thr*peaks[0])[0]
            l=len(ibigp)
            if l>3:
                l=3
            PK[i,:l]=peaks[:l]
            IN[i,:l]=inds[:l]
        return PK,IN
    
    
    thresh=0.5
    
    PK,IN=simple_peaks(ds.ODF,ds.odf_faces,thresh)
    res,me,st =do_compare(gold,vts,PK,IN,0,anglesn,axesn,type)
    
    PK,IN=simple_peaks(gq.ODF,ds.odf_faces,thresh)
    res2,me2,st2 =do_compare(gold,vts,PK,IN,0,anglesn,axesn,type)
    
    PK,IN=simple_peaks(gq2.ODF,ds.odf_faces,thresh)
    res3,me3,st3 =do_compare(gold,vts,PK,IN,0,anglesn,axesn,type)
    
    PK,IN=simple_peaks(ei.ODF,ds.odf_faces,thresh)
    res4,me4,st4 =do_compare(gold,vts,PK,IN,0,anglesn,axesn,type)
        
    PK,IN=simple_peaks(ei2.ODF,ds.odf_faces,thresh)
    res5,me5,st5 =do_compare(gold,vts,PK,IN,0,anglesn,axesn,type)
      
    PK,IN=simple_peaks(ei3.ODF,ei3.odf_faces,thresh)
    res6,me6,st6 =do_compare(gold,vts,PK,IN,0,anglesn,axesn,type)
    
    #res7,me7,st7 =do_compare(ei2.PK,ei2.IN,0,anglesn,axesn,type)
    
    if type==1:    
        plt.figure(1)       
        plt.plot(res,'r',label='DSI')
        plt.plot(res2,'g',label='GQI')
        plt.plot(res3,'b',label='GQI2')
        plt.plot(res4,'k',label='EITL')
        plt.plot(res5,'k--',label='EITL2')
        plt.plot(res6,'k-.',label='EITS')
        #plt.xlabel('angle')
        #plt.ylabel('resolution')
        #plt.title('Angular accuracy')
        plt.legend()
        plt.show()   
    else:
        
        if type==3:
            x,y,z=sphere2cart(np.ones(len(angles)),np.deg2rad(angles),np.zeros(len(angles)))
            x2,y2,z2=sphere2cart(np.ones(len(angles)),np.deg2rad(angles),np.deg2rad(120*np.ones(len(angles))))
            angles2=[]        
            for i in range(len(x)):
                angles2.append(np.rad2deg(np.arccos(np.dot([x[i],y[i],z[i]],[x2[i],y2[i],z2[i]]))))
            angles=angles2    
            
        plt.figure(1)
        plt.plot(angles,me,'r',linewidth=3.,label='DSI')
        plt.plot(angles,me2,'g',linewidth=3.,label='GQI')
        plt.plot(angles,me3,'b',linewidth=3.,label='GQI2')
        plt.plot(angles,me4,'k',linewidth=3.,label='EITL')
        plt.plot(angles,me5,'k--',linewidth=3.,label='EITL2')
        plt.plot(angles,me6,'k-.',linewidth=3.,label='EITS')
        #plt.plot(angles,me7,'r--',linewidth=3.,label='EITL2')
        plt.xlabel('angle')
        plt.ylabel('similarity')
        
        title='Angular similarity of ' + str(type) + '-fibres crossing with SNR ' + str(SNR)
        plt.title(title)
        plt.legend(loc='center right')
        plt.savefig('/tmp/test.png',dpi=300)
        plt.show()
Example #36
def sticks_and_ball(bvals,
                    gradients,
                    d=0.0015,
                    S0=100,
                    angles=[(0, 0), (90, 0)],
                    fractions=[35, 35],
                    snr=20):
    """ Simulate the signal for a Sticks & Ball model.

    Parameters
    -----------
    bvals : (N,) ndarray
        B-values for measurements.
    gradients : (N,3) ndarray
        Also known as b-vectors.
    d : float
        Diffusivity value.
    S0 : float
        Unweighted signal value.
    angles : array (K,2) or (M,3)
        List of K polar angles (in degrees) for the sticks or array of M
        sticks as Cartesian unit vectors.
    fractions : float
        Percentage of each stick.
    snr : float
        Signal to noise ratio, assuming gaussian noise.  If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal.
    sticks : (M,3)
        Sticks in cartesian coordinates.

    References
    ----------
    .. [1] Behrens et al., "Probabilistic diffusion
           tractography with multiple fiber orientations:  what can we gain?",
           Neuroimage, 2007.

    """

    fractions = [f / 100. for f in fractions]
    f0 = 1 - np.sum(fractions)
    S = np.zeros(len(gradients))

    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [
            sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
            for pair in angles
        ]
        sticks = np.array(sticks)

    for (i, g) in enumerate(gradients[1:]):
        S[i + 1] = f0 * np.exp(-bvals[i + 1] * d) + \
            np.sum([fractions[j] * np.exp(-bvals[i + 1] * d * np.dot(s, g) ** 2)
                    for (j, s) in enumerate(sticks)])

        S[i + 1] = S0 * S[i + 1]

    S[0] = S0
    if snr is not None:
        std = S0 / snr
        S = S + np.random.randn(len(S)) * std

    return S, sticks
Example #37
def sticks_and_ball(bvals, gradients, d=0.0015, S0=100, angles=[(0,0), (90,0)],
                    fractions=[35,35], snr=20):
    """ Simulate the signal for a Sticks & Ball model.

    Parameters
    -----------
    bvals : (N,) ndarray
        B-values for measurements.
    gradients : (N,3) ndarray
        Also known as b-vectors.
    d : float
        Diffusivity value.
    S0 : float
        Unweighted signal value.
    angles : array (K,2) or (M,3)
        List of K polar angles (in degrees) for the sticks or array of M
        sticks as Cartesian unit vectors.
    fractions : float
        Percentage of each stick.
    snr : float
        Signal to noise ratio, assuming gaussian noise.  If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal.
    sticks : (M,3)
        Sticks in cartesian coordinates.

    References
    ----------
    .. [1] Behrens et al., "Probabilistic diffusion
           tractography with multiple fiber orientations:  what can we gain?",
           Neuroimage, 2007.

    """

    fractions = [f / 100. for f in fractions]
    f0 = 1 - np.sum(fractions)
    S = np.zeros(len(gradients))

    angles=np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
                  for pair in angles]
        sticks = np.array(sticks)

    for (i, g) in enumerate(gradients[1:]):
        S[i + 1] = f0 * np.exp(-bvals[i + 1] * d) + \
            np.sum([fractions[j] * np.exp(-bvals[i + 1] * d * np.dot(s, g) ** 2)
                    for (j, s) in enumerate(sticks)])

        S[i + 1] = S0 * S[i + 1]

    S[0] = S0
    if snr is not None:
        std = S0 / snr
        S = S + np.random.randn(len(S)) * std

    return S, sticks
Example #38
def test_kurtosis_maximum():
    # TEST 1
    # simulate two crossing fibers intersecting at 70 degrees. The first fiber
    # is aligned to the x-axis while the second fiber is aligned to the x-z
    # plane with an angular deviation of 70 degrees from the first one.
    # According to Neto Henriques et al, 2015 (NeuroImage 111: 85-99), the
    # kurtosis tensor of this simulation will have a maximum aligned to the
    # y-axis
    angles = [(90, 0), (90, 0), (20, 0), (20, 0)]
    signal_70, dt_70, kt_70 = multi_tensor_dki(gtab_2s, mevals_cross, S0=100,
                                               angles=angles,
                                               fractions=frac_cross, snr=None)
    # prepare inputs
    dkiM = dki.DiffusionKurtosisModel(gtab_2s, fit_method="WLS")
    dkiF = dkiM.fit(signal_70)
    MD = dkiF.md
    kt = dkiF.kt
    R = dkiF.evecs
    evals = dkiF.evals
    dt = lower_triangular(np.dot(np.dot(R, np.diag(evals)), R.T))
    sphere = get_sphere('symmetric724')

    # compute maxima
    k_max_cross, max_dir = dki._voxel_kurtosis_maximum(dt, MD, kt, sphere,
                                                       gtol=1e-5)

    yaxis = np.array([0., 1., 0.])
    cos_angle = np.abs(np.dot(max_dir[0], yaxis))
    assert_almost_equal(cos_angle, 1.)

    # TEST 2
    # test the function on cases of well-aligned fibers oriented in a randomly
    # defined direction. According to Neto Henriques et al, 2015 (NeuroImage
    # 111: 85-99), the kurtosis maximum lies along any direction perpendicular
    # to the fiber direction. Moreover, according to multi-compartmental
    # simulations, kurtosis in this direction has to be equal to:
    fie = 0.49
    ADi = 0.00099
    ADe = 0.00226
    RDi = 0
    RDe = 0.00087
    RD = fie*RDi + (1-fie)*RDe
    RK = 3 * fie * (1-fie) * ((RDi-RDe) / RD) ** 2

    # prepare simulation:
    theta = random.uniform(0, 180)
    phi = random.uniform(0, 320)
    angles = [(theta, phi), (theta, phi)]
    mevals = np.array([[ADi, RDi, RDi], [ADe, RDe, RDe]])
    frac = [fie*100, (1 - fie)*100]
    signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                      fractions=frac, snr=None)

    # prepare inputs
    dkiM = dki.DiffusionKurtosisModel(gtab_2s, fit_method="WLS")
    dkiF = dkiM.fit(signal)
    MD = dkiF.md
    kt = dkiF.kt
    R = dkiF.evecs
    evals = dkiF.evals
    dt = lower_triangular(np.dot(np.dot(R, np.diag(evals)), R.T))

    # compute maxima
    k_max, max_dir = dki._voxel_kurtosis_maximum(dt, MD, kt, sphere, gtol=1e-5)

    # check if max direction is perpendicular to fiber direction
    fdir = np.array([sphere2cart(1., np.deg2rad(theta), np.deg2rad(phi))])
    cos_angle = np.abs(np.dot(max_dir[0], fdir[0]))
    assert_almost_equal(cos_angle, 0., decimal=5)

    # check if max direction is equal to expected value
    assert_almost_equal(k_max, RK)

    # According to Neto Henriques et al., 2015 (NeuroImage 111: 85-99),
    # e.g. see figure 1 of this article, the kurtosis maximum for the first
    # test is also equal to the maximum kurtosis value of well-aligned fibers,
    # since the simulation parameters (apart from fiber directions) are equal
    assert_almost_equal(k_max_cross, RK)

    # Test 3 - Test performance when kurtosis is spherical - this case can be
    # problematic since a spherical kurtosis does not have a maximum
    k_max, max_dir = dki._voxel_kurtosis_maximum(dt_sph, np.mean(evals_sph),
                                                 kt_sph, sphere, gtol=1e-2)
    assert_almost_equal(k_max, Kref_sphere)

    # Test 4 - Test performance when kt has all elements zero - this case can
    # be problematic since it does not have a maximum
    k_max, max_dir = dki._voxel_kurtosis_maximum(dt_sph, np.mean(evals_sph),
                                                 np.zeros(15), sphere,
                                                 gtol=1e-2)
    assert_almost_equal(k_max, 0.0)
Example #39
def plot_tensor_3d(Tensor,
                   cmap='jet',
                   mode='ADC',
                   file_name=None,
                   origin=[0, 0, 0],
                   colorbar=False,
                   figure=None,
                   vmin=None,
                   vmax=None,
                   offset=0,
                   azimuth=60,
                   elevation=90,
                   roll=0,
                   scale_factor=1.0,
                   rgb_pdd=False):
    """

    mode: either "ADC", "ellipse" or "pred_sig"

    """

    Q = Tensor.Q
    sphere = create_unit_sphere(5)
    vertices = sphere.vertices
    faces = sphere.faces
    x, y, z = vertices.T

    new_bvecs = np.vstack([x.ravel(), y.ravel(), z.ravel()])
    Tensor = ozt.Tensor(Q, new_bvecs,
                        Tensor.bvals[0] * np.ones(new_bvecs.shape[-1]))

    if mode == 'ADC':
        v = Tensor.ADC * scale_factor
    elif mode == 'ellipse':
        v = Tensor.diffusion_distance * scale_factor
    elif mode == 'pred_sig':
        v = Tensor.predicted_signal(1) * scale_factor
    else:
        raise ValueError("Mode not recognized")

    r, phi, theta = geo.cart2sphere(x, y, z)
    x_plot, y_plot, z_plot = geo.sphere2cart(v, phi, theta)

    if rgb_pdd:
        evals, evecs = Tensor.decompose
        xyz = evecs[0]
        r = np.abs(xyz[0]) / np.sum(np.abs(xyz))
        g = np.abs(xyz[1]) / np.sum(np.abs(xyz))
        b = np.abs(xyz[2]) / np.sum(np.abs(xyz))

        color = (r, g, b)
    else:
        color = None
    # Call and return straightaway:
    return _display_maya_voxel(x_plot,
                               y_plot,
                               z_plot,
                               faces,
                               v,
                               origin,
                               cmap=cmap,
                               colorbar=colorbar,
                               color=color,
                               figure=figure,
                               vmin=vmin,
                               vmax=vmax,
                               file_name=file_name,
                               azimuth=azimuth,
                               elevation=elevation)
Example #40
def sig_on_sphere(bvecs, val, fig=None, sphere_dim=1000, r_from_val=False,
                  **kwargs):
    """
    Present values on a sphere.

    Parameters
    ----------
    bvecs: co-linear to the array with data, the theta, phi on the circle
        from which the data was taken

    val: array with data

    fig: matplotlib figure, optional. Default: make new figure

    sphere_dim: The data will be interpolated into a sphere_dim by sphere_dim
        grid 

    r_from_val: Whether to double-code the values both by color and by the
        distance from the center

    cmap: Specify a matplotlib colormap to use for coloring the data. 

    Additional kwargs can be passed to the matplotlib.pyplot.plot3D command.

    """

    # We don't need the r output
    _, theta, phi = geo.cart2sphere(bvecs[0], bvecs[1], bvecs[2])

    if fig is None:
        fig = plt.figure()
        ax = fig.add_subplot(1,1,1, projection='3d')

    # Get the cmap argument out of your kwargs, or use the default:
    cmap = kwargs.pop('cmap', matplotlib.cm.RdBu)
    
    u = np.linspace(0, 2 * np.pi, sphere_dim)
    v = np.linspace(0, np.pi, sphere_dim)

    x_inter = 10 * np.outer(np.cos(u), np.sin(v))
    y_inter = 10 * np.outer(np.sin(u), np.sin(v))
    inter_val = np.zeros(x_inter.shape).ravel()
    z_inter = np.outer(np.ones(np.size(u)), np.cos(v))
    grid_r, grid_theta, grid_phi = geo.cart2sphere(x_inter, y_inter, z_inter)
    for idx, this in enumerate(zip(grid_theta.ravel(), grid_phi.ravel())):
        this_theta = np.abs(theta - np.array(this[0]))
        this_phi = np.abs(phi - np.array(this[1]))
        # The closest theta and phi:
        min_idx = np.argmin(this_theta + this_phi)
        inter_val[idx] = val[min_idx]

    # Calculate the values from the colormap that will be used for the
    # face-colors: 
    c = np.array(color_from_val(inter_val,
                                min_val=np.min(val),
                                max_val=np.max(val),
                                cmap_or_lut=cmap))
    
    new_shape = (x_inter.shape + (3,))

    c = np.reshape(c, new_shape)

    if r_from_val:
        # Use the interpolated values to set z: 
        new_x, new_y, new_z = geo.sphere2cart(inter_val.reshape(x_inter.shape),
                                              grid_theta, grid_phi)

        ax.plot_surface(new_x, new_y, new_z, rstride=4, cstride=4,
                        facecolors=c, **kwargs)
    else: 
        ax.plot_surface(x_inter, y_inter, z_inter, rstride=4, cstride=4,
                        facecolors=c, **kwargs)

    return fig
Example #41
File: mpl.py Project: zhangerjun/osmosis
def sig_on_sphere(bvecs,
                  val,
                  fig=None,
                  sphere_dim=1000,
                  r_from_val=False,
                  **kwargs):
    """
    Present values on a sphere.

    Parameters
    ----------
    bvecs: co-linear to the array with data, the theta, phi on the circle
        from which the data was taken

    val: array with data

    fig: matplotlib figure, optional. Default: make new figure

    sphere_dim: The data will be interpolated into a sphere_dim by sphere_dim
        grid 

    r_from_val: Whether to double-code the values both by color and by the
        distance from the center

    cmap: Specify a matplotlib colormap to use for coloring the data. 

    Additional kwargs can be passed to the matplotlib.pyplot.plot3D command.

    """

    # We don't need the r output
    _, theta, phi = geo.cart2sphere(bvecs[0], bvecs[1], bvecs[2])

    if fig is None:
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1, projection='3d')

    # Get the cmap argument out of your kwargs, or use the default:
    cmap = kwargs.pop('cmap', matplotlib.cm.RdBu)

    u = np.linspace(0, 2 * np.pi, sphere_dim)
    v = np.linspace(0, np.pi, sphere_dim)

    x_inter = 10 * np.outer(np.cos(u), np.sin(v))
    y_inter = 10 * np.outer(np.sin(u), np.sin(v))
    inter_val = np.zeros(x_inter.shape).ravel()
    z_inter = np.outer(np.ones(np.size(u)), np.cos(v))
    grid_r, grid_theta, grid_phi = geo.cart2sphere(x_inter, y_inter, z_inter)
    for idx, this in enumerate(zip(grid_theta.ravel(), grid_phi.ravel())):
        this_theta = np.abs(theta - np.array(this[0]))
        this_phi = np.abs(phi - np.array(this[1]))
        # The closest theta and phi:
        min_idx = np.argmin(this_theta + this_phi)
        inter_val[idx] = val[min_idx]

    # Calculate the values from the colormap that will be used for the
    # face-colors:
    c = np.array(
        color_from_val(inter_val,
                       min_val=np.min(val),
                       max_val=np.max(val),
                       cmap_or_lut=cmap))

    new_shape = (x_inter.shape + (3, ))

    c = np.reshape(c, new_shape)

    if r_from_val:
        # Use the interpolated values to set z:
        new_x, new_y, new_z = geo.sphere2cart(inter_val.reshape(x_inter.shape),
                                              grid_theta, grid_phi)

        ax.plot_surface(new_x,
                        new_y,
                        new_z,
                        rstride=4,
                        cstride=4,
                        facecolors=c,
                        **kwargs)
    else:
        ax.plot_surface(x_inter,
                        y_inter,
                        z_inter,
                        rstride=4,
                        cstride=4,
                        facecolors=c,
                        **kwargs)

    return fig
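A minimal usage sketch for the sig_on_sphere helper above, under the assumption that the osmosis geometry module (geo) and the color_from_val helper it relies on are importable; the bvecs and val arrays here are made-up toy data:

import numpy as np

# Toy data (hypothetical): 150 random unit vectors and one value per vector.
bvecs = np.random.randn(3, 150)
bvecs /= np.sqrt(np.sum(bvecs ** 2, axis=0))
val = np.abs(bvecs[2])  # e.g. color the sphere by the |z| component

# A coarse grid keeps the nearest-neighbor loop above fast for a quick look:
fig = sig_on_sphere(bvecs, val, sphere_dim=100, r_from_val=False)
fig.savefig('sig_on_sphere_demo.png')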
Example #42
0
def spkm(data,
         k,
         weights=None,
         seeds=None,
         antipodal=True,
         max_iter=1000,
         calc_sse=True):
    """
   Spherical k means. 

   Parameters
   ----------
   data : 2d float array
        Unit vectors on the hyper-sphere. This array has n data points rows, by
        m feature columns.

   k : int
       The number of clusters

   weights : 1d float array 
       Some data-points may be more important than others, so they will receive
       more weighting in determining the centroids 

   seeds : float array (optional).
        If n by k array is provided, these are used as centroids to initialize
        the algorithm. Otherwise, random centroids are chosen

   antipodal : bool
      In cases in which antipodal symmetry can be assumed, we want to cluster
      together points that are pointing close to *opposite* directions. In that
      case, correlations between putative centroids and each data point treats
      correlation and anti-correlation in equal vein.

   max_iter : int
       If you run this many iterations without convergence, warn and exit.

   calc_sse : bool
      Whether to calculate SSE or not. 

   Returns
   -------
   mu : the estimated centroid 
   y_n : assignments of each data point to a centroid
   SSE : the sum of squared error in centroid-to-data-point assignment
   
   """
    # 0. Preliminaries:
    # For the calculation of the centroids, we want to make sure that the data
    # are all pointing into the same hemisphere (expects 3 by n):
    data = ozu.vecs2hemi(data.T).T
    # If no weights are provided treat all data points equally:
    if weights is None:
        weights = np.ones(data.shape[0])

    # 1. Initialization:
    if seeds is None:
        # Choose random seeds.
        # thetas are uniform [0,pi]:
        theta = np.random.rand(k) * np.pi
        # phis are uniform [0, 2pi]
        phi = np.random.rand(k) * 2 * np.pi
        # They're all unit vectors:
        r = np.ones(k)
        # et voila:
        seeds = np.array(geo.sphere2cart(theta, phi, r)).T

    mu = seeds.copy()
    is_changing = True
    last_y_n = False
    n_iter = 0
    while is_changing:

        # Make sure they're all unit vectors, so that correlation below is scaled
        # properly:
        mu = np.array([ozu.unit_vector(x) for x in mu])
        data = np.array([ozu.unit_vector(x) for x in data])

        # 2. Data assignment:
        # Calculate all the correlations in one swoop:
        corr = np.dot(data, mu.T)
        # In cases where antipodal symmetry is assumed,
        if antipodal:
            corr = np.abs(corr)

        # This chooses the centroid for each one:
        y_n = np.argmax(corr, -1)

        # 3. Centroid estimation:
        for this_k in range(k):
            idx = np.where(y_n == this_k)
            if len(idx[0]) > 0:
                # The average will be based on the data points that are considered
                # in this centroid with a weighted average:
                this_sum = np.dot(weights[idx], data[idx])

                # This goes into the volume of the sphere, so we renormalize to the
                # surface (or to the origin, if it's 0):
                this_norm = ozu.l2_norm(this_sum)

                if this_norm > 0:
                    # Scale by the mean of the weights
                    mu[this_k] = (this_sum / this_norm) * np.mean(weights[idx])
                else:
                    # A zero-length sum means there is no preferred direction;
                    # collapse this centroid to the origin:
                    mu[this_k] = np.array([0, 0, 0])

        # Did it change?
        if np.all(y_n == last_y_n):
            # 4. Stop if there's no change in assignment:
            is_changing = False
        else:
            last_y_n = y_n

        # Another stopping condition is if this has gone on for a while
        n_iter += 1
        if n_iter > max_iter:
            is_changing = False

        # Once you are done computing 'em all, calculate the resulting SSE:
        SSE = 0
        if calc_sse:
            for this_k in range(k):
                idx = np.where(y_n == this_k)
                len_idx = len(idx[0])
                if len_idx > 0:
                    scaled_data = data[idx] * weights[idx].reshape(len_idx, 1)
                    SSE += np.sum((mu[this_k] - scaled_data)**2)

    return mu, y_n, SSE
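A small usage sketch of spkm on synthetic data, assuming the osmosis utility modules it calls (ozu, geo) are importable; the two cluster directions and the noise level are made up for illustration:

import numpy as np

rng = np.random.RandomState(0)
# Hypothetical toy data: unit vectors scattered around two directions.
centers = np.array([[1., 0., 0.], [0., 0., 1.]])
data = np.vstack([c + 0.1 * rng.randn(50, 3) for c in centers])
data /= np.sqrt(np.sum(data ** 2, axis=-1))[:, None]

mu, y_n, sse = spkm(data, k=2)
print(mu)    # two centroid vectors, one per cluster
print(y_n)   # cluster index (0 or 1) for each of the 100 data points
print(sse)   # sum of squared error of the final assignment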
Example #43
0
 def vertices(self):
     return np.column_stack(sphere2cart(1, self.theta, self.phi))
Example #44
0
def main():

    # object_settings_file = Path(os.path.join(IMAGE_DIR_LIST[0], "_object_settings.json"))
    # camera_settings_file = Path(os.path.join(IMAGE_DIR_LIST[0], "_camera_settings.json"))

    if object_settings_file.is_file():
        with open(object_settings_file) as file:
            object_settings_data = json.load(file)
            if SELECTED_OBJECTS is None:
                CLASSES = object_settings_data['exported_object_classes']
            else:
                CLASSES = SELECTED_OBJECTS
            CATEGORIES = [{
                'id': i,
                'name': CLASSES[i].replace('_16k', '').replace('_16K', ''),
                'supercategory': 'shape',
            } for i in range(0,len(CLASSES))]

            FIXED_TRANSFORMS = {}
            for i in range(0,len(object_settings_data['exported_object_classes'])):
                class_name = object_settings_data['exported_objects'][i]['class']
                transform = object_settings_data['exported_objects'][i]['fixed_model_transform']
                if class_name in CLASSES:
                    class_name = class_name.replace('_16k', '').replace('_16K', '')
                    FIXED_TRANSFORMS[class_name] = transform
                    
            # print(FIXED_TRANSFORMS)
            # SEGMENTATION_DATA =  object_settings_data['exported_objects']    
    else:
        raise Exception("Object settings file not found")

    if camera_settings_file.is_file():
        with open(camera_settings_file) as file:
            camera_settings_data = json.load(file)       
            CAMERA_INTRINSICS = camera_settings_data['camera_settings'][0]['intrinsic_settings']
    else:
        raise Exception("Camera settings file not found")

    VIEWPOINTS = [viewpoints_xyz[i].tolist() for i in range(0, len(viewpoints_xyz))]

    INPLANE_ROTATIONS = [inplane_rot_angles[i] for i in range(0, len(inplane_rot_angles))]

    coco_output = {
        "info": INFO,
        "licenses": LICENSES,
        "categories": CATEGORIES,
        "viewpoints" : VIEWPOINTS,
        "inplane_rotations" : INPLANE_ROTATIONS,
        "camera_intrinsic_settings": CAMERA_INTRINSICS,
        "fixed_transforms": FIXED_TRANSFORMS,
        "images": [],
        "annotations": []
    }

    image_global_id = 1
    segmentation_global_id = 1

    # filter for jpeg images
    for IMAGE_DIR_T in IMAGE_DIR_LIST:
        # for root, _, files in os.walk(IMAGE_DIR):
        for SCENE in SCENES:
            if IMAGE_DIR_T == "":
                IMAGE_DIR = os.path.join(ROOT_DIR, SCENE)
            else:
                IMAGE_DIR = os.path.join(ROOT_DIR, IMAGE_DIR_T, SCENE)

            all_dir_files = os.listdir(IMAGE_DIR)
            image_files = filter_for_jpeg(IMAGE_DIR, all_dir_files)
            # dir_name = os.path.basename(IMAGE_DIR)
            SEGMENTATION_DATA = get_segmentation_data_for_scene(IMAGE_DIR)
            print(SEGMENTATION_DATA)
            # go through each image
            for ii in trange(len(image_files)):
                image_filename = image_files[ii]
                if IMAGE_DIR_T == "":
                    image_out_filename =  os.path.join(SCENE, os.path.basename(image_filename))
                else:
                    image_out_filename =  os.path.join(IMAGE_DIR_T, SCENE, os.path.basename(image_filename))

                img_size = (960,540)
                image_info = pycococreatortools.create_image_info(
                    image_global_id, image_out_filename, img_size
                )
                # plt.figure()
                # skimage.io.imshow(skimage.io.imread(image_filename))
                # plt.show()
                # filter for associated png annotations
                # for root, _, files in os.walk(IMAGE_DIR):
                segmentation_image_files = filter_for_annotations(IMAGE_DIR, all_dir_files, image_filename)
                label_files = filter_for_labels(IMAGE_DIR, all_dir_files, image_filename)
                boxes = []
                labels = []
                segmentation_ids = []
                label_filename = label_files[0]
                # go through each associated json file containing objects data
                # for label_filename in label_files:
                # print("File %d - %s"% (image_global_id, label_filename))
                my_file = Path(label_filename)
                segmentation_image = skimage.io.imread(segmentation_image_files[0])
                # print("File %d - %s"% (image_global_id, segmentation_image_files[0]))
                
                if my_file.is_file():
                    with open(label_filename) as file:
                        label_data = json.load(file)
                        # all_objects_yaw_only = True
                        for i in range(0, len(label_data['objects'])):
                            class_name = label_data['objects'][i]['class']

                            if class_name not in SELECTED_OBJECTS:
                                continue
                            # print(class_name)
                            class_bounding_box = label_data['objects'][i]['bounding_box']
                            quat = label_data['objects'][i]['quaternion_xyzw']
                            
                            angles = RT_transform.quat2euler(get_wxyz_quaternion(quat))
                            # angles = RT_transform.quat2euler(get_wxyz_quaternion(quat), 'syxz')
                            # angles = apply_angle_symmetry(angles, SYMMETRY_INFO[class_name])
                            # This function gives angles with this convention of euler - https://en.wikipedia.org/wiki/Euler_angles#Signs_and_ranges (geometric definition)

                            # if np.isclose(angles[1], 0):
                            #     print("Test")
                            theta, phi = euler2sphere(angles[1], angles[0])
                            actual_angles = np.array([1, theta, phi])
                            xyz_coord = sphere2cart(1, theta, phi)
                            
                            viewpoint_id = find_viewpoint_id(viewpoints_xyz, xyz_coord)
                            r_xyz = get_viewpoint_from_id(viewpoints_xyz, viewpoint_id)
                            recovered_angles = np.array(cart2sphere(r_xyz[0], r_xyz[1], r_xyz[2]))

                            inplane_rotation_id = find_inplane_rotation_id(inplane_rot_angles, angles[2])
                            # inplate_rotation_angle = get_inplane_rotation_from_id(INPLANE_ROTATIONS, inplane_rotation_id)


                            if not np.all(np.isclose(actual_angles, recovered_angles, atol=0.4)):
                                print("Mismatch in : {}".format(label_filename))
                                print("sphere2cart angles : {}".format(actual_angles))
                                print("cart2sphere angles : {}".format(recovered_angles))
                            # elif np.all(np.isclose(actual_angles, recovered_angles, atol=0.4)) == True:
                            #     print("Match")

                            # print(inplate_rotation_angle)
                            class_label = [x['id'] for x in CATEGORIES if x['name'] in class_name][0]
                            segmentation_id = [x['segmentation_class_id'] for x in SEGMENTATION_DATA if x['class'] in class_name][0]

                            boxes.append(class_bounding_box['top_left'] + class_bounding_box['bottom_right'])
                            labels.append(class_label)     
                            segmentation_ids.append(segmentation_id)

                            # Create binary masks from segmentation image for every object
                            # for segmentation_image_file in segmentation_image_files:
                            # segmentation_image = skimage.io.imread(segmentation_image_file)
                            binary_mask = np.copy(segmentation_image)
                            binary_mask[binary_mask != segmentation_id] = 0
                            binary_mask[binary_mask == segmentation_id] = 1
                            # skimage.io.imshow(binary_mask, cmap=plt.cm.gray)
                            # plt.show()
                            # TODO : check if its actually a crowd in case of multiple instances of one object type
                            # class_label = [x['class'] for x in SEGMENTATION_DATA if x['segmentation_class_id'] in segmentation_id][0]
                            category_info = {'id': class_label, 'is_crowd': 0}

                            
                            annotation_info = pycococreatortools.create_annotation_info(
                                segmentation_global_id, image_global_id, category_info, binary_mask,
                                img_size, tolerance=2)
                            
                            # print(annotation_info)

                            if annotation_info is not None:
                                annotation_info['viewpoint_id'] = int(viewpoint_id)
                                annotation_info['inplane_rotation_id'] = int(inplane_rotation_id)
                                annotation_info['camera_pose'] = label_data['camera_data']
                                annotation_info['location'] = label_data['objects'][i]['location']
                                annotation_info['quaternion_xyzw'] = quat
                                coco_output["annotations"].append(annotation_info)
                                coco_output["images"].append(image_info)
                            else:
                                tqdm.write("File %s doesn't have boxes or labels in json file" % image_filename)
                            segmentation_global_id = segmentation_global_id + 1
                else:
                    tqdm.write("File %s doesn't have a label file" % image_filename)
                        

                image_global_id = image_global_id + 1

            with open('{}/{}.json'.format(ROOT_DIR, OUTFILE_NAME), 'w') as output_json_file:
                json.dump(coco_output, output_json_file)
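The viewpoint consistency check inside the loop above is essentially a sphere2cart / cart2sphere round trip; a minimal, standalone sketch of that check with a hypothetical angle pair, assuming dipy's angle conventions:

import numpy as np
from dipy.core.geometry import sphere2cart, cart2sphere

theta, phi = 1.1, 2.3                       # hypothetical angles, in radians
xyz = sphere2cart(1, theta, phi)            # unit vector for this viewpoint
recovered = np.array(cart2sphere(*xyz))     # back to (r, theta, phi)
print(np.allclose([1, theta, phi], recovered, atol=0.4))  # True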
Example #45
0
File: sphere.py  Project: DALILA2015/dipy
 def vertices(self):
     return np.column_stack(sphere2cart(1, self.theta, self.phi))
Example #46
0
def generate_gold_data(bvals,bvecs,fibno=1,axesn=10,SNR=None):
        
    #1 fiber case
    if fibno==1:
        data=np.zeros((axesn,len(bvals)))
        gold=np.zeros((axesn,3))    
        X=random_uniform_on_sphere(axesn,'xyz')        
        for (j,x) in enumerate(X):
            S,sticks=SticksAndBall(bvals,bvecs,d=0.0015,S0=100,angles=np.array([x]),fractions=[100],snr=SNR)
            data[j,:]=S[:]
            gold[j,:3]=x
        return data,gold,None,None,axesn,None

    #2 fibers case
    if fibno==2:        
        angles=np.arange(0,92,2.5)#5
        u=np.array([0,0,1])    
        anglesn=len(angles)        
        data=np.zeros((anglesn*axesn,len(bvals)))
        gold=np.zeros((anglesn*axesn,6))
        angs=[]
        for (i,a) in enumerate(angles):        
            v=sphere2cart(1,np.deg2rad(a),0)
            X=random_uniform_on_sphere(axesn,'xyz')        
            for (j,x) in enumerate(X):
                R=rodriguez_axis_rotation(x,360*np.random.rand())                
                ur=np.dot(R,u)
                vr=np.dot(R,v)                        
                S,sticks=SticksAndBall(bvals,bvecs,d=0.0015,S0=100,angles=np.array([ur,vr]),fractions=[50,50],snr=SNR)
                data[i*axesn+j,:]=S[:]
                gold[i*axesn+j,:3]=ur
                gold[i*axesn+j,3:]=vr
                angs.append(a)
        return data,gold,angs,anglesn,axesn,angles
               
    #3 fibers case
    if fibno==3:    
        #phis=np.array([0.,120.,240.])
        #angles=np.arange(0,92,5)
        angles=np.linspace(0,54.73,40)#20           
        anglesn=len(angles)        
        data=np.zeros((anglesn*axesn,len(bvals)))
        gold=np.zeros((anglesn*axesn,9))
        angs=[]
        for (i,a) in enumerate(angles): 
            u=sphere2cart(1,np.deg2rad(a),np.deg2rad(0))
            v=sphere2cart(1,np.deg2rad(a),np.deg2rad(120))
            w=sphere2cart(1,np.deg2rad(a),np.deg2rad(240))        
            X=random_uniform_on_sphere(axesn,'xyz')        
            for (j,x) in enumerate(X):
                R=rodriguez_axis_rotation(x,360*np.random.rand())
                ur=np.dot(R,u)
                vr=np.dot(R,v)
                wr=np.dot(R,w)         
                S,sticks=SticksAndBall(bvals,bvecs,d=0.0015,S0=100,angles=np.array([ur,vr,wr]),fractions=[33,33,33],snr=SNR)
                print(i, j)
                data[i*axesn+j,:]=S[:]
                gold[i*axesn+j,:3]=ur
                gold[i*axesn+j,3:6]=vr
                gold[i*axesn+j,6:]=wr                                        
                angs.append(a)
                
        return data,gold,angs,anglesn,axesn,angles
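A usage sketch for generate_gold_data, mirroring how compare_sdni_eitl below calls it; it assumes the dipy helpers used above (SticksAndBall, random_uniform_on_sphere, etc.) are importable and that the 'dsi515btable' b-table ships with dipy:

import numpy as np
from dipy.data import get_data

btable = np.loadtxt(get_data('dsi515btable'))
bvals, bvecs = btable[:, 0], btable[:, 1:]

data, gold, angs, anglesn, axesn, angles = generate_gold_data(
    bvals, bvecs, fibno=2, axesn=10, SNR=20)
print(data.shape)   # (anglesn * axesn, len(bvals)) simulated signals
print(gold.shape)   # (anglesn * axesn, 6): two unit fiber directions per row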
Example #47
0
def plot_signal_interp(bvecs,
                       signal,
                       origin=[0, 0, 0],
                       maya=True,
                       cmap='jet',
                       file_name=None,
                       colorbar=False,
                       figure=None,
                       vmin=None,
                       vmax=None,
                       offset=0,
                       azimuth=60,
                       elevation=90,
                       roll=0,
                       points=False,
                       cmap_points=None,
                       scale_points=False,
                       non_neg=False,
                       interp_kwargs=dict(function='thin_plate', smooth=0)):
    """

    Interpolate a measured signal, using RBF interpolation.

    Parameters
    ----------
    signal: array (n,)
        The signal value measured at each of the bvecs.

    bvecs: array (3, n)
        The x, y, z locations on the sphere where the signal was measured.

    offset : float
        where to place the plotted voxel (on the z axis)

    points : bool
       whether to show the sampling points on the interpolated
       surface. Default: False.
    
    """

    # Zero out NaNs before duplicating the signal onto the antipodal points,
    # so the interpolation below never sees them:
    signal[np.isnan(signal)] = 0

    bvecs_new = np.hstack([bvecs, -bvecs])
    new_signal = np.hstack([signal, signal])

    s0 = Sphere(xyz=bvecs_new.T)
    s1 = create_unit_sphere(7)

    interp_signal = interp_rbf(new_signal, s0, s1, **interp_kwargs)
    vertices = s1.vertices

    if non_neg:
        interp_signal[interp_signal < 0] = 0

    faces = s1.faces
    x, y, z = vertices.T

    r, phi, theta = geo.cart2sphere(x, y, z)
    x_plot, y_plot, z_plot = geo.sphere2cart(interp_signal, phi, theta)

    if points:
        r, phi, theta = geo.cart2sphere(s0.x, s0.y, s0.z)
        x_p, y_p, z_p = geo.sphere2cart(signal, phi, theta)
        figure = _display_maya_voxel(x_p,
                                     y_p,
                                     z_p,
                                     faces,
                                     signal,
                                     origin,
                                     cmap=cmap,
                                     colorbar=colorbar,
                                     figure=figure,
                                     vmin=vmin,
                                     vmax=vmax,
                                     file_name=file_name,
                                     azimuth=azimuth,
                                     elevation=elevation,
                                     points=True,
                                     cmap_points=cmap_points,
                                     scale_points=scale_points)

    # Call and return straightaway:
    return _display_maya_voxel(x_plot,
                               y_plot,
                               z_plot,
                               faces,
                               interp_signal,
                               origin,
                               cmap=cmap,
                               colorbar=colorbar,
                               figure=figure,
                               vmin=vmin,
                               vmax=vmax,
                               file_name=file_name,
                               azimuth=azimuth,
                               elevation=elevation)
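The core move in plot_signal_interp (and in sig_on_sphere earlier) is to keep each vertex's angles but swap the radius for the signal value; a standalone sketch of that step, assuming dipy's cart2sphere/sphere2cart conventions:

import numpy as np
from dipy.core.geometry import cart2sphere, sphere2cart

vertices = np.eye(3)                 # three unit vectors as a toy "mesh"
signal = np.array([1.0, 2.0, 0.5])   # hypothetical signal at each vertex

r, theta, phi = cart2sphere(*vertices.T)     # directions as angles
x, y, z = sphere2cart(signal, theta, phi)    # same directions, new radii
print(np.column_stack([x, y, z]))    # each row scaled by its signal value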
Example #48
0
File: voxel.py  Project: nmabhi/dipy
def multi_tensor(gtab,
                 mevals,
                 S0=100,
                 angles=[(0, 0), (90, 0)],
                 fractions=[50, 50],
                 snr=20):
    r"""Simulate a Multi-Tensor signal.

    Parameters
    -----------
    gtab : GradientTable
    mevals : array (K, 3)
        each tensor's eigenvalues in each row
    S0 : float
        Unweighted signal value (b0 signal).
    angles : array (K,2) or (K,3)
        List of K tensor directions in polar angles (in degrees) or unit vectors
    fractions : float
        Percentage of the contribution of each tensor. The sum of fractions
        should be equal to 100%.
    snr : float
        Signal to noise ratio, assuming Rician noise.  If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal.
    sticks : (M,3)
        Sticks in cartesian coordinates.

    Examples
    --------
    >>> import numpy as np
    >>> from dipy.sims.voxel import multi_tensor
    >>> from dipy.data import get_data
    >>> from dipy.core.gradients import gradient_table
    >>> from dipy.io.gradients import read_bvals_bvecs
    >>> fimg, fbvals, fbvecs = get_data('small_101D')
    >>> bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    >>> gtab = gradient_table(bvals, bvecs)
    >>> mevals=np.array(([0.0015, 0.0003, 0.0003],[0.0015, 0.0003, 0.0003]))
    >>> e0 = np.array([1, 0, 0.])
    >>> e1 = np.array([0., 1, 0])
    >>> S = multi_tensor(gtab, mevals)

    """
    if np.round(np.sum(fractions), 2) != 100.0:
        raise ValueError('Fractions should sum to 100')

    fractions = [f / 100. for f in fractions]

    S = np.zeros(len(gtab.bvals))

    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [
            sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
            for pair in angles
        ]
        sticks = np.array(sticks)

    for i in range(len(fractions)):
        S = S + fractions[i] * single_tensor(gtab,
                                             S0=S0,
                                             evals=mevals[i],
                                             evecs=all_tensor_evecs(
                                                 sticks[i]).T,
                                             snr=None)

    return add_noise(S, snr, S0), sticks
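For illustration, a small sketch of the angles-to-sticks conversion performed above when (theta, phi) pairs in degrees are given, assuming dipy's sphere2cart convention (theta measured from the +z axis):

import numpy as np
from dipy.core.geometry import sphere2cart

angles = [(0, 0), (90, 0)]
sticks = np.array([sphere2cart(1, np.deg2rad(t), np.deg2rad(p))
                   for (t, p) in angles])
print(sticks)   # ~[[0, 0, 1], [1, 0, 0]] -- one unit vector per tensor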
Example #49
0
def compare_sdni_eitl():
    
    btable=np.loadtxt(get_data('dsi515btable'))
    bvals=btable[:,0]
    bvecs=btable[:,1:]   
    
    type=3
    axesn=200
    SNR=20
    
    data,gold,angs,anglesn,axesn,angles=generate_gold_data(bvals,bvecs,fibno=type,axesn=axesn,SNR=SNR)
    
    ei=EquatorialInversion(data,bvals,bvecs,odf_sphere='symmetric642',
                           auto=False,save_odfs=True,fast=True)    
    ei.radius=np.arange(0,5,0.1)
    ei.gaussian_weight=None
    ei.set_operator('laplacian')
    #{'reflect','constant','nearest','mirror', 'wrap'}
    ei.set_mode(order=1,zoom=1,mode='constant')    
    ei.update()
    ei.fit()

    dn=DiffusionNabla(data,bvals,bvecs,odf_sphere='symmetric642',
                      auto=False,save_odfs=True,fast=False)    
    dn.radius=np.arange(0,5,0.1)
    dn.gaussian_weight=None
    dn.update()
    dn.fit()
    
    vts=ei.odf_vertices
    
    def simple_peaks(ODF,faces,thr):
        x,g=ODF.shape
        PK=np.zeros((x,5))
        IN=np.zeros((x,5))
        for (i,odf) in enumerate(ODF):
            peaks,inds=peak_finding(odf,faces)
            ibigp=np.where(peaks>thr*peaks[0])[0]
            l=len(ibigp)
            if l>3:
                l=3
            PK[i,:l]=peaks[:l]
            IN[i,:l]=inds[:l]
        return PK,IN
    
    print "test0"
    thresh=0.5
    PK,IN=simple_peaks(ei.ODF,ei.odf_faces,thresh)
    res,me,st =do_compare(gold,vts,PK,IN,0,anglesn,axesn,type)
    #PKG,ING=simple_peaks(eig.ODF,ei.odf_faces,thresh)
    #resg,meg,stg =do_compare(gold,vts,PKG,ING,0,anglesn,axesn,type)
    PK,IN=simple_peaks(dn.ODF,dn.odf_faces,thresh)
    res2,me2,st2 =do_compare(gold,vts,PK,IN,0,anglesn,axesn,type)
    if type==3:
            x,y,z=sphere2cart(np.ones(len(angles)),np.deg2rad(angles),np.zeros(len(angles)))
            x2,y2,z2=sphere2cart(np.ones(len(angles)),np.deg2rad(angles),np.deg2rad(120*np.ones(len(angles))))
            angles2=[]
            for i in range(len(x)):
                angles2.append(np.rad2deg(np.arccos(np.dot([x[i],y[i],z[i]],[x2[i],y2[i],z2[i]]))))
            angles=angles2
    print "test1"
    plt.figure(1)
    plt.plot(angles,me,'k:',linewidth=3.,label='EITL')
    plt.plot(angles,me2,'k--',linewidth=3.,label='sDNI')
    #plt.plot(angles,me7,'r--',linewidth=3.,label='EITL2')
    plt.xlabel('angle')
    plt.ylabel('similarity')
    title='Angular similarity of ' + str(type) + '-fibres crossing with SNR ' + str(SNR)
    plt.title(title)
    plt.legend(loc='center right')
    plt.savefig('/tmp/test.png',dpi=300)
    plt.show()
Example #50
0
#needles = np.array([-np.pi/4,np.pi/4])
needles2d = np.array([0,np.pi/4.,np.pi/2.])

angles2d = np.linspace(-np.pi, np.pi,100)

def signal2d(S0,f,b,d,needles2d,angles2d):
    return S0*np.array([(1-f[0]-f[1]-f[2])*np.exp(-b*d) + \
                      f[0]*np.exp(-b*d*(np.cos(needles2d[0]-gradient))**2) + \
                      f[1]*np.exp(-b*d*(np.cos(needles2d[1]-gradient))**2) + \
                      f[2]*np.exp(-b*d*(np.cos(needles2d[2]-gradient))**2) \
                      for gradient in angles2d])


#needles3d = [sphere2cart(1,0,0),sphere2cart(1,np.pi/2.,0),sphere2cart(1,np.pi/2.,np.pi/2.)]
needles3d = np.array([sphere2cart(1,0,0),sphere2cart(1,np.pi/2.,0),sphere2cart(1,np.pi/2.,np.pi/2.)])

angles2d = np.linspace(-np.pi, np.pi,100)

def signal3d(S0,f,b0,d,needles3d,bvals,gradients):
    S = S0*np.array([(1-f[0]-f[1]-f[2])*np.exp(-b0*d) + \
                      f[0]*np.exp(-bvals[i]*d*(np.dot(needles3d[0],gradient))**2) + \
                      f[1]*np.exp(-bvals[i]*d*(np.dot(needles3d[1],gradient))**2) + \
                      f[2]*np.exp(-bvals[i]*d*(np.dot(needles3d[2],gradient))**2) \
                      for (i,gradient) in enumerate(gradients)])
    return S, np.dot(np.diag(S),gradients)

#dsi202 = np.row_stack((dsi102[1:,:],-dsi102[1:,:]))
#print dsi202.shape

#print bvals102.shape
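A quick numeric sketch of signal2d with illustrative parameter values (the S0, f, b and d below are assumptions for the example, not values taken from the source); it reuses the needles2d and angles2d arrays defined above:

import numpy as np

S0, b, d = 100.0, 1200.0, 0.0015
f = [0.3, 0.3, 0.3]                 # volume fraction of each needle
S = signal2d(S0, f, b, d, needles2d, angles2d)
print(S.shape)                      # (100,): one value per gradient angle
print(S.min(), S.max())             # attenuation varies with the angle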
Example #51
0
def test_kurtosis_maximum():
    # TEST 1
    # simulate two crossing fibers intersecting at 70 degrees. The first fiber
    # is aligned to the x-axis while the second fiber is aligned to the x-z
    # plane with an angular deviation of 70 degrees from the first one.
    # According to Neto Henriques et al., 2015 (NeuroImage 111: 85-99), the
    # kurtosis tensor of this simulation will have a maximum aligned to the
    # y-axis
    angles = [(90, 0), (90, 0), (20, 0), (20, 0)]
    signal_70, dt_70, kt_70 = multi_tensor_dki(gtab_2s, mevals_cross, S0=100,
                                               angles=angles,
                                               fractions=frac_cross, snr=None)
    # prepare inputs
    dkiM = dki.DiffusionKurtosisModel(gtab_2s, fit_method="WLS")
    dkiF = dkiM.fit(signal_70)
    MD = dkiF.md
    kt = dkiF.kt
    R = dkiF.evecs
    evals = dkiF.evals
    dt = lower_triangular(np.dot(np.dot(R, np.diag(evals)), R.T))
    sphere = get_sphere('symmetric724')

    # compute maxima
    k_max_cross, max_dir = dki._voxel_kurtosis_maximum(dt, MD, kt, sphere,
                                                       gtol=1e-5)

    yaxis = np.array([0., 1., 0.])
    cos_angle = np.abs(np.dot(max_dir[0], yaxis))
    assert_almost_equal(cos_angle, 1.)

    # TEST 2
    # test the function on cases of well-aligned fibers oriented in a randomly
    # defined direction. According to Neto Henriques et al., 2015 (NeuroImage
    # 111: 85-99), the kurtosis maximum lies along any direction perpendicular
    # to the fiber direction. Moreover, according to multi-compartmental
    # simulations, kurtosis in this direction has to be equal to:
    fie = 0.49
    ADi = 0.00099
    ADe = 0.00226
    RDi = 0
    RDe = 0.00087
    RD = fie*RDi + (1-fie)*RDe
    RK = 3 * fie * (1-fie) * ((RDi-RDe) / RD) ** 2

    # prepare simulation:
    theta = random.uniform(0, 180)
    phi = random.uniform(0, 320)
    angles = [(theta, phi), (theta, phi)]
    mevals = np.array([[ADi, RDi, RDi], [ADe, RDe, RDe]])
    frac = [fie*100, (1 - fie)*100]
    signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles,
                                      fractions=frac, snr=None)

    # prepare inputs
    dkiM = dki.DiffusionKurtosisModel(gtab_2s, fit_method="WLS")
    dkiF = dkiM.fit(signal)
    MD = dkiF.md
    kt = dkiF.kt
    R = dkiF.evecs
    evals = dkiF.evals
    dt = lower_triangular(np.dot(np.dot(R, np.diag(evals)), R.T))

    # compute maxima
    k_max, max_dir = dki._voxel_kurtosis_maximum(dt, MD, kt, sphere, gtol=1e-5)

    # check if max direction is perpendicular to fiber direction
    fdir = np.array([sphere2cart(1., np.deg2rad(theta), np.deg2rad(phi))])
    cos_angle = np.abs(np.dot(max_dir[0], fdir[0]))
    assert_almost_equal(cos_angle, 0., decimal=5)

    # check if max direction is equal to expected value
    assert_almost_equal(k_max, RK)

    # According to Neto Henriques et al., 2015 (NeuroImage 111: 85-99),
    # e.g. see figure 1 of that article, the kurtosis maximum for the first
    # test is also equal to the maximum kurtosis value of well-aligned fibers,
    # since the simulation parameters (apart from fiber directions) are equal
    assert_almost_equal(k_max_cross, RK)

    # Test 3 - Test performance when kurtosis is spherical - this case can be
    # problematic since a spherical kurtosis does not have a maximum
    k_max, max_dir = dki._voxel_kurtosis_maximum(dt_sph, np.mean(evals_sph),
                                                 kt_sph, sphere, gtol=1e-2)
    assert_almost_equal(k_max, Kref_sphere)

    # Test 4 - Test performance when kt has all elements zero - this case can
    # be problematic since it does not have a maximum
    k_max, max_dir = dki._voxel_kurtosis_maximum(dt_sph, np.mean(evals_sph),
                                                 np.zeros(15), sphere,
                                                 gtol=1e-2)
    assert_almost_equal(k_max, 0.0)
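A quick numeric check of the radial-kurtosis expression used in TEST 2 above, with the same fie, RDi and RDe values as in the test:

fie, RDi, RDe = 0.49, 0.0, 0.00087
RD = fie * RDi + (1 - fie) * RDe
RK = 3 * fie * (1 - fie) * ((RDi - RDe) / RD) ** 2
print(RD)   # ~0.00044
print(RK)   # ~2.88 -- the kurtosis expected perpendicular to the fiber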
Example #52
0
def test():

    # img=nib.load('/home/eg309/Data/project01_dsi/connectome_0001/tp1/RAWDATA/OUT/mr000001.nii.gz')
    btable = np.loadtxt(get_data("dsi515btable"))
    # volume size
    sz = 16
    # shifting
    origin = 8
    # hanning width
    filter_width = 32.0
    # number of signal sampling points
    n = 515
    # odf radius
    radius = np.arange(2.1, 6, 0.2)
    # create q-table
    bv = btable[:, 0]
    bmin = np.sort(bv)[1]
    bv = np.sqrt(bv / bmin)
    qtable = np.vstack((bv, bv, bv)).T * btable[:, 1:]
    qtable = np.floor(qtable + 0.5)
    # copy bvals and bvecs
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    # S=img.get_data()[38,50,20]#[96/2,96/2,20]
    S, stics = SticksAndBall(
        bvals, bvecs, d=0.0015, S0=100, angles=[(0, 0), (60, 0), (90, 90)], fractions=[0, 0, 0], snr=None
    )

    S2 = S.copy()
    S2 = S2.reshape(1, len(S))
    dn = DiffusionNabla(S2, bvals, bvecs, auto=False)
    pR = dn.equators
    odf = dn.odf(S)
    # Xs=dn.precompute_interp_coords()
    peaks, inds = peak_finding(odf.astype("f8"), dn.odf_faces.astype("uint16"))
    print(peaks)
    print(peaks / peaks.min())
    # print(dn.PK)
    dn.fit()
    print(dn.PK)

    # """
    ren = fvtk.ren()
    colors = fvtk.colors(odf, "jet")
    fvtk.add(ren, fvtk.point(dn.odf_vertices, colors, point_radius=0.05, theta=8, phi=8))
    fvtk.show(ren)
    # """

    stop

    # ds=DiffusionSpectrum(S2,bvals,bvecs)
    # tpr=ds.pdf(S)
    # todf=ds.odf(tpr)

    """
    #show projected signal
    Bvecs=np.concatenate([bvecs[1:],-bvecs[1:]])
    X0=np.dot(np.diag(np.concatenate([S[1:],S[1:]])),Bvecs)    
    ren=fvtk.ren()
    fvtk.add(ren,fvtk.point(X0,fvtk.yellow,1,2,16,16))    
    fvtk.show(ren)
    """
    # qtable=5*matrix[:,1:]

    # calculate radius for the hanning filter
    r = np.sqrt(qtable[:, 0] ** 2 + qtable[:, 1] ** 2 + qtable[:, 2] ** 2)

    # setting hanning filter width and hanning
    hanning = 0.5 * np.cos(2 * np.pi * r / filter_width)

    # center and index in q space volume
    q = qtable + origin
    q = q.astype("i8")

    # apply the hanning filter
    values = S * hanning

    """
    #plot q-table
    ren=fvtk.ren()
    colors=fvtk.colors(values,'jet')
    fvtk.add(ren,fvtk.point(q,colors,1,0.1,6,6))
    fvtk.show(ren)
    """

    # create the signal volume
    Sq = np.zeros((sz, sz, sz))
    for i in range(n):
        Sq[q[i][0], q[i][1], q[i][2]] += values[i]

    # apply fourier transform
    Pr = fftshift(np.abs(np.real(fftn(fftshift(Sq), (sz, sz, sz)))))

    # """
    ren = fvtk.ren()
    vol = fvtk.volume(Pr)
    fvtk.add(ren, vol)
    fvtk.show(ren)
    # """

    """
    from enthought.mayavi import mlab
    mlab.pipeline.volume(mlab.pipeline.scalar_field(Sq))
    mlab.show()
    """

    # vertices, edges, faces  = create_unit_sphere(5)
    vertices, faces = sphere_vf_from("symmetric362")
    odf = np.zeros(len(vertices))

    for m in range(len(vertices)):

        xi = origin + radius * vertices[m, 0]
        yi = origin + radius * vertices[m, 1]
        zi = origin + radius * vertices[m, 2]
        PrI = map_coordinates(Pr, np.vstack((xi, yi, zi)), order=1)
        for i in range(len(radius)):
            odf[m] = odf[m] + PrI[i] * radius[i] ** 2

    """
    ren=fvtk.ren()
    colors=fvtk.colors(odf,'jet')
    fvtk.add(ren,fvtk.point(vertices,colors,point_radius=.05,theta=8,phi=8))
    fvtk.show(ren)
    """

    """
    #Pr[Pr<500]=0    
    ren=fvtk.ren()
    #ren.SetBackground(1,1,1)
    fvtk.add(ren,fvtk.volume(Pr))
    fvtk.show(ren)
    """

    peaks, inds = peak_finding(odf.astype("f8"), faces.astype("uint16"))

    Eq = np.zeros((sz, sz, sz))
    for i in range(n):
        Eq[q[i][0], q[i][1], q[i][2]] += S[i] / S[0]

    LEq = laplace(Eq)

    # Pr[Pr<500]=0
    ren = fvtk.ren()
    # ren.SetBackground(1,1,1)
    fvtk.add(ren, fvtk.volume(Eq))
    fvtk.show(ren)

    phis = np.linspace(0, 2 * np.pi, 100)

    planars = []
    for phi in phis:
        planars.append(sphere2cart(1, np.pi / 2, phi))
    planars = np.array(planars)

    planarsR = []
    for v in vertices:
        R = vec2vec_rotmat(np.array([0, 0, 1]), v)
        planarsR.append(np.dot(R, planars.T).T)

    """
    ren=fvtk.ren()
    fvtk.add(ren,fvtk.point(planarsR[0],fvtk.green,1,0.1,8,8))
    fvtk.add(ren,fvtk.point(2*planarsR[1],fvtk.red,1,0.1,8,8))
    fvtk.show(ren)
    """

    azimsums = []
    for disk in planarsR:
        diskshift = 4 * disk + origin
        # Sq0=map_coordinates(Sq,diskshift.T,order=1)
        # azimsums.append(np.sum(Sq0))
        # Eq0=map_coordinates(Eq,diskshift.T,order=1)
        # azimsums.append(np.sum(Eq0))
        LEq0 = map_coordinates(LEq, diskshift.T, order=1)
        azimsums.append(np.sum(LEq0))

    azimsums = np.array(azimsums)

    # """
    ren = fvtk.ren()
    colors = fvtk.colors(azimsums, "jet")
    fvtk.add(ren, fvtk.point(vertices, colors, point_radius=0.05, theta=8, phi=8))
    fvtk.show(ren)
    # """

    # for p in planarsR[0]:
    """
Example #53
0
def test():
    K.set_image_data_format(
        'channels_last')  # TF dimension ordering in this code
    featdef = {
        #'magfld': tf.FixedLenSequenceFeature(shape=[YMagfld*XMagfld*ZMagfld], dtype=tf.string, allow_missing=True),
        #'stokes': tf.FixedLenSequenceFeature(shape=[YStokes*XStokes*ZStokes], dtype=tf.string, allow_missing=True)
        'magfld':
        tf.FixedLenSequenceFeature(shape=[],
                                   dtype=tf.float32,
                                   allow_missing=True),
        'stokes':
        tf.FixedLenSequenceFeature(shape=[],
                                   dtype=tf.float32,
                                   allow_missing=True),
        #    'name':   tf.FixedLenFeature(shape=[], dtype=tf.string)
    }

    def _parse_record(example_proto, clip=False):
        """Parse a single record into x and y images"""
        example = tf.parse_single_example(example_proto, featdef)
        #x = tf.decode_raw(example['stokes'], tf.float32)
        x = example['stokes']

        # normalize Stokes Parameters
        if doNormalize:
            x = tf.reshape(x, (WStokes * YStokes * XStokes, ZStokes))
            x = tf.slice(x, (0, 0), (WStokes * YDim * XDim, ZStokes))
            x = x - nm
            x = x / ns

        x = tf.reshape(x, (WStokes, YStokes, XStokes, ZStokes))
        x = tf.slice(x, (0, 0, 0, 0), (WStokes, YDim, XDim, ZStokes))

        if doWLConvolution == False:
            # unstack wavelength dimension (axis=0)
            # now we have a list of WDim tensors of shape (YDim*XDim, ZStokes)
            wl = tf.unstack(x, axis=0)
            # concatenate the wavelengths in the channel (Stokes Param) dimension (axis=2)
            x = tf.concat(wl, axis=2)
            x = tf.slice(x, (0, 0, 0), (YDim, XDim, WStokes * ZStokes))

        y = example['magfld']
        y = tf.reshape(y, (YMagfld, XMagfld, ZMagfld))
        y = tf.slice(y, (0, 0, 0), (YDim, XDim, ZMagfld))

        #    name = example['name']

        #    return x, y, name
        return x, y

    print('-' * 30)
    print('Opening test dataset ...')
    print('-' * 30)
    dsTest = tf.data.TFRecordDataset(pathTest).map(_parse_record,
                                                   num_parallel_calls=8)
    dsTest = dsTest.prefetch(10)
    dsTest = dsTest.repeat(1)
    dsTest = dsTest.batch(sizeBatch)

    print('-' * 30)
    print('Creating and compiling model...')
    print('-' * 30)
    model = UNet()

    #print(model.summary())

    print('-' * 30)
    print('Testing the model...')
    print('-' * 30)

    sess = K.get_session()
    n = 0

    scores = model.evaluate(dsTest, steps=nStep, verbose=1)
    for m in range(0, len(scores)):
        print('%s=%f' % (model.metrics_names[m], scores[m]))

    if doPlot:
        plt.gray()
    r = np.zeros((ZMagfld, (nStep + 1) * sizeBatch))
    mae = np.zeros((ZMagfld, (nStep + 1) * sizeBatch))
    mse = np.zeros((ZMagfld, (nStep + 1) * sizeBatch))

    # add an iterator on the test set to the graph
    diTest = dsTest.make_one_shot_iterator().get_next()
    start = time.time()

    # for each batch in the test set
    for t in range(0, nStep + 1):
        #for t in range(0, 1):
        # get the next batch of test examples
        examples = sess.run(diTest)
        x = examples[0]
        print(x.shape)
        y = examples[1]
        print(y.shape)
        #print(y)
        if False:
            plt.imshow(x[1, 56, :, :, 0])
            plt.show()
            plt.imshow(y[1, :, :, 0])
            plt.show()

            plt.imshow(x[1, 56, :, :, 1])
            plt.show()
            plt.imshow(y[1, :, :, 1])
            plt.show()

            plt.imshow(x[1, 56, :, :, 2])
            plt.show()
            plt.imshow(y[1, :, :, 2])
            plt.show()

            plt.imshow(x[1, 56, :, :, 3])
            plt.show()

        y_pred = model.predict(x, steps=1)
        print(y_pred.shape)
        #print(y_pred)
        if False:
            plt.imshow(y_pred[1, :, :, 0])
            plt.show()
            plt.imshow(y_pred[1, :, :, 1])
            plt.show()
            plt.imshow(y_pred[1, :, :, 2])
            plt.show()

        # convert Intensity units from Gauss to kG
        #y[:,:,:,0] = np.true_divide(y[:,:,:,0], 1000.0)
        #y_pred[:,:,:,0] = np.true_divide(y_pred[:,:,:,0], 1000.0)

        if useBquv:
            # transform from spherical coords to Ramos style Bq,Bu,Bv Cartesian coords
            y[:, :, :, 1:] = np.radians(y[:, :, :, 1:])
            Bx, By, Bz = sphere2cart(y[:, :, :, 0], y[:, :, :, 1],
                                     y[:, :, :, 2])
            Bxsq = np.square(Bx)
            Bysq = np.square(By)
            Bxy = np.multiply(Bx, By)
            Bq = np.sign(Bxsq - Bysq) * np.sqrt(np.abs(Bxsq - Bysq))
            Bu = np.sign(Bxy) * np.sqrt(np.abs(Bxy))
            Bv = Bz
            y[:, :, :, 0], y[:, :, :, 1], y[:, :, :, 2] = Bq, Bu, Bv

            y_pred[:, :, :, 1:] = np.radians(y_pred[:, :, :, 1:])
            Bx, By, Bz = sphere2cart(y_pred[:, :, :, 0], y_pred[:, :, :, 1],
                                     y_pred[:, :, :, 2])
            Bxsq = np.square(Bx)
            Bysq = np.square(By)
            Bxy = np.multiply(Bx, By)
            Bq = np.sign(Bxsq - Bysq) * np.sqrt(np.abs(Bxsq - Bysq))
            Bu = np.sign(Bxy) * np.sqrt(np.abs(Bxy))
            Bv = Bz
            y_pred[:, :, :, 0], y_pred[:, :, :, 1], y_pred[:, :, :, 2] = Bq, Bu, Bv

        # for each test example in this batch
        for i in range(x.shape[0]):
            #for i in range(28, 29):
            n += 1
            print('.', end='', flush=True)

            # for each output dimension
            for d in range(0, ZMagfld):
                yt = y[i, 0:64, 0:64, d]
                yp = y_pred[i, 0:64, 0:64, d]

                # sum the Pearson Rs to take an average per dimension after the main loop
                pr, pp = stats.pearsonr(yt.flatten(), yp.flatten())
                r[d, n - 1] = pr

                mae[d, n - 1] = metrics.mean_absolute_error(yt, yp)
                mse[d, n - 1] = metrics.mean_squared_error(yt, yp)

                yUnits = units[useBquv]
                yChannel = channel[useBquv]
                ySym = symbol[useBquv]
                if doPlot:
                    #xt = x[i, 52, 0:64, 0:64, 0]
                    #plt.imshow(xt)
                    #plt.show()

                    plt.imshow(yt)
                    plt.show()
                    plt.imshow(yp)
                    plt.show()

                    if False:
                        title = '%s: R=%.6f' % (yChannel[d], pr)
                        plt.title(title)
                        plt.imshow(corr(yt, yp))
                        plt.show()

                    if True:
                        title = '%s KDE' % (yChannel[d])
                        print('%s [%s]' % (yChannel[d], yUnits[d]))
                        yleg = "%s [%s]" % (ySym[d], yUnits[d])
                        xleg = "%s [%s]" % (ySym[d], yUnits[d])
                        kde_error(yt, yp, title, yleg, xleg, yUnits[d])
                        print("")

            #imI = normalize(level1[i,:,0:64,0:64,:])
            #imI = level1[i,:,0:64,0:64,:]
            #print('%f,'%(imI[56,32,32,0]), end='')
            #print('%f,'%(imI[56,32,32,1]), end='')
            #print('%f,'%(imI[56,32,32,2]), end='')
            #print('%f,'%(imI[56,32,32,3]), end='')
            #print('%s,'%(fname[i]))
            #break
            #for j in range(X.shape[1]):
            #if False: #for j in range(0, 1):
            #  #print(fname[i],end='')
            #  imI = level1[i,j,0:64,0:64,0]
            #  #nz = np.nonzero(imI)
            #  #print('%d,'%(j), end='')
            #  #print('%f,'%(np.percentile(imI, 95.0)), end='')
            #  #print('%f,'%(np.percentile(imI, 10.0)), end='')
            #  #print('%f,'%(np.min(imI)), end='')
            #  #print('%f,'%(np.max(imI)), end='')
            #  #print('%f,'%(np.mean(imI)), end='')
            #  #print('%f,'%(np.std(imI)), end='')
            #  plt.imshow(imI)
            #  #plt.title(fname[i])
            #  plt.show()
            #  imQ = level1[i,j,0:64,0:864,1]
            #  plt.gray()
            #  plt.imshow(imQ)
            #  #plt.title(fname[i])
            #  plt.show()
            #  #print('%f,'%(np.percentile(imQ[nz], 95.0)), end='')
            #  #print('%f,'%(np.percentile(imQ[nz], 10.0)), end='')
            #  #print('%f,'%(np.min(imQ[nz])), end='')
            #  #print('%f,'%(np.max(imQ[nz])), end='')
            #  #print('%f,'%(np.mean(imQ[nz])), end='')
            #  #print('%f,'%(np.std(imQ[nz])), end='')
            #  imU = level1[i,j,0:64,0:864,2]
            #  #nz = np.nonzero(imU)
            #  plt.gray()
            #  plt.imshow(imU)
            #  #plt.title(fname[i])
            #  plt.show()
            #  #print('%f,'%(np.percentile(imU[nz], 95.0)), end='')
            #  #print('%f,'%(np.percentile(imU[nz], 10.0)), end='')
            #  #print('%f,'%(np.min(imU[nz])), end='')
            #  #print('%f,'%(np.max(imU[nz])), end='')
            #  #print('%f,'%(np.mean(imU[nz])), end='')
            #  #print('%f,'%(np.std(imU[nz])), end='')
            #  imV = level1[i,j,0:64,0:864,3]
            #  #nz = np.nonzero(imV)
            #  plt.gray()
            #  plt.imshow(imV)
            #  #plt.title(fname[i])
            #  plt.show()
            #  #print('%f,'%(np.percentile(imV[nz], 95.0)), end='')
            #  #print('%f,'%(np.percentile(imV[nz], 10.0)), end='')
            #  #print('%f,'%(np.min(imV[nz])), end='')
            #  #print('%f,'%(np.max(imV[nz])), end='')
            #  #print('%f,'%(np.mean(imV[nz])), end='')
            #  #print('%f'%(np.std(imV[nz])))

    #scores = model.evaluate(diTest, steps=1, verbose=1)
    #print(scores)
    #scores = model.evaluate(dsTest.take(2), steps=1, verbose=1)
    #y_pred = model.predict(dsTest, steps=1)
    #print(y_pred)

    #print('\nevaluate result: loss={}, mae={}'.format(*scores))
    #image = Y[0,:,:,0]

    #fig = plt.figure(num='Level 2 - Predicted')

    #plt.gray()
    #plt.imshow(image)
    #plt.show()
    #plt.close(fig)
    end = time.time()
    elapsed = end - start
    #print(r)
    #print('%d test examples in %d seconds: %.3f'%(n,elapsed, elapsed/n))
    print('\n%d test examples' % (n))
    print('%d seconds elapsed' % (elapsed))
    print('Average inversion time %f.2 seconds' % (elapsed / n))

    rmean = np.mean(r, axis=0)
    rstd = np.std(r, axis=0)
    for m in range(0, ZMagfld):
        print('%s: 2D correlation R mean  %f' % (yChannel[m], rmean[m]))
        print('%s: 2D correlation R stdev %f' % (yChannel[m], rstd[m]))

    mmean = np.mean(mae, axis=0)
    mrms = np.mean(mse, axis=0)
    mstd = np.std(mae, axis=0)
    for m in range(0, ZMagfld):
        print('%s: MAE mean  %f' % (yChannel[m], mmean[m]))
        print('%s: MAE stdev %f' % (yChannel[m], mstd[m]))
        print('%s: MSE mean  %f' % (yChannel[m], mrms[m]))

    if doPlot:
        plt.plot(r[0],
                 marker='o',
                 color='blue',
                 linewidth=2,
                 label="intensity")
        plt.plot(r[1],
                 marker='*',
                 color='red',
                 linewidth=2,
                 label="inclination")
        plt.plot(r[2], marker='x', color='green', linewidth=2, label="azimuth")
        plt.legend()
        plt.show()
    K.clear_session()
Example #54
0
def multi_tensor(gtab, mevals, S0=100, angles=[(0, 0), (90, 0)],
                 fractions=[50, 50], snr=20):
    r"""Simulate a Multi-Tensor signal.

    Parameters
    -----------
    gtab : GradientTable
    mevals : array (K, 3)
        each tensor's eigenvalues in each row
    S0 : float
        Unweighted signal value (b0 signal).
    angles : array (K,2) or (K,3)
        List of K tensor directions in polar angles (in degrees) or unit vectors
    fractions : float
        Percentage of the contribution of each tensor. The sum of fractions
        should be equal to 100%.
    snr : float
        Signal to noise ratio, assuming Rician noise.  If set to None, no
        noise is added.

    Returns
    --------
    S : (N,) ndarray
        Simulated signal.
    sticks : (M,3)
        Sticks in cartesian coordinates.

    Examples
    --------
    >>> import numpy as np
    >>> from dipy.sims.voxel import multi_tensor
    >>> from dipy.data import get_data
    >>> from dipy.core.gradients import gradient_table
    >>> from dipy.io.gradients import read_bvals_bvecs
    >>> fimg, fbvals, fbvecs = get_data('small_101D')
    >>> bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    >>> gtab = gradient_table(bvals, bvecs)
    >>> mevals=np.array(([0.0015, 0.0003, 0.0003],[0.0015, 0.0003, 0.0003]))
    >>> e0 = np.array([1, 0, 0.])
    >>> e1 = np.array([0., 1, 0])
    >>> S = multi_tensor(gtab, mevals)

    """
    if np.round(np.sum(fractions), 2) != 100.0:
        raise ValueError('Fractions should sum to 100')

    fractions = [f / 100. for f in fractions]

    S = np.zeros(len(gtab.bvals))

    angles = np.array(angles)
    if angles.shape[-1] == 3:
        sticks = angles
    else:
        sticks = [sphere2cart(1, np.deg2rad(pair[0]), np.deg2rad(pair[1]))
                  for pair in angles]
        sticks = np.array(sticks)

    for i in range(len(fractions)):
        S = S + fractions[i] * single_tensor(gtab, S0=S0, evals=mevals[i],
                                             evecs=all_tensor_evecs(
                                                 sticks[i]).T,
                                             snr=None)

    return add_noise(S, snr, S0), sticks