def test_adc():
    """
    Test the implementation of the calculation of apparent diffusion
    coefficient
    """
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    # NOTE: a boolean mask and a unit sphere were previously built here but
    # never used by the assertions below; they have been removed.
    dtifit = dm.fit(data)
    # The ADC in the principal diffusion direction should be equal to the AD
    # in each voxel:
    pdd0 = dtifit.evecs[0, 0, 0, 0]
    sphere_pdd0 = dps.Sphere(x=pdd0[0], y=pdd0[1], z=pdd0[2])
    assert_array_almost_equal(dtifit.adc(sphere_pdd0)[0, 0, 0],
                              dtifit.ad[0, 0, 0], decimal=5)
    # Test that it works for cases in which the data is 1D
    dtifit = dm.fit(data[0, 0, 0])
    sphere_pdd0 = dps.Sphere(x=pdd0[0], y=pdd0[1], z=pdd0[2])
    assert_array_almost_equal(dtifit.adc(sphere_pdd0), dtifit.ad, decimal=5)
def test_TensorModel():
    """Exercise LS/WLS tensor fits on real and synthetic data."""
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)
    # Check that the multivoxel case works:
    dtifit = dm.fit(data)
    assert_equal(dtifit.fa.shape, data.shape[:3])

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(bvecs, bvals)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    # XXX Add NNLS methods!
    for fit_method in ['OLS', 'WLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method)
        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(
            tensor_fit.quadratic_form[0], tensor,
            err_msg="Calculation of tensor from Y does not compare "
                    "to analytical solution")
        assert_almost_equal(tensor_fit.md[0], md)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab,
                  fit_method='crazy_method')
def test_mapmri_odf(radial_order=6):
    """MAPMRI ODF should recover a 90-degree crossing on two spheres."""
    gtab = get_gtab_taiwan_dsi()
    # load repulsion 724 sphere
    sphere = default_sphere
    # load icosahedron sphere
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    data, golden_directions = generate_signal_crossing(gtab, l1, l2, l3,
                                                       angle2=90)
    mapmod = MapmriModel(gtab, radial_order=radial_order,
                         laplacian_regularization=True,
                         laplacian_weighting=0.01)
    # repulsion724
    sphere2 = create_unit_sphere(5)
    mapfit = mapmod.fit(data)
    odf = mapfit.odf(sphere)
    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    odf = mapfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        asmfit = mapmod.fit(data)
        odf = asmfit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)

    # for the isotropic implementation check if the odf spherical harmonics
    # actually represent the discrete sphere function.
    mapmod = MapmriModel(gtab, radial_order=radial_order,
                         laplacian_regularization=True,
                         laplacian_weighting=0.01,
                         anisotropic_scaling=False)
    mapfit = mapmod.fit(data)
    odf = mapfit.odf(sphere)
    odf_sh = mapfit.odf_sh()
    odf_from_sh = sh_to_sf(odf_sh, sphere, radial_order, basis_type=None)
    assert_almost_equal(odf, odf_from_sh, 10)
def test_odf_with_zeros():
    """A voxel whose eigenvalues are forced to zero must yield a zero ODF."""
    fdata, fbval, fbvec = get_data('small_25')
    gtab = grad.gradient_table(fbval, fbvec)
    data = nib.load(fdata).get_data()
    dm = dti.TensorModel(gtab)
    df = dm.fit(data)
    # Zero out the eigenvalues of one voxel:
    df.evals[0, 0, 0] = np.array([0, 0, 0])
    sphere = create_unit_sphere(4)
    odf = df.odf(sphere)
    npt.assert_equal(odf[0, 0, 0], np.zeros(sphere.vertices.shape[0]))
def test_interp_rbf():
    """RBF interpolation approximates a smooth spherical function and the
    euclidean norm is pending deprecation."""
    def data_func(s, a, b):
        return a * np.cos(s.theta) + b * np.sin(s.phi)

    s0 = create_unit_sphere(3)
    s1 = create_unit_sphere(4)
    for a, b in zip([1, 2, 0.5], [1, 0.5, 2]):
        data = data_func(s0, a, b)
        expected = data_func(s1, a, b)
        interp_data_a = interp_rbf(data, s0, s1, norm="angle")
        npt.assert_(np.mean(np.abs(interp_data_a - expected)) < 0.1)

    # Test that using the euclidean norm raises a warning
    # (following
    # https://docs.python.org/2/library/warnings.html#testing-warnings)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        interp_rbf(data, s0, s1, norm="euclidean_norm")
        npt.assert_(len(w) == 1)
        npt.assert_(issubclass(w[-1].category, PendingDeprecationWarning))
        npt.assert_("deprecated" in str(w[-1].message))
def test_shore_odf():
    """SHORE ODF: SH round-trip, signal reconstruction and peak recovery."""
    gtab = get_isbi2013_2shell_gtab()
    # load repulsion 724 sphere
    sphere = default_sphere
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    data, golden_directions = sticks_and_ball(
        gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
        fractions=[50, 50], snr=None)
    asm = ShoreModel(gtab, radial_order=6, zeta=700, lambdaN=1e-8,
                     lambdaL=1e-8)
    # repulsion724
    asmfit = asm.fit(data)
    odf = asmfit.odf(sphere)
    odf_sh = asmfit.odf_sh()
    odf_from_sh = sh_to_sf(odf_sh, sphere, 6, basis_type=None, legacy=True)
    npt.assert_almost_equal(odf, odf_from_sh, 10)

    # The design matrix applied to the coefficients must reproduce the
    # fitted signal:
    expected_phi = shore_matrix(radial_order=6, zeta=700, gtab=gtab)
    npt.assert_array_almost_equal(np.dot(expected_phi, asmfit.shore_coeff),
                                  asmfit.fitted_signal())

    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    npt.assert_equal(len(directions), 2)
    npt.assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)

    # 5 subdivisions
    odf = asmfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    npt.assert_equal(len(directions), 2)
    npt.assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        asmfit = asm.fit(data)
        odf = asmfit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            npt.assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            npt.assert_equal(gfa(odf) < 0.1, True)
def test_mapmri_odf(radial_order=6):
    """MAPMRI ODF on a 90-degree crossing (symmetric724 sphere variant)."""
    gtab = get_gtab_taiwan_dsi()
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    l1, l2, l3 = [0.0015, 0.0003, 0.0003]
    data, golden_directions = generate_signal_crossing(gtab, l1, l2, l3,
                                                       angle2=90)
    mapmod = MapmriModel(gtab, radial_order=radial_order,
                         laplacian_regularization=True,
                         laplacian_weighting=0.01)
    # symmetric724
    sphere2 = create_unit_sphere(5)
    mapfit = mapmod.fit(data)
    odf = mapfit.odf(sphere)
    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)

    # 5 subdivisions
    odf = mapfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        asmfit = mapmod.fit(data)
        odf = asmfit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)

    # for the isotropic implementation check if the odf spherical harmonics
    # actually represent the discrete sphere function.
    mapmod = MapmriModel(gtab, radial_order=radial_order,
                         laplacian_regularization=True,
                         laplacian_weighting=0.01,
                         anisotropic_scaling=False)
    mapfit = mapmod.fit(data)
    odf = mapfit.odf(sphere)
    odf_sh = mapfit.odf_sh()
    odf_from_sh = sh_to_sf(odf_sh, sphere, radial_order, basis_type=None)
    assert_almost_equal(odf, odf_from_sh, 10)
def test_interp_rbf():
    """RBF interpolation approximates a smooth spherical function, and
    using the euclidean norm emits a DeprecationWarning."""
    # Local imports kept function-scoped as in the original; hoisted above
    # the nested helper so everything it needs is bound before first call.
    from dipy.core.sphere import Sphere, interp_rbf
    import numpy as np

    def data_func(s, a, b):
        return a * np.cos(s.theta) + b * np.sin(s.phi)

    s0 = create_unit_sphere(3)
    s1 = create_unit_sphere(4)
    for a, b in zip([1, 2, 0.5], [1, 0.5, 2]):
        data = data_func(s0, a, b)
        expected = data_func(s1, a, b)
        interp_data_a = interp_rbf(data, s0, s1, norm="angle")
        nt.assert_(np.mean(np.abs(interp_data_a - expected)) < 0.1)

    # Test that using the euclidean norm raises a warning
    # (following
    # https://docs.python.org/2/library/warnings.html#testing-warnings)
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always")
        # Result intentionally discarded; only the warning matters here
        # (previously bound to an unused variable).
        interp_rbf(data, s0, s1, norm="euclidean_norm")
        nt.assert_(len(w) == 1)
        nt.assert_(issubclass(w[-1].category, DeprecationWarning))
        nt.assert_("deprecated" in str(w[-1].message))
def plot_tensor_3d(Tensor, cmap='jet', mode='ADC', file_name=None,
                   origin=[0,0,0], colorbar=False, figure=None, vmin=None,
                   vmax=None, offset=0, azimuth=60, elevation=90, roll=0,
                   scale_factor=1.0, rgb_pdd=False):
    """
    Render a tensor as a 3D surface.

    mode: either "ADC", "ellipse" or "pred_sig"

    NOTE(review): `origin` uses a mutable default list; safe only if
    `_display_maya_voxel` does not mutate it -- confirm.
    """
    Q = Tensor.Q
    sphere = create_unit_sphere(5)
    vertices = sphere.vertices
    faces = sphere.faces
    x, y, z = vertices.T

    # Re-sample the tensor on the full unit sphere:
    new_bvecs = np.vstack([x.ravel(), y.ravel(), z.ravel()])
    Tensor = ozt.Tensor(Q, new_bvecs,
                        Tensor.bvals[0] * np.ones(new_bvecs.shape[-1]))

    # Choose the scalar quantity to plot on the sphere:
    if mode == 'ADC':
        v = Tensor.ADC * scale_factor
    elif mode == 'ellipse':
        v = Tensor.diffusion_distance * scale_factor
    elif mode == 'pred_sig':
        v = Tensor.predicted_signal(1) * scale_factor
    else:
        raise ValueError("Mode not recognized")

    # Deform the sphere radially by v:
    r, phi, theta = geo.cart2sphere(x, y, z)
    x_plot, y_plot, z_plot = geo.sphere2cart(v, phi, theta)

    if rgb_pdd:
        # Color by the principal diffusion direction:
        evals, evecs = Tensor.decompose
        xyz = evecs[0]
        r = np.abs(xyz[0]) / np.sum(np.abs(xyz))
        g = np.abs(xyz[1]) / np.sum(np.abs(xyz))
        b = np.abs(xyz[2]) / np.sum(np.abs(xyz))
        color = (r, g, b)
    else:
        color = None

    # Call and return straightaway:
    return _display_maya_voxel(x_plot, y_plot, z_plot, faces, v, origin,
                               cmap=cmap, colorbar=colorbar, color=color,
                               figure=figure, vmin=vmin, vmax=vmax,
                               file_name=file_name, azimuth=azimuth,
                               elevation=elevation)
def test_shore_odf():
    """SHORE ODF (symmetric724 variant): SH round-trip and peak recovery."""
    gtab = get_isbi2013_2shell_gtab()
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    data, golden_directions = SticksAndBall(
        gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
        fractions=[50, 50], snr=None)
    asm = ShoreModel(gtab, radial_order=6, zeta=700, lambdaN=1e-8,
                     lambdaL=1e-8)
    # symmetric724
    asmfit = asm.fit(data)
    odf = asmfit.odf(sphere)
    odf_sh = asmfit.odf_sh()
    odf_from_sh = sh_to_sf(odf_sh, sphere, 6, basis_type=None)
    assert_almost_equal(odf, odf_from_sh, 10)

    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    odf = asmfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        asmfit = asm.fit(data)
        odf = asmfit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)
def test_dsi():
    """DSI deconvolution model recovers a 90-degree crossing."""
    # load repulsion 724 sphere
    sphere = default_sphere
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    btable = np.loadtxt(get_fnames('dsi515btable'))
    gtab = gradient_table(btable[:, 0], btable[:, 1:])
    data, golden_directions = sticks_and_ball(
        gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
        fractions=[50, 50], snr=None)
    ds = DiffusionSpectrumDeconvModel(gtab)

    # repulsion724
    dsfit = ds.fit(data)
    odf = dsfit.odf(sphere)
    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    dsfit = ds.fit(data)
    odf2 = dsfit.odf(sphere2)
    directions, _, _ = peak_directions(odf2, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    assert_equal(dsfit.pdf().shape, 3 * (ds.qgrid_size, ))

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        odf = ds.fit(data).odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)

    assert_raises(ValueError, DiffusionSpectrumDeconvModel, gtab,
                  qgrid_size=16)
def test_dsi():
    """DSI model (legacy direction_finder API) on synthetic crossings."""
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    btable = np.loadtxt(get_data('dsi515btable'))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    data, golden_directions = SticksAndBall(bvals, bvecs, d=0.0015, S0=100,
                                            angles=[(0, 0), (90, 0)],
                                            fractions=[50, 50], snr=None)
    gtab = gradient_table(bvals, bvecs)
    ds = DiffusionSpectrumModel(gtab)

    # symmetric724
    ds.direction_finder.config(sphere=sphere, min_separation_angle=25,
                               relative_peak_threshold=.35)
    dsfit = ds.fit(data)
    odf = dsfit.odf(sphere)
    directions = dsfit.directions
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    ds.direction_finder.config(sphere=sphere2, min_separation_angle=25,
                               relative_peak_threshold=.35)
    dsfit = ds.fit(data)
    odf2 = dsfit.odf(sphere2)
    directions = dsfit.directions
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    assert_equal(dsfit.pdf.shape, 3 * (ds.qgrid_size, ))

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        odf = ds.fit(data).odf(sphere2)
        directions = ds.fit(data).directions
        if len(directions) <= 3:
            assert_equal(len(ds.fit(data).directions),
                         len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(ds.fit(data).odf(sphere2)) < 0.1, True)
def test_shore_odf():
    """SHORE ODF (duplicate variant): SH round-trip and peak recovery."""
    gtab = get_isbi2013_2shell_gtab()
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    data, golden_directions = SticksAndBall(
        gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
        fractions=[50, 50], snr=None)
    asm = ShoreModel(gtab, radial_order=6, zeta=700, lambdaN=1e-8,
                     lambdaL=1e-8)
    # symmetric724
    asmfit = asm.fit(data)
    odf = asmfit.odf(sphere)
    odf_sh = asmfit.odf_sh()
    odf_from_sh = sh_to_sf(odf_sh, sphere, 6, basis_type=None)
    assert_almost_equal(odf, odf_from_sh, 10)

    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)

    # 5 subdivisions
    odf = asmfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        asmfit = asm.fit(data)
        odf = asmfit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)
def test_dsi():
    """DSI model recovers a 90-degree crossing; invalid qgrid_size raises."""
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    btable = np.loadtxt(get_data('dsi515btable'))
    gtab = gradient_table(btable[:, 0], btable[:, 1:])
    data, golden_directions = SticksAndBall(
        gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
        fractions=[50, 50], snr=None)
    ds = DiffusionSpectrumModel(gtab)

    # symmetric724
    dsfit = ds.fit(data)
    odf = dsfit.odf(sphere)
    directions, _, _ = peak_directions(odf, sphere)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    dsfit = ds.fit(data)
    odf2 = dsfit.odf(sphere2)
    directions, _, _ = peak_directions(odf2, sphere2)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    assert_equal(dsfit.pdf().shape, 3 * (ds.qgrid_size, ))

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        odf = ds.fit(data).odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)

    assert_raises(ValueError, DiffusionSpectrumModel, gtab, qgrid_size=16)
def test_gqi():
    """GQI2 model (legacy direction_finder API) on synthetic crossings."""
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    btable = np.loadtxt(get_data('dsi515btable'))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    data, golden_directions = SticksAndBall(bvals, bvecs, d=0.0015, S0=100,
                                            angles=[(0, 0), (90, 0)],
                                            fractions=[50, 50], snr=None)
    gtab = gradient_table(bvals, bvecs)
    gq = GeneralizedQSamplingModel(gtab, method='gqi2', sampling_length=1.4)

    # symmetric724
    gq.direction_finder.config(sphere=sphere, min_separation_angle=25,
                               relative_peak_threshold=.35)
    gqfit = gq.fit(data)
    odf = gqfit.odf(sphere)
    directions = gqfit.directions
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    gq.direction_finder.config(sphere=sphere2, min_separation_angle=25,
                               relative_peak_threshold=.35)
    gqfit = gq.fit(data)
    odf2 = gqfit.odf(sphere2)
    directions = gqfit.directions
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        odf = gq.fit(data).odf(sphere2)
        directions = gq.fit(data).directions
        if len(directions) <= 3:
            assert_equal(len(gq.fit(data).directions),
                         len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(gq.fit(data).odf(sphere2)) < 0.1, True)
def test_gqi():
    """GQI2 model recovers a 90-degree crossing via peak_directions."""
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    btable = np.loadtxt(get_data('dsi515btable'))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    gtab = gradient_table(bvals, bvecs)
    data, golden_directions = SticksAndBall(
        gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
        fractions=[50, 50], snr=None)
    gq = GeneralizedQSamplingModel(gtab, method='gqi2', sampling_length=1.4)

    # symmetric724
    gqfit = gq.fit(data)
    odf = gqfit.odf(sphere)
    directions, values, indices = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    gqfit = gq.fit(data)
    odf2 = gqfit.odf(sphere2)
    directions, values, indices = peak_directions(odf2, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        odf = gq.fit(data).odf(sphere2)
        directions, values, indices = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)
def test_mapmri_odf():
    """MAPMRI ODF on a two-tensor 90-degree crossing (3-shell table)."""
    gtab = get_3shell_gtab()
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    evals = np.array(([0.0017, 0.0003, 0.0003],
                      [0.0017, 0.0003, 0.0003]))
    data, golden_directions = MultiTensor(gtab, evals, S0=1.0,
                                          angles=[(0, 0), (90, 0)],
                                          fractions=[50, 50], snr=None)
    map_model = MapmriModel(gtab, radial_order=4)

    # symmetric724
    mapfit = map_model.fit(data)
    odf = mapfit.odf(sphere)
    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    odf = mapfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        mapfit = map_model.fit(data)
        odf = mapfit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)
def test_gqi():
    """GQI2 model (get_fnames variant) recovers a 90-degree crossing."""
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    btable = np.loadtxt(get_fnames('dsi515btable'))
    bvals = btable[:, 0]
    bvecs = btable[:, 1:]
    gtab = gradient_table(bvals, bvecs)
    data, golden_directions = SticksAndBall(
        gtab, d=0.0015, S0=100, angles=[(0, 0), (90, 0)],
        fractions=[50, 50], snr=None)
    gq = GeneralizedQSamplingModel(gtab, method='gqi2', sampling_length=1.4)

    # symmetric724
    gqfit = gq.fit(data)
    odf = gqfit.odf(sphere)
    directions, values, indices = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    gqfit = gq.fit(data)
    odf2 = gqfit.odf(sphere2)
    directions, values, indices = peak_directions(odf2, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        odf = gq.fit(data).odf(sphere2)
        directions, values, indices = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)
def test_mapmri_odf():
    """MAPMRI ODF on a two-tensor crossing (3-shell, duplicate variant)."""
    gtab = get_3shell_gtab()
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')
    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    evals = np.array(([0.0017, 0.0003, 0.0003],
                      [0.0017, 0.0003, 0.0003]))
    data, golden_directions = MultiTensor(
        gtab, evals, S0=1.0, angles=[(0, 0), (90, 0)],
        fractions=[50, 50], snr=None)
    map_model = MapmriModel(gtab, radial_order=4)

    # symmetric724
    mapfit = map_model.fit(data)
    odf = mapfit.odf(sphere)
    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)

    # 5 subdivisions
    odf = mapfit.odf(sphere2)
    directions, _, _ = peak_directions(odf, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(
        angular_similarity(directions, golden_directions), 2, 1)

    sb_dummies = sticks_and_ball_dummies(gtab)
    for name in sb_dummies:
        data, golden_directions = sb_dummies[name]
        mapfit = map_model.fit(data)
        odf = mapfit.odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)
def test_adc():
    """
    Test the implementation of the calculation of apparent diffusion
    coefficient
    """
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    # NOTE: a boolean mask and a unit sphere were previously constructed
    # here but never consumed by any assertion; removed as dead code.
    dtifit = dm.fit(data)
    # The ADC in the principal diffusion direction should be equal to the AD
    # in each voxel:
    pdd0 = dtifit.evecs[0, 0, 0, 0]
    sphere_pdd0 = dps.Sphere(x=pdd0[0], y=pdd0[1], z=pdd0[2])
    assert_array_almost_equal(dtifit.adc(sphere_pdd0)[0, 0, 0],
                              dtifit.ad[0, 0, 0], decimal=5)
    # Test that it works for cases in which the data is 1D
    dtifit = dm.fit(data[0, 0, 0])
    sphere_pdd0 = dps.Sphere(x=pdd0[0], y=pdd0[1], z=pdd0[2])
    assert_array_almost_equal(dtifit.adc(sphere_pdd0), dtifit.ad, decimal=5)
"""Scatter-plot the upper hemisphere of a subdivided octahedron and mark
one vertex with an arrow."""
import sys
sys.path.insert(0, '..')

import numpy as np

from sphdif import plot, coord
from dipy.core.subdivide_octahedron import create_unit_sphere

s = create_unit_sphere(3)

from mayavi import mlab
mlab.figure(bgcolor=(1, 1, 1), fgcolor=(0, 0, 0))

# Keep only points on the upper hemisphere:
mask = s.theta <= np.pi / 2.
plot.scatter_3D(s.theta[mask], s.phi[mask], color=(0, 0, 1))

# Highlight a single vertex with a red arrow:
N = 20
mlab.quiver3d([s.x[N]], [s.y[N]], [s.z[N]], color=(1, 0, 0), mode='2darrow')

plot.show()
def plot_signal_interp(bvecs, signal, origin=None, maya=True, cmap='jet',
                       file_name=None, colorbar=False, figure=None, vmin=None,
                       vmax=None, offset=0, azimuth=60, elevation=90, roll=0,
                       points=False, cmap_points=None, scale_points=False,
                       non_neg=False, interp_kwargs=None):
    """
    Interpolate a measured signal, using RBF interpolation.

    Parameters
    ----------
    signal:
    bvecs: array (3,n)
        the x,y,z locations where the signal was measured

    origin : list of 3 floats, optional
        where to place the plotted voxel. Default: [0, 0, 0].

    offset : float
        where to place the plotted voxel (on the z axis)

    points : bool
        whether to show the sampling points on the interpolated
        surface. Default: False.
    """
    # Avoid mutable default arguments (shared between calls):
    if origin is None:
        origin = [0, 0, 0]
    if interp_kwargs is None:
        interp_kwargs = dict(function='thin_plate', smooth=0)
    # Zero out NaNs *before* mirroring the signal. Previously the NaNs were
    # stacked into new_signal first and only zeroed in `signal` afterwards,
    # so NaNs leaked into the interpolation input. Note: this mutates the
    # caller's array in place, as the original did.
    signal[np.isnan(signal)] = 0
    # Mirror measurements through the origin so the RBF sees a full sphere:
    bvecs_new = np.hstack([bvecs, -bvecs])
    new_signal = np.hstack([signal, signal])
    s0 = Sphere(xyz=bvecs_new.T)
    s1 = create_unit_sphere(7)
    interp_signal = interp_rbf(new_signal, s0, s1, **interp_kwargs)
    vertices = s1.vertices
    if non_neg:
        interp_signal[interp_signal < 0] = 0
    faces = s1.faces
    x, y, z = vertices.T
    # Deform the unit sphere radially by the interpolated signal:
    r, phi, theta = geo.cart2sphere(x, y, z)
    x_plot, y_plot, z_plot = geo.sphere2cart(interp_signal, phi, theta)

    if points:
        r, phi, theta = geo.cart2sphere(s0.x, s0.y, s0.z)
        x_p, y_p, z_p = geo.sphere2cart(signal, phi, theta)
        figure = _display_maya_voxel(x_p, y_p, z_p, faces, signal, origin,
                                     cmap=cmap, colorbar=colorbar,
                                     figure=figure, vmin=vmin, vmax=vmax,
                                     file_name=file_name, azimuth=azimuth,
                                     elevation=elevation, points=True,
                                     cmap_points=cmap_points,
                                     scale_points=scale_points)

    # Call and return straightaway:
    return _display_maya_voxel(x_plot, y_plot, z_plot, faces, interp_signal,
                               origin, cmap=cmap, colorbar=colorbar,
                               figure=figure, vmin=vmin, vmax=vmax,
                               file_name=file_name, azimuth=azimuth,
                               elevation=elevation)
def test_create_unit_sphere():
    """Vertices of the recursively subdivided octahedron must all lie on
    the unit sphere."""
    sphere = create_unit_sphere(7)
    v = sphere.vertices
    # NOTE: the edges/faces arrays were previously unpacked here but never
    # used; removed. Every vertex must have unit Euclidean norm:
    assert_array_almost_equal((v * v).sum(1), 1)
"""Visualize kernel in ODF and signal space. """ from sphdif.kernel import even_kernel, inv_funk_radon_even_kernel from dipy.viz import show_odfs from dipy.core.subdivide_octahedron import create_unit_sphere sphere = create_unit_sphere(6) # sphere.z = np.dot([0, 0, 1], [x, y, z]) = cos(mu) kernel_odf = even_kernel(sphere.z, N=8) kernel_signal = inv_funk_radon_even_kernel(sphere.z, N=8) show_odfs([[[kernel_odf, kernel_signal]]], (sphere.vertices, sphere.faces))
def plot_signal_interp(bvecs, signal, origin=None, maya=True, cmap='jet',
                       file_name=None, colorbar=False, figure=None, vmin=None,
                       vmax=None, offset=0, azimuth=60, elevation=90, roll=0,
                       points=False, cmap_points=None, scale_points=False,
                       non_neg=False, interp_kwargs=None):
    """
    Interpolate a measured signal, using RBF interpolation.

    Parameters
    ----------
    signal:
    bvecs: array (3,n)
        the x,y,z locations where the signal was measured

    origin : list of 3 floats, optional
        where to place the plotted voxel. Default: [0, 0, 0].

    offset : float
        where to place the plotted voxel (on the z axis)

    points : bool
        whether to show the sampling points on the interpolated
        surface. Default: False.
    """
    # Avoid mutable default arguments (shared between calls):
    if origin is None:
        origin = [0, 0, 0]
    if interp_kwargs is None:
        interp_kwargs = dict(function='thin_plate', smooth=0)
    # Zero out NaNs *before* mirroring the signal; previously NaNs were
    # stacked into new_signal first and only zeroed afterwards, so they
    # leaked into the interpolation input. This mutates the caller's
    # array in place, as the original did.
    signal[np.isnan(signal)] = 0
    # Mirror the measurements through the origin for full-sphere coverage:
    bvecs_new = np.hstack([bvecs, -bvecs])
    new_signal = np.hstack([signal, signal])
    s0 = Sphere(xyz=bvecs_new.T)
    s1 = create_unit_sphere(7)
    interp_signal = interp_rbf(new_signal, s0, s1, **interp_kwargs)
    vertices = s1.vertices
    if non_neg:
        interp_signal[interp_signal < 0] = 0
    faces = s1.faces
    x, y, z = vertices.T
    r, phi, theta = geo.cart2sphere(x, y, z)
    x_plot, y_plot, z_plot = geo.sphere2cart(interp_signal, phi, theta)

    if points:
        r, phi, theta = geo.cart2sphere(s0.x, s0.y, s0.z)
        x_p, y_p, z_p = geo.sphere2cart(signal, phi, theta)
        figure = _display_maya_voxel(x_p, y_p, z_p, faces, signal, origin,
                                     cmap=cmap, colorbar=colorbar,
                                     figure=figure, vmin=vmin, vmax=vmax,
                                     file_name=file_name, azimuth=azimuth,
                                     elevation=elevation, points=True,
                                     cmap_points=cmap_points,
                                     scale_points=scale_points)

    # Call and return straightaway:
    return _display_maya_voxel(x_plot, y_plot, z_plot, faces, interp_signal,
                               origin, cmap=cmap, colorbar=colorbar,
                               figure=figure, vmin=vmin, vmax=vmax,
                               file_name=file_name, azimuth=azimuth,
                               elevation=elevation)
def test_tensor_model():
    """Exercise tensor fitting on real data and an analytically known
    synthetic tensor, for OLS/WLS/NLLS and a custom fit method."""
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)
        # Check that it works on signal that has already been normalized
        # to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0] /
                             np.mean(data[0, 0, 0, gtab.b0s_mask]))
            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0],
                                    dtifit_to_relative.fa, decimal=3)

    # And smoke-test that all these operations return sensibly-shaped
    # arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)),
                  np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = (3 * np.sqrt(6) *
            np.linalg.det(A_squiggle / np.linalg.norm(A_squiggle)))
    evals_eigh, evecs_eigh = np.linalg.eigh(tensor)
    # Sort according to eigen-value from large to small:
    evecs = evecs_eigh[:, np.argsort(evals_eigh)[::-1]]
    # Check that eigenvalues and eigenvectors are properly sorted through
    # that previous operation:
    for i in range(3):
        assert_array_almost_equal(np.dot(tensor, evecs[:, i]),
                                  evals[i] * evecs[:, i])

    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method,
                                       return_S0_hat=True)
        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(tensor_fit.S0_hat, b0, decimal=3)
        # Test that the eigenvectors are correct, one-by-one:
        for i in range(3):
            # Eigenvectors have intrinsic sign ambiguity
            # (see
            # http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf)
            # so we need to allow for sign flips. One of the following
            # should always be true:
            assert_(
                np.all(np.abs(tensor_fit.evecs[0][:, i] -
                              evecs[:, i]) < 10e-6) or
                np.all(np.abs(-tensor_fit.evecs[0][:, i] -
                              evecs[:, i]) < 10e-6))
            # We set a fixed tolerance of 10e-6, similar to
            # array_almost_equal

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=err_msg)
        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab,
                  fit_method='crazy_method')

    # Test custom fit tensor method
    try:
        model = dti.TensorModel(gtab, fit_method=lambda *args, **kwargs: 42)
        fit = model.fit_method()
    except Exception as exc:
        assert False, "TensorModel should accept custom fit methods: %s" % exc
    assert fit == 42, "Custom fit method for TensorModel returned %s." % fit

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Return S0_test
    tensor_model = dti.TensorModel(gtab, return_S0_hat=True)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)
    assert_array_almost_equal(fit[0].S0_hat, b0)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
def test_tensor_model():
    """Fit TensorModel on real and synthetic data; check fits and shapes.

    NOTE(review): ``test_tensor_model`` is defined twice in this module;
    the later duplicate definition shadows this one at import time, so
    only one of the two actually runs under the test collector. They
    should be consolidated (or renamed) in a follow-up.
    """
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized
        # to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0] /
                             np.mean(data[0, 0, 0, gtab.b0s_mask]))
            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0],
                                    dtifit_to_relative.fa, decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Tensor "mode" from the norm-scaled deviatoric (anisotropic) part:
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(
        A_squiggle / np.linalg.norm(A_squiggle))
    # (removed an unused `evecs = np.linalg.eigh(tensor)[1]` local)

    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1, ) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method)
        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(
            tensor_fit.quadratic_form[0], tensor,
            err_msg="Calculation of tensor from Y does not compare to "
                    "analytical solution")
        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab,
                  fit_method='crazy_method')

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
def test_TensorModel():
    """Fit TensorModel on DSI-voxel and synthetic data; check the results.

    NOTE(review): ``test_TensorModel`` is defined twice in this module;
    the later duplicate definition shadows this one at import time, so
    only one of the two actually runs under the test collector.
    """
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    # FA of the tensor ODF should roughly match its GFA:
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)

    # Check that the multivoxel case works:
    dtifit = dm.fit(data)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Tensor "mode" from the norm-scaled deviatoric (anisotropic) part:
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(
        A_squiggle / np.linalg.norm(A_squiggle))
    # (removed an unused `evecs = np.linalg.eigh(tensor)[1]` local)

    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method)
        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(
            tensor_fit.quadratic_form[0], tensor,
            err_msg="Calculation of tensor from Y does not compare to "
                    "analytical solution")
        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab,
                  fit_method='crazy_method')
def test_create_unit_sphere():
    """All vertices returned by create_unit_sphere lie on the unit sphere."""
    sphere = create_unit_sphere(7)
    v = sphere.vertices
    # Squared norm of every vertex should be 1 (removed unused
    # `edges`/`faces` locals the original also unpacked here):
    assert_array_almost_equal((v * v).sum(1), 1)
def plot_tensor_3d(Tensor, cmap='jet', mode='ADC', file_name=None,
                   origin=(0, 0, 0), colorbar=False, figure=None, vmin=None,
                   vmax=None, offset=0, azimuth=60, elevation=90, roll=0,
                   scale_factor=1.0, rgb_pdd=False):
    """
    Render a 3-D glyph of a tensor via mayavi.

    mode: either "ADC", "ellipse" or "pred_sig"

    Notes
    -----
    - ``origin`` default was a mutable list; changed to an immutable tuple
      (it is only forwarded to ``_display_maya_voxel``, so behavior is
      unchanged and the shared-mutable-default pitfall is avoided).
    - ``offset`` and ``roll`` are currently accepted but never used in
      the body; kept for interface compatibility.
    """
    Q = Tensor.Q
    sphere = create_unit_sphere(5)
    vertices = sphere.vertices
    faces = sphere.faces
    x, y, z = vertices.T

    # Re-evaluate the tensor on the sphere's directions, with a uniform
    # b value taken from the input tensor:
    new_bvecs = np.vstack([x.ravel(), y.ravel(), z.ravel()])
    Tensor = ozt.Tensor(Q, new_bvecs,
                        Tensor.bvals[0] * np.ones(new_bvecs.shape[-1]))

    # Scalar to display on the sphere, per the requested mode:
    if mode == 'ADC':
        v = Tensor.ADC * scale_factor
    elif mode == 'ellipse':
        v = Tensor.diffusion_distance * scale_factor
    elif mode == 'pred_sig':
        v = Tensor.predicted_signal(1) * scale_factor
    else:
        raise ValueError("Mode not recognized")

    # Deform the unit sphere radially by v:
    r, phi, theta = geo.cart2sphere(x, y, z)
    x_plot, y_plot, z_plot = geo.sphere2cart(v, phi, theta)

    if rgb_pdd:
        # Color by the principal diffusion direction (L1-normalized
        # absolute components of the first eigenvector).
        # NOTE(review): `r` here shadows the radius from cart2sphere
        # above; harmless because that radius is not used again.
        evals, evecs = Tensor.decompose
        xyz = evecs[0]
        r = np.abs(xyz[0]) / np.sum(np.abs(xyz))
        g = np.abs(xyz[1]) / np.sum(np.abs(xyz))
        b = np.abs(xyz[2]) / np.sum(np.abs(xyz))
        color = (r, g, b)
    else:
        color = None

    # Call and return straightaway:
    return _display_maya_voxel(x_plot, y_plot, z_plot, faces, v, origin,
                               cmap=cmap, colorbar=colorbar, color=color,
                               figure=figure, vmin=vmin, vmax=vmax,
                               file_name=file_name, azimuth=azimuth,
                               elevation=elevation)
def test_tensor_model():
    """Fit TensorModel on 'small_25' and synthetic data; check the results.

    NOTE(review): this redefines ``test_tensor_model`` — an earlier
    definition with the same name exists in this module and is silently
    shadowed by this one at import time. The two should be consolidated
    (or renamed) so both actually run.
    """
    fdata, fbval, fbvec = get_data('small_25')
    data = nib.load(fdata).get_data()
    gtab = grad.gradient_table(fbval, fbvec)
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.9, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.9, True)
    assert_equal(dtifit.fa > 0, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))

    # Check that the multivoxel case works:
    dtifit = dm.fit(data)

    # Check that it works on signal that has already been normalized to S0:
    dm_to_relative = dti.TensorModel(gtab)
    relative_data = (data[0, 0, 0] / np.mean(data[0, 0, 0, gtab.b0s_mask]))
    dtifit_to_relative = dm_to_relative.fit(relative_data)
    npt.assert_almost_equal(dtifit.fa[0, 0, 0], dtifit_to_relative.fa,
                            decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Tensor "mode" from the norm-scaled deviatoric (anisotropic) part:
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(
        A_squiggle / np.linalg.norm(A_squiggle))
    # (removed an unused `evecs = np.linalg.eigh(tensor)[1]` local)

    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method)
        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(
            tensor_fit.quadratic_form[0], tensor,
            err_msg="Calculation of tensor from Y does not compare to "
                    "analytical solution")
        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab,
                  fit_method='crazy_method')

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
def test_TensorModel():
    """Fit TensorModel on DSI-voxel and synthetic data; check the results.

    NOTE(review): this redefines ``test_TensorModel`` — an earlier
    definition with the same name exists in this module and is silently
    shadowed by this one at import time. The two should be consolidated
    (or renamed) so both actually run.
    """
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    # FA of the tensor ODF should roughly match its GFA:
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)

    # Check that the multivoxel case works:
    dtifit = dm.fit(data)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Tensor "mode" from the norm-scaled deviatoric (anisotropic) part:
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(
        A_squiggle / np.linalg.norm(A_squiggle))
    # (removed an unused `evecs = np.linalg.eigh(tensor)[1]` local)

    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1, ) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method)
        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(
            tensor_fit.quadratic_form[0], tensor,
            err_msg="Calculation of tensor from Y does not compare to "
                    "analytical solution")
        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab,
                  fit_method='crazy_method')