Example #1
def test_nlls_jacobian_func():
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))

    # Test Jacobian at D
    args = [X, Y]
    analytical = dti._nlls_jacobian_func(D, *args)
    for i in range(len(X)):
        args = [X[i], Y[i]]
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)
        assert_true(np.allclose(approx, analytical[i]))

    # Test Jacobian at zero
    D = np.zeros_like(D)
    args = [X, Y]
    analytical = dti._nlls_jacobian_func(D, *args)
    for i in range(len(X)):
        args = [X[i], Y[i]]
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)
        assert_true(np.allclose(approx, analytical[i]))
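The test above checks dipy's analytical Jacobian (dti._nlls_jacobian_func) row by row against a finite-difference estimate from scipy.optimize.approx_fprime. A minimal, self-contained sketch of the same pattern, using a toy error function rather than dipy's, looks like this:

import numpy as np
from scipy import optimize as opt

def err(p, x, y):
    # scalar squared error of a linear model at one measurement
    return (np.dot(x, p) - y) ** 2

def err_grad(p, x, y):
    # analytical gradient of `err` with respect to the parameters p
    return 2.0 * (np.dot(x, p) - y) * x

rng = np.random.default_rng(0)
p = rng.normal(size=3)
x = rng.normal(size=3)
y = 0.5

# approx_fprime(xk, f, epsilon, *args) perturbs each parameter in turn
approx = opt.approx_fprime(p, err, 1e-8, x, y)
assert np.allclose(approx, err_grad(p, x, y), atol=1e-5)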
Example #3
def test_passing_maskedview():
    data = np.ones((2, 4, 56))
    mask = np.array([[True, False, False, True], [True, False, True, False]])

    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))

    data = data[mask]
    mv = MaskedView(mask, data)

    tensor = dti.Tensor(mv, bval, gtab.T, min_signal=1e-9)
    assert_equal(tensor.shape, (2, 4))
    assert_equal(tensor.fa().shape, (2, 4))
    assert_equal(tensor.evals.shape, (2, 4, 3))
    assert_equal(tensor.evecs.shape, (2, 4, 3, 3))
    assert_equal(type(tensor.model_params), MaskedView)
    assert_array_equal(tensor.mask, mask)

    tensor = tensor[0]
    assert_equal(tensor.shape, (4, ))
    assert_equal(tensor.fa().shape, (4, ))
    assert_equal(tensor.evals.shape, (4, 3))
    assert_equal(tensor.evecs.shape, (4, 3, 3))
    assert_equal(type(tensor.model_params), MaskedView)
    assert_array_equal(tensor.mask, mask[0])

    tensor = tensor[0]
    assert_equal(tensor.shape, tuple())
    assert_equal(tensor.fa().shape, tuple())
    assert_equal(tensor.evals.shape, (3, ))
    assert_equal(tensor.evecs.shape, (3, 3))
    assert_equal(type(tensor.model_params), np.ndarray)
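For context on the masking above: indexing a volume with a boolean mask is plain NumPy, and it is what produces the flat per-voxel array handed to MaskedView. A small sketch:

import numpy as np

data = np.ones((2, 4, 56))
mask = np.array([[True, False, False, True],
                 [True, False, True, False]])

masked = data[mask]              # shape (4, 56): one row per True voxel
assert masked.shape == (int(mask.sum()), 56)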
Example #4
def test_passing_maskedview():
    data = np.ones((2,4,56))
    mask = np.array([[True, False, False, True],
                     [True, False, True, False]])

    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))

    data = data[mask]
    mv = MaskedView(mask, data)

    tensor = dti.Tensor(mv, bval, gtab.T, min_signal=1e-9)
    assert_equal(tensor.shape, (2,4))
    assert_equal(tensor.fa().shape, (2,4))
    assert_equal(tensor.evals.shape, (2,4,3))
    assert_equal(tensor.evecs.shape, (2,4,3,3))
    assert_equal(type(tensor.model_params), MaskedView)
    assert_array_equal(tensor.mask, mask)

    tensor = tensor[0]
    assert_equal(tensor.shape, (4,))
    assert_equal(tensor.fa().shape, (4,))
    assert_equal(tensor.evals.shape, (4,3))
    assert_equal(tensor.evecs.shape, (4,3,3))
    assert_equal(type(tensor.model_params), MaskedView)
    assert_array_equal(tensor.mask, mask[0])

    tensor = tensor[0]
    assert_equal(tensor.shape, tuple())
    assert_equal(tensor.fa().shape, tuple())
    assert_equal(tensor.evals.shape, (3,))
    assert_equal(tensor.evecs.shape, (3,3))
    assert_equal(type(tensor.model_params), np.ndarray)
Example #5
def test_all_zeros():
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    fit_methods = ['LS', 'OLS', 'NNLS', 'RESTORE']
    for fit_method in fit_methods:
        dm = dti.TensorModel(gtab)
        assert_raises(ValueError, dm.fit, np.zeros(bvals.shape[0]))
Example #6
def test_masked_array_with_Tensor():
    data = np.ones((2,4,56))
    mask = np.array([[True, False, False, True],
                     [True, False, True, False]])

    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    
    tensor = dti.Tensor(data, bval, gtab.T, mask=mask, min_signal=1e-9)
    yield assert_equal(tensor.shape, (2,4))
    yield assert_equal(tensor.fa().shape, (2,4))
    yield assert_equal(tensor.evals.shape, (2,4,3))
    yield assert_equal(tensor.evecs.shape, (2,4,3,3))
    yield assert_equal(type(tensor.model_params), MaskedView)
    yield assert_array_equal(tensor.mask, mask)

    tensor = tensor[0]
    yield assert_equal(tensor.shape, (4,))
    yield assert_equal(tensor.fa().shape, (4,))
    yield assert_equal(tensor.evals.shape, (4,3))
    yield assert_equal(tensor.evecs.shape, (4,3,3))
    yield assert_equal(type(tensor.model_params), MaskedView)
    yield assert_array_equal(tensor.mask, mask[0])

    tensor = tensor[0]
    yield assert_equal(tensor.shape, tuple())
    yield assert_equal(tensor.fa().shape, tuple())
    yield assert_equal(tensor.evals.shape, (3,))
    yield assert_equal(tensor.evecs.shape, (3,3))
    yield assert_equal(type(tensor.model_params), np.ndarray)
Example #8
def test_masked_array_with_tensor():
    data = np.ones((2, 4, 56))
    mask = np.array([[True, False, False, True],
                     [True, False, True, False]])

    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)

    tensor_model = TensorModel(gtab)
    tensor = tensor_model.fit(data, mask=mask)
    assert_equal(tensor.shape, (2, 4))
    assert_equal(tensor.fa.shape, (2, 4))
    assert_equal(tensor.evals.shape, (2, 4, 3))
    assert_equal(tensor.evecs.shape, (2, 4, 3, 3))

    tensor = tensor[0]
    assert_equal(tensor.shape, (4,))
    assert_equal(tensor.fa.shape, (4,))
    assert_equal(tensor.evals.shape, (4, 3))
    assert_equal(tensor.evecs.shape, (4, 3, 3))

    tensor = tensor[0]
    assert_equal(tensor.shape, tuple())
    assert_equal(tensor.fa.shape, tuple())
    assert_equal(tensor.evals.shape, (3,))
    assert_equal(tensor.evecs.shape, (3, 3))
    assert_equal(type(tensor.model_params), np.ndarray)
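The same masked-fit pattern can be reproduced on purely synthetic inputs. A sketch, assuming dipy is installed with the usual module paths (dipy.core.gradients.gradient_table, dipy.reconst.dti.TensorModel); the b-values, directions, and tiny data array below are made up for illustration:

import numpy as np
import dipy.reconst.dti as dti
from dipy.core.gradients import gradient_table

# Six distinct diffusion directions plus one b=0 volume
dirs = np.array([[1., 1., 0.], [1., -1., 0.], [1., 0., 1.],
                 [1., 0., -1.], [0., 1., 1.], [0., 1., -1.]])
dirs /= np.linalg.norm(dirs, axis=1, keepdims=True)
bvals = np.concatenate([[0.], np.full(6, 1000.)])
bvecs = np.vstack([np.zeros(3), dirs])
gtab = gradient_table(bvals, bvecs)

data = np.ones((2, 4, 7))            # 2x4 "image", 7 volumes per voxel
mask = np.zeros((2, 4), dtype=bool)
mask[0, 0] = True                    # fit a single voxel only

fit = dti.TensorModel(gtab).fit(data, mask=mask)
print(fit.fa.shape)                  # (2, 4); parameters stay zero outside the mask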
Example #9
def test_all_zeros():
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    fit_methods = ['LS', 'OLS', 'NNLS', 'RESTORE']
    for _ in fit_methods:
        dm = dti.TensorModel(gtab)
        assert_array_almost_equal(dm.fit(np.zeros(bvals.shape[0])).evals, 0)
Example #11
def test_restore():
     """
     Test the implementation of the RESTORE algorithm
     """
     b0 = 1000.
     bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
     gtab = grad.gradient_table(bval, bvecs)
     B = bval[1]

     #Scale the eigenvalues and tensor by the B value so the units match
     D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
     evals = np.array([2., 1., 0.]) / B
     md = evals.mean()
     tensor = from_lower_triangular(D)

     #Design Matrix
     X = dti.design_matrix(gtab)

     #Signals
     Y = np.exp(np.dot(X,D))
     Y.shape = (-1,) + Y.shape
     for drop_this in range(1, Y.shape[-1]):
         # RESTORE estimates should be robust to dropping
         this_y = Y.copy()
         this_y[:, drop_this] = 1.0
         tensor_model = dti.TensorModel(gtab, fit_method='restore',
                                        sigma=67.0)

         tensor_est = tensor_model.fit(this_y)
         assert_array_almost_equal(tensor_est.evals[0], evals, decimal=3)
         assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                   decimal=3)
Example #12
def test_all_zeros():
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    fit_methods = ['LS', 'OLS', 'NNLS', 'RESTORE']
    for fit_method in fit_methods:
        dm = dti.TensorModel(gtab)
        assert_array_almost_equal(dm.fit(np.zeros(bvals.shape[0])).evals, 0)
Example #13
def test_all_constant():
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    fit_methods = ['LS', 'OLS', 'NNLS']
    for fit_method in fit_methods:
        dm = dti.TensorModel(gtab)
        assert_almost_equal(dm.fit(np.zeros(bvals.shape[0])).fa, 0)
        assert_almost_equal(dm.fit(100 * np.ones(bvals.shape[0])).fa, 0)
Example #14
def test_TensorModel():
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)

    # Check that the multivoxel case works:
    dtifit = dm.fit(data)
    assert_equal(dtifit.fa.shape, data.shape[:3])

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    #Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    evecs = np.linalg.eigh(tensor)[1]
    #Design Matrix
    X = dti.design_matrix(bvecs, bvals)
    #Signals
    Y = np.exp(np.dot(X,D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods: #XXX Add NNLS methods!
    for fit_method in ['OLS', 'WLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        assert_array_almost_equal(
            tensor_fit.quadratic_form[0], tensor,
            err_msg="Calculation of tensor from Y does not compare to "
                    "analytical solution")

        assert_almost_equal(tensor_fit.md[0], md)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')
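A side note on the quantities asserted above: md is just the mean of the eigenvalues, and fractional anisotropy follows the standard DTI formula. Computing both directly from the evals = [2., 1., 0.] / B used throughout these tests (plain NumPy, not dipy code, with a placeholder B):

import numpy as np

B = 1500.0                                   # placeholder; the tests use bval[1]
evals = np.array([2., 1., 0.]) / B
md = evals.mean()
fa = np.sqrt(0.5 * ((evals[0] - evals[1]) ** 2 +
                    (evals[1] - evals[2]) ** 2 +
                    (evals[2] - evals[0]) ** 2) / np.sum(evals ** 2))
print(md, fa)                                # fa ~ 0.775, independent of B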
Example #15
def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    ### Defining Test Voxel (avoid nibabel dependency) ###

    #Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    #Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    #Design Matrix
    X = dti.design_matrix(bvec, bval)
    #Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    gtab = grad.gradient_table(bval, bvec)

    ### Testing WLS Fit on Single Voxel ###
    #Estimate tensor from test signals
    model = TensorModel(gtab, min_signal=1e-8, fit_method='WLS')
    tensor_est = model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                              err_msg="Calculation of tensor from Y does not "
                                       "compare to analytical solution")
    assert_almost_equal(tensor_est.md[0], md)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, min_signal=1e-8, fit_method='LS')
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
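The D vector above packs the tensor components plus the log-signal term, and from_lower_triangular turns the first six entries back into a symmetric 3x3 matrix. A plain-NumPy sketch following the component ordering stated in the test's own comment ([Dxx, Dyy, Dzz, Dxy, Dxz, Dyz, ...], the same construction Example #23 spells out), showing that the eigenvalues come out to the expected [2, 1, 0] / B:

import numpy as np

b0, B = 1000., 1500.                 # placeholder B; the tests use bval[1]
D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B

tensor = np.zeros((3, 3))
tensor[0, 0], tensor[1, 1], tensor[2, 2] = D[0], D[1], D[2]
tensor[0, 1] = tensor[1, 0] = D[3]
tensor[0, 2] = tensor[2, 0] = D[4]
tensor[1, 2] = tensor[2, 1] = D[5]

evals = np.sort(np.linalg.eigvalsh(tensor))[::-1]
print(np.allclose(evals, np.array([2., 1., 0.]) / B))   # True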
Example #16
def test_all_constant():
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    fit_methods = ['LS', 'OLS', 'NNLS', 'RESTORE']
    for fit_method in fit_methods:
        dm = dti.TensorModel(gtab)
        assert_almost_equal(dm.fit(100 * np.ones(bvals.shape[0])).fa, 0)
        # Doesn't matter if the signal is smaller than 1:
        assert_almost_equal(dm.fit(0.4 * np.ones(bvals.shape[0])).fa, 0)
Example #17
def test_all_constant():
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    fit_methods = ['LS', 'OLS', 'NNLS', 'RESTORE']
    for _ in fit_methods:
        dm = dti.TensorModel(gtab)
        assert_almost_equal(dm.fit(100 * np.ones(bvals.shape[0])).fa, 0)
        # Doesn't matter if the signal is smaller than 1:
        assert_almost_equal(dm.fit(0.4 * np.ones(bvals.shape[0])).fa, 0)
Example #18
def test_fit_method_error():
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)

    # This should work (smoke-testing!):
    TensorModel(gtab, fit_method='WLS')

    # This should raise an error because there is no such fit_method
    assert_raises(ValueError, TensorModel, gtab, min_signal=1e-9,
                  fit_method='s')
Example #20
def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    ### Defining Test Voxel (avoid nibabel dependency) ###

    #Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    #Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    #Design Matrix
    X = dti.design_matrix(gtab, bval)
    #Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1, ) + Y.shape

    ### Testing WLS Fit on Single Voxel ###
    #Estimate tensor from test signals
    tensor_est = dti.Tensor(Y, bval, gtab.T, min_signal=1e-8)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(
        tensor_est.D[0], tensor,
        err_msg="Calculation of tensor from Y does not compare to "
                "analytical solution")
    assert_almost_equal(tensor_est.md()[0], md)

    #test 0d tensor
    y = Y[0]
    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.D, tensor)
    assert_almost_equal(tensor_est.md(), md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8, fit_method='LS')
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.D, tensor)
    assert_almost_equal(tensor_est.md(), md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
Example #21
def test_all_constant():
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    fit_methods = ['LS', 'OLS', 'NNLS']
    for fit_method in fit_methods:
        dm = dti.TensorModel(gtab)
        assert_almost_equal(dm.fit(np.zeros(bvals.shape[0])).fa, 0)
        assert_almost_equal(dm.fit(100 * np.ones(bvals.shape[0])).fa, 0)
Example #22
def test_init():
    data = np.ones((2, 4, 56))
    mask = np.ones((2, 4), 'bool')

    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    tensor = dti.Tensor(data, bval, gtab.T, mask, thresh=0)
    mask[:] = False
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T, mask)
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T, min_signal=-1)
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T, thresh=1)
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T, fit_method='s')
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T, fit_method=0)
Example #23
def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """
    
    ### Defining Test Voxel (avoid nibabel dependency) ###

    #Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s 
    D = np.array([1., 1., 1., 1., 0., 0., np.log(1000) * 10.**4]) * 10.**-4
    evals = np.array([2., 1., 0.]) * 10.**-4
    md = evals.mean()
    tensor = np.empty((3,3))
    tensor[0, 0] = D[0]
    tensor[1, 1] = D[1]
    tensor[2, 2] = D[2]
    tensor[0, 1] = tensor[1, 0] = D[3]
    tensor[0, 2] = tensor[2, 0] = D[4]
    tensor[1, 2] = tensor[2, 1] = D[5]
    #Design Matrix
    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    X = dti.design_matrix(gtab, bval)
    #Signals
    Y = np.exp(np.dot(X,D))
    Y.shape = (-1,) + Y.shape
    
    ### Testing WLS Fit on Single Voxel ###
    #Estimate tensor from test signals
    tensor_est = dti.Tensor(Y, bval, gtab.T, min_signal=1e-8)
    yield assert_equal(tensor_est.shape, Y.shape[:-1])
    yield assert_array_almost_equal(tensor_est.evals[0], evals)
    yield assert_array_almost_equal(
        tensor_est.D[0], tensor,
        err_msg="Calculation of tensor from Y does not compare to analytical "
                "solution")
    yield assert_almost_equal(tensor_est.md()[0], md)

    #test 0d tensor
    y = Y[0]
    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8)
    yield assert_equal(tensor_est.shape, tuple())
    yield assert_array_almost_equal(tensor_est.evals, evals)
    yield assert_array_almost_equal(tensor_est.D, tensor)
    yield assert_almost_equal(tensor_est.md(), md)

    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8, fit_method='LS')
    yield assert_equal(tensor_est.shape, tuple())
    yield assert_array_almost_equal(tensor_est.evals, evals)
    yield assert_array_almost_equal(tensor_est.D, tensor)
    yield assert_almost_equal(tensor_est.md(), md)
Example #24
def test_restore():
    """
    Test the implementation of the RESTORE algorithm
    """
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    tensor = from_lower_triangular(D)

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))
    Y.shape = (-1, ) + Y.shape
    for drop_this in range(1, Y.shape[-1]):
        for jac in [True, False]:
            # RESTORE estimates should be robust to dropping
            this_y = Y.copy()
            this_y[:, drop_this] = 1.0
            for sigma in [67.0, np.ones(this_y.shape[-1]) * 67.0]:
                tensor_model = dti.TensorModel(gtab,
                                               fit_method='restore',
                                               jac=jac,
                                               sigma=67.0)

                tensor_est = tensor_model.fit(this_y)
                assert_array_almost_equal(tensor_est.evals[0],
                                          evals,
                                          decimal=3)
                assert_array_almost_equal(tensor_est.quadratic_form[0],
                                          tensor,
                                          decimal=3)

    # If sigma is very small, it still needs to work:
    tensor_model = dti.TensorModel(gtab, fit_method='restore', sigma=0.0001)
    tensor_model.fit(Y.copy())

    # Test return_S0_hat
    tensor_model = dti.TensorModel(gtab,
                                   fit_method='restore',
                                   sigma=0.0001,
                                   return_S0_hat=True)
    tmf = tensor_model.fit(Y.copy())
    assert_almost_equal(tmf[0].S0_hat, b0)
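The loop above corrupts one diffusion-weighted volume at a time and checks that the RESTORE fit is barely affected. The sketch below is not dipy's RESTORE implementation; it only illustrates the underlying idea of down-weighting residuals that are large relative to an assumed noise level sigma, here for an ordinary 1-D linear fit with one planted outlier:

import numpy as np

rng = np.random.default_rng(0)
x = np.linspace(0., 1., 30)
y = 2.0 * x + 0.5 + rng.normal(scale=0.01, size=x.size)
y[7] = 10.0                                   # one corrupted measurement

A = np.column_stack([x, np.ones_like(x)])
sigma = 0.05                                  # assumed noise level
w = np.ones_like(y)
for _ in range(20):
    # weighted least squares: scaling the rows applies weight w**2 to the loss
    coef, *_ = np.linalg.lstsq(A * w[:, None], y * w, rcond=None)
    resid = y - A @ coef
    w = 1.0 / np.sqrt(1.0 + (resid / sigma) ** 2)   # down-weight big residuals

print(coef)     # close to the true [2.0, 0.5] despite the outlier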
Example #25
def test_nlls_fit_tensor():
     """
     Test the implementation of NLLS and RESTORE
     """

     b0 = 1000.
     bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
     gtab = grad.gradient_table(bval, bvecs)
     B = bval[1]

     #Scale the eigenvalues and tensor by the B value so the units match
     D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
     evals = np.array([2., 1., 0.]) / B
     md = evals.mean()
     tensor = from_lower_triangular(D)

     #Design Matrix
     X = dti.design_matrix(bvecs, bval)

     #Signals
     Y = np.exp(np.dot(X,D))
     Y.shape = (-1,) + Y.shape

     #Estimate tensor from test signals and compare against expected result
     #using non-linear least squares:
     tensor_model = dti.TensorModel(gtab, fit_method='NLLS')
     tensor_est = tensor_model.fit(Y)
     assert_equal(tensor_est.shape, Y.shape[:-1])
     assert_array_almost_equal(tensor_est.evals[0], evals)
     assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
     assert_almost_equal(tensor_est.md[0], md)

     # Using the gmm weighting scheme:
     tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='gmm')
     assert_equal(tensor_est.shape, Y.shape[:-1])
     assert_array_almost_equal(tensor_est.evals[0], evals)
     assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
     assert_almost_equal(tensor_est.md[0], md)

     # Use NLLS with some actual 4D data:
     data, bvals, bvecs = get_data('small_25')
     gtab = grad.gradient_table(bvals, bvecs)
     tm1 = dti.TensorModel(gtab, fit_method='NLLS')
     dd = nib.load(data).get_data()
     tf1 = tm1.fit(dd)
     tm2 = dti.TensorModel(gtab)
     tf2 = tm2.fit(dd)

     assert_array_almost_equal(tf1.fa, tf2.fa, decimal=1)
Example #27
def test_init():
    data = np.ones((2,4,56))
    mask = np.ones((2,4),'bool')

    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    tensor = dti.Tensor(data, bval, gtab.T, mask, thresh=0)
    mask[:] = False
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T, mask)
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T,
                        min_signal=-1)
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T, thresh=1)
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T,
                        fit_method='s')
    assert_raises(ValueError, dti.Tensor, data, bval, gtab.T,
                        fit_method=0)
Example #28
def sample_hardi_data():
    """High Angular Resulution Diffusion Imaging data with 150 diffusion
    directions
    """
    data_img = load(pjoin(THIS_DIR, 'E1381S6_edcor.nii.gz'))
    data = data_img.get_data()
    aff = data_img.get_affine()
    data_hdr = data_img.get_header()
    voxel_size = data_hdr.get_zooms()
    voxel_size = voxel_size[:3]

    fa_img = load(pjoin(THIS_DIR, 'E1381S6_edcor_fa.nii.gz'))
    fa = fa_img.get_data()

    bvec, bval = read_bvec_file(pjoin(THIS_DIR, 'E1381S6_edcor.bval'))

    return data, fa, bvec, bval, voxel_size
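A small aside: get_data(), get_affine(), and get_header() are the old nibabel accessors used when this helper was written. With current nibabel the same information is read roughly as follows (a sketch, assuming nibabel >= 3 and the same file layout; THIS_DIR as defined in the surrounding module):

import nibabel as nib
from os.path import join as pjoin

img = nib.load(pjoin(THIS_DIR, 'E1381S6_edcor.nii.gz'))   # path from the example
data = img.get_fdata()                      # replaces img.get_data()
aff = img.affine                            # replaces img.get_affine()
voxel_size = img.header.get_zooms()[:3]     # replaces img.get_header().get_zooms()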
Example #30
def test_restore():
    """
    Test the implementation of the RESTORE algorithm
    """
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    tensor = from_lower_triangular(D)

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))
    Y.shape = (-1,) + Y.shape
    for drop_this in range(1, Y.shape[-1]):
        for jac in [True, False]:
            # RESTORE estimates should be robust to dropping
            this_y = Y.copy()
            this_y[:, drop_this] = 1.0
            for _ in [67.0, np.ones(this_y.shape[-1]) * 67.0]:
                tensor_model = dti.TensorModel(gtab, fit_method='restore',
                                               jac=jac,
                                               sigma=67.0)

                tensor_est = tensor_model.fit(this_y)
                assert_array_almost_equal(tensor_est.evals[0], evals,
                                          decimal=3)
                assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                          decimal=3)

    # If sigma is very small, it still needs to work:
    tensor_model = dti.TensorModel(gtab, fit_method='restore', sigma=0.0001)
    tensor_model.fit(Y.copy())

    # Test return_S0_hat
    tensor_model = dti.TensorModel(gtab, fit_method='restore', sigma=0.0001,
                                   return_S0_hat=True)
    tmf = tensor_model.fit(Y.copy())
    assert_almost_equal(tmf[0].S0_hat, b0)
Example #31
def test_restore():
     """
     Test the implementation of the RESTORE algorithm
     """
     b0 = 1000.
     bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
     gtab = grad.gradient_table(bval, bvecs)
     B = bval[1]

     #Scale the eigenvalues and tensor by the B value so the units match
     D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
     evals = np.array([2., 1., 0.]) / B
     md = evals.mean()
     tensor = from_lower_triangular(D)

     #Design Matrix
     X = dti.design_matrix(bvecs, bval)

     #Signals
     Y = np.exp(np.dot(X,D))
     Y.shape = (-1,) + Y.shape
     for sigma in [0.1, 1, 10, 100]:
        for drop_this in range(1, Y.shape[-1]):
           # RESTORE estimates should be robust to dropping
           this_y = Y.copy()
           this_y[:, drop_this] = 1.0
           tensor_model = dti.TensorModel(gtab, fit_method='restore',
                                          sigma=sigma)
           tensor_est = tensor_model.fit(Y)
           assert_array_almost_equal(tensor_est.evals[0], evals)
           assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)


     data, bvals, bvecs = get_data('small_25')
     dd = nib.load(data).get_data()
     gtab = grad.gradient_table(bvals, bvecs)
     fit_method = 'restore' # 'NLLS'
     jac = True # False
     dd[..., 5] = 1.0
     tm = dti.TensorModel(gtab, fit_method=fit_method, jac=True, sigma=10)
     tm.fit(dd)
Example #33
def test_nlls_fit_tensor():
    """
    Test the implementation of NLLS and RESTORE
    """

    b0 = 1000.
    bvecs, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))
    Y.shape = (-1, ) + Y.shape

    # Estimate tensor from test signals and compare against expected result
    # using non-linear least squares:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS')
    tensor_est = tensor_model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    npt.assert_almost_equal(tensor_est.md[0], md)

    # You can also do this without the Jacobian (though it's slower):
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', jac=False)
    tensor_est = tensor_model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    npt.assert_almost_equal(tensor_est.md[0], md)

    # Using the gmm weighting scheme:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='gmm')
    tensor_est = tensor_model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    npt.assert_almost_equal(tensor_est.md[0], md)

    # If you use sigma weighting, you'd better provide a sigma:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='sigma')
    npt.assert_raises(ValueError, tensor_model.fit, Y)

    # Use NLLS with some actual 4D data:
    data, bvals, bvecs = get_fnames('small_25')
    gtab = grad.gradient_table(bvals, bvecs)
    tm1 = dti.TensorModel(gtab, fit_method='NLLS')
    dd = load_nifti_data(data)
    tf1 = tm1.fit(dd)
    tm2 = dti.TensorModel(gtab)
    tf2 = tm2.fit(dd)

    npt.assert_array_almost_equal(tf1.fa, tf2.fa, decimal=1)
Example #34
def test_wls_and_ls_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    # Defining Test Voxel (avoid nibabel dependency) ###

    # Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    gtab = grad.gradient_table(bval, bvec)
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    npt.assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Testing WLS Fit on Single Voxel
    # If you do something wonky (passing min_signal<0), you should get an
    # error:
    npt.assert_raises(ValueError, TensorModel, gtab, fit_method='WLS',
                      min_signal=-1)

    # Estimate tensor from test signals
    model = TensorModel(gtab, fit_method='WLS', return_S0_hat=True)
    tensor_est = model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                  err_msg="Calculation of tensor from Y does "
                                          "not compare to analytical solution")
    npt.assert_almost_equal(tensor_est.md[0], md)
    npt.assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, fit_method='LS')
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    npt.assert_array_almost_equal(tensor_est.linearity, linearity(evals))
    npt.assert_array_almost_equal(tensor_est.planarity, planarity(evals))
    npt.assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
Example #35
File: odf.py  Project: MarcCote/dipy
    elif len(shape) < 3:
        d = [1, 1, 1]
        d[0:len(shape)] = shape
    else:
        d = shape
    
    return np.mgrid[0:d[0], 0:d[1], 0:d[2]]

if __name__ == '__main__':
    import dipy.core.qball as qball
    from dipy.io.bvectxt import read_bvec_file
    filename = '/Users/bagrata/HARDI/E1322S8I1.nii.gz'
    grad_table_filename = '/Users/bagrata/HARDI/E1322S8I1.bvec'
    from nipy import load_image, save_image

    grad_table, b_values = read_bvec_file(grad_table_filename)
    img = load_image(filename)
    print('input dimensions: ')
    print(img.ndim)
    print('image size: ')
    print(img.shape)
    print('image affine: ')
    print(img.affine)
    print('image has pixels with size: ')
    print(np.dot(img.affine, np.eye(img.ndim + 1)).diagonal()[0:3])
    data = np.asarray(img)

    theta, phi = np.mgrid[0:2 * np.pi:64 * 1j, 0:np.pi:32 * 1j]
    odf_i = qball.ODF(data[188:192, 188:192, 22:24, :], 4,
                      grad_table, b_values)
    disp_odf(odf_i[0:1, 0:2, 0:2])
Example #36
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0] /
                             np.mean(data[0, 0, 0, gtab.b0s_mask]))

            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0],
                                    dtifit_to_relative.fa,
                                    decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(
        A_squiggle / np.linalg.norm(A_squiggle))
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1, ) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        assert_array_almost_equal(
            tensor_fit.quadratic_form[0], tensor,
            err_msg="Calculation of tensor from Y does not compare to "
                    "analytical solution")

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab, fit_method='crazy_method')

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
Example #37
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0]/np.mean(data[0, 0, 0,
                                                        gtab.b0s_mask]))

            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0], dtifit_to_relative.fa,
                                    decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = (3 * np.sqrt(6) * np.linalg.det(A_squiggle /
            np.linalg.norm(A_squiggle)))
    evals_eigh, evecs_eigh = np.linalg.eigh(tensor)
    # Sort according to eigen-value from large to small:
    evecs = evecs_eigh[:, np.argsort(evals_eigh)[::-1]]
    # Check that eigenvalues and eigenvectors are properly sorted through
    # that previous operation:
    for i in range(3):
        assert_array_almost_equal(np.dot(tensor, evecs[:, i]),
                                  evals[i] * evecs[:, i])
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method,
                                       return_S0_hat=True)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(tensor_fit.S0_hat, b0, decimal=3)
        # Test that the eigenvectors are correct, one-by-one:
        for i in range(3):
            # Eigenvectors have intrinsic sign ambiguity
            # (see
            # http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf)
            # so we need to allow for sign flips. One of the following should
            # always be true:
            assert_(
                    np.all(np.abs(tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6) or
                    np.all(np.abs(-tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6))
            # We set a fixed tolerance of 10e-6, similar to array_almost_equal

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=err_msg)

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')

    # Test custom fit tensor method
    try:
        model = dti.TensorModel(gtab, fit_method=lambda *args, **kwargs: 42)
        fit = model.fit_method()
    except Exception as exc:
        assert False, "TensorModel should accept custom fit methods: %s" % exc
    assert fit == 42, "Custom fit method for TensorModel returned %s." % fit

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Return S0_test
    tensor_model = dti.TensorModel(gtab, return_S0_hat=True)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)
    assert_array_almost_equal(fit[0].S0_hat, b0)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
Example #38
def test_TensorModel():
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)

    # Check that the multivoxel case works:
    dtifit = dm.fit(data)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3,3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(A_squiggle / np.linalg.norm(A_squiggle))
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        assert_array_almost_equal(
            tensor_fit.quadratic_form[0], tensor,
            err_msg="Calculation of tensor from Y does not compare to "
                    "analytical solution")

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')
Exemplo n.º 39
0
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0]/np.mean(data[0, 0, 0,
                                                        gtab.b0s_mask]))

            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0], dtifit_to_relative.fa,
                                    decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = (3 * np.sqrt(6) * np.linalg.det(A_squiggle /
            np.linalg.norm(A_squiggle)))
    evals_eigh, evecs_eigh = np.linalg.eigh(tensor)
    # Sort according to eigen-value from large to small:
    evecs = evecs_eigh[:, np.argsort(evals_eigh)[::-1]]
    # Check that eigenvalues and eigenvectors are properly sorted through
    # that previous operation:
    for i in range(3):
        assert_array_almost_equal(np.dot(tensor, evecs[:, i]),
                                  evals[i] * evecs[:, i])
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method,
                                       return_S0_hat=True)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(tensor_fit.S0_hat, b0, decimal=3)
        # Test that the eigenvectors are correct, one-by-one:
        for i in range(3):
            # Eigenvectors have intrinsic sign ambiguity
            # (see
            # http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf)
            # so we need to allow for sign flips. One of the following should
            # always be true:
            assert_(
                    np.all(np.abs(tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6) or
                    np.all(np.abs(-tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6))
            # We set a fixed tolerance of 10e-6, similar to array_almost_equal

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=err_msg)

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')

    # Test custom fit tensor method
    try:
        model = dti.TensorModel(gtab, fit_method=lambda *args, **kwargs: 42)
        fit = model.fit_method()
    except Exception as exc:
        assert False, "TensorModel should accept custom fit methods: %s" % exc
    assert fit == 42, "Custom fit method for TensorModel returned %s." % fit

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High-diffusion voxel: all diffusion-weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Test that S0_hat is returned when return_S0_hat=True
    tensor_model = dti.TensorModel(gtab, return_S0_hat=True)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)
    assert_array_almost_equal(fit[0].S0_hat, b0)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
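
The synthetic round trip exercised above can also be run outside the test harness. Below is a minimal, self-contained sketch (not taken from the test file; the 33-direction random gradient scheme and the b-value of 1000 are illustrative assumptions) that builds the same kind of noiseless single-tensor signal and recovers its eigenvalues with a WLS fit:

import numpy as np
import dipy.reconst.dti as dti
from dipy.core.gradients import gradient_table

# Illustrative acquisition: one b=0 volume plus 32 random unit directions.
rng = np.random.RandomState(0)
bvecs = rng.randn(33, 3)
bvecs /= np.linalg.norm(bvecs, axis=-1, keepdims=True)
bvecs[0] = 0.
bvals = np.full(33, 1000.)
bvals[0] = 0.
gtab = gradient_table(bvals, bvecs)

# Same parameter vector as in the test: six tensor elements plus an S0 term,
# scaled by the b-value so the units match.
b0 = 1000.
B = bvals[1]
D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B

# Noiseless log-linear signal, followed by a weighted least-squares fit.
Y = np.exp(np.dot(dti.design_matrix(gtab), D))
fit = dti.TensorModel(gtab, fit_method='WLS').fit(Y)

expected_evals = np.linalg.eigvalsh(dti.from_lower_triangular(D))[::-1]
print(np.allclose(fit.evals, expected_evals))  # should print True
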
Exemplo n.º 40
0
def test_TensorModel():
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)

    # Check that the multivoxel case works:
    dtifit = dm.fit(data)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(
        A_squiggle / np.linalg.norm(A_squiggle))
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1, ) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=err_msg)

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab, fit_method='crazy_method')
Exemplo n.º 41
0
def test_wls_and_ls_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table; the test signal is
    generated analytically from a known tensor.

    """

    # Defining test voxel (avoid nibabel dependency)

    # Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    gtab = grad.gradient_table(bval, bvec)
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Testing WLS Fit on Single Voxel
    # If you do something wonky (passing min_signal<0), you should get an
    # error:
    npt.assert_raises(ValueError, TensorModel, gtab, fit_method='WLS',
                      min_signal=-1)
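    # (The linear fits operate on log(signal), so the min_signal floor must
    #  be strictly positive; non-positive values raise a ValueError.)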

    # Estimate tensor from test signals
    model = TensorModel(gtab, fit_method='WLS', return_S0_hat=True)
    tensor_est = model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                              err_msg="Calculation of tensor from Y does not "
                                      "compare to analytical solution")
    assert_almost_equal(tensor_est.md[0], md)
    assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
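    # (lower_triangular(b0) returns the six unique tensor elements plus an
    #  S0 term as the last entry, which is why it can be compared directly to
    #  the 7-element parameter vector D used to build the signal.)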

    # Test using fit_method='LS'
    model = TensorModel(gtab, fit_method='LS')
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    assert_array_almost_equal(tensor_est.linearity, linearity(evals))
    assert_array_almost_equal(tensor_est.planarity, planarity(evals))
    assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
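    # (For evals = [2., 1., 0.] / B these shape measures are fully determined:
    #  assuming the trace-normalized Westin convention,
    #  cl = (l1 - l2) / (l1 + l2 + l3), cp = 2 * (l2 - l3) / (l1 + l2 + l3),
    #  cs = 3 * l3 / (l1 + l2 + l3), they come out to 1/3, 2/3 and 0 and sum
    #  to one; dipy's helpers may use a different normalization.)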
Exemplo n.º 42
0
        d = [1, 1, 1]
        d[0:len(shape)] = shape
    else:
        d = shape

    return np.mgrid[0:d[0], 0:d[1], 0:d[2]]


if __name__ == '__main__':
    import dipy.core.qball as qball
    from dipy.io.bvectxt import read_bvec_file
    filename = '/Users/bagrata/HARDI/E1322S8I1.nii.gz'
    grad_table_filename = '/Users/bagrata/HARDI/E1322S8I1.bvec'
    from nipy import load_image, save_image

    grad_table, b_values = read_bvec_file(grad_table_filename)
    img = load_image(filename)
    print('input dimensions: ')
    print(img.ndim)
    print('image size: ')
    print(img.shape)
    print('image affine: ')
    print(img.affine)
    print('image has pixels of size: ')
    print(np.dot(img.affine, np.eye(img.ndim + 1)).diagonal()[0:3])
    data = np.asarray(img)

    theta, phi = np.mgrid[0:2 * np.pi:64 * 1j, 0:np.pi:32 * 1j]
    odf_i = qball.ODF(data[188:192, 188:192, 22:24, :], 4, grad_table,
                      b_values)
    disp_odf(odf_i[0:1, 0:2, 0:2])
Exemplo n.º 43
0
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data = nib.load(fdata).get_data()
    gtab = grad.gradient_table(fbval, fbvec)
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.9, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.9, True)
    assert_equal(dtifit.fa > 0, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    # Check that the multivoxel case works:
    dtifit = dm.fit(data)

    # Check that it works on signal that has already been normalized to S0:
    dm_to_relative = dti.TensorModel(gtab)
    relative_data = (data[0, 0, 0]/np.mean(data[0, 0, 0, gtab.b0s_mask]))

    dtifit_to_relative = dm_to_relative.fit(relative_data)
    npt.assert_almost_equal(dtifit.fa[0, 0, 0], dtifit_to_relative.fa,
                            decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(
        A_squiggle / np.linalg.norm(A_squiggle))
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=err_msg)

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High-diffusion voxel: all diffusion-weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)