Example #1
def test_design_matrix_lte():
    _, fbval, fbvec = get_fnames('small_25')
    gtab_btens_none = grad.gradient_table(fbval, fbvec)
    gtab_btens_lte = grad.gradient_table(fbval, fbvec, btens="LTE")

    B_btens_none = dti.design_matrix(gtab_btens_none)
    B_btens_lte = dti.design_matrix(gtab_btens_lte)
    npt.assert_array_almost_equal(B_btens_none, B_btens_lte, decimal=1)
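All of these examples follow the same pattern: build a GradientTable, call dti.design_matrix, and relate log-signals linearly to the lower-triangular tensor elements plus log(S0). As a quick orientation, here is a minimal, self-contained sketch (the b-values and directions are illustrative choices, not taken from any of the datasets used above) showing that an ordinary least-squares solve against the design matrix recovers the tensor parameters from noise-free signals:

import numpy as np
from dipy.core import gradients as grad
from dipy.reconst import dti

# Toy acquisition (illustrative values): one b=0 plus six unit directions at b=1000.
bvals = np.array([0., 1000., 1000., 1000., 1000., 1000., 1000.])
bvecs = np.array([[0., 0., 0.],
                  [1., 0., 0.], [0., 1., 0.], [0., 0., 1.],
                  [1., 1., 0.], [1., 0., 1.], [0., 1., 1.]])
bvecs[1:] /= np.linalg.norm(bvecs[1:], axis=1, keepdims=True)
gtab = grad.gradient_table(bvals, bvecs)

# Same parameterization the tests below use: lower-triangular tensor plus log(S0), scaled by b.
b = bvals[1]
D = np.array([1., 1., 1., 0., 0., 1., -np.log(1000.) * b]) / b
X = dti.design_matrix(gtab)                   # (N, 7) design matrix
Y = np.exp(np.dot(X, D))                      # noise-free signals; Y[0] equals S0
D_hat = np.dot(np.linalg.pinv(X), np.log(Y))  # ordinary least-squares recovery
# For this noise-free toy case, np.allclose(D_hat, D) holds.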
Example #2
def test_nnls_jacobian_fucn():
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))

    # Test Jacobian at D
    args = [X, Y]
    analytical = dti._nlls_jacobian_func(D, *args)
    for i in range(len(X)):
        args = [X[i], Y[i]]
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)
        assert_true(np.allclose(approx, analytical[i]))

    # Test Jacobian at zero
    D = np.zeros_like(D)
    args = [X, Y]
    analytical = dti._nlls_jacobian_func(D, *args)
    for i in range(len(X)):
        args = [X[i], Y[i]]
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)
        assert_true(np.allclose(approx, analytical[i]))
Example #3
def make_fake_signal():
    v, e, f = create_half_unit_sphere(4)
    vecs_xy = v[np.flatnonzero(v[:, 2] == 0)]
    evals = np.array([1.8, 0.2, 0.2]) * 10 ** -3 * 1.5
    evecs_moveing = np.empty((len(vecs_xy), 3, 3))
    evecs_moveing[:, :, 0] = vecs_xy
    evecs_moveing[:, :, 1] = [0, 0, 1]
    evecs_moveing[:, :, 2] = np.cross(evecs_moveing[:, :, 0], evecs_moveing[:, :, 1])
    assert ((evecs_moveing * evecs_moveing).sum(1) - 1 < 0.001).all()
    assert ((evecs_moveing * evecs_moveing).sum(2) - 1 < 0.001).all()

    gtab = np.empty((len(v) + 1, 3))
    bval = np.empty(len(v) + 1)
    bval[0] = 0
    bval[1:] = 2000
    gtab[0] = [0, 0, 0]
    gtab[1:] = v
    bvec = gtab.T
    B = design_matrix(bvec, bval)

    tensor_moveing = np.empty_like(evecs_moveing)
    for ii in range(len(vecs_xy)):
        tensor_moveing[ii] = np.dot(evecs_moveing[ii] * evals, evecs_moveing[ii].T)
    D_moveing = lower_triangular(tensor_moveing, 1)
    tensor_fixed = np.diag(evals)
    D_fixed = lower_triangular(tensor_fixed, 1)

    sig = 0.45 * np.exp(np.dot(D_moveing, B.T)) + 0.55 * np.exp(np.dot(B, D_fixed))
    assert sig.max() <= 1
    assert sig.min() > 0
    return v, e, vecs_xy, bval, bvec, sig
Example #4
def test_restore():
     """
     Test the implementation of the RESTORE algorithm
     """
     b0 = 1000.
     bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
     gtab = grad.gradient_table(bval, bvecs)
     B = bval[1]

     #Scale the eigenvalues and tensor by the B value so the units match
     D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
     evals = np.array([2., 1., 0.]) / B
     md = evals.mean()
     tensor = from_lower_triangular(D)

     #Design Matrix
     X = dti.design_matrix(gtab)

     #Signals
     Y = np.exp(np.dot(X,D))
     Y.shape = (-1,) + Y.shape
     for drop_this in range(1, Y.shape[-1]):
         # RESTORE estimates should be robust to dropping
         this_y = Y.copy()
         this_y[:, drop_this] = 1.0
         tensor_model = dti.TensorModel(gtab, fit_method='restore',
                                        sigma=67.0)

         tensor_est = tensor_model.fit(this_y)
         assert_array_almost_equal(tensor_est.evals[0], evals, decimal=3)
         assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                   decimal=3)
Example #5
def make_fake_signal():
    v, e, f = create_half_unit_sphere(4)
    vecs_xy = v[np.flatnonzero(v[:, 2] == 0)]
    evals = np.array([1.8, .2, .2]) * 10**-3 * 1.5
    evecs_moveing = np.empty((len(vecs_xy), 3, 3))
    evecs_moveing[:, :, 0] = vecs_xy
    evecs_moveing[:, :, 1] = [0, 0, 1]
    evecs_moveing[:, :, 2] = np.cross(evecs_moveing[:, :, 0],
                                      evecs_moveing[:, :, 1])
    assert ((evecs_moveing * evecs_moveing).sum(1) - 1 < .001).all()
    assert ((evecs_moveing * evecs_moveing).sum(2) - 1 < .001).all()

    gtab = np.empty((len(v) + 1, 3))
    bval = np.empty(len(v) + 1)
    bval[0] = 0
    bval[1:] = 2000
    gtab[0] = [0, 0, 0]
    gtab[1:] = v
    bvec = gtab.T
    B = design_matrix(bvec, bval)

    tensor_moveing = np.empty_like(evecs_moveing)
    for ii in range(len(vecs_xy)):
        tensor_moveing[ii] = np.dot(evecs_moveing[ii] * evals,
                                    evecs_moveing[ii].T)
    D_moveing = lower_triangular(tensor_moveing, 1)
    tensor_fixed = np.diag(evals)
    D_fixed = lower_triangular(tensor_fixed, 1)

    sig = .45 * np.exp(np.dot(D_moveing, B.T)) + .55 * np.exp(
        np.dot(B, D_fixed))
    assert sig.max() <= 1
    assert sig.min() > 0
    return v, e, vecs_xy, bval, bvec, sig
Example #7
def test_TensorModel():
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)

    # Check that the multivoxel case works:
    dtifit = dm.fit(data)
    assert_equal(dtifit.fa.shape, data.shape[:3])

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    #Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    evecs = np.linalg.eigh(tensor)[1]
    #Design Matrix
    X = dti.design_matrix(bvecs, bvals)
    #Signals
    Y = np.exp(np.dot(X,D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods: #XXX Add NNLS methods!
    for fit_method in ['OLS', 'WLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg =\
        "Calculation of tensor from Y does not compare to analytical solution")

        assert_almost_equal(tensor_fit.md[0], md)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')
Example #8
def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    ### Defining Test Voxel (avoid nibabel dependency) ###

    #Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    #Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    #Design Matrix
    X = dti.design_matrix(bvec, bval)
    #Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    gtab = grad.gradient_table(bval, bvec)

    ### Testing WLS Fit on Single Voxel ###
    #Estimate tensor from test signals
    model = TensorModel(gtab, min_signal=1e-8, fit_method='WLS')
    tensor_est = model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                              err_msg="Calculation of tensor from Y does not "
                                       "compare to analytical solution")
    assert_almost_equal(tensor_est.md[0], md)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, min_signal=1e-8, fit_method='LS')
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
Example #9
def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    ### Defining Test Voxel (avoid nibabel dependency) ###

    #Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    #Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    #Design Matrix
    X = dti.design_matrix(gtab, bval)
    #Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1, ) + Y.shape

    ### Testing WLS Fit on Single Voxel ###
    #Estimate tensor from test signals
    tensor_est = dti.Tensor(Y, bval, gtab.T, min_signal=1e-8)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(
        tensor_est.D[0],
        tensor,
        err_msg=
        "Calculation of tensor from Y does not compare to analytical solution")
    assert_almost_equal(tensor_est.md()[0], md)

    #test 0d tensor
    y = Y[0]
    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.D, tensor)
    assert_almost_equal(tensor_est.md(), md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8, fit_method='LS')
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.D, tensor)
    assert_almost_equal(tensor_est.md(), md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
Example #10
def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """
    
    ### Defining Test Voxel (avoid nibabel dependency) ###

    #Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s 
    D = np.array([1., 1., 1., 1., 0., 0., np.log(1000) * 10.**4]) * 10.**-4
    evals = np.array([2., 1., 0.]) * 10.**-4
    md = evals.mean()
    tensor = np.empty((3,3))
    tensor[0, 0] = D[0]
    tensor[1, 1] = D[1]
    tensor[2, 2] = D[2]
    tensor[0, 1] = tensor[1, 0] = D[3]
    tensor[0, 2] = tensor[2, 0] = D[4]
    tensor[1, 2] = tensor[2, 1] = D[5]
    #Design Matrix
    gtab, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    X = dti.design_matrix(gtab, bval)
    #Signals
    Y = np.exp(np.dot(X,D))
    Y.shape = (-1,) + Y.shape
    
    ### Testing WLS Fit on Single Voxel ###
    #Estimate tensor from test signals
    tensor_est = dti.Tensor(Y, bval, gtab.T, min_signal=1e-8)
    yield assert_equal(tensor_est.shape, Y.shape[:-1])
    yield assert_array_almost_equal(tensor_est.evals[0], evals)
    yield assert_array_almost_equal(
        tensor_est.D[0], tensor,
        err_msg="Calculation of tensor from Y does not compare to analytical "
                "solution")
    yield assert_almost_equal(tensor_est.md()[0], md)

    #test 0d tensor
    y = Y[0]
    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8)
    yield assert_equal(tensor_est.shape, tuple())
    yield assert_array_almost_equal(tensor_est.evals, evals)
    yield assert_array_almost_equal(tensor_est.D, tensor)
    yield assert_almost_equal(tensor_est.md(), md)

    tensor_est = dti.Tensor(y, bval, gtab.T, min_signal=1e-8, fit_method='LS')
    yield assert_equal(tensor_est.shape, tuple())
    yield assert_array_almost_equal(tensor_est.evals, evals)
    yield assert_array_almost_equal(tensor_est.D, tensor)
    yield assert_almost_equal(tensor_est.md(), md)
Example #11
    def __init__(self, gtab, fit_method="NLS", *args, **kwargs):
        """ Free Water Diffusion Tensor Model [1]_.

        Parameters
        ----------
        gtab : GradientTable class instance
        fit_method : str or callable
            str can be one of the following:

            'WLS' for weighted linear least square fit according to [1]_
                :func:`fwdti.wls_iter`
            'NLS' for non-linear least square fit according to [1]_
                :func:`fwdti.nls_iter`

            callable has to have the signature:
              fit_method(design_matrix, data, *args, **kwargs)
        args, kwargs : arguments and key-word arguments passed to the
           fit_method. See fwdti.wls_iter, fwdti.nls_iter for
           details

        References
        ----------
        .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
               Optimization of a free water elimination two-compartmental model
               for diffusion tensor imaging. NeuroImage 103, 323-333.
               doi: 10.1016/j.neuroimage.2014.09.053
        """
        ReconstModel.__init__(self, gtab)

        if not callable(fit_method):
            try:
                fit_method = common_fit_methods[fit_method]
            except KeyError:
                e_s = '"' + str(fit_method) + '" is not a known fit '
                e_s += 'method, the fit method should either be a '
                e_s += 'function or one of the common fit methods'
                raise ValueError(e_s)
        self.fit_method = fit_method
        self.design_matrix = design_matrix(self.gtab)
        self.args = args
        self.kwargs = kwargs

        # Check if at least three b-values are given
        bmag = int(np.log10(self.gtab.bvals.max()))
        b = self.gtab.bvals.copy() / (10 ** (bmag-1))  # normalize b units
        b = b.round()
        uniqueb = np.unique(b)
        if len(uniqueb) < 3:
            mes = "fwdti fit requires data for at least 2 non zero b-values"
            raise ValueError(mes)
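The docstring above only pins down the callable's signature, fit_method(design_matrix, data, *args, **kwargs). As a hedged illustration of that contract (and nothing more), a custom callable could look like the sketch below; a real substitute for fwdti.wls_iter or fwdti.nls_iter would need to return the model's 13 parameters per voxel.

import numpy as np

def my_fit_method(design_matrix, data, *args, **kwargs):
    # Illustrative body only: a plain log-linear solve over the last axis of
    # `data`. It demonstrates the expected signature, not the free water
    # parameterization used by the model above.
    log_signal = np.log(np.maximum(data, 1e-6))
    return np.dot(log_signal, np.linalg.pinv(design_matrix).T)

Such a function would then be passed as the fit_method argument of the constructor shown above.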
Example #12
def test_restore():
    """
    Test the implementation of the RESTORE algorithm
    """
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    tensor = from_lower_triangular(D)

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))
    Y.shape = (-1, ) + Y.shape
    for drop_this in range(1, Y.shape[-1]):
        for jac in [True, False]:
            # RESTORE estimates should be robust to dropping
            this_y = Y.copy()
            this_y[:, drop_this] = 1.0
            for sigma in [67.0, np.ones(this_y.shape[-1]) * 67.0]:
                tensor_model = dti.TensorModel(gtab,
                                               fit_method='restore',
                                               jac=jac,
                                               sigma=sigma)

                tensor_est = tensor_model.fit(this_y)
                assert_array_almost_equal(tensor_est.evals[0],
                                          evals,
                                          decimal=3)
                assert_array_almost_equal(tensor_est.quadratic_form[0],
                                          tensor,
                                          decimal=3)

    # If sigma is very small, it still needs to work:
    tensor_model = dti.TensorModel(gtab, fit_method='restore', sigma=0.0001)
    tensor_model.fit(Y.copy())

    # Test return_S0_hat
    tensor_model = dti.TensorModel(gtab,
                                   fit_method='restore',
                                   sigma=0.0001,
                                   return_S0_hat=True)
    tmf = tensor_model.fit(Y.copy())
    assert_almost_equal(tmf[0].S0_hat, b0)
Example #13
    def __init__(self, gtab, fit_method="NLS", *args, **kwargs):
        """ Free Water Diffusion Tensor Model [1]_.

        Parameters
        ----------
        gtab : GradientTable class instance
        fit_method : str or callable
            str can be one of the following:

            'WLS' for weighted linear least square fit according to [1]_
                :func:`fwdti.wls_iter`
            'NLS' for non-linear least square fit according to [1]_
                :func:`fwdti.nls_iter`

            callable has to have the signature:
              fit_method(design_matrix, data, *args, **kwargs)
        args, kwargs : arguments and key-word arguments passed to the
           fit_method. See fwdti.wls_iter, fwdti.nls_iter for
           details

        References
        ----------
        .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
               Optimization of a free water elimination two-compartmental model
               for diffusion tensor imaging. NeuroImage 103, 323-333.
               doi: 10.1016/j.neuroimage.2014.09.053
        """
        ReconstModel.__init__(self, gtab)

        if not callable(fit_method):
            try:
                fit_method = common_fit_methods[fit_method]
            except KeyError:
                e_s = '"' + str(fit_method) + '" is not a known fit '
                e_s += 'method, the fit method should either be a '
                e_s += 'function or one of the common fit methods'
                raise ValueError(e_s)
        self.fit_method = fit_method
        self.design_matrix = design_matrix(self.gtab)
        self.args = args
        self.kwargs = kwargs

        # Check if at least three b-values are given
        bmag = int(np.log10(self.gtab.bvals.max()))
        b = self.gtab.bvals.copy() / (10**(bmag - 1))  # normalize b units
        b = b.round()
        uniqueb = np.unique(b)
        if len(uniqueb) < 3:
            mes = "fwdti fit requires data for at least 2 non zero b-values"
            raise ValueError(mes)
Example #14
def test_nlls_fit_tensor():
     """
     Test the implementation of NLLS and RESTORE
     """

     b0 = 1000.
     bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
     gtab = grad.gradient_table(bval, bvecs)
     B = bval[1]

     #Scale the eigenvalues and tensor by the B value so the units match
     D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
     evals = np.array([2., 1., 0.]) / B
     md = evals.mean()
     tensor = from_lower_triangular(D)

     #Design Matrix
     X = dti.design_matrix(bvecs, bval)

     #Signals
     Y = np.exp(np.dot(X,D))
     Y.shape = (-1,) + Y.shape

     #Estimate tensor from test signals and compare against expected result
     #using non-linear least squares:
     tensor_model = dti.TensorModel(gtab, fit_method='NLLS')
     tensor_est = tensor_model.fit(Y)
     assert_equal(tensor_est.shape, Y.shape[:-1])
     assert_array_almost_equal(tensor_est.evals[0], evals)
     assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
     assert_almost_equal(tensor_est.md[0], md)

     # Using the gmm weighting scheme:
     tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='gmm')
     assert_equal(tensor_est.shape, Y.shape[:-1])
     assert_array_almost_equal(tensor_est.evals[0], evals)
     assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
     assert_almost_equal(tensor_est.md[0], md)

     # Use NLLS with some actual 4D data:
     data, bvals, bvecs = get_data('small_25')
     gtab = grad.gradient_table(bvals, bvecs)
     tm1 = dti.TensorModel(gtab, fit_method='NLLS')
     dd = nib.load(data).get_data()
     tf1 = tm1.fit(dd)
     tm2 = dti.TensorModel(gtab)
     tf2 = tm2.fit(dd)

     assert_array_almost_equal(tf1.fa, tf2.fa, decimal=1)
Example #16
    def __init__(self, gtab, fit_method="NLS", *args, **kwargs):
        """ Free Water Diffusion Tensor Model [1]_.

        Parameters
        ----------
        gtab : GradientTable class instance
        fit_method : str or callable
            str can be one of the following:

            'WLS' for weighted linear least square fit according to [1]_
                :func:`fwdti.wls_iter`
            'NLS' for non-linear least square fit according to [1]_
                :func:`fwdti.nls_iter`

            callable has to have the signature:
              fit_method(design_matrix, data, *args, **kwargs)
        args, kwargs : arguments and key-word arguments passed to the
           fit_method. See fwdti.wls_iter, fwdti.nls_iter for
           details

        References
        ----------
        .. [1] Henriques, R.N., Rokem, A., Garyfallidis, E., St-Jean, S.,
               Peterson E.T., Correia, M.M., 2017. [Re] Optimization of a free
               water elimination two-compartment model for diffusion tensor
               imaging. ReScience volume 3, issue 1, article number 2

        """
        ReconstModel.__init__(self, gtab)

        if not callable(fit_method):
            try:
                fit_method = common_fit_methods[fit_method]
            except KeyError:
                e_s = '"' + str(fit_method) + '" is not a known fit '
                e_s += 'method, the fit method should either be a '
                e_s += 'function or one of the common fit methods'
                raise ValueError(e_s)
        self.fit_method = fit_method
        self.design_matrix = design_matrix(self.gtab)
        self.args = args
        self.kwargs = kwargs

        # Check if at least three b-values are given
        enough_b = check_multi_b(self.gtab, 3, non_zero=False)
        if not enough_b:
            mes = "fwDTI requires at least 3 b-values (which can include b=0)"
            raise ValueError(mes)
Example #17
    def __init__(self, gtab, fit_method="NLS", *args, **kwargs):
        """ Free Water Diffusion Tensor Model [1]_.

        Parameters
        ----------
        gtab : GradientTable class instance
        fit_method : str or callable
            str can be one of the following:

            'WLS' for weighted linear least square fit according to [1]_
                :func:`fwdti.wls_iter`
            'NLS' for non-linear least square fit according to [1]_
                :func:`fwdti.nls_iter`

            callable has to have the signature:
              fit_method(design_matrix, data, *args, **kwargs)
        args, kwargs : arguments and key-word arguments passed to the
           fit_method. See fwdti.wls_iter, fwdti.nls_iter for
           details

        References
        ----------
        .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
               Optimization of a free water elimination two-compartmental model
               for diffusion tensor imaging. NeuroImage 103, 323-333.
               doi: 10.1016/j.neuroimage.2014.09.053
        """
        ReconstModel.__init__(self, gtab)

        if not callable(fit_method):
            try:
                fit_method = common_fit_methods[fit_method]
            except KeyError:
                e_s = '"' + str(fit_method) + '" is not a known fit '
                e_s += 'method, the fit method should either be a '
                e_s += 'function or one of the common fit methods'
                raise ValueError(e_s)
        self.fit_method = fit_method
        self.design_matrix = design_matrix(self.gtab)
        self.args = args
        self.kwargs = kwargs

        # Check if at least three b-values are given
        enough_b = check_multi_b(self.gtab, 3, non_zero=False)
        if not enough_b:
            mes = "fwDTI requires at least 3 b-values (which can include b=0)"
            raise ValueError(mes)
Example #19
def test_restore():
    """
    Test the implementation of the RESTORE algorithm
    """
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    tensor = from_lower_triangular(D)

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))
    Y.shape = (-1,) + Y.shape
    for drop_this in range(1, Y.shape[-1]):
        for jac in [True, False]:
            # RESTORE estimates should be robust to dropping
            this_y = Y.copy()
            this_y[:, drop_this] = 1.0
            for _ in [67.0, np.ones(this_y.shape[-1]) * 67.0]:
                tensor_model = dti.TensorModel(gtab, fit_method='restore',
                                               jac=jac,
                                               sigma=67.0)

                tensor_est = tensor_model.fit(this_y)
                assert_array_almost_equal(tensor_est.evals[0], evals,
                                          decimal=3)
                assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                          decimal=3)

    # If sigma is very small, it still needs to work:
    tensor_model = dti.TensorModel(gtab, fit_method='restore', sigma=0.0001)
    tensor_model.fit(Y.copy())

    # Test return_S0_hat
    tensor_model = dti.TensorModel(gtab, fit_method='restore', sigma=0.0001,
                                   return_S0_hat=True)
    tmf = tensor_model.fit(Y.copy())
    assert_almost_equal(tmf[0].S0_hat, b0)
Example #20
    def __init__(self, gtab):
        super(EigenModule, self).__init__()
        tol = 1e-6

        design_matrix = dti.design_matrix(gtab)

        self.design_matrix_inv = torch.FloatTensor(
            np.linalg.pinv(design_matrix))
        self.design_matrix_inv = nn.Parameter(self.design_matrix_inv,
                                              requires_grad=False)

        self.min_diffusivity = tol / -design_matrix.min()
        self._lt_indices = np.array([[0, 1, 3],
                                     [1, 2, 4],
                                     [3, 4, 5]])

        self.symeig_module = SymEig()

        self.inputs = ['dwi']
        self.outputs = ['evals', 'evecs']
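The _lt_indices array above is what turns the six estimated lower-triangular values back into a full 3x3 symmetric tensor before the eigendecomposition. A small numpy-only sketch of that indexing step, using dipy's (Dxx, Dxy, Dyy, Dxz, Dyz, Dzz) ordering:

import numpy as np

lt = np.array([1.0, 0.2, 0.8, 0.1, 0.0, 0.6])   # Dxx, Dxy, Dyy, Dxz, Dyz, Dzz
lt_indices = np.array([[0, 1, 3],
                       [1, 2, 4],
                       [3, 4, 5]])
tensor = lt[lt_indices]   # (3, 3) symmetric tensor rebuilt by fancy indexing
assert np.allclose(tensor, tensor.T)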
Example #21
def test_restore():
     """
     Test the implementation of the RESTORE algorithm
     """
     b0 = 1000.
     bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
     gtab = grad.gradient_table(bval, bvecs)
     B = bval[1]

     #Scale the eigenvalues and tensor by the B value so the units match
     D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
     evals = np.array([2., 1., 0.]) / B
     md = evals.mean()
     tensor = from_lower_triangular(D)

     #Design Matrix
     X = dti.design_matrix(bvecs, bval)

     #Signals
     Y = np.exp(np.dot(X,D))
     Y.shape = (-1,) + Y.shape
     for sigma in [0.1, 1, 10, 100]:
        for drop_this in range(1, Y.shape[-1]):
           # RESTORE estimates should be robust to dropping
           this_y = Y.copy()
           this_y[:, drop_this] = 1.0
           tensor_model = dti.TensorModel(gtab, fit_method='restore',
                                          sigma=sigma)
           tensor_est = tensor_model.fit(Y)
           assert_array_almost_equal(tensor_est.evals[0], evals)
           assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)


     data, bvals, bvecs = get_data('small_25')
     dd = nib.load(data).get_data()
     gtab = grad.gradient_table(bvals, bvecs)
     fit_method = 'restore' # 'NLLS'
     jac = True # False
     dd[..., 5] = 1.0
     tm = dti.TensorModel(gtab, fit_method=fit_method, jac=True, sigma=10)
     tm.fit(dd)
Example #23
def fwdti_prediction(params, gtab, S0=1, Diso=3.0e-3):
    r""" Signal prediction given the free water DTI model parameters.

    Parameters
    ----------
    params : (..., 13) ndarray
        Model parameters. The last dimension should have the 12 tensor
        parameters (3 eigenvalues, followed by the 3 corresponding
        eigenvectors) and the volume fraction of the free water compartment.
    gtab : a GradientTable class instance
        The gradient table for this prediction
    S0 : float or ndarray
        The non diffusion-weighted signal in every voxel, or across all
        voxels. Default: 1
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.

    Returns
    --------
    S : (..., N) ndarray
        Simulated signal based on the free water DTI model

    Notes
    -----
    The predicted signal is given by:
    $S(\theta, b) = S_0 * [(1-f) * e^{-b ADC} + f * e^{-b D_{iso}}]$, where
    $ADC = \theta Q \theta^T$, $\theta$ is a unit vector pointing at any
    direction on the sphere for which a signal is to be predicted, $b$ is the b
    value provided in the GradientTable input for that direction, $Q$ is the
    quadratic form of the tensor determined by the input parameters, $f$ is the
    volume fraction of the free water compartment, and $D_{iso}$ is the free
    water diffusivity, which is equal to $3 * 10^{-3} mm^{2}s^{-1}$ [1]_.

    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    evals = params[..., :3]
    evecs = params[..., 3:-1].reshape(params.shape[:-1] + (3, 3))
    f = params[..., 12]
    qform = vec_val_vect(evecs, evals)
    lower_dt = lower_triangular(qform, S0)
    lower_diso = lower_dt.copy()
    lower_diso[..., 0] = lower_diso[..., 2] = lower_diso[..., 5] = Diso
    lower_diso[..., 1] = lower_diso[..., 3] = lower_diso[..., 4] = 0
    D = design_matrix(gtab)

    pred_sig = np.zeros(f.shape + (gtab.bvals.shape[0], ))
    mask = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2])
    index = ndindex(f.shape)
    for v in index:
        if mask[v]:
            pred_sig[v] = (1 - f[v]) * np.exp(np.dot(lower_dt[v], D.T)) + \
                          f[v] * np.exp(np.dot(lower_diso[v], D.T))

    return pred_sig
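A minimal usage sketch for the prediction function above (the parameter values and variable names are hypothetical; it assumes a gtab built as in the earlier examples): the 13 parameters are the three eigenvalues, the flattened 3x3 eigenvector matrix, and the free water fraction, in that order.

import numpy as np

evals = np.array([1.6e-3, 0.5e-3, 0.3e-3])     # tensor eigenvalues in mm^2/s
evecs = np.eye(3)                              # eigenvectors aligned with the axes
f = 0.2                                        # free water volume fraction
params = np.concatenate([evals, evecs.ravel(), [f]])[None, :]  # shape (1, 13)

pred = fwdti_prediction(params, gtab, S0=100.0)
# pred has shape (1, gtab.bvals.shape[0]): one predicted signal per acquisition.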
Example #24
def test_TensorModel():
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)

    # Check that the multivoxel case works:
    dtifit = dm.fit(data)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3,3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(A_squiggle / np.linalg.norm(A_squiggle))
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=\
        "Calculation of tensor from Y does not compare to analytical solution")

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')
Example #25
def test_TensorModel():
    data, gtab = dsi_voxels()
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.5, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    assert_almost_equal(dtifit.fa, gfa(dtifit.odf(sphere)), 1)

    # Check that the multivoxel case works:
    dtifit = dm.fit(data)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(
        A_squiggle / np.linalg.norm(A_squiggle))
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1, ) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=\
        "Calculation of tensor from Y does not compare to analytical solution")

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab, fit_method='crazy_method')
Example #26
def nls_fit_tensor(gtab, data, mask=None, Diso=3e-3,
                   min_signal=1.0e-6, f_transform=True, cholesky=False,
                   jac=True):
    """
    Fit the water elimination tensor model using the non-linear least-squares.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    f_transform : bool, optional
        If true, the water volume fraction is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between
        0 and 1.
        Default: True
    cholesky : bool, optional
        If true, Cholesky decomposition is used to ensure that the diffusion
        tensor is positive definite.
        Default: False
    jac : bool
        Use the Jacobian? Default: True

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters
        in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment

    References
    ----------
    .. [1] Henriques, R.N., Rokem, A., Garyfallidis, E., St-Jean, S.,
           Peterson E.T., Correia, M.M., 2017. [Re] Optimization of a free
           water elimination two-compartment model for diffusion tensor
           imaging. ReScience volume 3, issue 1, article number 2
    """
    # Analyse compatible input cases
    if jac is True and cholesky is True:
        raise ValueError("Cholesky decomposition is not compatible with jac.")

    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)

    # Loop data fitting through all voxels
    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter(W, data[v], S0[v], Diso=Diso,
                              min_signal=min_signal,
                              f_transform=f_transform,
                              cholesky=cholesky, jac=jac)
            fw_params[v] = params

    return fw_params
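A hedged usage sketch for the voxel-wise loop above: it reuses fwdti_prediction from the earlier example to synthesize clean data, and assumes a gtab from the earlier examples plus the helpers this function calls (design_matrix, ndindex, nls_iter), as in dipy.reconst.fwdti.

import numpy as np

# Hypothetical single-voxel parameters, tiled over a small 2x2x2 volume.
evals = np.array([1.6e-3, 0.5e-3, 0.3e-3])
params = np.concatenate([evals, np.eye(3).ravel(), [0.2]])
dwi = np.tile(fwdti_prediction(params[None, :], gtab, S0=100.0), (2, 2, 2, 1))

fw_params = nls_fit_tensor(gtab, dwi)
print(fw_params.shape)   # (2, 2, 2, 13): evals (3), evecs (9), free water fraction (1)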
Example #27
def test_nlls_fit_tensor():
    """
    Test the implementation of NLLS and RESTORE
    """

    b0 = 1000.
    bval, bvecs = read_bvals_bvecs(*get_fnames('55dir_grad'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))
    Y.shape = (-1, ) + Y.shape

    # Estimate tensor from test signals and compare against expected result
    # using non-linear least squares:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS')
    tensor_est = tensor_model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    npt.assert_almost_equal(tensor_est.md[0], md)

    # You can also do this without the Jacobian (though it's slower):
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', jac=False)
    tensor_est = tensor_model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    npt.assert_almost_equal(tensor_est.md[0], md)

    # Using the gmm weighting scheme:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='gmm')
    tensor_est = tensor_model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    npt.assert_almost_equal(tensor_est.md[0], md)

    # If you use sigma weighting, you'd better provide a sigma:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='sigma')
    npt.assert_raises(ValueError, tensor_model.fit, Y)

    # Use NLLS with some actual 4D data:
    data, bvals, bvecs = get_fnames('small_25')
    gtab = grad.gradient_table(bvals, bvecs)
    tm1 = dti.TensorModel(gtab, fit_method='NLLS')
    dd = load_nifti_data(data)
    tf1 = tm1.fit(dd)
    tm2 = dti.TensorModel(gtab)
    tf2 = tm2.fit(dd)

    npt.assert_array_almost_equal(tf1.fa, tf2.fa, decimal=1)
Example #28
def nls_fit_tensor_bounds(gtab, data, mask=None, Diso=3e-3, mdreg=2.7e-3,
                          min_signal=1.0e-6, bounds=None, jac=True):
    """
    Fit the water elimination tensor model using the non-linear least-squares
    with constraints

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default md_reg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    bounds : 2-tuple of arrays with 14 elements, optional
        Lower and upper bounds on fwdti model variables and the log of
        non-diffusion signal S0. Use np.inf with an appropriate sign to
        disable bounds on all or some variables. When bounds is set to None
        the following default variable bounds is used:
            ([0., -Diso, 0., -Diso, -Diso, 0., 0., np.exp(-10.)],
             [Diso, Diso, Diso, Diso, Diso, Diso, 1., np.exp(10.)])
    jac : bool
        Use the Jacobian? Default: True

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model
        parameters in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter_bounds(W, data[v], S0[v], Diso=Diso, mdreg=mdreg,
                                     min_signal=min_signal, bounds=bounds,
                                     jac=jac)
            fw_params[v] = params

    return fw_params
Example #29
def wls_fit_tensor(gtab, data, Diso=3e-3, mask=None, min_signal=1.0e-6,
                   piterations=3, mdreg=2.7e-3):
    r""" Computes weighted least squares (WLS) fit to calculate self-diffusion
    tensor using a linear regression model [1]_.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    piterations : int, optional
        Number of iterations used to refine the precision of f. Default is set
        to 3 corresponding to a precision of 0.01.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default md_reg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters
        in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment.

    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = wls_iter(W, data[v], S0[v], min_signal=min_signal,
                              Diso=Diso, piterations=piterations, mdreg=mdreg)
            fw_params[v] = params

    return fw_params
Example #30
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data = nib.load(fdata).get_data()
    gtab = grad.gradient_table(fbval, fbvec)
    dm = dti.TensorModel(gtab, 'LS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.9, True)
    dm = dti.TensorModel(gtab, 'WLS')
    dtifit = dm.fit(data[0, 0, 0])
    assert_equal(dtifit.fa < 0.9, True)
    assert_equal(dtifit.fa > 0, True)
    sphere = create_unit_sphere(4)
    assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
    # Check that the multivoxel case works:
    dtifit = dm.fit(data)

    # Check that it works on signal that has already been normalized to S0:
    dm_to_relative = dti.TensorModel(gtab)
    relative_data = (data[0, 0, 0]/np.mean(data[0, 0, 0, gtab.b0s_mask]))

    dtifit_to_relative = dm_to_relative.fit(relative_data)
    npt.assert_almost_equal(dtifit.fa[0,0,0], dtifit_to_relative.fa, decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3,3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(A_squiggle / np.linalg.norm(A_squiggle))
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=\
        "Calculation of tensor from Y does not compare to analytical solution")

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
Exemple #31
0
def nls_fit_tensor_bounds(gtab, data, mask=None, Diso=3e-3,
                          min_signal=1.0e-6, bounds=None, jac=True):
    """
    Fit the water elimination tensor model using the non-linear least-squares
    with constraints.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    bounds : 2-tuple of arrays with 8 elements, optional
        Lower and upper bounds on fwdti model variables and the log of
        non-diffusion signal S0. Use np.inf with an appropriate sign to
        disable bounds on all or some variables. When bounds is set to None
        the following default variable bounds are used:
            ([0., -Diso, 0., -Diso, -Diso, 0., 0., np.exp(-10.)],
             [Diso, Diso, Diso, Diso, Diso, Diso, 1., np.exp(10.)])
    jac : bool
        Use the Jacobian? Default: True

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters
        in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment

    References
    ----------
    .. [1] Henriques, R.N., Rokem, A., Garyfallidis, E., St-Jean, S., Peterson,
           E.T., Correia, M.M., 2017. Re: Optimization of a free water
           elimination two-compartmental model for diffusion tensor imaging.
           ReScience
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter_bounds(W, data[v], S0[v], Diso=Diso,
                                     min_signal=min_signal, bounds=bounds,
                                     jac=jac)
            fw_params[v] = params

    return fw_params
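A minimal usage sketch for nls_fit_tensor_bounds, assuming a hypothetical 4D NIfTI dataset ('dwi.nii.gz') with matching 'dwi.bval'/'dwi.bvec' files; the bounds tuple simply restates the documented default layout (six lower-triangular tensor elements, the free water fraction f, and the S0-related term):

import numpy as np
import nibabel as nib
import dipy.core.gradients as grad

# Hypothetical input files; any 4D diffusion dataset with matching
# b-values/b-vectors would work here.
data = nib.load('dwi.nii.gz').get_fdata()
gtab = grad.gradient_table('dwi.bval', 'dwi.bvec')

Diso = 3e-3
# Documented default bounds, restated explicitly (see the docstring above).
bounds = ([0., -Diso, 0., -Diso, -Diso, 0., 0., np.exp(-10.)],
          [Diso, Diso, Diso, Diso, Diso, Diso, 1., np.exp(10.)])

fw_params = nls_fit_tensor_bounds(gtab, data, Diso=Diso, bounds=bounds, jac=True)
evals, f = fw_params[..., :3], fw_params[..., 12]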
Exemple #32
0
def gradient_descent(dwis, bvals, bvecs, mask, init_f, init_tensor, niters=50):
    '''
    Optimize the volume fraction and the tensor via gradient descent.
    '''
    dim_x, dim_y, dim_z = mask.shape
    indices_dwis = (bvals > 0)
    nb_dwis = np.count_nonzero(indices_dwis)
    indices_b0 = (bvals == 0)
    nb_b0 = np.count_nonzero(indices_b0)
    b = bvals.max()
    b0 = dwis[..., indices_b0].mean(-1)
    signal = dwis[..., indices_dwis] / b0[..., None]
    np.clip(signal, 1.0e-6, 1 - 1.0e-6, signal)
    bvals = bvals[indices_dwis]
    bvecs = bvecs[indices_dwis]
    gtab = gradient_table_from_bvals_bvecs(bvals, bvecs)
    H = dti.design_matrix(gtab)[:, :6]
    signal[np.logical_not(mask)] = 0.
    signal = signal[mask]
    lower_triangular = init_tensor[mask, 0]
    volume_fraction = init_f[mask]
    print(" - Begin gradient descent.")
    mask_nvoxels = np.count_nonzero(mask)
    step_size = 1.0e-7
    weight = 100.0
    l_min_loop, l_max_loop = 0.1e-3, 2.5e-3

    for i in range(niters):
        print(" - Iteration {0:d} out of {1:d}.".format(i + 1, niters))

        grad1, predicted_signal_tissue, predicted_signal_water = \
            grad_data_fit_tensor(lower_triangular, signal, H, bvals,
                                 volume_fraction)
        print("\tgrad1 avg: {0:0.4e}".format(np.mean(np.abs(grad1))))
        predicted_signal = volume_fraction[..., None] * predicted_signal_tissue + \
                           (1-volume_fraction[..., None]) * predicted_signal_water
        prediction_error = np.sqrt(((predicted_signal - signal)**2).mean(-1))
        print("\tpref error avg: {0:0.4e}".format(np.mean(prediction_error)))

        gradf = (bvals * (predicted_signal - signal) \
                           * (predicted_signal_tissue - predicted_signal_water)).sum(-1)
        print("\tgradf avg: {0:0.4e}".format(np.mean(np.abs(gradf))))
        volume_fraction -= weight * step_size * gradf

        grad1[np.isnan(grad1)] = 0
        # np.clip(grad1, -1.e5, 1.e5, grad1)
        np.clip(volume_fraction, 0.01, 0.99, volume_fraction)
        lower_triangular -= step_size * grad1
        lower_triangular[np.isnan(lower_triangular)] = 0

        dti_params = dti.eig_from_lo_tri(lower_triangular).reshape(
            (mask_nvoxels, 4, 3))
        evals = dti_params[..., 0, :]
        evecs = dti_params[..., 1:, :]
        lower_triangular = clip_tensor_evals(evals, evecs, l_min_loop,
                                             l_max_loop)
        del dti_params, evals, evecs

    final_tensor = np.zeros((dim_x, dim_y, dim_z, 1, 6), dtype=np.float32)
    final_tensor[mask, 0] = lower_triangular
    final_f = np.zeros((dim_x, dim_y, dim_z), dtype=np.float32)
    final_f[mask] = 1 - volume_fraction

    return final_f, final_tensor
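A self-contained toy sketch of the volume-fraction update performed inside the loop above, with made-up arrays standing in for the predicted and measured signals; it only restates the mixing, error, gradient and clipping steps so the update rule can be inspected in isolation:

import numpy as np

# Toy stand-ins for the per-voxel signals used in gradient_descent.
rng = np.random.default_rng(0)
n_vox, n_dwi = 100, 32
signal = rng.uniform(0.2, 0.9, size=(n_vox, n_dwi))
predicted_signal_tissue = rng.uniform(0.2, 0.9, size=(n_vox, n_dwi))
predicted_signal_water = rng.uniform(0.01, 0.1, size=(n_vox, n_dwi))
bvals = np.full(n_dwi, 1000.)
volume_fraction = rng.uniform(0.2, 0.8, size=n_vox)

# Mix the two compartments, measure the RMS error, form the gradient on f.
predicted_signal = (volume_fraction[..., None] * predicted_signal_tissue +
                    (1 - volume_fraction[..., None]) * predicted_signal_water)
prediction_error = np.sqrt(((predicted_signal - signal) ** 2).mean(-1))
gradf = (bvals * (predicted_signal - signal) *
         (predicted_signal_tissue - predicted_signal_water)).sum(-1)

# One descent step followed by clipping, as in the loop above.
volume_fraction -= 100.0 * 1.0e-7 * gradf
np.clip(volume_fraction, 0.01, 0.99, out=volume_fraction)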
Exemple #33
0
data_fill = sdb.substitute(data_neg, min_signal)
data_in_mask = sdb.filter(data_fill, data_fill.attribute[0]>0)
print "Truncation done: ", str(time.ctime())
print data_in_mask.shape

"""

#TODO: params_in_mask = self.fit_method(self.design_matrix, data_in_mask, *self.args, **self.kwargs), where we need to re-implement
#       def wls_fit_tensor(design_matrix, data). This is a fancy math function including an Einstein Summation...

data_in_mask = data_sdb.reshape((-1, data_sdb.shape[-1]))
data_in_mask = convert_array_type(sdb, data_in_mask, "double")

# Create design matrix B

design_matrix = sdb.from_array(dti.design_matrix(gtab))

# wls fit
# apparently one has to run the svd three times to get all three values...

u = robust.gesvd(design_matrix, "'U'")

ols_fit = sdb.dot(u, u.T)

log_s = sdb.log(data_in_mask)

# The einsum: w = np.exp(np.einsum('...ij,...j', ols_fit, log_s))

w = sdb.exp(sdb.dot(ols_fit, log_s.T).T)

# p = sdb.from_array(dipys_3d_pinv(sdb, sdb.dstack([design_matrix[:, i] * w for i in range(design_matrix.shape[1])])))
Exemple #34
0
def initial_fit(dwis,
                bvals,
                bvecs,
                mask,
                wm_roi,
                csf_roi,
                MD,
                csf_percentile=95,
                wm_percentile=5,
                lmin=0.1e-3,
                lmax=2.5e-3,
                evals_lmin=0.1e-3,
                evals_lmax=2.5e-3,
                md_value=0.6e-3,
                interpolate=True,
                fixed_MD=False):
    '''
    Produce the initial estimate of the volume fraction and the initial tensor image
    '''
    print(" - Compute baseline image and DW attenuation.")
    dim_x, dim_y, dim_z = mask.shape
    indices_dwis = (bvals > 0)
    nb_dwis = np.count_nonzero(indices_dwis)
    indices_b0 = (bvals == 0)
    nb_b0 = np.count_nonzero(indices_b0)
    # TO DO : address this line for multi-shell dwi
    b = bvals.max()
    b0 = dwis[..., indices_b0].mean(-1)
    # signal attenuation
    signal = dwis[..., indices_dwis] / b0[..., None]
    np.clip(signal, 1.0e-6, 1 - 1.0e-6, signal)
    signal[np.logical_not(mask)] = 0.
    # tissue b0 references
    csf_b0 = np.percentile(b0[csf_roi], csf_percentile)
    print("\t{0:2d}th percentile of b0 signal in CSF: {1}.".format(
        csf_percentile, csf_b0))
    wm_b0 = np.percentile(b0[wm_roi], wm_percentile)
    print("\t{0:2d}th percentile of b0 signal in WM : {1}.".format(
        wm_percentile, wm_b0))

    print(" - Compute initial volume fraction ...")
    # Eq. 7 from Pasternak 2009 MRM
    epsi = 1e-12  # only used to prevent log(0)
    init_f = 1 - np.log(b0 / wm_b0 + epsi) / np.log(csf_b0 / wm_b0)
    np.clip(init_f, 0.0, 1.0, init_f)
    alpha = init_f.copy()  # exponent for interpolation

    print(" - Compute fixed MD VF map")
    init_f_MD = (np.exp(-b * MD) - np.exp(-b * d)) / (np.exp(-b * md_value) -
                                                      np.exp(-b * d))
    np.clip(init_f_MD, 0.01, 0.99, init_f_MD)

    print(" - Compute min_f and max_f from lmin, lmax")
    ### This was following Pasternak 2009 although with an error
    ### Amin = exp(-b*lmax)   and Amax = exp(-b*lmin)  in that paper
    # min_f = (signal.min(-1)-np.exp(-b*d)) / (np.exp(-b*lmin)-np.exp(-b*d))
    # max_f = (signal.max(-1)-np.exp(-b*d)) / (np.exp(-b*lmax)-np.exp(-b*d))
    ### From Pasternak 2009 method, Amin < At implies that the
    ### term with signal.min(-1) in numerator is the upper bound of f
    ### although in that paper the equation 6 has fmin and fmax reversed.
    ### With lmin, lmax=0.1e-3, 2.5e-3, Amin = 0.08, Awater = 0.04
    ### and one can see that max_f here will usually be >> 1
    min_f = (signal.max(-1) - np.exp(-b * d)) / (np.exp(-b * lmin) -
                                                 np.exp(-b * d))
    max_f = (signal.min(-1) - np.exp(-b * d)) / (np.exp(-b * lmax) -
                                                 np.exp(-b * d))
    # If MD of a voxel is > 3.0e-3, min_f and max_f can be negative.
    # These voxels should be initialized as 0
    np.clip(min_f, 0.0, 1.0, min_f)
    np.clip(max_f, 0.0, 1.0, max_f)
    np.clip(init_f, min_f, max_f, init_f)

    if interpolate:
        print(" - Interpolate two estimates of volume fraction")
        # f = tissue fraction. with init_f high, alpha will be ~1 and init_f_MD will be weighted
        init_f = (np.power(init_f, (1 - alpha))) * (np.power(init_f_MD, alpha))
    elif fixed_MD:
        print(
            " - Using fixed MD value of {0} for inital volume fraction".format(
                md_value))
        init_f = init_f_MD
    else:
        print(" - Using lmin and lmax for initial volume fraction")

    np.clip(init_f, 0.05, 0.99, init_f)  # want minimum 5% of tissue
    init_f[np.isnan(init_f)] = 0.5
    init_f[np.logical_not(mask)] = 0.5

    print(" - Compute initial tissue tensor ...")
    signal[np.isnan(signal)] = 0
    bvecs = bvecs[indices_dwis]
    bvals = bvals[indices_dwis]
    signal_free_water = np.exp(-bvals * d)
    corrected_signal = (signal - (1 - init_f[..., np.newaxis]) \
                     * signal_free_water[np.newaxis, np.newaxis, np.newaxis, :]) \
                     / (init_f[..., np.newaxis])
    np.clip(corrected_signal, 1.0e-3, 1. - 1.0e-3, corrected_signal)
    log_signal = np.log(corrected_signal)
    gtab = gradient_table_from_bvals_bvecs(bvals, bvecs)
    H = dti.design_matrix(gtab)[:, :6]
    pseudo_inv = np.dot(np.linalg.inv(np.dot(H.T, H)), H.T)
    init_tensor = np.dot(log_signal, pseudo_inv.T)

    dti_params = dti.eig_from_lo_tri(init_tensor).reshape(
        (dim_x, dim_y, dim_z, 4, 3))
    evals = dti_params[..., 0, :]
    evecs = dti_params[..., 1:, :]
    if evals_lmin > 0.1e-3:
        print(" - Fatten tensor to {}".format(evals_lmin))
    lower_triangular = clip_tensor_evals(evals, evecs, evals_lmin, evals_lmax)
    lower_triangular[np.logical_not(mask)] = [
        evals_lmin, 0, evals_lmin, 0, 0, evals_lmin
    ]
    nan_mask = np.any(np.isnan(lower_triangular), axis=-1)
    lower_triangular[nan_mask] = [evals_lmin, 0, evals_lmin, 0, 0, evals_lmin]

    init_tensor = lower_triangular[:, :, :, np.newaxis, :]
    return init_f, init_tensor
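A tiny numeric sketch of the Eq. 7 initialization used above, with made-up b0 reference values, just to show how the tissue fraction interpolates between the WM and CSF anchors:

import numpy as np

# Hypothetical b0 references (wm_percentile-th percentile in WM,
# csf_percentile-th percentile in CSF).
wm_b0, csf_b0 = 400., 1200.
b0 = np.array([400., 800., 1200.])   # voxels ranging from WM-like to CSF-like

epsi = 1e-12  # only used to prevent log(0)
init_f = 1 - np.log(b0 / wm_b0 + epsi) / np.log(csf_b0 / wm_b0)
np.clip(init_f, 0.0, 1.0, out=init_f)
print(init_f)  # ~[1.0, 0.37, 0.0]: tissue fraction falls from WM-like to CSF-like voxels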
Exemple #35
0
def test_wls_and_ls_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    # Defining Test Voxel (avoid nibabel dependency) ###

    # Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    gtab = grad.gradient_table(bval, bvec)
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    npt.assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Testing WLS Fit on Single Voxel
    # If you do something wonky (passing min_signal<0), you should get an
    # error:
    npt.assert_raises(ValueError, TensorModel, gtab, fit_method='WLS',
                      min_signal=-1)

    # Estimate tensor from test signals
    model = TensorModel(gtab, fit_method='WLS', return_S0_hat=True)
    tensor_est = model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                  err_msg="Calculation of tensor from Y does "
                                          "not compare to analytical solution")
    npt.assert_almost_equal(tensor_est.md[0], md)
    npt.assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, fit_method='LS')
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    npt.assert_array_almost_equal(tensor_est.linearity, linearity(evals))
    npt.assert_array_almost_equal(tensor_est.planarity, planarity(evals))
    npt.assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
Exemple #36
0
def test_wls_and_ls_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    # Defining Test Voxel (avoid nibabel dependency) ###

    # Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    gtab = grad.gradient_table(bval, bvec)
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Testing WLS Fit on Single Voxel
    # If you do something wonky (passing min_signal<0), you should get an
    # error:
    npt.assert_raises(ValueError, TensorModel, gtab, fit_method='WLS',
                      min_signal=-1)

    # Estimate tensor from test signals
    model = TensorModel(gtab, fit_method='WLS', return_S0_hat=True)
    tensor_est = model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                              err_msg="Calculation of tensor from Y does not "
                                      "compare to analytical solution")
    assert_almost_equal(tensor_est.md[0], md)
    assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, fit_method='LS')
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    assert_array_almost_equal(tensor_est.linearity, linearity(evals))
    assert_array_almost_equal(tensor_est.planarity, planarity(evals))
    assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
Exemple #37
0
def fwdti_prediction(params, gtab, S0=1, Diso=3.0e-3):
    r""" Signal prediction given the free water DTI model parameters.

    Parameters
    ----------
    params : (..., 13) ndarray
        Model parameters. The last dimension should have the 12 tensor
        parameters (3 eigenvalues, followed by the 3 corresponding
        eigenvectors) and the volume fraction of the free water compartment.
    gtab : a GradientTable class instance
        The gradient table for this prediction
    S0 : float or ndarray
        The non diffusion-weighted signal in every voxel, or across all
        voxels. Default: 1
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.

    Returns
    -------
    S : (..., N) ndarray
        Simulated signal based on the free water DTI model

    Notes
    -----
    The predicted signal is given by:
    $S(\theta, b) = S_0 * [(1-f) * e^{-b ADC} + f * e^{-b D_{iso}}]$, where
    $ADC = \theta Q \theta^T$, $\theta$ is a unit vector pointing at any
    direction on the sphere for which a signal is to be predicted, $b$ is the b
    value provided in the GradientTable input for that direction, $Q$ is the
    quadratic form of the tensor determined by the input parameters, $f$ is the
    volume fraction of the free water compartment, and $D_{iso}$ is the free
    water diffusivity, which is equal to $3 * 10^{-3} mm^{2}s^{-1}$ [1]_.

    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    evals = params[..., :3]
    evecs = params[..., 3:-1].reshape(params.shape[:-1] + (3, 3))
    f = params[..., 12]
    qform = vec_val_vect(evecs, evals)
    lower_dt = lower_triangular(qform, S0)
    lower_diso = lower_dt.copy()
    lower_diso[..., 0] = lower_diso[..., 2] = lower_diso[..., 5] = Diso
    lower_diso[..., 1] = lower_diso[..., 3] = lower_diso[..., 4] = 0
    D = design_matrix(gtab)

    pred_sig = np.zeros(f.shape + (gtab.bvals.shape[0],))
    mask = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2])
    index = ndindex(f.shape)
    for v in index:
        if mask[v]:
            pred_sig[v] = (1 - f[v]) * np.exp(np.dot(lower_dt[v], D.T)) + \
                          f[v] * np.exp(np.dot(lower_diso[v], D.T))

    return pred_sig
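A hedged single-voxel sketch of the prediction formula in the Notes above, mirroring the loop body of fwdti_prediction; the gradient table, tissue tensor, S0 and f below are made up for illustration:

import numpy as np
import dipy.core.gradients as grad
import dipy.reconst.dti as dti

# Made-up single-shell acquisition: one b=0 volume plus three b=1000 directions.
bvals = np.array([0., 1000., 1000., 1000.])
bvecs = np.array([[0., 0., 0.],
                  [1., 0., 0.],
                  [0., 1., 0.],
                  [0., 0., 1.]])
gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs)

S0, f, Diso = 100., 0.3, 3.0e-3
q_tissue = np.diag([1.6e-3, 0.4e-3, 0.4e-3])   # made-up tissue tensor

D = dti.design_matrix(gtab)
lower_dt = dti.lower_triangular(q_tissue, S0)  # 6 tensor elements + log(S0) term
lower_iso = dti.lower_triangular(np.eye(3) * Diso, S0)

pred_sig = ((1 - f) * np.exp(np.dot(lower_dt, D.T)) +
            f * np.exp(np.dot(lower_iso, D.T)))
# pred_sig[0] equals S0; the remaining entries mix tissue and free water attenuation.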
Exemple #38
0
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0]/np.mean(data[0, 0, 0,
                                                        gtab.b0s_mask]))

            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0], dtifit_to_relative.fa,
                                    decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = (3 * np.sqrt(6) * np.linalg.det(A_squiggle /
            np.linalg.norm(A_squiggle)))
    evals_eigh, evecs_eigh = np.linalg.eigh(tensor)
    # Sort according to eigen-value from large to small:
    evecs = evecs_eigh[:, np.argsort(evals_eigh)[::-1]]
    # Check that eigenvalues and eigenvectors are properly sorted through
    # that previous operation:
    for i in range(3):
        assert_array_almost_equal(np.dot(tensor, evecs[:, i]),
                                  evals[i] * evecs[:, i])
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method,
                                       return_S0_hat=True)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(tensor_fit.S0_hat, b0, decimal=3)
        # Test that the eigenvectors are correct, one-by-one:
        for i in range(3):
            # Eigenvectors have intrinsic sign ambiguity
            # (see
            # http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf)
            # so we need to allow for sign flips. One of the following should
            # always be true:
            assert_(
                    np.all(np.abs(tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6) or
                    np.all(np.abs(-tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6))
            # We set a fixed tolerance of 10e-6, similar to array_almost_equal

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=err_msg)

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')

    # Test custom fit tensor method
    try:
        model = dti.TensorModel(gtab, fit_method=lambda *args, **kwargs: 42)
        fit = model.fit_method()
    except Exception as exc:
        assert False, "TensorModel should accept custom fit methods: %s" % exc
    assert fit == 42, "Custom fit method for TensorModel returned %s." % fit

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Return S0_test
    tensor_model = dti.TensorModel(gtab, return_S0_hat=True)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)
    assert_array_almost_equal(fit[0].S0_hat, b0)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
Exemple #39
0
def wls_fit_tensor(gtab, data, Diso=3e-3, mask=None, min_signal=1.0e-6,
                   piterations=3, mdreg=2.7e-3):
    r""" Computes weighted least squares (WLS) fit to calculate self-diffusion
    tensor using a linear regression model [1]_.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    piterations : int, optional
        Number of iterations used to refine the precision of f. Default is set
        to 3 corresponding to a precision of 0.01.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default mdreg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters
        in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment.

    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = wls_iter(W, data[v], S0[v], min_signal=min_signal,
                              Diso=Diso, piterations=piterations, mdreg=mdreg)
            fw_params[v] = params

    return fw_params
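A minimal usage sketch for wls_fit_tensor, with hypothetical file names and a crude mean-b0 threshold used only to build a mask of the required shape data.shape[:-1]:

import numpy as np
import nibabel as nib
import dipy.core.gradients as grad

# Hypothetical input files.
data = nib.load('dwi.nii.gz').get_fdata()
gtab = grad.gradient_table('dwi.bval', 'dwi.bvec')

# Boolean mask with shape data.shape[:-1] (here a crude mean-b0 threshold).
mean_b0 = data[..., gtab.b0s_mask].mean(-1)
mask = mean_b0 > np.percentile(mean_b0, 50)

fw_params = wls_fit_tensor(gtab, data, mask=mask)
evals = fw_params[..., :3]                                   # tensor eigenvalues
evecs = fw_params[..., 3:12].reshape(fw_params.shape[:-1] + (3, 3))
f = fw_params[..., 12]                                       # free water fraction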
Exemple #40
0
def nls_fit_tensor(gtab, data, mask=None, Diso=3e-3, mdreg=2.7e-3,
                   min_signal=1.0e-6, f_transform=True, cholesky=False,
                   jac=True):
    """
    Fit the water elimination tensor model using the non-linear least-squares.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default mdreg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    f_transform : bool, optional
        If true, the water volume fraction is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between
        0 and 1.
        Default: True
    cholesky : bool, optional
        If true it uses Cholesky decomposition to ensure that the diffusion
        tensor is positive definite.
        Default: False
    jac : bool
        Use the Jacobian? Default: True

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters in
        the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment
    """
    # Analyse compatible input cases
    if jac is True and cholesky is True:
        raise ValueError("Cholesky decomposition is not compatible with jac.")

    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter(W, data[v], S0[v],
                              Diso=Diso, mdreg=mdreg,
                              min_signal=min_signal,
                              f_transform=f_transform, cholesky=cholesky,
                              jac=jac)
            fw_params[v] = params

    return fw_params
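A small sketch of the f_transform reparameterization mentioned in the docstring above: the solver works on ft = arcsin(2*f - 1) + pi/2, and any real-valued step on ft maps back to a volume fraction inside [0, 1]:

import numpy as np

f = np.array([0.05, 0.5, 0.95])
ft = np.arcsin(2 * f - 1) + np.pi / 2      # forward transform used during fitting
f_back = (np.sin(ft - np.pi / 2) + 1) / 2  # inverse mapping back to a fraction
assert np.allclose(f, f_back)

# Even an unconstrained step on ft keeps the recovered fraction in [0, 1]:
ft_step = ft + 5.0
f_step = (np.sin(ft_step - np.pi / 2) + 1) / 2
assert np.all((f_step >= 0) & (f_step <= 1))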
Exemple #41
0
def nls_fit_tensor(gtab, data, mask=None, Diso=3e-3, mdreg=2.7e-3,
                   min_signal=1.0e-6, f_transform=True, cholesky=False,
                   jac=False, weighting=None, sigma=None):
    """
    Fit the water elimination tensor model using the non-linear least-squares.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default mdreg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    f_transform : bool, optional
        If true, the water volume fraction is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between
        0 and 1.
        Default: True
    cholesky : bool, optional
        If true it uses Cholesky decomposition to ensure that the diffusion
        tensor is positive definite.
        Default: False
    jac : bool
        Use the Jacobian? Default: False
    weighting : str, optional
        The weighting scheme to use in considering the
        squared-error. Default behavior is to use uniform weighting. Other
        options: 'sigma' 'gmm'
    sigma : float, optional
        If the 'sigma' weighting scheme is used, a value of sigma needs to be
        provided here. According to [Chang2005]_, a good value to use is
        1.5267 * std(background_noise), where background_noise is estimated
        from some part of the image known to contain no signal (only noise).

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters in
        the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter(W, data[v], S0[v], Diso=Diso, mdreg=mdreg,
                              min_signal=min_signal, f_transform=f_transform,
                              cholesky=cholesky, jac=jac, weighting=weighting,
                              sigma=sigma)
            fw_params[v] = params

    return fw_params
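A usage sketch for this weighted variant, with sigma estimated from an assumed signal-free corner of the volume following the docstring's 1.5267 * std(background_noise) suggestion (the file names and the background region are hypothetical):

import numpy as np
import nibabel as nib
import dipy.core.gradients as grad

# Hypothetical input files.
data = nib.load('dwi.nii.gz').get_fdata()
gtab = grad.gradient_table('dwi.bval', 'dwi.bvec')

# Rough noise estimate from a corner assumed to contain no signal.
background = data[:10, :10, :10, :]
sigma = 1.5267 * np.std(background)

fw_params = nls_fit_tensor(gtab, data, weighting='sigma', sigma=sigma)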
Exemple #42
0
import numpy as np
import nibabel as nib
import dipy.reconst.dti as dti
import dipy.data as dpd
import dipy.core.gradients as grad

b0 = 1000.
bvecs, bval = dpd.read_bvec_file(dpd.get_data('55dir_grad.bvec'))
gtab = grad.gradient_table(bval, bvecs)
B = bval[1]

D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
evals = np.array([2., 1., 0.]) / B
md = evals.mean()
tensor = dti.from_lower_triangular(D)

X = dti.design_matrix(gtab)

data = np.exp(np.dot(X, D))
data.shape = (-1,) + data.shape

dti_wls = dti.TensorModel(gtab)
fit_wls = dti_wls.fit(data)
fa1 = fit_wls.fa

noisy_data = np.copy(data)
noisy_data[..., -1] = 1.0

fit_wls_noisy = dti_wls.fit(noisy_data)
fa2 = fit_wls_noisy.fa

dti_restore = dti.TensorModel(gtab, fit_method='RESTORE', sigma=67.)
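A hedged continuation of the snippet above: fit the RESTORE model to the corrupted data and compare its FA against the clean fit and the plain-WLS noisy fit:

fit_restore_noisy = dti_restore.fit(noisy_data)
fa3 = fit_restore_noisy.fa

# RESTORE should down-weight the corrupted last volume, so fa3 is expected to
# sit much closer to fa1 than the ordinary WLS estimate fa2 does.
print(fa1, fa2, fa3)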
Exemple #43
0
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0] /
                             np.mean(data[0, 0, 0, gtab.b0s_mask]))

            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0],
                                    dtifit_to_relative.fa,
                                    decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = 3 * np.sqrt(6) * np.linalg.det(
        A_squiggle / np.linalg.norm(A_squiggle))
    evecs = np.linalg.eigh(tensor)[1]
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1, ) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab, fit_method=fit_method)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)

        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=\
        "Calculation of tensor from Y does not compare to analytical solution")

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError, dti.TensorModel, gtab, fit_method='crazy_method')

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
Exemple #44
0
def test_tensor_model():
    fdata, fbval, fbvec = get_data('small_25')
    data1 = nib.load(fdata).get_data()
    gtab1 = grad.gradient_table(fbval, fbvec)
    data2, gtab2 = dsi_voxels()
    for data, gtab in zip([data1, data2], [gtab1, gtab2]):
        dm = dti.TensorModel(gtab, 'LS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        dm = dti.TensorModel(gtab, 'WLS')
        dtifit = dm.fit(data[0, 0, 0])
        assert_equal(dtifit.fa < 0.9, True)
        assert_equal(dtifit.fa > 0, True)
        sphere = create_unit_sphere(4)
        assert_equal(len(dtifit.odf(sphere)), len(sphere.vertices))
        # Check that the multivoxel case works:
        dtifit = dm.fit(data)

        # Check that it works on signal that has already been normalized to S0:
        dm_to_relative = dti.TensorModel(gtab)
        if np.any(gtab.b0s_mask):
            relative_data = (data[0, 0, 0]/np.mean(data[0, 0, 0,
                                                        gtab.b0s_mask]))

            dtifit_to_relative = dm_to_relative.fit(relative_data)
            npt.assert_almost_equal(dtifit.fa[0, 0, 0], dtifit_to_relative.fa,
                                    decimal=3)

    # And smoke-test that all these operations return sensibly-shaped arrays:
    assert_equal(dtifit.fa.shape, data.shape[:3])
    assert_equal(dtifit.ad.shape, data.shape[:3])
    assert_equal(dtifit.md.shape, data.shape[:3])
    assert_equal(dtifit.rd.shape, data.shape[:3])
    assert_equal(dtifit.trace.shape, data.shape[:3])
    assert_equal(dtifit.mode.shape, data.shape[:3])
    assert_equal(dtifit.linearity.shape, data.shape[:3])
    assert_equal(dtifit.planarity.shape, data.shape[:3])
    assert_equal(dtifit.sphericity.shape, data.shape[:3])

    # Test for the shape of the mask
    assert_raises(ValueError, dm.fit, np.ones((10, 10, 3)), np.ones((3, 3)))

    # Make some synthetic data
    b0 = 1000.
    bvecs, bvals = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    # The first b value is 0., so we take the second one:
    B = bvals[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    A_squiggle = tensor - (1 / 3.0) * np.trace(tensor) * np.eye(3)
    mode = (3 * np.sqrt(6) * np.linalg.det(A_squiggle /
            np.linalg.norm(A_squiggle)))
    evals_eigh, evecs_eigh = np.linalg.eigh(tensor)
    # Sort according to eigen-value from large to small:
    evecs = evecs_eigh[:, np.argsort(evals_eigh)[::-1]]
    # Check that eigenvalues and eigenvectors are properly sorted through
    # that previous operation:
    for i in range(3):
        assert_array_almost_equal(np.dot(tensor, evecs[:, i]),
                                  evals[i] * evecs[:, i])
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Test fitting with different methods:
    for fit_method in ['OLS', 'WLS', 'NLLS']:
        tensor_model = dti.TensorModel(gtab,
                                       fit_method=fit_method,
                                       return_S0_hat=True)

        tensor_fit = tensor_model.fit(Y)
        assert_true(tensor_fit.model is tensor_model)
        assert_equal(tensor_fit.shape, Y.shape[:-1])
        assert_array_almost_equal(tensor_fit.evals[0], evals)
        assert_array_almost_equal(tensor_fit.S0_hat, b0, decimal=3)
        # Test that the eigenvectors are correct, one-by-one:
        for i in range(3):
            # Eigenvectors have intrinsic sign ambiguity
            # (see
            # http://prod.sandia.gov/techlib/access-control.cgi/2007/076422.pdf)
            # so we need to allow for sign flips. One of the following should
            # always be true:
            assert_(
                    np.all(np.abs(tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6) or
                    np.all(np.abs(-tensor_fit.evecs[0][:, i] -
                                  evecs[:, i]) < 10e-6))
            # We set a fixed tolerance of 10e-6, similar to array_almost_equal

        err_msg = "Calculation of tensor from Y does not compare to "
        err_msg += "analytical solution"
        assert_array_almost_equal(tensor_fit.quadratic_form[0], tensor,
                                  err_msg=err_msg)

        assert_almost_equal(tensor_fit.md[0], md)
        assert_array_almost_equal(tensor_fit.mode, mode, decimal=5)
        assert_equal(tensor_fit.directions.shape[-2], 1)
        assert_equal(tensor_fit.directions.shape[-1], 3)

    # Test error-handling:
    assert_raises(ValueError,
                  dti.TensorModel,
                  gtab,
                  fit_method='crazy_method')

    # Test custom fit tensor method
    try:
        model = dti.TensorModel(gtab, fit_method=lambda *args, **kwargs: 42)
        fit = model.fit_method()
    except Exception as exc:
        assert False, "TensorModel should accept custom fit methods: %s" % exc
    assert fit == 42, "Custom fit method for TensorModel returned %s." % fit

    # Test multi-voxel data
    data = np.zeros((3, Y.shape[1]))
    # Normal voxel
    data[0] = Y
    # High diffusion voxel, all diffusing weighted signal equal to zero
    data[1, gtab.b0s_mask] = b0
    data[1, ~gtab.b0s_mask] = 0
    # Masked voxel, all data set to zero
    data[2] = 0.

    tensor_model = dti.TensorModel(gtab)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)

    # Return S0_test
    tensor_model = dti.TensorModel(gtab, return_S0_hat=True)
    fit = tensor_model.fit(data)
    assert_array_almost_equal(fit[0].evals, evals)
    assert_array_almost_equal(fit[0].S0_hat, b0)

    # Evals should be high for high diffusion voxel
    assert_(all(fit[1].evals > evals[0] * .9))

    # Evals should be zero where data is masked
    assert_array_almost_equal(fit[2].evals, 0.)
Exemple #45
0
def nls_fit_tensor(gtab,
                   data,
                   mask=None,
                   Diso=3e-3,
                   min_signal=1.0e-6,
                   f_transform=True,
                   cholesky=False,
                   jac=True):
    """
    Fit the water elimination tensor model using the non-linear least-squares.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    f_transform : bool, optional
        If true, the water volume fraction is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between
        0 and 1.
        Default: True
    cholesky : bool, optional
        If true it uses Cholesky decomposition to ensure that the diffusion
        tensor is positive definite.
        Default: False
    jac : bool
        Use the Jacobian? Default: True

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters
        in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment

    References
    ----------
    .. [1] Henriques, R.N., Rokem, A., Garyfallidis, E., St-Jean, S., Peterson,
           E.T., Correia, M.M., 2017. Re: Optimization of a free water
           elimination two-compartmental model for diffusion tensor imaging.
           ReScience
    """
    # Analyse compatible input cases
    if jac is True and cholesky is True:
        raise ValueError("Cholesky decomposition is not compatible with jac.")

    fw_params = np.zeros(data.shape[:-1] + (13, ))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)

    # Loop data fitting through all voxels
    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter(W,
                              data[v],
                              S0[v],
                              Diso=Diso,
                              min_signal=min_signal,
                              f_transform=f_transform,
                              cholesky=cholesky,
                              jac=jac)
            fw_params[v] = params

    return fw_params
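Finally, a minimal usage sketch for this nls_fit_tensor variant, pulling the tissue eigenvalues and free water fraction out of the 13-parameter map and computing a free-water corrected FA with dipy's fractional_anisotropy (file names are hypothetical):

import nibabel as nib
import dipy.core.gradients as grad
from dipy.reconst.dti import fractional_anisotropy

# Hypothetical input files.
data = nib.load('dwi.nii.gz').get_fdata()
gtab = grad.gradient_table('dwi.bval', 'dwi.bvec')

fw_params = nls_fit_tensor(gtab, data)
evals = fw_params[..., :3]
f = fw_params[..., 12]               # free water volume fraction
fa = fractional_anisotropy(evals)    # FA of the free-water corrected tissue tensor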