Example #1
def eig_from_lo_tri(data):
    """Calculates parameters for creating a Tensor instance

    Calculates tensor parameters from the six unique tensor elements. This
    function can be passed to the Tensor class as a fit_method for creating a
    Tensor instance from tensors stored in a nifti file.

    Parameters
    ----------
    data : array_like (..., 6)
        diffusion tensor elements stored in lower triangular order

    Returns
    -------
    dti_params
        Eigenvalues and eigenvectors, used by the Tensor class to create an
        instance
    """
    data = np.asarray(data)
    data_flat = data.reshape((-1, data.shape[-1]))
    dti_params = np.empty((len(data_flat), 4, 3))

    for ii in range(len(data_flat)):
        tensor = from_lower_triangular(data_flat[ii])
        eigvals, eigvecs = decompose_tensor(tensor)
        dti_params[ii, 0] = eigvals
        dti_params[ii, 1:] = eigvecs

    dti_params.shape = data.shape[:-1] + (12, )
    return dti_params
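
A minimal numpy-only sketch of the convention assumed above: dipy stores the six unique elements in lower-triangular order [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz], and np.linalg.eigh plus a descending sort mirrors what from_lower_triangular and decompose_tensor compute. Values are illustrative.

import numpy as np

# Hypothetical tensor; ordering assumed to be dipy's lower-triangular
# convention [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz].
lo_tri = np.array([1.7e-3, 0.0, 0.3e-3, 0.0, 0.0, 0.3e-3])
D = np.array([[lo_tri[0], lo_tri[1], lo_tri[3]],
              [lo_tri[1], lo_tri[2], lo_tri[4]],
              [lo_tri[3], lo_tri[4], lo_tri[5]]])
eigvals, eigvecs = np.linalg.eigh(D)    # ascending eigenvalues
order = np.argsort(eigvals)[::-1]       # dipy sorts them descending
eigvals, eigvecs = eigvals[order], eigvecs[:, order]
print(eigvals)    # [0.0017 0.0003 0.0003]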
Example #2
def eig_from_lo_tri(data):
    """Calculates parameters for creating a Tensor instance

    Calculates tensor parameters from the six unique tensor elements. This
    function can be passed to the Tensor class as a fit_method for creating a
    Tensor instance from tensors stored in a nifti file.

    Parameters
    ----------
    data : array_like (..., 6)
        diffusion tensor elements stored in lower triangular order

    Returns
    -------
    dti_params
        Eigenvalues and eigenvectors, used by the Tensor class to create an
        instance
    """
    data = np.asarray(data)
    data_flat = data.reshape((-1, data.shape[-1]))
    dti_params = np.empty((len(data_flat), 4, 3))

    for ii in range(len(data_flat)):
        tensor = from_lower_triangular(data_flat[ii])
        eigvals, eigvecs = decompose_tensor(tensor)
        dti_params[ii, 0] = eigvals
        dti_params[ii, 1:] = eigvecs

    dti_params.shape = data.shape[:-1] + (12,)
    return dti_params
Example #3
File: dki.py Project: oesteban/dipy
def wls_fit_dki(design_matrix, data):
    r""" Computes weighted linear least squares (WLS) fit to calculate
    the diffusion tensor and kurtosis tensor using a weighted linear
    regression diffusion kurtosis model [1]_.

    Parameters
    ----------
    design_matrix : array (g, 22)
        Design matrix holding the covariates used to solve for the regression
        coefficients.
    data : array (N, g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.

    Returns
    -------
    dki_params : array (N, 27)
        All parameters estimated from the diffusion kurtosis model for all N
        voxels.
        Parameters are ordered as follows:
            1) Three diffusion tensor eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) Fifteen elements of the kurtosis tensor

    References
    ----------
       [1] Veraart, J., Sijbers, J., Sunaert, S., Leemans, A., Jeurissen, B.,
           2013. Weighted linear least squares estimation of diffusion MRI
           parameters: Strengths, limitations, and pitfalls. NeuroImage 81,
           335-346.
    """

    tol = 1e-6

    # preparing data and initializing parameters
    data = np.asarray(data)
    data_flat = data.reshape((-1, data.shape[-1]))
    dki_params = np.empty((len(data_flat), 27))

    # inverting design matrix and defining the minimum diffusivity allowed
    min_diffusivity = tol / -design_matrix.min()
    inv_design = np.linalg.pinv(design_matrix)

    # looping the WLS solution over all data voxels
    for vox in range(len(data_flat)):
        dki_params[vox] = _wls_iter(design_matrix, inv_design, data_flat[vox],
                                    min_diffusivity)

    # Reshape data according to the input data shape
    dki_params = dki_params.reshape((data.shape[:-1]) + (27, ))

    return dki_params
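
The helper _wls_iter is not shown above; the following is a minimal sketch of what a WLS core of this kind typically does, assuming the common choice of squared predicted signals as weights (an assumption, not necessarily dipy's exact scheme).

import numpy as np

def wls_core(A, s, min_signal=1.0):
    # Log-linearize the signal, guarding against log(0)
    log_s = np.log(np.maximum(s, min_signal))
    # OLS estimate seeds the weights
    ols = np.linalg.pinv(A).dot(log_s)
    w = np.exp(A.dot(ols))          # predicted signals
    W = np.diag(w ** 2)             # assumed weighting: squared signals
    # Weighted normal equations: (A^T W A) beta = A^T W log(s)
    return np.linalg.solve(A.T.dot(W).dot(A), A.T.dot(W).dot(log_s))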
Example #4
def wls_fit_dki(design_matrix, data):
    r""" Computes weighted linear least squares (WLS) fit to calculate
    the diffusion tensor and kurtosis tensor using a weighted linear
    regression diffusion kurtosis model [1]_.

    Parameters
    ----------
    design_matrix : array (g, 22)
        Design matrix holding the covariates used to solve for the regression
        coefficients.
    data : array (N, g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.

    Returns
    -------
    dki_params : array (N, 27)
        All parameters estimated from the diffusion kurtosis model for all N
        voxels.
        Parameters are ordered as follows:
            1) Three diffusion tensor eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) Fifteen elements of the kurtosis tensor

    References
    ----------
       [1] Veraart, J., Sijbers, J., Sunaert, S., Leemans, A., Jeurissen, B.,
           2013. Weighted linear least squares estimation of diffusion MRI
           parameters: Strengths, limitations, and pitfalls. NeuroImage 81,
           335-346.
    """

    tol = 1e-6

    # preparing data and initializing parameters
    data = np.asarray(data)
    data_flat = data.reshape((-1, data.shape[-1]))
    dki_params = np.empty((len(data_flat), 27))

    # inverting design matrix and defining the minimum diffusivity allowed
    min_diffusivity = tol / -design_matrix.min()
    inv_design = np.linalg.pinv(design_matrix)

    # looping the WLS solution over all data voxels
    for vox in range(len(data_flat)):
        dki_params[vox] = _wls_iter(design_matrix, inv_design, data_flat[vox],
                                    min_diffusivity)

    # Reshape data according to the input data shape
    dki_params = dki_params.reshape((data.shape[:-1]) + (27,))

    return dki_params
Example #5
def ols_fit_dki(design_matrix, data):
    r""" Computes ordinary least squares (OLS) fit to calculate the diffusion
    tensor and kurtosis tensor using a linear regression diffusion kurtosis
    model [1]_.

    Parameters
    ----------
    design_matrix : array (g, 22)
        Design matrix holding the covariates used to solve for the regression
        coefficients.
    data : array (N, g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.

    Returns
    -------
    dki_params : array (N, 27)
        All parameters estimated from the diffusion kurtosis model.
        Parameters are ordered as follows:
            1) Three diffusion tensor eigenvalues
            2) Three lines of the eigenvector matrix each containing the first,
               second and third coordinates of the eigenvector
            3) Fifteen elements of the kurtosis tensor

    See Also
    --------
    wls_fit_dki

    References
    ----------
       [1] Tabesh, A., Jensen, J.H., Ardekani, B.A., Helpern, J.A., 2011.
           Estimation of tensors and tensor-derived measures in diffusional
           kurtosis imaging. Magn Reson Med. 65(3), 823-836
    """
    tol = 1e-6

    # preparing data and initializing parameters
    data = np.asarray(data)
    data_flat = data.reshape((-1, data.shape[-1]))
    dki_params = np.empty((len(data_flat), 27))

    # inverting design matrix and defining the minimum diffusivity allowed
    min_diffusivity = tol / -design_matrix.min()
    inv_design = np.linalg.pinv(design_matrix)

    # looping the OLS solution over all data voxels
    for vox in range(len(data_flat)):
        dki_params[vox] = _ols_iter(inv_design, data_flat[vox],
                                    min_diffusivity)

    # Reshape data according to the input data shape
    dki_params = dki_params.reshape((data.shape[:-1]) + (27,))

    return dki_params
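
The helper _ols_iter is likewise not shown; under the usual log-linear assumption its core reduces to applying the precomputed pseudoinverse to the log-signal, as in this sketch (the name ols_core is hypothetical).

import numpy as np

def ols_core(inv_design, s, min_signal=1.0):
    # Log-linearize the signal and apply the precomputed pseudoinverse;
    # the 22 regression coefficients parametrize D, W and log(S0).
    log_s = np.log(np.maximum(s, min_signal))
    return inv_design.dot(log_s)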
Example #6
File: dki.py Project: oesteban/dipy
def ols_fit_dki(design_matrix, data):
    r""" Computes ordinary least squares (OLS) fit to calculate the diffusion
    tensor and kurtosis tensor using a linear regression diffusion kurtosis
    model [1]_.

    Parameters
    ----------
    design_matrix : array (g, 22)
        Design matrix holding the covariates used to solve for the regression
        coefficients.
    data : array (N, g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.

    Returns
    -------
    dki_params : array (N, 27)
        All parameters estimated from the diffusion kurtosis model.
        Parameters are ordered as follows:
            1) Three diffusion tensor eigenvalues
            2) Three lines of the eigenvector matrix each containing the first,
               second and third coordinates of the eigenvector
            3) Fifteen elements of the kurtosis tensor

    See Also
    --------
    wls_fit_dki

    References
    ----------
       [1] Tabesh, A., Jensen, J.H., Ardekani, B.A., Helpern, J.A., 2011.
           Estimation of tensors and tensor-derived measures in diffusional
           kurtosis imaging. Magn Reson Med. 65(3), 823-836
    """
    tol = 1e-6

    # preparing data and initializing parameters
    data = np.asarray(data)
    data_flat = data.reshape((-1, data.shape[-1]))
    dki_params = np.empty((len(data_flat), 27))

    # inverting design matrix and defining the minimum diffusivity allowed
    min_diffusivity = tol / -design_matrix.min()
    inv_design = np.linalg.pinv(design_matrix)

    # looping the OLS solution over all data voxels
    for vox in range(len(data_flat)):
        dki_params[vox] = _ols_iter(inv_design, data_flat[vox],
                                    min_diffusivity)

    # Reshape data according to the input data shape
    dki_params = dki_params.reshape((data.shape[:-1]) + (27, ))

    return dki_params
Example #7
File: dki.py Project: oesteban/dipy
def dki_prediction(dki_params, gtab, S0=150):
    """ Predict a signal given diffusion kurtosis imaging parameters.

    Parameters
    ----------
    dki_params : ndarray (x, y, z, 27) or (n, 27)
        All parameters estimated from the diffusion kurtosis model.
        Parameters are ordered as follows:
            1) Three diffusion tensor eigenvalues
            2) Three lines of the eigenvector matrix each containing the first,
               second and third coordinates of the eigenvector
            3) Fifteen elements of the kurtosis tensor
    gtab : a GradientTable class instance
        The gradient table for this prediction
    S0 : float or ndarray (optional)
        The non diffusion-weighted signal in every voxel, or across all
        voxels. Default: 150

    Returns
    -------
    S : (..., N) ndarray
        Simulated signal based on the DKI model:

    .. math::

        S=S_{0}e^{-bD+\frac{1}{6}b^{2}D^{2}K}
    """
    evals, evecs, kt = split_dki_param(dki_params)

    # Define DKI design matrix according to given gtab
    A = design_matrix(gtab)

    # Flatten parameters and initialize pred_sig
    fevals = evals.reshape((-1, evals.shape[-1]))
    fevecs = evecs.reshape((-1, ) + evecs.shape[-2:])
    fkt = kt.reshape((-1, kt.shape[-1]))
    pred_sig = np.zeros((len(fevals), len(gtab.bvals)))

    # looping over all voxels
    for v in range(len(pred_sig)):
        DT = np.dot(np.dot(fevecs[v], np.diag(fevals[v])), fevecs[v].T)
        dt = lower_triangular(DT)
        MD = (dt[0] + dt[2] + dt[5]) / 3
        X = np.concatenate((dt, fkt[v] * MD * MD, np.array([np.log(S0)])),
                           axis=0)
        pred_sig[v] = np.exp(np.dot(A, X))

    # Reshape data according to the shape of dki_params
    pred_sig = pred_sig.reshape(dki_params.shape[:-1] + (pred_sig.shape[-1], ))

    return pred_sig
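
A self-contained toy check of the prediction assembled in the loop above: X = [dt, kt * MD^2, log(S0)] is exponentiated through the design matrix. All values below are made up for illustration.

import numpy as np

rng = np.random.default_rng(0)
A = rng.normal(size=(10, 22))                   # stand-in design matrix
dt = np.array([1.7e-3, 0.0, 0.3e-3, 0.0, 0.0, 0.3e-3])
kt = np.zeros(15)                               # zero kurtosis for the toy
MD = (dt[0] + dt[2] + dt[5]) / 3
X = np.concatenate((dt, kt * MD * MD, [np.log(150.0)]))
S = np.exp(A.dot(X))                            # one value per direction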
Example #8
def dki_prediction(dki_params, gtab, S0=150):
    """ Predict a signal given diffusion kurtosis imaging parameters.

    Parameters
    ----------
    dki_params : ndarray (x, y, z, 27) or (n, 27)
        All parameters estimated from the diffusion kurtosis model.
        Parameters are ordered as follows:
            1) Three diffusion tensor eigenvalues
            2) Three lines of the eigenvector matrix each containing the first,
               second and third coordinates of the eigenvector
            3) Fifteen elements of the kurtosis tensor
    gtab : a GradientTable class instance
        The gradient table for this prediction
    S0 : float or ndarray (optional)
        The non diffusion-weighted signal in every voxel, or across all
        voxels. Default: 150

    Returns
    -------
    S : (..., N) ndarray
        Simulated signal based on the DKI model:

    .. math::

        S=S_{0}e^{-bD+\frac{1}{6}b^{2}D^{2}K}
    """
    evals, evecs, kt = split_dki_param(dki_params)

    # Define DKI design matrix according to given gtab
    A = design_matrix(gtab)

    # Flatten parameters and initialize pred_sig
    fevals = evals.reshape((-1, evals.shape[-1]))
    fevecs = evecs.reshape((-1,) + evecs.shape[-2:])
    fkt = kt.reshape((-1, kt.shape[-1]))
    pred_sig = np.zeros((len(fevals), len(gtab.bvals)))

    # looping over all voxels
    for v in range(len(pred_sig)):
        DT = np.dot(np.dot(fevecs[v], np.diag(fevals[v])), fevecs[v].T)
        dt = lower_triangular(DT)
        MD = (dt[0] + dt[2] + dt[5]) / 3
        X = np.concatenate((dt, fkt[v]*MD*MD, np.array([np.log(S0)])), axis=0)
        pred_sig[v] = np.exp(np.dot(A, X))

    # Reshape data according to the shape of dki_params
    pred_sig = pred_sig.reshape(dki_params.shape[:-1] + (pred_sig.shape[-1],))

    return pred_sig
Example #9
def nlls_fit_tensor(design_matrix,
                    data,
                    min_signal=1,
                    weighting=None,
                    sigma=None,
                    jac=True):
    """
    Fit the tensor params using non-linear least-squares.

    Parameters
    ----------
    design_matrix : array (g, 7)
        Design matrix holding the covariates used to solve for the regression
        coefficients.

    data : array ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.

    min_signal : float, optional
        All values below min_signal are replaced with min_signal. This is done
        in order to avoid taking log(0) during the tensor fitting. Default = 1

    weighting : str
        The weighting scheme to use in considering the squared-error.
        Default behavior is to use uniform weighting. Other options:
        'sigma' and 'gmm'.

    sigma: float
        If the 'sigma' weighting scheme is used, a value of sigma needs to be
        provided here. According to [Chang2005]_, a good value to use is
        1.5267 * std(background_noise), where background_noise is estimated
        from some part of the image known to contain no signal (only noise).

    jac : bool
        Use the Jacobian? Default: True

    Returns
    -------
    nlls_params : ndarray
        The eigenvalues and eigenvectors of the tensor in each voxel.

    """
    # Flatten for the iteration over voxels:
    flat_data = data.reshape((-1, data.shape[-1]))
    # Use the OLS method parameters as the starting point for the optimization:
    inv_design = np.linalg.pinv(design_matrix)
    sig = np.maximum(flat_data, min_signal)
    log_s = np.log(sig)
    D = np.dot(inv_design, log_s.T).T

    # Flatten for the iteration over voxels:
    ols_params = np.reshape(D, (-1, D.shape[-1]))
    # 12 parameters per voxel (evals + evecs):
    dti_params = np.empty((flat_data.shape[0], 12))
    for vox in range(flat_data.shape[0]):
        start_params = ols_params[vox]
        # Do the optimization in this voxel:
        if jac:
            this_tensor, status = opt.leastsq(_nlls_err_func,
                                              start_params,
                                              args=(design_matrix,
                                                    flat_data[vox], weighting,
                                                    sigma),
                                              Dfun=_nlls_jacobian_func)
        else:
            this_tensor, status = opt.leastsq(_nlls_err_func,
                                              start_params,
                                              args=(design_matrix,
                                                    flat_data[vox], weighting,
                                                    sigma))

        # The parameters are the evals and the evecs:
        try:
            evals, evecs = decompose_tensor(
                from_lower_triangular(this_tensor[:6]))
            dti_params[vox, :3] = evals
            dti_params[vox, 3:] = evecs.ravel()

        # If leastsq failed to converge and produced nans, we'll resort to the
        # OLS solution in this voxel:
        except np.linalg.LinAlgError:
            print(vox)
            dti_params[vox, :] = start_params

    dti_params.shape = data.shape[:-1] + (12, )
    return dti_params
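
A self-contained toy of the opt.leastsq pattern used above, fitting a mono-exponential decay; the residual function and values are illustrative, not dipy's.

import numpy as np
import scipy.optimize as opt

def residuals(params, b, s):
    # Residuals of a toy mono-exponential signal model
    s0, d = params
    return s - s0 * np.exp(-b * d)

b = np.linspace(0, 3000.0, 20)
s = 100.0 * np.exp(-b * 0.7e-3)
fit, status = opt.leastsq(residuals, x0=(90.0, 1e-3), args=(b, s))
print(fit)    # approximately [1.0e+02 7.0e-04]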
Example #10
def recursive_response(gtab, data, mask=None, sh_order=8, peak_thr=0.01,
                       init_fa=0.08, init_trace=0.0021, iter=8,
                       convergence=0.001, parallel=True, nbr_processes=None,
                       sphere=default_sphere):
    """ Recursive calibration of response function using peak threshold

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data
    mask : ndarray, optional
        mask for recursive calibration, for example a white matter mask. It has
        shape `data.shape[0:3]` and dtype=bool. Default: use the entire data
        array.
    sh_order : int, optional
        maximal spherical harmonics order. Default: 8
    peak_thr : float, optional
        peak threshold, how large the second peak can be relative to the first
        peak in order to call it a single fiber population [1]. Default: 0.01
    init_fa : float, optional
        FA of the initial 'fat' response function (tensor). Default: 0.08
    init_trace : float, optional
        trace of the initial 'fat' response function (tensor). Default: 0.0021
    iter : int, optional
        maximum number of iterations for calibration. Default: 8.
    convergence : float, optional
        convergence criterion, maximum relative change of SH
        coefficients. Default: 0.001.
    parallel : bool, optional
        Whether to use parallelization in peak-finding during the calibration
        procedure. Default: True
    nbr_processes: int
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).
    sphere : Sphere, optional.
        The sphere used for peak finding. Default: default_sphere.

    Returns
    -------
    response : ndarray
        response function in SH coefficients

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. Using an FA threshold is not a very robust method.
    It is dependent on the dataset (non-informed user subjectivity), and still
    depends on the diffusion tensor (FA and first eigenvector),
    which has low accuracy at high b-value. This function recursively
    calibrates the response function, for more information see [1].

    References
    ----------
    .. [1] Tax, C.M.W., et al. NeuroImage 2014. Recursive calibration of
           the fiber response function for spherical deconvolution of
           diffusion MRI data.
    """
    S0 = 1.
    evals = fa_trace_to_lambdas(init_fa, init_trace)
    res_obj = (evals, S0)

    if mask is None:
        data = data.reshape(-1, data.shape[-1])
    else:
        data = data[mask]

    n = np.arange(0, sh_order + 1, 2)
    where_dwi = lazy_index(~gtab.b0s_mask)
    response_p = np.ones(len(n))

    for num_it in range(iter):
        r_sh_all = np.zeros(len(n))
        csd_model = ConstrainedSphericalDeconvModel(gtab, res_obj,
                                                    sh_order=sh_order)

        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     relative_peak_threshold=peak_thr,
                                     min_separation_angle=25,
                                     parallel=parallel,
                                     nbr_processes=nbr_processes)

        dirs = csd_peaks.peak_dirs
        vals = csd_peaks.peak_values
        single_peak_mask = (vals[:, 1] / vals[:, 0]) < peak_thr
        data = data[single_peak_mask]
        dirs = dirs[single_peak_mask]

        for num_vox in range(data.shape[0]):
            rotmat = vec2vec_rotmat(dirs[num_vox, 0], np.array([0, 0, 1]))

            rot_gradients = np.dot(rotmat, gtab.gradients.T).T

            x, y, z = rot_gradients[where_dwi].T
            r, theta, phi = cart2sphere(x, y, z)
            # for the gradient sphere
            B_dwi = real_sph_harm(0, n, theta[:, None], phi[:, None])
            r_sh_all += np.linalg.lstsq(B_dwi, data[num_vox, where_dwi])[0]

        response = r_sh_all / data.shape[0]
        res_obj = AxSymShResponse(data[:, gtab.b0s_mask].mean(), response)

        change = abs((response_p - response) / response_p)
        if all(change < convergence):
            break

        response_p = response

    return res_obj
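
The stopping rule above in isolation, on made-up responses: iterate until the maximum relative change of the coefficients drops below the convergence threshold.

import numpy as np

response_p = np.ones(5)
for response in (0.9 * np.ones(5), 0.8999 * np.ones(5)):
    change = np.abs((response_p - response) / response_p)
    if np.all(change < 0.001):    # same criterion as above
        break
    response_p = response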
Example #11
def odf_deconv(odf_sh, R, B_reg, lambda_=1., tau=0.1, r2_term=False):
    r""" ODF constrained-regularized spherical deconvolution using
    the Sharpening Deconvolution Transform (SDT) [1]_, [2]_.

    Parameters
    ----------
    odf_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,)
         ndarray of SH coefficients for the ODF spherical function to be
         deconvolved
    R : ndarray (``(sh_order + 1)(sh_order + 2)/2``, ``(sh_order + 1)(sh_order + 2)/2``)
         SDT matrix in SH basis
    B_reg : ndarray (``(sh_order + 1)(sh_order + 2)/2``, ``(sh_order + 1)(sh_order + 2)/2``)
         SH basis matrix used for deconvolution
    lambda_ : float
         lambda parameter in minimization equation (default 1.0)
    tau : float
         threshold (tau * max(fODF)) controlling the amplitude below
         which the corresponding fODF is assumed to be zero.
    r2_term : bool
         True if ODF is computed from model that uses the $r^2$ term in the
         integral. Recall that Tuch's ODF (used in Q-ball Imaging [1]_) and
         the true normalized ODF definition differ by an $r^2$ term in the ODF
         integral. The original Sharpening Deconvolution Transform (SDT)
         technique [2]_ expects Tuch's ODF without the $r^2$ term (see [3]_
         for the mathematical details). This function now supports ODFs that
         have been computed using the $r^2$ term because the proper analytical
         response function has been derived. For example, models such as DSI,
         GQI, SHORE, CSA, Tensor and multi-tensor ODFs should be deconvolved
         with r2_term=True.

    Returns
    -------
    fodf_sh : ndarray (``(sh_order + 1)(sh_order + 2)/2``,)
         Spherical harmonics coefficients of the constrained-regularized fiber
         ODF
    num_it : int
         Number of iterations in the constrained-regularization used for
         convergence

    References
    ----------
    .. [1] Tuch, D. MRM 2004. Q-Ball Imaging.
    .. [2] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions
    .. [3] Descoteaux, M, PhD thesis, INRIA Sophia-Antipolis, 2008.
    """
    # In ConstrainedSDTModel.fit, odf_sh is divided by its norm (Z) and
    # sometimes the norm is 0 which creates NaNs.
    if np.any(np.isnan(odf_sh)):
        return np.zeros_like(odf_sh), 0

    # Generate initial fODF estimate, which is the ODF truncated at SH order 4
    fodf_sh = np.linalg.lstsq(R, odf_sh)[0]
    fodf_sh[15:] = 0

    fodf = np.dot(B_reg, fodf_sh)

    # If sharpening a q-ball ODF (which is NOT properly normalized), we need
    # to force normalization; otherwise (for DSI, CSA, SHORE and Tensor ODFs)
    # they are normalized by construction and nothing needs to be done
    if not r2_term:
        Z = np.linalg.norm(fodf)
        fodf_sh /= Z

    fodf = np.dot(B_reg, fodf_sh)
    threshold = tau * np.max(np.dot(B_reg, fodf_sh))

    k = []
    convergence = 50
    for num_it in range(1, convergence + 1):
        A = np.dot(B_reg, fodf_sh)
        k2 = np.nonzero(A < threshold)[0]

        if (k2.shape[0] + R.shape[0]) < B_reg.shape[1]:
            warnings.warn(
                'too few negative directions identified - failed to converge')
            return fodf_sh, num_it

        if num_it > 1 and k.shape[0] == k2.shape[0]:
            if (k == k2).all():
                return fodf_sh, num_it

        k = k2
        M = np.concatenate((R, lambda_ * B_reg[k, :]))
        ODF = np.concatenate((odf_sh, np.zeros(k.shape)))
        try:
            fodf_sh = np.linalg.lstsq(M, ODF)[0]
        except np.linalg.LinAlgError:
            # SVD did not converge in Linear Least Squares in the current
            # voxel. Proceed with the initial SH estimate for this voxel.
            pass

    warnings.warn('maximum number of iterations exceeded - failed to converge')
    return fodf_sh, num_it
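
The constrained-regularization step in isolation: rows of B_reg for the offending directions are appended to R, scaled by lambda_, and the target is zero-padded so the least-squares solve drives those amplitudes toward zero. Shapes and values below are illustrative.

import numpy as np

rng = np.random.default_rng(1)
R = rng.normal(size=(15, 15))        # stand-in SDT matrix
B_reg = rng.normal(size=(100, 15))   # stand-in SH basis on the sphere
odf_sh = rng.normal(size=15)
k = np.array([3, 7, 42])             # indices of offending directions
lambda_ = 1.0
M = np.concatenate((R, lambda_ * B_reg[k, :]))
ODF = np.concatenate((odf_sh, np.zeros(k.shape)))
fodf_sh = np.linalg.lstsq(M, ODF, rcond=None)[0]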
Example #12
def csdeconv(dwsignal, X, B_reg, tau=0.1, convergence=50, P=None):
    r""" Constrained-regularized spherical deconvolution (CSD) [1]_

    Deconvolves the axially symmetric single fiber response function `r_rh` in
    rotational harmonics coefficients from the diffusion weighted signal in
    `dwsignal`.

    Parameters
    ----------
    dwsignal : array
        Diffusion weighted signals to be deconvolved.
    X : array
        Prediction matrix which estimates diffusion weighted signals from FOD
        coefficients.
    B_reg : array (N, B)
        SH basis matrix which maps FOD coefficients to FOD values on the
        surface of the sphere. B_reg should be scaled to account for lambda.
    tau : float
        Threshold controlling the amplitude below which the corresponding fODF
        is assumed to be zero.  Ideally, tau should be set to zero. However, to
        improve the stability of the algorithm, tau is set to tau * 100% of the
        max fODF amplitude (here, 10% by default). This is similar to peak
        detection where peaks below 0.1 amplitude are usually considered noise
        peaks. Because SDT is based on a q-ball ODF deconvolution, and not
        signal deconvolution, using the max instead of mean (as in CSD), is
        more stable.
    convergence : int
        Maximum number of iterations to allow the deconvolution to converge.
    P : ndarray
        This is an optimization to avoid computing ``dot(X.T, X)`` many times.
        If the same ``X`` is used many times, ``P`` can be precomputed and
        passed to this function.

    Returns
    -------
    fodf_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,)
         Spherical harmonics coefficients of the constrained-regularized fiber
         ODF.
    num_it : int
         Number of iterations in the constrained-regularization used for
         convergence.

    Notes
    -----
    This section describes how the fitting of the SH coefficients is done.
    The problem is to minimise, per iteration:

    $F(f_n) = ||Xf_n - S||^2 + \lambda^2 ||H_{n-1} f_n||^2$

    Where $X$ maps current FOD SH coefficients $f_n$ to DW signals $s$ and
    $H_{n-1}$ maps FOD SH coefficients $f_n$ to amplitudes along set of
    negative directions identified in previous iteration, i.e. the matrix
    formed by the rows of $B_{reg}$ for which $Hf_{n-1}<0$ where $B_{reg}$
    maps $f_n$ to FOD amplitude on a sphere.

    Solve by differentiating and setting to zero:

    $\Rightarrow \frac{\delta F}{\delta f_n} = 2X^T(Xf_n - S) + 2 \lambda^2
    H_{n-1}^TH_{n-1}f_n=0$

    Or:

    $(X^TX + \lambda^2 H_{n-1}^TH_{n-1})f_n = X^Ts$

    Define $Q = X^TX + \lambda^2 H_{n-1}^TH_{n-1}$ , which by construction is a
    square positive definite symmetric matrix of size $n_{SH} \times n_{SH}$.
    If
    needed, positive definiteness can be enforced with a small minimum norm
    regulariser (helps a lot with poorly conditioned direction sets and/or
    superresolution):

    $Q = X^TX + (\lambda H_{n-1}^T) (\lambda H_{n-1}) + \mu I$

    Solve $Qf_n = X^Ts$ using Cholesky decomposition:

    $Q = LL^T$

    where $L$ is lower triangular. Then problem can be solved by
    back-substitution:

    $Ly = X^Ts$

    $L^Tf_n = y$

    To speed things up further, form $P = X^TX + \mu I$, and update it to
    form $Q$ by a rank-$n$ update with $H_{n-1}$. The dipy implementation
    looks like:

        form initially $P = X^T X + \mu I$ and $\lambda B_{reg}$

        for each voxel: form $z = X^Ts$

            estimate $f_0$ by solving $Pf_0=z$. We use a simplified $l_{max}=4$
            solution here, but it might not make a big difference.

            Then iterate until no change in rows of $H$ used in $H_n$

                form $H_{n}$ given $f_{n-1}$

                form $Q = P + (\lambda H_{n-1}^T) (\lambda H_{n-1})$ (this
                can be done by a rank-$n$ update, but we currently do not use
                a rank-$n$ update).

                solve $Qf_n = z$ using Cholesky decomposition

    We'd like to thank Donald Tournier for his help with describing and
    implementing this algorithm.

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of the
           fibre orientation distribution in diffusion MRI: Non-negativity
           constrained super-resolved spherical deconvolution.

    """
    mu = 1e-5
    if P is None:
        P = np.dot(X.T, X)
    z = np.dot(X.T, dwsignal)

    try:
        fodf_sh = _solve_cholesky(P, z)
    except la.LinAlgError:
        P = P + mu * np.eye(P.shape[0])
        fodf_sh = _solve_cholesky(P, z)
    # For the first iteration we use a smooth FOD that only uses SH orders up
    # to 4 (the first 15 coefficients).
    fodf = np.dot(B_reg[:, :15], fodf_sh[:15])
    # The mean of an fodf can be computed by taking $Y_{0,0} * coeff_{0,0}$
    threshold = B_reg[0, 0] * fodf_sh[0] * tau
    where_fodf_small = (fodf < threshold).nonzero()[0]

    # If the low-order fodf does not have any values less than threshold, the
    # full-order fodf is used.
    if len(where_fodf_small) == 0:
        fodf = np.dot(B_reg, fodf_sh)
        where_fodf_small = (fodf < threshold).nonzero()[0]
        # If the fodf still has no values less than threshold, return the fodf.
        if len(where_fodf_small) == 0:
            return fodf_sh, 0

    for num_it in range(1, convergence + 1):
        # This is the super-resolved trick.  Wherever there is a negative
        # amplitude value on the fODF, it concatenates a value to the S vector
        # so that the estimation can focus on trying to eliminate it. In a
        # sense, this "adds" a measurement, which can help to better estimate
        # the fodf_sh, even if you have more SH coefficients to estimate than
        # actual S measurements.
        H = B_reg.take(where_fodf_small, axis=0)

        # We use the Cholesky decomposition to solve for the SH coefficients.
        Q = P + np.dot(H.T, H)
        fodf_sh = _solve_cholesky(Q, z)

        # Sample the FOD using the regularization sphere and compute k.
        fodf = np.dot(B_reg, fodf_sh)
        where_fodf_small_last = where_fodf_small
        where_fodf_small = (fodf < threshold).nonzero()[0]

        if (len(where_fodf_small) == len(where_fodf_small_last) and
                (where_fodf_small == where_fodf_small_last).all()):
            break
    else:
        msg = 'maximum number of iterations exceeded - failed to converge'
        warnings.warn(msg)

    return fodf_sh, num_it
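
The Cholesky solve from the Notes in isolation. The helper _solve_cholesky is not shown above, so this is a plausible stand-in written from the stated derivation, not necessarily dipy's exact implementation.

import numpy as np

def solve_cholesky(Q, z):
    L = np.linalg.cholesky(Q)       # Q = L L^T
    y = np.linalg.solve(L, z)       # forward substitution: L y = z
    return np.linalg.solve(L.T, y)  # back substitution: L^T f = y

rng = np.random.default_rng(2)
X = rng.normal(size=(60, 15))
P = X.T.dot(X) + 1e-5 * np.eye(15)  # P = X^T X + mu I, as in the Notes
z = X.T.dot(rng.normal(size=60))
f0 = solve_cholesky(P, z)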
Example #13
def recursive_response(gtab,
                       data,
                       mask=None,
                       sh_order=8,
                       peak_thr=0.01,
                       init_fa=0.08,
                       init_trace=0.0021,
                       iter=8,
                       convergence=0.001,
                       parallel=True,
                       nbr_processes=None,
                       sphere=default_sphere):
    """ Recursive calibration of response function using peak threshold

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data
    mask : ndarray, optional
        mask for recursive calibration, for example a white matter mask. It has
        shape `data.shape[0:3]` and dtype=bool. Default: use the entire data
        array.
    sh_order : int, optional
        maximal spherical harmonics order. Default: 8
    peak_thr : float, optional
        peak threshold, how large the second peak can be relative to the first
        peak in order to call it a single fiber population [1]. Default: 0.01
    init_fa : float, optional
        FA of the initial 'fat' response function (tensor). Default: 0.08
    init_trace : float, optional
        trace of the initial 'fat' response function (tensor). Default: 0.0021
    iter : int, optional
        maximum number of iterations for calibration. Default: 8.
    convergence : float, optional
        convergence criterion, maximum relative change of SH
        coefficients. Default: 0.001.
    parallel : bool, optional
        Whether to use parallelization in peak-finding during the calibration
        procedure. Default: True
    nbr_processes: int
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).
    sphere : Sphere, optional.
        The sphere used for peak finding. Default: default_sphere.

    Returns
    -------
    response : ndarray
        response function in SH coefficients

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. Using an FA threshold is not a very robust method.
    It is dependent on the dataset (non-informed user subjectivity), and still
    depends on the diffusion tensor (FA and first eigenvector),
    which has low accuracy at high b-value. This function recursively
    calibrates the response function, for more information see [1].

    References
    ----------
    .. [1] Tax, C.M.W., et al. NeuroImage 2014. Recursive calibration of
           the fiber response function for spherical deconvolution of
           diffusion MRI data.
    """
    S0 = 1
    evals = fa_trace_to_lambdas(init_fa, init_trace)
    res_obj = (evals, S0)

    if mask is None:
        data = data.reshape(-1, data.shape[-1])
    else:
        data = data[mask]

    n = np.arange(0, sh_order + 1, 2)
    where_dwi = lazy_index(~gtab.b0s_mask)
    response_p = np.ones(len(n))

    for num_it in range(iter):
        r_sh_all = np.zeros(len(n))
        csd_model = ConstrainedSphericalDeconvModel(gtab,
                                                    res_obj,
                                                    sh_order=sh_order)

        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     relative_peak_threshold=peak_thr,
                                     min_separation_angle=25,
                                     parallel=parallel,
                                     nbr_processes=nbr_processes)

        dirs = csd_peaks.peak_dirs
        vals = csd_peaks.peak_values
        single_peak_mask = (vals[:, 1] / vals[:, 0]) < peak_thr
        data = data[single_peak_mask]
        dirs = dirs[single_peak_mask]

        for num_vox in range(data.shape[0]):
            rotmat = vec2vec_rotmat(dirs[num_vox, 0], np.array([0, 0, 1]))

            rot_gradients = np.dot(rotmat, gtab.gradients.T).T

            x, y, z = rot_gradients[where_dwi].T
            r, theta, phi = cart2sphere(x, y, z)
            # for the gradient sphere
            B_dwi = real_sph_harm(0, n, theta[:, None], phi[:, None])
            r_sh_all += np.linalg.lstsq(B_dwi, data[num_vox, where_dwi])[0]

        response = r_sh_all / data.shape[0]
        res_obj = AxSymShResponse(data[:, gtab.b0s_mask].mean(), response)

        change = abs((response_p - response) / response_p)
        if all(change < convergence):
            break

        response_p = response

    return res_obj
Example #14
def nlls_fit_tensor(design_matrix, data, min_signal=1, weighting=None,
                    sigma=None, jac=True):
    """
    Fit the tensor params using non-linear least-squares.

    Parameters
    ----------
    design_matrix : array (g, 7)
        Design matrix holding the covariates used to solve for the regression
        coefficients.

    data : array ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.

    min_signal : float, optional
        All values below min_signal are replaced with min_signal. This is done
        in order to avoid taking log(0) during the tensor fitting. Default = 1

    weighting : str
        The weighting scheme to use in considering the squared-error.
        Default behavior is to use uniform weighting. Other options:
        'sigma' and 'gmm'.

    sigma: float
        If the 'sigma' weighting scheme is used, a value of sigma needs to be
        provided here. According to [Chang2005]_, a good value to use is
        1.5267 * std(background_noise), where background_noise is estimated
        from some part of the image known to contain no signal (only noise).

    jac : bool
        Use the Jacobian? Default: True

    Returns
    -------
    nlls_params : ndarray
        The eigenvalues and eigenvectors of the tensor in each voxel.

    """
    # Flatten for the iteration over voxels:
    flat_data = data.reshape((-1, data.shape[-1]))
    # Use the OLS method parameters as the starting point for the optimization:
    inv_design = np.linalg.pinv(design_matrix)
    sig = np.maximum(flat_data, min_signal)
    log_s = np.log(sig)
    D = np.dot(inv_design, log_s.T).T

    # Flatten for the iteration over voxels:
    ols_params = np.reshape(D, (-1, D.shape[-1]))
    # 12 parameters per voxel (evals + evecs):
    dti_params = np.empty((flat_data.shape[0], 12))
    for vox in range(flat_data.shape[0]):
        start_params = ols_params[vox]
        # Do the optimization in this voxel:
        if jac:
            this_tensor, status = opt.leastsq(_nlls_err_func, start_params,
                                              args=(design_matrix,
                                                    flat_data[vox],
                                                    weighting,
                                                    sigma),
                                              Dfun=_nlls_jacobian_func)
        else:
            this_tensor, status = opt.leastsq(_nlls_err_func, start_params,
                                              args=(design_matrix,
                                                    flat_data[vox],
                                                    weighting,
                                                    sigma))

        # The parameters are the evals and the evecs:
        try:
            evals, evecs = decompose_tensor(
                from_lower_triangular(this_tensor[:6]))
            dti_params[vox, :3] = evals
            dti_params[vox, 3:] = evecs.ravel()

        # If leastsq failed to converge and produced nans, we'll resort to the
        # OLS solution in this voxel:
        except np.linalg.LinAlgError:
            print(vox)
            dti_params[vox, :] = start_params

    dti_params.shape = data.shape[:-1] + (12,)
    return dti_params
Example #15
def gradient(f):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using central differences in the interior
    and first differences at the boundaries. The returned gradient hence has
    the same shape as the input array.

    Parameters
    ----------
    f : array_like
      An N-dimensional array containing samples of a scalar function.

    Returns
    -------
    gradient : ndarray
      N arrays of the same shape as `f` giving the derivative of `f` with
      respect to each dimension.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=float)
    >>> gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])

    >>> gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
    [array([[ 2.,  2., -1.],
           [ 2.,  2., -1.]]), array([[ 1. ,  2.5,  4. ],
           [ 1. ,  1. ,  1. ]])]

    Note
    ----
    This is a simplified implementation of gradient that is part of numpy
    1.8. In order to mitigate the effects of changes added to this
    implementation in version 1.9 of numpy, we include this implementation
    here.
    """
    f = np.asanyarray(f)
    N = len(f.shape)  # number of dimensions
    dx = [1.0] * N

    # use central differences on interior and first differences on endpoints
    outvals = []

    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)] * N
    slice2 = [slice(None)] * N
    slice3 = [slice(None)] * N

    for axis in range(N):
        # select out appropriate parts for this dimension
        out = np.empty_like(f)
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(2, None)
        slice3[axis] = slice(None, -2)
        # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / 2.0
        slice1[axis] = 0
        slice2[axis] = 1
        slice3[axis] = 0
        # 1D equivalent -- out[0] = (f[1] - f[0])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])
        slice1[axis] = -1
        slice2[axis] = -1
        slice3[axis] = -2
        # 1D equivalent -- out[-1] = (f[-1] - f[-2])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])

        # divide by step size
        outvals.append(out / dx[axis])
        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)

    if N == 1:
        return outvals[0]
    else:
        return outvals
Example #16
    def setup(self, streamline, affine, evals=[0.001, 0, 0], sphere=None):
        """
        Set up the necessary components for the LiFE model: the matrix of
        fiber-contributions to the DWI signal, and the coordinates of voxels
        for which the equations will be solved

        Parameters
        ----------
        streamline : list
            Streamlines, each is an array of shape (n, 3)
        affine : 4 by 4 array
            Mapping from the streamline coordinates to the data
        evals : list (3 items, optional)
            The eigenvalues of the canonical tensor used as a response
            function. Default:[0.001, 0, 0].
        sphere : `dipy.core.Sphere` instance or False, optional
            Whether to approximate (and cache) the signal on a discrete
            sphere. This may confer a significant speed-up in setting up the
            problem, but is not as accurate. If `False`, we use the exact
            gradients along the streamlines to calculate the matrix, instead
            of an approximation. Defaults to the 724-vertex symmetric sphere
            from :mod:`dipy.data`.
        """
        if sphere is not False:
            SignalMaker = LifeSignalMaker(self.gtab,
                                          evals=evals,
                                          sphere=sphere)

        if affine is None:
            affine = np.eye(4)
        streamline = transform_streamlines(streamline, affine)
        # Assign some local variables, for shorthand:
        all_coords = np.concatenate(streamline)
        vox_coords = unique_rows(np.round(all_coords).astype(np.intp))
        del all_coords
        # We only consider the diffusion-weighted signals:
        n_bvecs = self.gtab.bvals[~self.gtab.b0s_mask].shape[0]
        v2f, v2fn = voxel2streamline(streamline, transformed=True,
                                     affine=affine, unique_idx=vox_coords)
        # How many fibers in each voxel (this will determine how many
        # components are in the matrix):
        n_unique_f = len(np.hstack(list(v2f.values())))
        # Preallocate these, which will be used to generate the sparse
        # matrix:
        f_matrix_sig = np.zeros(n_unique_f * n_bvecs, dtype=float)
        f_matrix_row = np.zeros(n_unique_f * n_bvecs, dtype=np.intp)
        f_matrix_col = np.zeros(n_unique_f * n_bvecs, dtype=np.intp)

        fiber_signal = []
        for s_idx, s in enumerate(streamline):
            if sphere is not False:
                fiber_signal.append(SignalMaker.streamline_signal(s))
            else:
                fiber_signal.append(streamline_signal(s, self.gtab, evals))

        del streamline
        if sphere is not False:
            del SignalMaker

        keep_ct = 0
        range_bvecs = np.arange(n_bvecs).astype(int)
        # In each voxel:
        for v_idx in range(vox_coords.shape[0]):
            mat_row_idx = (range_bvecs + v_idx * n_bvecs).astype(np.intp)
            # For each fiber in that voxel:
            for f_idx in v2f[v_idx]:
                # For each fiber-voxel combination, store the row/column
                # indices in the pre-allocated linear arrays
                f_matrix_row[keep_ct:keep_ct+n_bvecs] = mat_row_idx
                f_matrix_col[keep_ct:keep_ct+n_bvecs] = f_idx

                vox_fiber_sig = np.zeros(n_bvecs)
                for node_idx in v2fn[f_idx][v_idx]:
                    # Sum the signal from each node of the fiber in that voxel:
                    vox_fiber_sig += fiber_signal[f_idx][node_idx]
                # And add the summed thing into the corresponding rows:
                f_matrix_sig[keep_ct:keep_ct+n_bvecs] += vox_fiber_sig
                keep_ct = keep_ct + n_bvecs

        del v2f, v2fn
        # Allocate the sparse matrix, using the more memory-efficient 'csr'
        # format:
        life_matrix = sps.csr_matrix((f_matrix_sig,
                                     [f_matrix_row, f_matrix_col]))

        return life_matrix, vox_coords
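
The sparse assembly pattern used at the end, on toy numbers: parallel value, row and column arrays become a CSR matrix, and duplicate (row, col) pairs are summed during conversion.

import numpy as np
import scipy.sparse as sps

vals = np.array([1.0, 2.0, 3.0, 4.0])
rows = np.array([0, 0, 1, 2])
cols = np.array([0, 0, 1, 1])
# Duplicate (0, 0) entries are summed: 1.0 + 2.0 -> 3.0
mat = sps.csr_matrix((vals, (rows, cols)), shape=(3, 2))
print(mat.toarray())    # [[3. 0.] [0. 3.] [0. 4.]]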
Example #17
def gradient(f):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using central differences in the interior
    and first differences at the boundaries. The returned gradient hence has
    the same shape as the input array.

    Parameters
    ----------
    f : array_like
      An N-dimensional array containing samples of a scalar function.

    Returns
    -------
    gradient : ndarray
      N arrays of the same shape as `f` giving the derivative of `f` with
      respect to each dimension.

    Examples
    --------
    >>> x = np.array([1, 2, 4, 7, 11, 16], dtype=float)
    >>> gradient(x)
    array([ 1. ,  1.5,  2.5,  3.5,  4.5,  5. ])

    >>> gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float))
    [array([[ 2.,  2., -1.],
           [ 2.,  2., -1.]]), array([[ 1. ,  2.5,  4. ],
           [ 1. ,  1. ,  1. ]])]

    Note
    ----
    This is a simplified implementation of gradient that is part of numpy
    1.8. In order to mitigate the effects of changes added to this
    implementation in version 1.9 of numpy, we include this implementation
    here.
    """
    f = np.asanyarray(f)
    N = len(f.shape)  # number of dimensions
    dx = [1.0]*N

    # use central differences on interior and first differences on endpoints
    outvals = []

    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N

    for axis in range(N):
        # select out appropriate parts for this dimension
        out = np.empty_like(f)
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(2, None)
        slice3[axis] = slice(None, -2)

        # 1D equivalent -- out[1:-1] = (f[2:] - f[:-2])/2.0
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])/2.0
        slice1[axis] = 0
        slice2[axis] = 1
        slice3[axis] = 0
        # 1D equivalent -- out[0] = (f[1] - f[0])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])
        slice1[axis] = -1
        slice2[axis] = -1
        slice3[axis] = -2
        # 1D equivalent -- out[-1] = (f[-1] - f[-2])
        out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)])

        # divide by step size
        outvals.append(out / dx[axis])
        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)

    if N == 1:
        return outvals[0]
    else:
        return outvals
Example #18
File: dki.py Project: oesteban/dipy
def apparent_kurtosis_coef(dki_params,
                           sphere,
                           min_diffusivity=0,
                           min_kurtosis=-1):
    r""" Calculate the apparent kurtosis coefficient (AKC) in each direction
    of a sphere.

    Parameters
    ----------
    dki_params : ndarray (x, y, z, 27) or (n, 27)
        All parameters estimated from the diffusion kurtosis model.
        Parameters are ordered as follows:
            1) Three diffusion tensor eigenvalues
            2) Three lines of the eigenvector matrix each containing the first,
               second and third coordinates of the eigenvectors respectively
            3) Fifteen elements of the kurtosis tensor
    sphere : a Sphere class instance
        The AKC will be calculated for each of the vertices in the sphere
    min_diffusivity : float (optional)
        Because negative eigenvalues are not physical and small eigenvalues
        cause quite a lot of noise in diffusion-based metrics, diffusivity
        values smaller than `min_diffusivity` are replaced with
        `min_diffusivity`. Default = 0
    min_kurtosis : float (optional)
        Because high-amplitude negative values of kurtosis are not physically
        and biologically plausible, and these cause huge artefacts in
        kurtosis-based measures, directional kurtosis values smaller than
        `min_kurtosis` are replaced with `min_kurtosis`. Default = -1

    Returns
    -------
    AKC : ndarray (x, y, z, g) or (n, g)
        Apparent kurtosis coefficient (AKC) for all g directions of a sphere.

    Notes
    -----
    For each sphere direction with coordinates $(n_{1}, n_{2}, n_{3})$, the
    calculation of AKC is done using the formula:

    .. math ::
        AKC(n)=\frac{MD^{2}}{ADC(n)^{2}}\sum_{i=1}^{3}\sum_{j=1}^{3}
        \sum_{k=1}^{3}\sum_{l=1}^{3}n_{i}n_{j}n_{k}n_{l}W_{ijkl}

    where $W_{ijkl}$ are the elements of the kurtosis tensor, MD the mean
    diffusivity and ADC the apparent diffusion coefficient computed as:

    .. math ::
        ADC(n)=\sum_{i=1}^{3}\sum_{j=1}^{3}n_{i}n_{j}D_{ij}

    where $D_{ij}$ are the elements of the diffusion tensor.
    """

    # Flat parameters
    outshape = dki_params.shape[:-1]
    dki_params = dki_params.reshape((-1, dki_params.shape[-1]))

    # Split data
    evals, evecs, kt = split_dki_param(dki_params)

    # Compute MD
    MD = mean_diffusivity(evals)

    # Initialize AKC matrix
    V = sphere.vertices
    AKC = np.zeros((len(kt), len(V)))

    # loop over all voxels
    for vox in range(len(kt)):
        R = evecs[vox]
        dt = lower_triangular(np.dot(np.dot(R, np.diag(evals[vox])), R.T))
        AKC[vox] = _directional_kurtosis(dt,
                                         MD[vox],
                                         kt[vox],
                                         V,
                                         min_diffusivity=min_diffusivity,
                                         min_kurtosis=min_kurtosis)

    # reshape data according to input data
    AKC = AKC.reshape((outshape + (len(V), )))

    return AKC
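
The ADC formula from the Notes in isolation, vectorized over sphere directions with einsum; the tensor and directions are toy values.

import numpy as np

D = np.diag([1.7e-3, 0.3e-3, 0.3e-3])      # toy diffusion tensor
V = np.eye(3)                              # three toy unit directions
ADC = np.einsum('gi,ij,gj->g', V, D, V)    # ADC(n) = n^T D n per direction
print(ADC)    # [0.0017 0.0003 0.0003]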
Example #19
def kfold_xval(model, data, folds, *model_args, **model_kwargs):
    """
    Perform k-fold cross-validation to generate out-of-sample predictions for
    each measurement.

    Parameters
    ----------
    model : Model class instance
        The type of the model to use for prediction. The corresponding Fit
        object must have a `predict` function implemented. One of the
        following: `reconst.dti.TensorModel` or
        `reconst.csdeconv.ConstrainedSphericalDeconvModel`.
    data : ndarray
        Diffusion MRI data acquired with the GradientTable of the model. Shape
        will typically be `(x, y, z, b)` where `xyz` are spatial dimensions and
        b is the number of bvals/bvecs in the GradientTable.
    folds : int
        The number of divisions to apply to the data
    model_args : list
        Additional arguments to the model initialization
    model_kwargs : dict
        Additional key-word arguments to the model initialization. If contains
        the kwarg `mask`, this will be used as a key-word argument to the `fit`
        method of the model object, rather than being used in the
        initialization of the model object

    Notes
    -----
    This function assumes that a prediction API is implemented in the Model
    class for which prediction is conducted. That is, the Fit object that gets
    generated upon fitting the model needs to have a `predict` method, which
    receives a GradientTable class instance as input and produces a predicted
    signal as output.

    It also assumes that the model object has `bval` and `bvec` attributes
    holding b-values and corresponding unit vectors.

    References
    ----------
    .. [1] Rokem, A., Chan, K.L. Yeatman, J.D., Pestilli, F., Mezer, A.,
       Wandell, B.A., 2014. Evaluating the accuracy of diffusion models at
       multiple b-values with cross-validation. ISMRM 2014.
    """
    # This should always be there, if the model inherits from
    # dipy.reconst.base.ReconstModel:
    gtab = model.gtab
    data_b = data[..., ~gtab.b0s_mask]
    div_by_folds = np.mod(data_b.shape[-1], folds)
    # Make sure that an equal number of samples get left out in each fold:
    if div_by_folds != 0:
        msg = "The number of folds must divide the diffusion-weighted "
        msg += "data equally, but "
        msg = "np.mod(%s, %s) is %s" % (data_b.shape[-1], folds, div_by_folds)
        raise ValueError(msg)

    data_0 = data[..., gtab.b0s_mask]
    S0 = np.mean(data_0, -1)
    n_in_fold = data_b.shape[-1] / folds
    prediction = np.zeros(data.shape)
    # We are going to leave out some randomly chosen samples in each iteration:
    order = np.random.permutation(data_b.shape[-1])

    nz_bval = gtab.bvals[~gtab.b0s_mask]
    nz_bvec = gtab.bvecs[~gtab.b0s_mask]

    # Pop the mask, if there is one, out here for use in every fold:
    mask = model_kwargs.pop('mask', None)
    gtgt = gt.gradient_table  # Shorthand
    for k in range(folds):
        fold_mask = np.ones(data_b.shape[-1], dtype=bool)
        fold_idx = order[int(k * n_in_fold): int((k + 1) * n_in_fold)]
        fold_mask[fold_idx] = False
        this_data = np.concatenate([data_0, data_b[..., fold_mask]], -1)

        this_gtab = gtgt(np.hstack([gtab.bvals[gtab.b0s_mask],
                                    nz_bval[fold_mask]]),
                         np.concatenate([gtab.bvecs[gtab.b0s_mask],
                                         nz_bvec[fold_mask]]))
        left_out_gtab = gtgt(np.hstack([gtab.bvals[gtab.b0s_mask],
                                        nz_bval[~fold_mask]]),
                             np.concatenate([gtab.bvecs[gtab.b0s_mask],
                                             nz_bvec[~fold_mask]]))
        this_model = model.__class__(this_gtab, *model_args, **model_kwargs)
        this_fit = this_model.fit(this_data, mask=mask)
        if not hasattr(this_fit, 'predict'):
            err_str = "Models of type: %s " % this_model.__class__
            err_str += "do not have an implementation of model prediction"
            err_str += " and do not support cross-validation"
            raise ValueError(err_str)
        this_predict = S0[..., None] * this_fit.predict(left_out_gtab, S0=1)

        idx_to_assign = np.where(~gtab.b0s_mask)[0][~fold_mask]
        prediction[..., idx_to_assign] =\
            this_predict[..., np.sum(gtab.b0s_mask):]

    # For the b0 measurements
    prediction[..., gtab.b0s_mask] = S0[..., None]
    return prediction
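
# A commented usage sketch for `kfold_xval` (illustration only): `gtab` and
# `data` are assumed to exist, and `TensorModel` is one of the prediction-
# capable models named in the docstring; none of this is verified here.
#
#     import dipy.reconst.dti as dti
#     model = dti.TensorModel(gtab)
#     prediction = kfold_xval(model, data, folds=5)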
Example #20
0
def apparent_kurtosis_coef(dki_params, sphere, min_diffusivity=0,
                           min_kurtosis=-1):
    r""" Calculate the apparent kurtosis coefficient (AKC) in each direction
    of a sphere.

    Parameters
    ----------
    dki_params : ndarray (x, y, z, 27) or (n, 27)
        All parameters estimated from the diffusion kurtosis model.
        Parameters are ordered as follows:
            1) Three diffusion tensor eigenvalues
            2) Three lines of the eigenvector matrix, each containing the
               first, second and third coordinates of an eigenvector
            3) Fifteen elements of the kurtosis tensor
    sphere : a Sphere class instance
        The AKC will be calculated for each of the vertices in the sphere
    min_diffusivity : float (optional)
        Because negative eigenvalues are not physical and small eigenvalues
        cause quite a lot of noise in diffusion-based metrics, diffusivity
        values smaller than `min_diffusivity` are replaced with
        `min_diffusivity`. Default = 0
    min_kurtosis : float (optional)
        Because high-amplitude negative values of kurtosis are not
        physically and biologically plausible, and cause huge artefacts in
        kurtosis-based measures, directional kurtosis values smaller than
        `min_kurtosis` are replaced with `min_kurtosis`. Default = -1

    Returns
    -------
    AKC : ndarray (x, y, z, g) or (n, g)
        Apparent kurtosis coefficient (AKC) for all g directions of a sphere.

    Notes
    -----
    For each sphere direction with coordinates $(n_{1}, n_{2}, n_{3})$, the
    calculation of AKC is done using the formula:

    .. math ::
        AKC(n)=\frac{MD^{2}}{ADC(n)^{2}}\sum_{i=1}^{3}\sum_{j=1}^{3}
        \sum_{k=1}^{3}\sum_{l=1}^{3}n_{i}n_{j}n_{k}n_{l}W_{ijkl}

    where $W_{ijkl}$ are the elements of the kurtosis tensor, MD the mean
    diffusivity and ADC the apparent diffusion coefficient computed as:

    .. math ::
        ADC(n)=\sum_{i=1}^{3}\sum_{j=1}^{3}n_{i}n_{j}D_{ij}

    where $D_{ij}$ are the elements of the diffusion tensor.
    """

    # Flat parameters
    outshape = dki_params.shape[:-1]
    dki_params = dki_params.reshape((-1, dki_params.shape[-1]))

    # Split data
    evals, evecs, kt = split_dki_param(dki_params)

    # Compute MD
    MD = mean_diffusivity(evals)

    # Initialize AKC matrix
    V = sphere.vertices
    AKC = np.zeros((len(kt), len(V)))

    # loop over all voxels
    for vox in range(len(kt)):
        R = evecs[vox]
        dt = lower_triangular(np.dot(np.dot(R, np.diag(evals[vox])), R.T))
        AKC[vox] = _directional_kurtosis(dt, MD[vox], kt[vox], V,
                                         min_diffusivity=min_diffusivity,
                                         min_kurtosis=min_kurtosis)

    # reshape data according to input data
    AKC = AKC.reshape((outshape + (len(V),)))

    return AKC
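
# A minimal numerical sketch of the AKC/ADC formulas in the docstring above,
# on toy tensors (`D` and `W` are illustrative stand-ins, not outputs of
# this function):
import numpy as np
n = np.array([1.0, 0.0, 0.0])                  # one sphere direction
D = np.diag([1.7e-3, 0.3e-3, 0.3e-3])          # toy diffusion tensor
W = np.zeros((3, 3, 3, 3))
W[0, 0, 0, 0] = 1.0                            # toy kurtosis tensor
MD = np.trace(D) / 3.0                         # mean diffusivity
ADC = n @ D @ n                                # sum_ij n_i n_j D_ij
AKC = (MD ** 2 / ADC ** 2) * np.einsum('i,j,k,l,ijkl->', n, n, n, n, W)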
Example #21
0
def csdeconv(dwsignal, sh_order, X, B_reg, lambda_=1., tau=0.1):
    r""" Constrained-regularized spherical deconvolution (CSD) [1]_

    Deconvolves the axially symmetric single fiber response function `r_rh` in
    rotational harmonics coefficients from the diffusion weighted signal in
    `dwsignal`.

    Parameters
    ----------
    dwsignal : array
        Diffusion weighted signals to be deconvolved.
    sh_order : int
        Maximal SH order of the SH representation
    X : array
        Prediction matrix which estimates diffusion weighted signals from FOD
        coefficients.
    B_reg : array (N, B)
        SH basis matrix which maps FOD coefficients to FOD values on the
        surface of the sphere.
    lambda_ : float
        lambda parameter in minimization equation (default 1.0)
    tau : float
        Threshold controlling the amplitude below which the corresponding
        fODF is assumed to be zero. Ideally, tau should be set to zero.
        However, to improve the stability of the algorithm, the threshold is
        set to tau*100% of the max fODF amplitude (here, 10% by default).
        This is similar to peak detection, where peaks below 0.1 amplitude
        are usually considered noise peaks. Because SDT is based on a q-ball
        ODF deconvolution, and not signal deconvolution, using the max
        instead of the mean (as in CSD) is more stable.

    Returns
    -------
    fodf_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,)
         Spherical harmonics coefficients of the constrained-regularized fiber
         ODF
    num_it : int
         Number of iterations in the constrained-regularization used for
         convergence

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of the
           fibre orientation distribution in diffusion MRI: Non-negativity
           constrained super-resolved spherical deconvolution.

    """
    # generate initial fODF estimate, truncated at SH order 4
    fodf_sh = np.linalg.lstsq(X, dwsignal)[0]
    fodf_sh[15:] = 0

    fodf = np.dot(B_reg, fodf_sh)
    # set threshold on FOD amplitude used to identify 'negative' values
    threshold = tau * np.mean(np.dot(B_reg, fodf_sh))

    k = []
    convergence = 50
    for num_it in range(1, convergence + 1):
        fodf = np.dot(B_reg, fodf_sh)

        k2 = np.nonzero(fodf < threshold)[0]

        if (k2.shape[0] + X.shape[0]) < B_reg.shape[1]:
            warnings.warn(
                'too few negative directions identified - failed to converge')
            return fodf_sh, num_it

        if num_it > 1 and k.shape[0] == k2.shape[0]:
            if (k == k2).all():
                return fodf_sh, num_it

        k = k2

        # This is the super-resolved trick.
        # Wherever there is a negative amplitude value on the fODF, it
        # concatenates a value to the S vector so that the estimation can
        # focus on trying to eliminate it. In a sense, this "adds" a
        # measurement, which can help to better estimate the fodf_sh, even if
        # you have more SH coefficients to estimate than actual S measurements.
        M = np.concatenate((X, lambda_ * B_reg[k, :]))
        S = np.concatenate((dwsignal, np.zeros(k.shape)))
        try:
            fodf_sh = np.linalg.lstsq(M, S)[0]
        except np.linalg.LinAlgError:
            # SVD did not converge in Linear Least Squares in current
            # voxel. Proceeding with initial SH estimate for this voxel.
            pass

    warnings.warn('maximum number of iterations exceeded - failed to converge')
    return fodf_sh, num_it
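
# A standalone sketch of the "super-resolved trick" above, on toy arrays
# (all names here are illustrative stand-ins): zero-valued pseudo-
# measurements are appended for the negative directions, so the fit trades
# signal error against negative fODF amplitude.
import numpy as np
rng = np.random.default_rng(0)
X_toy = rng.standard_normal((20, 10))        # toy prediction matrix
s_toy = rng.standard_normal(20)              # toy DW signal
B_k = rng.standard_normal((5, 10))           # toy B_reg rows (negative dirs)
M_toy = np.concatenate((X_toy, 1.0 * B_k))   # augmented design (lambda_=1)
S_toy = np.concatenate((s_toy, np.zeros(5)))
f_toy = np.linalg.lstsq(M_toy, S_toy, rcond=None)[0]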
Example #22
0
def restore_fit_tensor(design_matrix,
                       data,
                       min_signal=1.0,
                       sigma=None,
                       jac=True):
    """
    Use the RESTORE algorithm [Chang2005]_ to calculate a robust tensor fit

    Parameters
    ----------

    design_matrix : array of shape (g, 7)
        Design matrix holding the covariants used to solve for the regression
        coefficients.

    data : array of shape ([X, Y, Z, n_directions], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.

    min_signal : float, optional
        All values below min_signal are replaced with min_signal. This is
        done in order to avoid taking log(0) during the tensor fitting.
        Default = 1

    sigma : float
        An estimate of the variance. [Chang2005]_ recommends using
        1.5267 * std(background_noise), where background_noise is estimated
        from some part of the image known to contain no signal (only noise).

    jac : bool, optional
        Whether to use the Jacobian of the tensor to speed the non-linear
        optimization procedure used to fit the tensor parameters (see also
        :func:`nlls_fit_tensor`). Default: True


    Returns
    -------
    restore_params : ndarray of shape (..., 12)
        An estimate of the tensor parameters in each voxel.

    References
    ----------
    .. [Chang2005] Chang, L-C, Jones, DK and Pierpaoli, C (2005). RESTORE:
       robust estimation of tensors by outlier rejection. MRM, 53: 1088-95.

    """

    # Flatten for the iteration over voxels:
    flat_data = data.reshape((-1, data.shape[-1]))
    # Use the OLS method parameters as the starting point for the optimization:
    inv_design = np.linalg.pinv(design_matrix)
    sig = np.maximum(flat_data, min_signal)
    log_s = np.log(sig)
    D = np.dot(inv_design, log_s.T).T
    ols_params = np.reshape(D, (-1, D.shape[-1]))
    # 12 parameters per voxel (evals + evecs):
    dti_params = np.empty((flat_data.shape[0], 12))
    for vox in range(flat_data.shape[0]):
        start_params = ols_params[vox]
        # Do nlls using sigma weighting in this voxel:
        if jac:
            this_tensor, status = opt.leastsq(_nlls_err_func,
                                              start_params,
                                              args=(design_matrix,
                                                    flat_data[vox], 'sigma',
                                                    sigma),
                                              Dfun=_nlls_jacobian_func)
        else:
            this_tensor, status = opt.leastsq(_nlls_err_func,
                                              start_params,
                                              args=(design_matrix,
                                                    flat_data[vox], 'sigma',
                                                    sigma))

        # Get the residuals:
        pred_sig = np.exp(np.dot(design_matrix, this_tensor))
        residuals = flat_data[vox] - pred_sig
        # If any of the residuals are outliers (using 3 sigma as a criterion
        # following Chang et al., e.g page 1089):
        if np.any(residuals > 3 * sigma):
            # Do nlls with GMM-weighting:
            if jac:
                this_tensor, status = opt.leastsq(_nlls_err_func,
                                                  start_params,
                                                  args=(design_matrix,
                                                        flat_data[vox], 'gmm'),
                                                  Dfun=_nlls_jacobian_func)
            else:
                this_tensor, status = opt.leastsq(_nlls_err_func,
                                                  start_params,
                                                  args=(design_matrix,
                                                        flat_data[vox], 'gmm'))

            # How are you doin' on those residuals?
            pred_sig = np.exp(np.dot(design_matrix, this_tensor))
            residuals = flat_data[vox] - pred_sig
            if np.any(residuals > 3 * sigma):
                # If you still have outliers, refit without those outliers:
                non_outlier_idx = np.where(residuals <= 3 * sigma)
                clean_design = design_matrix[non_outlier_idx]
                clean_sig = flat_data[vox][non_outlier_idx]
                if np.iterable(sigma):
                    this_sigma = sigma[non_outlier_idx]
                else:
                    this_sigma = sigma

                if jac:
                    this_tensor, status = opt.leastsq(_nlls_err_func,
                                                      start_params,
                                                      args=(clean_design,
                                                            clean_sig, 'sigma',
                                                            this_sigma),
                                                      Dfun=_nlls_jacobian_func)
                else:
                    this_tensor, status = opt.leastsq(_nlls_err_func,
                                                      start_params,
                                                      args=(clean_design,
                                                            clean_sig, 'sigma',
                                                            this_sigma))

        # The parameters are the evals and the evecs:
        try:
            evals, evecs = decompose_tensor(
                from_lower_triangular(this_tensor[:6]))
            dti_params[vox, :3] = evals
            dti_params[vox, 3:] = evecs.ravel()

        # If leastsq failed to converge and produced nans, we'll resort to the
        # OLS solution in this voxel:
        except np.linalg.LinAlgError:
            print(vox)
            dti_params[vox, :] = start_params

    dti_params.shape = data.shape[:-1] + (12, )
    restore_params = dti_params
    return restore_params
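
# A hedged sketch of estimating `sigma` from a noise-only region, following
# the [Chang2005]_ recommendation in the docstring; the synthetic array and
# the choice of corner are illustrative assumptions:
import numpy as np
rng = np.random.default_rng(0)
data4d = rng.normal(0.0, 5.0, size=(12, 12, 12, 30))  # toy (X, Y, Z, g) data
background = data4d[:6, :6, :6]      # assumed signal-free corner
sigma = 1.5267 * background.std()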
Example #23
0
def csdeconv(dwsignal, X, B_reg, tau=0.1, convergence=50, P=None):
    r""" Constrained-regularized spherical deconvolution (CSD) [1]_

    Deconvolves the axially symmetric single fiber response function `r_rh` in
    rotational harmonics coefficients from the diffusion weighted signal in
    `dwsignal`.

    Parameters
    ----------
    dwsignal : array
        Diffusion weighted signals to be deconvolved.
    X : array
        Prediction matrix which estimates diffusion weighted signals from FOD
        coefficients.
    B_reg : array (N, B)
        SH basis matrix which maps FOD coefficients to FOD values on the
        surface of the sphere. B_reg should be scaled to account for lambda.
    tau : float
        Threshold controlling the amplitude below which the corresponding
        fODF is assumed to be zero. Ideally, tau should be set to zero.
        However, to improve the stability of the algorithm, the threshold is
        set to tau*100% of the max fODF amplitude (here, 10% by default).
        This is similar to peak detection, where peaks below 0.1 amplitude
        are usually considered noise peaks. Because SDT is based on a q-ball
        ODF deconvolution, and not signal deconvolution, using the max
        instead of the mean (as in CSD) is more stable.
    convergence : int
        Maximum number of iterations to allow the deconvolution to converge.
    P : ndarray
        This is an optimization to avoid computing ``dot(X.T, X)`` many times.
        If the same ``X`` is used many times, ``P`` can be precomputed and
        passed to this function.

    Returns
    -------
    fodf_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,)
         Spherical harmonics coefficients of the constrained-regularized fiber
         ODF.
    num_it : int
         Number of iterations in the constrained-regularization used for
         convergence.

    Notes
    -----
    This section describes how the fitting of the SH coefficients is done.
    The problem is to minimize, per iteration:

    $F(f_n) = ||Xf_n - S||^2 + \lambda^2 ||H_{n-1} f_n||^2$

    Where $X$ maps the current FOD SH coefficients $f_n$ to DW signals $s$
    and $H_{n-1}$ maps FOD SH coefficients $f_n$ to amplitudes along the set
    of negative directions identified in the previous iteration, i.e. the
    matrix formed by the rows of $B_{reg}$ for which $Hf_{n-1}<0$, where
    $B_{reg}$ maps $f_n$ to FOD amplitudes on a sphere.

    Solve by differentiating and setting to zero:

    $\Rightarrow \frac{\delta F}{\delta f_n} = 2X^T(Xf_n - S) + 2 \lambda^2
    H_{n-1}^TH_{n-1}f_n=0$

    Or:

    $(X^TX + \lambda^2 H_{n-1}^TH_{n-1})f_n = X^Ts$

    Define $Q = X^TX + \lambda^2 H_{n-1}^TH_{n-1}$, which by construction is
    a square positive definite symmetric matrix of size
    $n_{SH} \times n_{SH}$. If needed, positive definiteness can be enforced
    with a small minimum-norm regulariser (this helps a lot with poorly
    conditioned direction sets and/or super-resolution):

    $Q = X^TX + (\lambda H_{n-1}^T) (\lambda H_{n-1}) + \mu I$

    Solve $Qf_n = X^Ts$ using Cholesky decomposition:

    $Q = LL^T$

    where $L$ is lower triangular. The problem can then be solved by
    back-substitution:

    $Ly = X^Ts$

    $L^Tf_n = y$

    To speed things up further, form $P = X^TX + \mu I$, and update to form
    $Q$ by a rank-n update with $H_{n-1}$. The dipy implementation looks
    like:

        form initially $P = X^T X + \mu I$ and $\lambda B_{reg}$

        for each voxel: form $z = X^Ts$

            estimate $f_0$ by solving $Pf_0=z$. We use a simplified $l_{max}=4$
            solution here, but it might not make a big difference.

            Then iterate until no change in rows of $H$ used in $H_n$

                form $H_{n}$ given $f_{n-1}$

                form $Q = P + (\lambda H_{n-1}^T)(\lambda H_{n-1})$ (this
                can be done by a rank-n update, but we currently do not use
                one).

                solve $Qf_n = z$ using Cholesky decomposition

    We would like to thank Donald Tournier for his help with describing and
    implementing this algorithm.

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination of the
           fibre orientation distribution in diffusion MRI: Non-negativity
           constrained super-resolved spherical deconvolution.

    """
    mu = 1e-5
    if P is None:
        P = np.dot(X.T, X)
    z = np.dot(X.T, dwsignal)

    try:
        fodf_sh = _solve_cholesky(P, z)
    except la.LinAlgError:
        P = P + mu * np.eye(P.shape[0])
        fodf_sh = _solve_cholesky(P, z)
    # For the first iteration we use a smooth FOD that only uses SH orders up
    # to 4 (the first 15 coefficients).
    fodf = np.dot(B_reg[:, :15], fodf_sh[:15])
    # The mean of an fodf can be computed by taking $Y_{0,0} * coeff_{0,0}$
    threshold = B_reg[0, 0] * fodf_sh[0] * tau
    where_fodf_small = (fodf < threshold).nonzero()[0]

    # If the low-order fodf does not have any values less than threshold, the
    # full-order fodf is used.
    if len(where_fodf_small) == 0:
        fodf = np.dot(B_reg, fodf_sh)
        where_fodf_small = (fodf < threshold).nonzero()[0]
        # If the fodf still has no values less than threshold, return the fodf.
        if len(where_fodf_small) == 0:
            return fodf_sh, 0

    for num_it in range(1, convergence + 1):
        # This is the super-resolved trick.  Wherever there is a negative
        # amplitude value on the fODF, it concatenates a value to the S vector
        # so that the estimation can focus on trying to eliminate it. In a
        # sense, this "adds" a measurement, which can help to better estimate
        # the fodf_sh, even if you have more SH coefficients to estimate than
        # actual S measurements.
        H = B_reg.take(where_fodf_small, axis=0)

        # We use the Cholesky decomposition to solve for the SH coefficients.
        Q = P + np.dot(H.T, H)
        fodf_sh = _solve_cholesky(Q, z)

        # Sample the FOD using the regularization sphere and compute k.
        fodf = np.dot(B_reg, fodf_sh)
        where_fodf_small_last = where_fodf_small
        where_fodf_small = (fodf < threshold).nonzero()[0]

        if (len(where_fodf_small) == len(where_fodf_small_last)
                and (where_fodf_small == where_fodf_small_last).all()):
            break
    else:
        msg = 'maximum number of iterations exceeded - failed to converge'
        warnings.warn(msg)

    return fodf_sh, num_it
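
# A hedged sketch of the normal-equation solve from the Notes above, written
# with scipy.linalg's Cholesky helpers (an illustration; dipy's internal
# `_solve_cholesky` is not reproduced here). Toy sizes throughout:
import numpy as np
from scipy.linalg import cho_factor, cho_solve
rng = np.random.default_rng(0)
X_toy = rng.standard_normal((60, 15))    # toy prediction matrix
H_toy = rng.standard_normal((8, 15))     # toy B_reg rows (lambda folded in)
s_toy = rng.standard_normal(60)          # toy DW signal
mu = 1e-5
P_toy = X_toy.T @ X_toy + mu * np.eye(15)    # P = X^T X + mu I
Q_toy = P_toy + H_toy.T @ H_toy              # Q = P + H^T H
z_toy = X_toy.T @ s_toy                      # z = X^T s
f_n = cho_solve(cho_factor(Q_toy), z_toy)    # solve Q f_n = z via Q = LL^T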
Example #24
0
def kfold_xval(model, data, folds, *model_args, **model_kwargs):
    """
    Perform k-fold cross-validation to generate out-of-sample predictions for
    each measurement.

    Parameters
    ----------
    model : Model class instance
        The type of the model to use for prediction. The corresponding Fit
        object must have a `predict` function implemented. One of the
        following: `reconst.dti.TensorModel` or
        `reconst.csdeconv.ConstrainedSphericalDeconvModel`.
    data : ndarray
        Diffusion MRI data acquired with the GradientTable of the model. Shape
        will typically be `(x, y, z, b)` where `xyz` are spatial dimensions and
        b is the number of bvals/bvecs in the GradientTable.
    folds : int
        The number of divisions to apply to the data
    model_args : list
        Additional arguments to the model initialization
    model_kwargs : dict
        Additional key-word arguments to the model initialization

    Notes
    -----
    This function assumes that a prediction API is implemented in the Model
    class for which prediction is conducted. That is, the Fit object that gets
    generated upon fitting the model needs to have a `predict` method, which
    receives a GradientTable class instance as input and produces a predicted
    signal as output.

    It also assumes that the model object has `bval` and `bvec` attributes
    holding b-values and corresponding unit vectors.

    References
    ----------
    .. [1] Rokem, A., Chan, K.L. Yeatman, J.D., Pestilli, F., Mezer, A.,
       Wandell, B.A., 2014. Evaluating the accuracy of diffusion models at
       multiple b-values with cross-validation. ISMRM 2014.
    """
    # This should always be there, if the model inherits from
    # dipy.reconst.base.ReconstModel:
    gtab = model.gtab
    data_b = data[..., ~gtab.b0s_mask]
    div_by_folds = np.mod(data_b.shape[-1], folds)
    # Make sure that an equal number of samples get left out in each fold:
    if div_by_folds != 0:
        msg = "The number of folds must divide the diffusion-weighted "
        msg += "data equally, but "
        msg += "np.mod(%s, %s) is %s" % (data_b.shape[-1], folds,
                                         div_by_folds)
        raise ValueError(msg)

    data_0 = data[..., gtab.b0s_mask]
    S0 = np.mean(data_0, -1)
    n_in_fold = data_b.shape[-1] // folds
    prediction = np.zeros(data.shape)
    # We are going to leave out some randomly chosen samples in each iteration:
    order = np.random.permutation(data_b.shape[-1])

    nz_bval = gtab.bvals[~gtab.b0s_mask]
    nz_bvec = gtab.bvecs[~gtab.b0s_mask]

    for k in range(folds):
        fold_mask = np.ones(data_b.shape[-1], dtype=bool)
        fold_idx = order[k * n_in_fold:(k + 1) * n_in_fold]
        fold_mask[fold_idx] = False
        this_data = np.concatenate([data_0, data_b[..., fold_mask]], -1)

        this_gtab = gt.gradient_table(
            np.hstack([gtab.bvals[gtab.b0s_mask], nz_bval[fold_mask]]),
            np.concatenate([gtab.bvecs[gtab.b0s_mask], nz_bvec[fold_mask]]))
        left_out_gtab = gt.gradient_table(
            np.hstack([gtab.bvals[gtab.b0s_mask], nz_bval[~fold_mask]]),
            np.concatenate([gtab.bvecs[gtab.b0s_mask], nz_bvec[~fold_mask]]))

        this_model = model.__class__(this_gtab, *model_args, **model_kwargs)
        this_fit = this_model.fit(this_data)
        if not hasattr(this_fit, 'predict'):
            err_str = "Models of type: %s "%this_model.__class__
            err_str += "do not have an implementation of model prediction"
            err_str += " and do not support cross-validation"
            raise ValueError(err_str)
        this_predict = S0[..., None] * this_fit.predict(left_out_gtab, S0=1)

        idx_to_assign = np.where(~gtab.b0s_mask)[0][~fold_mask]
        prediction[..., idx_to_assign] = \
            this_predict[..., np.sum(gtab.b0s_mask):]

    # For the b0 measurements
    prediction[..., gtab.b0s_mask] = S0[..., None]
    return prediction
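
# A standalone sketch of the fold bookkeeping above: split 12 measurements
# into 3 folds via one random permutation, so each sample is left out
# exactly once across folds (toy sizes, illustration only):
import numpy as np
n_meas, n_folds = 12, 3
per_fold = n_meas // n_folds
order_toy = np.random.permutation(n_meas)
for kk in range(n_folds):
    keep = np.ones(n_meas, dtype=bool)
    keep[order_toy[kk * per_fold:(kk + 1) * per_fold]] = False
    # `keep` now selects 8 samples for fitting; 4 are held out.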
Example #25
0
def odf_deconv(odf_sh, R, B_reg, lambda_=1., tau=0.1, r2_term=False):
    r""" ODF constrained-regularized spherical deconvolution using
    the Sharpening Deconvolution Transform (SDT) [1]_, [2]_.

    Parameters
    ----------
    odf_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``,)
         ndarray of SH coefficients for the ODF spherical function to be
         deconvolved
    R : ndarray (``(sh_order + 1)(sh_order + 2)/2``, ``(sh_order + 1)(sh_order + 2)/2``)
         SDT matrix in SH basis
    B_reg : ndarray (``(sh_order + 1)(sh_order + 2)/2``, ``(sh_order + 1)(sh_order + 2)/2``)
         SH basis matrix used for deconvolution
    lambda_ : float
         lambda parameter in minimization equation (default 1.0)
    tau : float
         threshold (tau * max(fODF)) controlling the amplitude below
         which the corresponding fODF is assumed to be zero.
    r2_term : bool
         True if the ODF is computed from a model that uses the $r^2$ term
         in the integral. Recall that Tuch's ODF (used in Q-ball Imaging
         [1]_) and the true normalized ODF definition differ by a $r^2$ term
         in the ODF integral. The original Sharpening Deconvolution
         Transform (SDT) technique [2]_ expects Tuch's ODF without the $r^2$
         term (see [3]_ for the mathematical details). This function now
         also supports ODFs that have been computed using the $r^2$ term,
         because the proper analytical response function has been derived.
         For example, models such as DSI, GQI, SHORE, CSA, Tensor and
         Multi-tensor ODFs should be deconvolved with r2_term=True.

    Returns
    -------
    fodf_sh : ndarray (``(sh_order + 1)(sh_order + 2)/2``,)
         Spherical harmonics coefficients of the constrained-regularized fiber
         ODF
    num_it : int
         Number of iterations in the constrained-regularization used for
         convergence

    References
    ----------
    .. [1] Tuch, D. MRM 2004. Q-Ball Imaging.
    .. [2] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions
    .. [3] Descoteaux, M, PhD thesis, INRIA Sophia-Antipolis, 2008.
    """
    # In ConstrainedSDTModel.fit, odf_sh is divided by its norm (Z) and sometimes
    # the norm is 0 which creates NaNs.
    if np.any(np.isnan(odf_sh)):
        return np.zeros_like(odf_sh), 0

    # Generate initial fODF estimate, which is the ODF truncated at SH order 4
    fodf_sh = np.linalg.lstsq(R, odf_sh)[0]
    fodf_sh[15:] = 0

    fodf = np.dot(B_reg, fodf_sh)

    # If sharpening a q-ball odf (it is NOT properly normalized), we need to
    # force normalization. Otherwise (for DSI, CSA, SHORE and Tensor odfs),
    # they are normalized by construction.
    if not r2_term:
        Z = np.linalg.norm(fodf)
        fodf_sh /= Z

    fodf = np.dot(B_reg, fodf_sh)
    threshold = tau * np.max(np.dot(B_reg, fodf_sh))

    k = []
    convergence = 50
    for num_it in range(1, convergence + 1):
        A = np.dot(B_reg, fodf_sh)
        k2 = np.nonzero(A < threshold)[0]

        if (k2.shape[0] + R.shape[0]) < B_reg.shape[1]:
            warnings.warn(
                'too few negative directions identified - failed to converge')
            return fodf_sh, num_it

        if num_it > 1 and k.shape[0] == k2.shape[0]:
            if (k == k2).all():
                return fodf_sh, num_it

        k = k2
        M = np.concatenate((R, lambda_ * B_reg[k, :]))
        ODF = np.concatenate((odf_sh, np.zeros(k.shape)))
        try:
            fodf_sh = np.linalg.lstsq(M, ODF)[0]
        except np.linalg.LinAlgError:
            # SVD did not converge in Linear Least Squares in current
            # voxel. Proceeding with initial SH estimate for this voxel.
            pass

    warnings.warn('maximum number of iterations exceeded - failed to converge')
    return fodf_sh, num_it
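
# A standalone sketch of the q-ball normalization step above, on toy arrays
# (names are illustrative stand-ins): divide the SH coefficients by the norm
# of the ODF sampled on the sphere, giving a unit-norm input.
import numpy as np
rng = np.random.default_rng(0)
B_toy = rng.standard_normal((30, 15))    # toy SH sampling matrix
sh_toy = rng.standard_normal(15)         # toy ODF SH coefficients
Z = np.linalg.norm(np.dot(B_toy, sh_toy))
sh_toy = sh_toy / Z                      # now ||B_toy @ sh_toy|| == 1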
Example #26
0
    def setup(self, streamline, affine, evals=[0.001, 0, 0], sphere=None):
        """
        Set up the necessary components for the LiFE model: the matrix of
        fiber-contributions to the DWI signal, and the coordinates of voxels
        for which the equations will be solved

        Parameters
        ----------
        streamline : list
            Streamlines, each is an array of shape (n, 3)
        affine : 4 by 4 array
            Mapping from the streamline coordinates to the data
        evals : list (3 items, optional)
            The eigenvalues of the canonical tensor used as a response
            function. Default:[0.001, 0, 0].
        sphere : `dipy.core.Sphere` instance or False, optional
            Whether to approximate (and cache) the signal on a discrete
            sphere. This may confer a significant speed-up in setting up the
            problem, but is not as accurate. If `False`, we use the exact
            gradients along the streamlines to calculate the matrix, instead
            of an approximation. Defaults to the 724-vertex symmetric sphere
            from :mod:`dipy.data`.
        """
        if sphere is not False:
            SignalMaker = LifeSignalMaker(self.gtab,
                                          evals=evals,
                                          sphere=sphere)

        if affine is None:
            affine = np.eye(4)
        streamline = transform_streamlines(streamline, affine)
        # Assign some local variables, for shorthand:
        all_coords = np.concatenate(streamline)
        vox_coords = unique_rows(np.round(all_coords).astype(np.intp))
        del all_coords
        # We only consider the diffusion-weighted signals:
        n_bvecs = self.gtab.bvals[~self.gtab.b0s_mask].shape[0]
        v2f, v2fn = voxel2streamline(streamline,
                                     transformed=True,
                                     affine=affine,
                                     unique_idx=vox_coords)
        # How many fibers in each voxel (this will determine how many
        # components are in the matrix):
        n_unique_f = len(np.hstack(v2f.values()))
        # Preallocate these, which will be used to generate the sparse
        # matrix:
        f_matrix_sig = np.zeros(n_unique_f * n_bvecs, dtype=np.float64)
        f_matrix_row = np.zeros(n_unique_f * n_bvecs, dtype=np.intp)
        f_matrix_col = np.zeros(n_unique_f * n_bvecs, dtype=np.intp)

        fiber_signal = []
        for s_idx, s in enumerate(streamline):
            if sphere is not False:
                fiber_signal.append(SignalMaker.streamline_signal(s))
            else:
                fiber_signal.append(streamline_signal(s, self.gtab, evals))

        del streamline
        if sphere is not False:
            del SignalMaker

        keep_ct = 0
        range_bvecs = np.arange(n_bvecs).astype(int)
        # In each voxel:
        for v_idx in range(vox_coords.shape[0]):
            mat_row_idx = (range_bvecs + v_idx * n_bvecs).astype(np.intp)
            # For each fiber in that voxel:
            for f_idx in v2f[v_idx]:
                # For each fiber-voxel combination, store the row/column
                # indices in the pre-allocated linear arrays
                f_matrix_row[keep_ct:keep_ct + n_bvecs] = mat_row_idx
                f_matrix_col[keep_ct:keep_ct + n_bvecs] = f_idx

                vox_fiber_sig = np.zeros(n_bvecs)
                for node_idx in v2fn[f_idx][v_idx]:
                    # Sum the signal from each node of the fiber in that voxel:
                    vox_fiber_sig += fiber_signal[f_idx][node_idx]
                # Add the summed signal into the corresponding rows:
                f_matrix_sig[keep_ct:keep_ct + n_bvecs] += vox_fiber_sig
                keep_ct = keep_ct + n_bvecs

        del v2f, v2fn
        # Allocate the sparse matrix, using the more memory-efficient 'csr'
        # format:
        life_matrix = sps.csr_matrix(
            (f_matrix_sig, [f_matrix_row, f_matrix_col]))

        return life_matrix, vox_coords
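
# A standalone sketch of the sparse-matrix assembly above: a CSR matrix is
# built from parallel value/row/column arrays (toy values, not LiFE data;
# duplicate (row, col) pairs would be summed):
import numpy as np
import scipy.sparse as sps
sig_toy = np.array([1.0, 2.0, 3.0])
row_toy = np.array([0, 1, 1])
col_toy = np.array([0, 0, 2])
m_toy = sps.csr_matrix((sig_toy, [row_toy, col_toy]))  # shape (2, 3)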
Example #27
0
def restore_fit_tensor(design_matrix, data, min_signal=1.0, sigma=None,
                       jac=True):
    """
    Use the RESTORE algorithm [Chang2005]_ to calculate a robust tensor fit

    Parameters
    ----------

    design_matrix : array of shape (g, 7)
        Design matrix holding the covariants used to solve for the regression
        coefficients.

    data : array of shape ([X, Y, Z, n_directions], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.

    min_signal : float, optional
        All values below min_signal are replaced with min_signal. This is
        done in order to avoid taking log(0) during the tensor fitting.
        Default = 1

    sigma : float
        An estimate of the variance. [Chang2005]_ recommends using
        1.5267 * std(background_noise), where background_noise is estimated
        from some part of the image known to contain no signal (only noise).

    jac : bool, optional
        Whether to use the Jacobian of the tensor to speed the non-linear
        optimization procedure used to fit the tensor parameters (see also
        :func:`nlls_fit_tensor`). Default: True


    Returns
    -------
    restore_params : ndarray of shape (..., 12)
        An estimate of the tensor parameters in each voxel.

    References
    ----------
    .. [Chang2005] Chang, L-C, Jones, DK and Pierpaoli, C (2005). RESTORE:
       robust estimation of tensors by outlier rejection. MRM, 53: 1088-95.

    """

    # Flatten for the iteration over voxels:
    flat_data = data.reshape((-1, data.shape[-1]))
    # Use the OLS method parameters as the starting point for the optimization:
    inv_design = np.linalg.pinv(design_matrix)
    sig = np.maximum(flat_data, min_signal)
    log_s = np.log(sig)
    D = np.dot(inv_design, log_s.T).T
    ols_params = np.reshape(D, (-1, D.shape[-1]))
    # 12 parameters per voxel (evals + evecs):
    dti_params = np.empty((flat_data.shape[0], 12))
    for vox in range(flat_data.shape[0]):
        start_params = ols_params[vox]
        # Do nlls using sigma weighting in this voxel:
        if jac:
            this_tensor, status = opt.leastsq(_nlls_err_func, start_params,
                                              args=(design_matrix,
                                                    flat_data[vox],
                                                    'sigma',
                                                    sigma),
                                              Dfun=_nlls_jacobian_func)
        else:
            this_tensor, status = opt.leastsq(_nlls_err_func, start_params,
                                              args=(design_matrix,
                                                    flat_data[vox],
                                                    'sigma',
                                                    sigma))

        # Get the residuals:
        pred_sig = np.exp(np.dot(design_matrix, this_tensor))
        residuals = flat_data[vox] - pred_sig
        # If any of the residuals are outliers (using 3 sigma as a criterion
        # following Chang et al., e.g page 1089):
        if np.any(residuals > 3 * sigma):
            # Do nlls with GMM-weighting:
            if jac:
                this_tensor, status = opt.leastsq(_nlls_err_func,
                                                  start_params,
                                                  args=(design_matrix,
                                                        flat_data[vox],
                                                        'gmm'),
                                                  Dfun=_nlls_jacobian_func)
            else:
                this_tensor, status = opt.leastsq(_nlls_err_func,
                                                  start_params,
                                                  args=(design_matrix,
                                                        flat_data[vox],
                                                        'gmm'))

            # How are you doin' on those residuals?
            pred_sig = np.exp(np.dot(design_matrix, this_tensor))
            residuals = flat_data[vox] - pred_sig
            if np.any(residuals > 3 * sigma):
                # If you still have outliers, refit without those outliers:
                non_outlier_idx = np.where(residuals <= 3 * sigma)
                clean_design = design_matrix[non_outlier_idx]
                clean_sig = flat_data[vox][non_outlier_idx]
                if np.iterable(sigma):
                    this_sigma = sigma[non_outlier_idx]
                else:
                    this_sigma = sigma

                if jac:
                    this_tensor, status = opt.leastsq(_nlls_err_func,
                                                      start_params,
                                                      args=(clean_design,
                                                            clean_sig,
                                                            'sigma',
                                                            this_sigma),
                                                      Dfun=_nlls_jacobian_func)
                else:
                    this_tensor, status = opt.leastsq(_nlls_err_func,
                                                      start_params,
                                                      args=(clean_design,
                                                            clean_sig,
                                                            'sigma',
                                                            this_sigma))

        # The parameters are the evals and the evecs:
        try:
            evals, evecs = decompose_tensor(
                from_lower_triangular(this_tensor[:6]))
            dti_params[vox, :3] = evals
            dti_params[vox, 3:] = evecs.ravel()

        # If leastsq failed to converge and produced nans, we'll resort to the
        # OLS solution in this voxel:
        except np.linalg.LinAlgError:
            print(vox)
            dti_params[vox, :] = start_params

    dti_params.shape = data.shape[:-1] + (12,)
    restore_params = dti_params
    return restore_params