Example #1
def compmainevec(tensMat, bMaskSource=None):
    """Calcul du vecteur propre avec la plus grande valeur propre à chaque
    position du champ de tenseur tensMat"""

    # Initialize the result arrays
    mainVec = np.zeros(tensMat.shape[:3] + (3,))
    allEve = np.zeros(tensMat.shape[:3] + (3,3))
    allEva = np.zeros(tensMat.shape[:3] + (3,))

    # Define the iteration indices according to the brain mask
    if bMaskSource is None:
        itIdx = ndindex(tensMat.shape[:3])
    else:
        bMask = openimage(bMaskSource)
        itIdx = bMask.nonzero()
        itIdx = list(zip(itIdx[0], itIdx[1], itIdx[2]))

    # Compute the principal eigenvector
    for idx in itIdx:
        dLin = tensMat[idx]
        eva, eve = compLinDTensorEigv(dLin, compEigVec=True)
        mainEvIdx = eva.argmax()
        mainVec[idx] = eve[:, mainEvIdx]
        allEve[idx] = eve
        allEva[idx] = eva

    return mainVec, allEve, allEva
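Note: compLinDTensorEigv and openimage are project helpers that are not shown in this snippet. As a point of reference, here is a minimal, hypothetical stand-in for the eigen-decomposition step, assuming the six coefficients are ordered (Dxx, Dxy, Dxz, Dyy, Dyz, Dzz):

import numpy as np

def comp_lin_dtensor_eigv(dLin, compEigVec=False):
    # Rebuild the symmetric 3x3 tensor from its six unique coefficients,
    # then diagonalize it. np.linalg.eigh returns eigenvalues in ascending
    # order with eigenvectors as columns, which is all compmainevec needs
    # since it picks the eigenvector via eva.argmax().
    D = np.array([[dLin[0], dLin[1], dLin[2]],
                  [dLin[1], dLin[3], dLin[4]],
                  [dLin[2], dLin[4], dLin[5]]])
    eva, eve = np.linalg.eigh(D)
    return (eva, eve) if compEigVec else eva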
Example #2
def peak_extraction(odfs_file, sphere_vertices_file, out_file, relative_peak_threshold=.5,
                    peak_normalize=1, min_separation_angle=45, max_peak_number=5):

    in_nifti = nib.load(odfs_file)
    refaff = in_nifti.affine
    odfs = np.asanyarray(in_nifti.dataobj)

    vertices = np.loadtxt(sphere_vertices_file)
    sphere = Sphere(xyz=vertices)

    num_peak_coeffs = max_peak_number * 3
    peaks = np.zeros(odfs.shape[:-1] + (num_peak_coeffs,))

    for index in ndindex(odfs.shape[:-1]):
        vox_peaks, values, _ = peak_directions(odfs[index], sphere,
                                               float(relative_peak_threshold),
                                               float(min_separation_angle))

        if peak_normalize == 1:
            values /= values[0]
            vox_peaks = vox_peaks * values[:, None]

        vox_peaks = vox_peaks.ravel()
        m = vox_peaks.shape[0]
        if m > num_peak_coeffs:
            m = num_peak_coeffs
        peaks[index][:m] = vox_peaks[:m]

    peaks_img = nib.Nifti1Image(peaks.astype(np.float32), refaff)
    nib.save(peaks_img, out_file)
Example #3
def local_piesno(data, N, size=5, return_mask=True):

    m_out = np.zeros(data.shape[:-1], dtype=bool)
    reshaped_maps = sliding_window(data, (size, size, size, data.shape[-1]))

    sigma = np.zeros(reshaped_maps.shape[0], dtype=np.float32)
    mask = np.zeros((reshaped_maps.shape[0], size**3), dtype=bool)

    for i in range(reshaped_maps.shape[0]):
        cur_map = reshaped_maps[i].reshape(size**3, 1, -1)
        sigma[i], m = piesno(cur_map, N=N, return_mask=True)
        mask[i] = np.squeeze(m)

    s_out = sigma.reshape(data.shape[0] // size, data.shape[1] // size, data.shape[2] // size)

    for n, i in enumerate(ndindex(s_out.shape)):
        i = np.array(i) * size
        j = i + size
        m_out[i[0]:j[0], i[1]:j[1], i[2]:j[2]] = mask[n].reshape(size, size, size)

    interpolated = np.zeros_like(data[..., 0], dtype=np.float32)
    x, y, z = np.array(s_out.shape) * size
    interpolated[:x, :y, :z] = zoom(s_out, size, order=1)

    if return_mask:
        return interpolated, m_out

    return interpolated
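sliding_window is assumed here to split the volume into non-overlapping size x size x size blocks; the s_out reshape above only works under that assumption. A minimal sketch of such a helper:

import numpy as np

def sliding_window(data, window):
    # Hypothetical non-overlapping block splitter consistent with the usage
    # above: crop the volume to a multiple of the window, then return an
    # array of shape (n_blocks,) + window with blocks in C order.
    sx, sy, sz, sv = window
    nx, ny, nz = (data.shape[0] // sx, data.shape[1] // sy,
                  data.shape[2] // sz)
    cropped = data[:nx * sx, :ny * sy, :nz * sz, :sv]
    blocks = cropped.reshape(nx, sx, ny, sy, nz, sz, sv)
    return blocks.transpose(0, 2, 4, 1, 3, 5, 6).reshape(-1, sx, sy, sz, sv)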
Example #4
def dirs_from_odf(odfs, sphere, relative_peak_threshold=.35,
                  min_separation_angle=25.,
                  peak_normalize=True,
                  max_peak_number=5):
    # or directions from odf
    num_peak_coeffs = max_peak_number * 3
    peaks = np.zeros(odfs.shape[:-1] + (num_peak_coeffs,))

    for index in ndindex(odfs.shape[:-1]):
        vox_peaks, values, _ = peak_directions(odfs[index], sphere,
                                               float(relative_peak_threshold),
                                               float(min_separation_angle))

        if peak_normalize is True:
            values /= values[0]
            vox_peaks = vox_peaks * values[:, None]

        vox_peaks = vox_peaks.ravel()
        m = vox_peaks.shape[0]

        if m > num_peak_coeffs:
            m = num_peak_coeffs
        peaks[index][:m] = vox_peaks[:m]

    peaks = peaks.reshape(odfs.shape[:3] + (max_peak_number, 3))
    return peaks
Example #5
def peaks_extract(out_file, odf, affine, sphere,
                  relative_peak_threshold=.5,
                  peak_normalize=1,
                  min_separation_angle=45,
                  max_peak_number=5):

    num_peak_coeffs = max_peak_number * 3
    peaks = np.zeros(odf.shape[:-1] + (num_peak_coeffs,))

    for index in ndindex(odf.shape[:-1]):
        vox_peaks, values, _ = peak_directions(odf[index], sphere,
                                               float(relative_peak_threshold),
                                               float(min_separation_angle))

        if peak_normalize == 1:
            values /= values[0]
            vox_peaks = vox_peaks * values[:, None]

        vox_peaks = vox_peaks.ravel()
        m = vox_peaks.shape[0]
        if m > num_peak_coeffs:
            m = num_peak_coeffs
        peaks[index][:m] = vox_peaks[:m]

    peaks_img = nib.Nifti1Image(peaks.astype(np.float32), affine)
    nib.save(peaks_img, out_file)
Example #6
 def __call__(self, *args, **kwargs):
     result = np.empty(self.shape, dtype=object)
     for ijk in ndindex(self.shape):
         item = self[ijk]
         if item is not None:
             result[ijk] = item(*args, **kwargs)
     return _squash(result)
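_squash is assumed to collapse the object array back into a plain ndarray whenever every element shares the same shape. A rough, hypothetical reading of what it does:

import numpy as np

def _squash_sketch(arr):
    # Hypothetical equivalent of _squash: stack same-shaped elements into
    # one ndarray; fall back to the object array when that is impossible
    # (None entries or ragged element shapes).
    if any(item is None for item in arr.flat):
        return arr
    try:
        first = np.asarray(arr.flat[0])
        stacked = np.array([np.asarray(item) for item in arr.flat])
        return stacked.reshape(arr.shape + first.shape)
    except ValueError:
        return arr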
Example #7
def bench_quick_squash():
    # nosetests -s --match '(?:^|[\\b_\\.//-])[Bb]ench'
    repeat = 10
    shape = (300, 200)
    arrs = np.zeros(shape, dtype=object)
    scalars = np.zeros(shape, dtype=object)
    for ijk in ndindex(arrs.shape):
        arrs[ijk] = np.ones((3, 5))
        scalars[ijk] = np.float32(0)
    print('\nSquashing benchmarks')
    for name, objs in (
        ('floats', np.zeros(shape, float).astype(object)),
        ('ints', np.zeros(shape, int).astype(object)),
        ('arrays', arrs),
        ('scalars', scalars),
    ):
        print(name)
        timed0 = measure("quick_squash(objs)", repeat)
        timed1 = measure("old_squash(objs)", repeat)
        print("fast %4.2f; slow %4.2f" % (timed0, timed1))
        objs[50, 50] = None
        timed0 = measure("quick_squash(objs)", repeat)
        timed1 = measure("old_squash(objs)", repeat)
        print("With None: fast %4.2f; slow %4.2f" % (timed0, timed1))
        msk = objs != np.array(None)
        timed0 = measure("quick_squash(objs, msk)", repeat)
        timed1 = measure("old_squash(objs, msk)", repeat)
        print("With mask: fast %4.2f; slow %4.2f" % (timed0, timed1))
        objs[50, 50] = np.float32(0)
        timed0 = measure("quick_squash(objs, msk)", repeat)
        timed1 = measure("old_squash(objs, msk)", repeat)
        print("Other dtype: fast %4.2f; slow %4.2f" % (timed0, timed1))
Example #10
def test_binary_stopping_criterion():
    """This tests that the binary stopping criterion returns expected
    streamline statuses.
    """

    mask = np.random.random((4, 4, 4))
    mask[mask < 0.4] = 0.0

    btc_boolean = BinaryStoppingCriterion(mask > 0)
    btc_float64 = BinaryStoppingCriterion(mask)

    # Test voxel center
    for ind in ndindex(mask.shape):
        pts = np.array(ind, dtype='float64')
        state_boolean = btc_boolean.check_point(pts)
        state_float64 = btc_float64.check_point(pts)
        if mask[ind] > 0:
            npt.assert_equal(state_boolean, int(StreamlineStatus.TRACKPOINT))
            npt.assert_equal(state_float64, int(StreamlineStatus.TRACKPOINT))
        else:
            npt.assert_equal(state_boolean, int(StreamlineStatus.ENDPOINT))
            npt.assert_equal(state_float64, int(StreamlineStatus.ENDPOINT))

    # Test random points in voxel
    for ind in ndindex(mask.shape):
        for _ in range(50):
            pts = np.array(ind, dtype='float64') + np.random.random(3) - 0.5
            state_boolean = btc_boolean.check_point(pts)
            state_float64 = btc_float64.check_point(pts)
            if mask[ind] > 0:
                npt.assert_equal(state_boolean,
                                 int(StreamlineStatus.TRACKPOINT))
                npt.assert_equal(state_float64,
                                 int(StreamlineStatus.TRACKPOINT))
            else:
                npt.assert_equal(state_boolean, int(StreamlineStatus.ENDPOINT))
                npt.assert_equal(state_float64, int(StreamlineStatus.ENDPOINT))

    # Test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2], [0, 0.5, -0.51],
                   [0, -0.51, 0.1], [4, 0, 0]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state_boolean = btc_boolean.check_point(pts)
        state_float64 = btc_float64.check_point(pts)
        npt.assert_equal(state_boolean, int(StreamlineStatus.OUTSIDEIMAGE))
        npt.assert_equal(state_float64, int(StreamlineStatus.OUTSIDEIMAGE))
Example #11
def test_binary_tissue_classifier():
    """This tests that the binary tissue classifier returns expected
     tissue types.
    """

    mask = np.random.random((4, 4, 4))
    mask[mask < 0.4] = 0.0

    btc_boolean = BinaryTissueClassifier(mask > 0)
    btc_float64 = BinaryTissueClassifier(mask)

    # test voxel center
    for ind in ndindex(mask.shape):
        pts = np.array(ind, dtype='float64')
        state_boolean = btc_boolean.check_point(pts)
        state_float64 = btc_float64.check_point(pts)
        if mask[ind] > 0:
            npt.assert_equal(state_boolean, TissueTypes.TRACKPOINT)
            npt.assert_equal(state_float64, TissueTypes.TRACKPOINT)
        else:
            npt.assert_equal(state_boolean, TissueTypes.ENDPOINT)
            npt.assert_equal(state_float64, TissueTypes.ENDPOINT)

    # test random points in voxel
    for ind in ndindex(mask.shape):
        for _ in range(50):
            pts = np.array(ind, dtype='float64') + np.random.random(3) - 0.5
            state_boolean = btc_boolean.check_point(pts)
            state_float64 = btc_float64.check_point(pts)
            if mask[ind] > 0:
                npt.assert_equal(state_boolean, TissueTypes.TRACKPOINT)
                npt.assert_equal(state_float64, TissueTypes.TRACKPOINT)
            else:
                npt.assert_equal(state_boolean, TissueTypes.ENDPOINT)
                npt.assert_equal(state_float64, TissueTypes.ENDPOINT)

    # test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2],
                   [0, 0.5, -0.51], [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state_boolean = btc_boolean.check_point(pts)
        state_float64 = btc_float64.check_point(pts)
        npt.assert_equal(state_boolean, TissueTypes.OUTSIDEIMAGE)
        npt.assert_equal(state_float64, TissueTypes.OUTSIDEIMAGE)
Example #12
def test_binary_tissue_classifier():
    """This tests that the binary tissue classifier returns expected
     tissue types.
    """

    mask = np.random.random((4, 4, 4))
    mask[mask < 0.4] = 0.0

    btc_boolean = BinaryTissueClassifier(mask > 0)
    btc_float64 = BinaryTissueClassifier(mask)

    # test voxel center
    for ind in ndindex(mask.shape):
        pts = np.array(ind, dtype='float64')
        state_boolean = btc_boolean.check_point(pts)
        state_float64 = btc_float64.check_point(pts)
        if mask[ind] > 0:
            npt.assert_equal(state_boolean, TissueTypes.TRACKPOINT)
            npt.assert_equal(state_float64, TissueTypes.TRACKPOINT)
        else:
            npt.assert_equal(state_boolean, TissueTypes.ENDPOINT)
            npt.assert_equal(state_float64, TissueTypes.ENDPOINT)

    # test random points in voxel
    for ind in ndindex(mask.shape):
        for _ in range(50):
            pts = np.array(ind, dtype='float64') + np.random.random(3) - 0.5
            state_boolean = btc_boolean.check_point(pts)
            state_float64 = btc_float64.check_point(pts)
            if mask[ind] > 0:
                npt.assert_equal(state_boolean, TissueTypes.TRACKPOINT)
                npt.assert_equal(state_float64, TissueTypes.TRACKPOINT)
            else:
                npt.assert_equal(state_boolean, TissueTypes.ENDPOINT)
                npt.assert_equal(state_float64, TissueTypes.ENDPOINT)

    # test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2], [0, 0.5, -0.51],
                   [0, -0.51, 0.1], [4, 0, 0]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state_boolean = btc_boolean.check_point(pts)
        state_float64 = btc_float64.check_point(pts)
        npt.assert_equal(state_boolean, TissueTypes.OUTSIDEIMAGE)
        npt.assert_equal(state_float64, TissueTypes.OUTSIDEIMAGE)
Example #13
def odf_sh_to_sharp(odfs_sh, sphere, basis=None, ratio=3 / 15., sh_order=8, lambda_=1., tau=0.1):
    r""" Sharpen odfs using the spherical deconvolution transform [1]_

    This function can be used to sharpen any smooth ODF spherical function.
    In theory it should only be used to sharpen QballModel ODFs, but in
    practice one can play with the deconvolution ratio and sharpen almost
    any ODF-like spherical function. The constrained regularization is
    stable and will not only sharpen the ODF peaks but also regularize the
    noisy ones.

    Parameters
    ----------
    odfs_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``, )
        array of odfs expressed as spherical harmonics coefficients
    sphere : Sphere
        sphere used to build the regularization matrix
    basis : {None, 'mrtrix', 'fibernav'}
        spherical harmonic basis. None defaults to the fibernav basis.
    ratio : float
        ratio of the smallest vs the largest eigenvalue of the single
        prolate tensor response function
        (:math:`\frac{\lambda_2}{\lambda_1}`)
    sh_order : int
        maximal SH order of the SH representation
    lambda_ : float
        lambda parameter (see odfdeconv) (default 1.0)
    tau : float
        tau parameter in the L matrix construction (see odfdeconv) (default 0.1)

    Returns
    -------
    fodf_sh : ndarray
        sharpened odf expressed as spherical harmonics coefficients

    References
    ----------
    .. [1] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and Probabilistic Tractography Based
           on Complex Fibre Orientation Distributions
    """
    m, n = sph_harm_ind_list(sh_order)
    r, theta, phi = cart2sphere(sphere.x, sphere.y, sphere.z)

    real_sym_sh = sph_harm_lookup[basis]

    B_reg, m, n = real_sym_sh(sh_order, theta[:, None], phi[:, None])
    
    R, P = forward_sdt_deconv_mat(ratio, sh_order)

    # scale lambda to account for differences in the number of
    # SH coefficients and number of mapped directions
    lambda_ = lambda_ * R.shape[0] * R[0, 0] / B_reg.shape[0]

    fodf_sh = np.zeros(odfs_sh.shape)

    for index in ndindex(odfs_sh.shape[:-1]):
        fodf_sh[index], num_it = odf_deconv(odfs_sh[index], sh_order, R,
                                            B_reg, lambda_=lambda_, tau=tau)

    return fodf_sh
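odf_sh_to_sharp ships with dipy.reconst.csdeconv; a hedged usage sketch, assuming a 4D volume odfs_sh of SH coefficients is already in memory:

from dipy.data import get_sphere

sphere = get_sphere('symmetric724')
# Sharpen the SH volume with the default deconvolution settings:
fodf_sh = odf_sh_to_sharp(odfs_sh, sphere, basis=None, ratio=3 / 15.,
                          sh_order=8, lambda_=1., tau=0.1)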
Example #14
def test_act_tissue_classifier():
    """This tests that the act tissue classifier returns expected
     tissue types.
    """

    gm = np.random.random((4, 4, 4))
    wm = np.random.random((4, 4, 4))
    csf = np.random.random((4, 4, 4))
    tissue_sum = gm + wm + csf
    gm /= tissue_sum
    wm /= tissue_sum
    csf /= tissue_sum

    act_tc = ActTissueClassifier(include_map=gm, exclude_map=csf)

    # test voxel center
    for ind in ndindex(wm.shape):
        pts = np.array(ind, dtype='float64')
        state = act_tc.check_point(pts)
        if csf[ind] > 0.5:
            npt.assert_equal(state, TissueTypes.INVALIDPOINT)
        elif gm[ind] > 0.5:
            npt.assert_equal(state, TissueTypes.ENDPOINT)
        else:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)

    # test random points in voxel
    inds = [[0, 1.4, 2.2], [0, 2.3, 2.3], [0, 2.2, 1.3], [0, 0.9, 2.2],
            [0, 2.8, 1.1], [0, 1.1, 3.3], [0, 2.1, 1.9], [0, 3.1, 3.1],
            [0, 0.1, 0.1], [0, 0.9, 0.5], [0, 0.9, 0.5], [0, 2.9, 0.1]]
    for pts in inds:
        pts = np.array(pts, dtype='float64')
        state = act_tc.check_point(pts)
        gm_res = scipy.ndimage.map_coordinates(gm,
                                               np.reshape(pts, (3, 1)),
                                               order=1,
                                               mode='nearest')
        csf_res = scipy.ndimage.map_coordinates(csf,
                                                np.reshape(pts, (3, 1)),
                                                order=1,
                                                mode='nearest')
        if csf_res > 0.5:
            npt.assert_equal(state, TissueTypes.INVALIDPOINT)
        elif gm_res > 0.5:
            npt.assert_equal(state, TissueTypes.ENDPOINT)
        else:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)

    # test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2], [0, 0.5, -0.51],
                   [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state = act_tc.check_point(pts)
        npt.assert_equal(state, TissueTypes.OUTSIDEIMAGE)
Example #15
    def fit(self, data, mask=None):
        """
        Fit the SpatContModel object to data

        Parameters
        ----------
        data : array
            The measured signal.

        mask : array, optional
            A boolean array used to mark the coordinates in the data that
            should be analyzed. Has the shape `data.shape[:-1]`. Default: the
            entire volume, except the edges.

        Returns
        -------
        SpatContFit

        """
        if mask is None:
            mask = np.ones(data.shape[:-1])
        # Pre-allocate the final result:
        beta = np.zeros(data.shape[:-1] + (self.design_matrix.shape[-1], ))
        # We will go voxel by voxel, avoiding the edges altogether:
        coords = np.array(list(dnd.ndindex((data.shape[0]-2,
                                            data.shape[1]-2,
                                            data.shape[2]-2)))) + 1

        S0 = np.mean(data[..., self.gtab.b0s_mask], -1)
        # Normalize and remove mean:
        norm_data = (data[..., ~self.gtab.b0s_mask] / S0[..., None])
        mean_data = np.mean(norm_data, -1)
        norm_data = norm_data - mean_data[..., None]
        # Weights for the 3x3 matrix of voxels around the voxel of interest,
        # calculated by the distance from the center of the central voxel:
        dist_weights = ut.signal_weights(self.gtab, self.tau)
        dw_shape = np.sum(~self.gtab.b0s_mask)
        for i, j, k in coords:
            local_mask = mask[i-1:i+2, j-1:j+2, k-1:k+2]
            # Don't bother if there's nothing to analyze here:
            if np.any(local_mask):
                local_data = norm_data[i-1:i+2, j-1:j+2, k-1:k+2]
                local_dist_weight = (dist_weights *
                                     local_mask[..., None]).ravel()
                local_dm = (self.sc_design_matrix * local_mask[..., None, None])
                local_dm = local_dm.reshape(-1, self.sc_design_matrix.shape[-1])
                fit_it = local_data.ravel() * local_dist_weight
                this_alpha = (self.orig_alpha *
                              np.sum(local_dist_weight**2) / local_dm.shape[0])
                self.solver.alpha = this_alpha
                beta[i, j, k] = self.solver.fit(local_dm, fit_it).coef_
        return SpatContFit(self, beta, S0, mean_data)
Example #16
def peaks_from_odfs(odf4d,
                    sphere,
                    relative_peak_threshold,
                    min_separation_angle,
                    mask=None,
                    gfa_thr=0,
                    normalize_peaks=False,
                    npeaks=5):

    shape = odf4d.shape[:-1]
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as data.")

    gfa_array = np.zeros(shape)
    qa_array = np.zeros((shape + (npeaks, )))

    peak_dirs = np.zeros((shape + (npeaks, 3)))
    peak_values = np.zeros((shape + (npeaks, )))
    peak_indices = np.zeros((shape + (npeaks, )), dtype='int')
    peak_indices.fill(-1)

    global_max = -np.inf
    for idx in ndindex(shape):
        if not mask[idx]:
            continue
        odf = odf4d[idx]
        gfa_array[idx] = gfa(odf)
        if gfa_array[idx] < gfa_thr:
            global_max = max(global_max, odf.max())
            continue
        # Get peaks of odf
        direction, pk, ind = peak_directions(odf, sphere,
                                             relative_peak_threshold,
                                             min_separation_angle)
        # Calculate peak metrics
        if pk.shape[0] != 0:
            global_max = max(global_max, pk[0])
            n = min(npeaks, pk.shape[0])
            qa_array[idx][:n] = pk[:n] - odf.min()
            peak_dirs[idx][:n] = direction[:n]
            peak_indices[idx][:n] = ind[:n]
            peak_values[idx][:n] = pk[:n]

            if normalize_peaks:
                peak_values[idx][:n] /= pk[0]
                peak_dirs[idx] *= peak_values[idx][:, None]

    qa_array /= global_max

    return peak_dirs, peak_values
Example #17
def multiprocess_stabilisation(arglist):
    """Helper function for multiprocessing the stabilization part."""
    data, m_hat, mask, sigma, N = arglist
    out = np.zeros(data.shape, dtype=np.float32)

    for idx in ndindex(data.shape):

        if sigma[idx] > 0 and mask[idx]:
            eta = fixed_point_finder(m_hat[idx], sigma[idx], N)
            out[idx] = chi_to_gauss(data[idx], eta, sigma[idx], N)

    return out
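Given how arglist is unpacked above, a minimal sketch of fanning this helper out over first-axis chunks with the standard library (the chunking scheme itself is an assumption, not part of the snippet):

import numpy as np
from multiprocessing import Pool

def stabilise_parallel(data, m_hat, mask, sigma, N, n_workers=4):
    # Split every array along the first axis and let each worker run
    # multiprocess_stabilisation on its own chunk; Pool.map preserves
    # chunk order, so the pieces can simply be concatenated back.
    chunks = list(zip(np.array_split(data, n_workers),
                      np.array_split(m_hat, n_workers),
                      np.array_split(mask, n_workers),
                      np.array_split(sigma, n_workers),
                      [N] * n_workers))
    with Pool(n_workers) as pool:
        out = pool.map(multiprocess_stabilisation, chunks)
    return np.concatenate(out)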
Example #18
def test_act_tissue_classifier():
    """This tests that the act tissue classifier returns expected
     tissue types.
    """

    gm = np.random.random((4, 4, 4))
    wm = np.random.random((4, 4, 4))
    csf = np.random.random((4, 4, 4))
    tissue_sum = gm + wm + csf
    gm /= tissue_sum
    wm /= tissue_sum
    csf /= tissue_sum

    act_tc = ActTissueClassifier(include_map=gm, exclude_map=csf)

    # test voxel center
    for ind in ndindex(wm.shape):
        pts = np.array(ind, dtype='float64')
        state = act_tc.check_point(pts)
        if csf[ind] > 0.5:
            npt.assert_equal(state, TissueTypes.INVALIDPOINT)
        elif gm[ind] > 0.5:
            npt.assert_equal(state, TissueTypes.ENDPOINT)
        else:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)

    # test random points in voxel
    inds = [[0, 1.4, 2.2], [0, 2.3, 2.3], [0, 2.2, 1.3], [0, 0.9, 2.2],
            [0, 2.8, 1.1], [0, 1.1, 3.3], [0, 2.1, 1.9], [0, 3.1, 3.1],
            [0, 0.1, 0.1], [0, 0.9, 0.5], [0, 0.9, 0.5], [0, 2.9, 0.1]]
    for pts in inds:
        pts = np.array(pts, dtype='float64')
        state = act_tc.check_point(pts)
        gm_res = scipy.ndimage.map_coordinates(
            gm, np.reshape(pts, (3, 1)), order=1, mode='nearest')
        csf_res = scipy.ndimage.map_coordinates(
            csf, np.reshape(pts, (3, 1)), order=1, mode='nearest')
        if csf_res > 0.5:
            npt.assert_equal(state, TissueTypes.INVALIDPOINT)
        elif gm_res > 0.5:
            npt.assert_equal(state, TissueTypes.ENDPOINT)
        else:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)

    # test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2],
                   [0, 0.5, -0.51], [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state = act_tc.check_point(pts)
        npt.assert_equal(state, TissueTypes.OUTSIDEIMAGE)
Example #19
def tenseur(dmri, gtab, bMaskSource=None):
    """Estimation des tenseurs par méthode des moindres carrés.

    Paramètres
    ----------
    dmri: Fichier nifti contenant une image d'IRM de diffusion
    gtab: Table des gradients utilisée
    bMaskSource: Fichier nifti contenant un masque binaire du cerveau.

    Retour
    ------
    tenseur: nparray contenant les tenseurs à chaque voxel

    Example
    -------
    >> tenseur = q2.tenseur('Data/dmri.nii',
                 '../Data/gradient_directions_b-values.txt')"""

    # Load the data and declare variables
    dmri = nib.load(dmri)
    data = np.asanyarray(dmri.dataobj)

    gtab = np.genfromtxt(gtab)[1:]
    S0 = data[..., 0]
    S = data[..., 1:]

    B = np.array([gtab[:, 0] ** 2, gtab[:, 0] * gtab[:, 1],
                  gtab[:, 0] * gtab[:, 2], gtab[:, 1] ** 2,
                  gtab[:, 1] * gtab[:, 2], gtab[:, 2] ** 2]).T
    tenseur = np.empty(data.shape[:3] + (6,))

    # Define the iteration indices according to the brain mask
    if bMaskSource is None:
        itIdx = ndindex(data.shape[:3])
    else:
        bMask = openimage(bMaskSource)
        itIdx = bMask.nonzero()
        itIdx = list(zip(itIdx[0], itIdx[1], itIdx[2]))

    # Compute the tensors at all indices defined above
    for index in itIdx:
        if S0[index] == 0:
            tenseur[index] = np.zeros(6)
        else:
            X = -((1 / gtab[:, 3].astype(float)) *
                  (np.log(S[index].astype(float) / S0[index].astype(float))))
            tenseur[index] = np.dot(np.linalg.pinv(B), X)

    tenseur[np.isinf(tenseur) | np.isnan(tenseur)] = 0
    return tenseur.astype('float32')
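The loop implements the standard linearized least-squares tensor fit: starting from the Stejskal-Tanner signal model $S = S_0\, e^{-b\, g^\top D g}$, taking logs gives $X = -\frac{1}{b}\ln(S/S_0) = B\,d$, where $d$ holds the six unique tensor coefficients and $B$ is the matrix of gradient products built above; the code then solves $d = B^{+}X$ with the Moore-Penrose pseudo-inverse.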
Example #20
def test_cmc_stopping_criterion():
    """This tests that the cmc stopping criterion returns expected
    streamline statuses.
    """

    gm = np.array([[[1, 1], [0, 0], [0, 0]]])
    wm = np.array([[[0, 0], [1, 1], [0, 0]]])
    csf = np.array([[[0, 0], [0, 0], [1, 1]]])
    include_map = gm
    exclude_map = csf

    cmc_tc = CmcStoppingCriterion(include_map=include_map,
                                  exclude_map=exclude_map,
                                  step_size=1,
                                  average_voxel_size=1)
    cmc_tc_from_pve = CmcStoppingCriterion.from_pve(wm_map=wm,
                                                    gm_map=gm,
                                                    csf_map=csf,
                                                    step_size=1,
                                                    average_voxel_size=1)

    # Test constructors
    for idx in np.ndindex(wm.shape):
        idx = np.asarray(idx, dtype="float64")
        npt.assert_almost_equal(cmc_tc.get_include(idx),
                                cmc_tc_from_pve.get_include(idx))
        npt.assert_almost_equal(cmc_tc.get_exclude(idx),
                                cmc_tc_from_pve.get_exclude(idx))

    # Test voxel center
    for ind in ndindex(wm.shape):
        pts = np.array(ind, dtype='float64')
        state = cmc_tc.check_point(pts)
        if csf[ind] == 1:
            npt.assert_equal(state, int(StreamlineStatus.INVALIDPOINT))
        elif gm[ind] == 1:
            npt.assert_equal(state, int(StreamlineStatus.ENDPOINT))
        else:
            npt.assert_equal(state, int(StreamlineStatus.TRACKPOINT))

    # Test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2], [0, 0.5, -0.51],
                   [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        npt.assert_equal(cmc_tc.check_point(pts),
                         int(StreamlineStatus.OUTSIDEIMAGE))
        npt.assert_equal(cmc_tc.get_exclude(pts), 0)
        npt.assert_equal(cmc_tc.get_include(pts), 0)
Example #21
def test_cmc_tissue_classifier():
    """This tests that the cmc tissue classifier returns expected
     tissue types.
    """

    gm = np.array([[[1, 1], [0, 0], [0, 0]]])
    wm = np.array([[[0, 0], [1, 1], [0, 0]]])
    csf = np.array([[[0, 0], [0, 0], [1, 1]]])
    include_map = gm
    exclude_map = csf

    cmc_tc = CmcTissueClassifier(include_map=include_map,
                                 exclude_map=exclude_map,
                                 step_size=1,
                                 average_voxel_size=1)
    cmc_tc_from_pve = CmcTissueClassifier.from_pve(wm_map=wm,
                                                   gm_map=gm,
                                                   csf_map=csf,
                                                   step_size=1,
                                                   average_voxel_size=1)

    # Test constructors
    for idx in np.ndindex(wm.shape):
        idx = np.asarray(idx, dtype="float64")
        npt.assert_almost_equal(cmc_tc.get_include(idx),
                                cmc_tc_from_pve.get_include(idx))
        npt.assert_almost_equal(cmc_tc.get_exclude(idx),
                                cmc_tc_from_pve.get_exclude(idx))

    # Test voxel center
    for ind in ndindex(wm.shape):
        pts = np.array(ind, dtype='float64')
        state = cmc_tc.check_point(pts)
        if csf[ind] == 1:
            npt.assert_equal(state, TissueTypes.INVALIDPOINT)
        elif gm[ind] == 1:
            npt.assert_equal(state, TissueTypes.ENDPOINT)
        else:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)

    # Test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2],
                   [0, 0.5, -0.51], [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        npt.assert_equal(cmc_tc.check_point(pts), TissueTypes.OUTSIDEIMAGE)
        npt.assert_equal(cmc_tc.get_exclude(pts), 0)
        npt.assert_equal(cmc_tc.get_include(pts), 0)
Example #22
def show_peak_directions(fpng, peaks, scale=0.3, x=10, y=0, z=10):
    r = fvtk.ren()

    for index in ndindex(peaks.shape[:-1]):
        peak = peaks[index]
        directions = peak.reshape(peak.shape[0] // 3, 3)

        # pos = np.array(index)
        for i in range(directions.shape[0]):
            if norm(directions[i]) != 0:
                line_actor = fvtk.line(
                    index + scale * np.vstack((-directions[i], directions[i])),
                    abs(directions[i] / norm(directions[i])))
                line_actor.RotateX(-90)
                fvtk.add(r, line_actor)

    fvtk.show(r)
    fvtk.record(r, out_path=fpng, size=(900, 900))
    fvtk.clear(r)
Example #23
def test_threshold_tissue_classifier():
    """This tests that the thresholdy tissue classifier returns expected
     tissue types.
    """

    tissue_map = np.random.random((4, 4, 4))

    ttc = ThresholdTissueClassifier(tissue_map.astype('float32'), 0.5)

    # test voxel center
    for ind in ndindex(tissue_map.shape):
        pts = np.array(ind, dtype='float64')
        state = ttc.check_point(pts)
        if tissue_map[ind] > 0.5:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)
        else:
            npt.assert_equal(state, TissueTypes.ENDPOINT)

    # test random points in voxel
    inds = [[0, 1.4, 2.2], [0, 2.3, 2.3], [0, 2.2, 1.3], [0, 0.9, 2.2],
            [0, 2.8, 1.1], [0, 1.1, 3.3], [0, 2.1, 1.9], [0, 3.1, 3.1],
            [0, 0.1, 0.1], [0, 0.9, 0.5], [0, 0.9, 0.5], [0, 2.9, 0.1]]
    for pts in inds:
        pts = np.array(pts, dtype='float64')
        state = ttc.check_point(pts)
        res = scipy.ndimage.map_coordinates(tissue_map,
                                            np.reshape(pts, (3, 1)),
                                            order=1,
                                            mode='nearest')
        if res > 0.5:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)
        else:
            npt.assert_equal(state, TissueTypes.ENDPOINT)

    # test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2], [0, 0.5, -0.51],
                   [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state = ttc.check_point(pts)
        npt.assert_equal(state, TissueTypes.OUTSIDEIMAGE)
Example #24
    def new_fit(self, data, mask=None):
        """Fit method for every voxel in data"""
        # If only one voxel just return a normal fit
        if data.ndim == 1:
            return single_voxel_fit(self, data)

        # Make a mask if mask is None
        if mask is None:
            shape = data.shape[:-1]
            strides = (0, ) * len(shape)
            mask = as_strided(np.array(True), shape=shape, strides=strides)
        # Check the shape of the mask if mask is not None
        elif mask.shape != data.shape[:-1]:
            raise ValueError("mask and data shape do not match")

        # Fit data where mask is True
        fit_array = np.empty(data.shape[:-1], dtype=object)
        for ijk in ndindex(data.shape[:-1]):
            if mask[ijk]:
                fit_array[ijk] = single_voxel_fit(self, data[ijk])
        return MultiVoxelFit(self, fit_array, mask)
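For context, new_fit is the inner function of a decorator (DIPY's multi_voxel_fit follows this pattern). A hedged toy usage under that assumption, with MeanModel and MeanFit purely hypothetical:

import numpy as np

class MeanFit:
    def __init__(self, model, mean):
        self.mean = mean

class MeanModel:
    # Decorating a per-voxel fit with multi_voxel_fit (assumed to return
    # the new_fit above) makes it work on whole volumes.
    @multi_voxel_fit
    def fit(self, single_voxel_data):
        return MeanFit(self, np.mean(single_voxel_data))

data = np.random.random((4, 4, 4, 10))
fits = MeanModel().fit(data)  # MultiVoxelFit over a 4x4x4 object array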
Example #26
def show_peak_directions(peaks, scale=0.3, x=10, y=0, z=10):
    """ visualize peak directions

    Parameters
    ----------
    peaks : ndarray,
            (X, Y, Z, 15)
    scale : float
            voxel scaling (0 <= `scale` <= 1)
    x : int,
        x slice (0 <= x <= X-1)
    y : int,
        y slice (0 <= y <= Y-1)
    z : int,
        z slice (0 <= z <= Z-1)

    Notes
    -----
    If x, y, and z are None, the full volume is shown.

    """
    # if x is None and y is None and z is None:
    #    raise ValueError('A slice should be provided')

    r = fvtk.ren()

    for index in ndindex(peaks.shape[:-1]):
        peak = peaks[index]
        directions = peak.reshape(peak.shape[0] // 3, 3)

        #pos = np.array(index)
        for i in range(directions.shape[0]):
            if norm(directions[i]) != 0:
                line_actor = fvtk.line(index +
                                       scale * vstack((-directions[i], directions[i])),
                                       abs(directions[i] / norm(directions[i])))
                line_actor.RotateX(-90)
                fvtk.add(r, line_actor)

    fvtk.show(r)
Example #27
def get_maps(data, mask, args, npeaks=5):
    nufo_map = np.zeros(data.shape[0:3])
    afd_map = np.zeros(data.shape[0:3])
    afd_sum = np.zeros(data.shape[0:3])

    peaks_dirs = np.zeros(list(data.shape[0:3]) + [npeaks, 3])
    order = find_order_from_nb_coeff(data)
    sphere = get_sphere(args.sphere)
    b_matrix = get_b_matrix(order, sphere, args.sh_basis)

    for index in ndindex(data.shape[:-1]):
        if mask[index]:
            if np.isnan(data[index]).any():
                nufo_map[index] = 0
                afd_map[index] = 0
            else:
                maximas, afd, _ = get_maximas(
                    data[index], sphere, b_matrix, args.r_threshold, args.at)
                # sf = np.dot(data[index], B.T)

                n = min(npeaks, maximas.shape[0])
                nufo_map[index] = maximas.shape[0]
                if n == 0:
                    afd_map[index] = 0.0
                    nufo_map[index] = 0.0
                else:
                    afd_map[index] = afd.max()
                    peaks_dirs[index][:n] = maximas[:n]

                    # sum of all coefficients, sqrt(power spectrum)
                    # sum C^2 = sum fODF^2
                    afd_sum[index] = np.sqrt(np.dot(data[index], data[index]))

                    # sum of all peaks contributions to the afd
                    # integral of all the lobes. Numerical sum.
                    # With an infinite number of SH, this should == to afd_sum
                    # sf[np.nonzero(sf < args.at)] = 0.
                    # afd_sum[index] = sf.sum()/n*4*np.pi/B.shape[0]

    return nufo_map, afd_map, afd_sum, peaks_dirs
Example #28
def test_fit_data():
    fdata, fbval, fbvec = dpd.get_data('small_25')
    gtab = grad.gradient_table(fbval, fbvec)
    ni_data = nib.load(fdata)
    data = np.asanyarray(ni_data.dataobj)
    dtmodel = dti.TensorModel(gtab)
    dtfit = dtmodel.fit(data)
    sphere = dpd.get_sphere()
    peak_idx = dti.quantize_evecs(dtfit.evecs, sphere.vertices)
    eu = edx.EuDX(dtfit.fa.astype('f8'), peak_idx,
                  seeds=list(nd.ndindex(data.shape[:-1])),
                  odf_vertices=sphere.vertices, a_low=0)
    tensor_streamlines = [streamline for streamline in eu]
    life_model = life.FiberModel(gtab)
    life_fit = life_model.fit(data, tensor_streamlines)
    model_error = life_fit.predict() - life_fit.data
    model_rmse = np.sqrt(np.mean(model_error ** 2, -1))
    matlab_rmse, matlab_weights = dpd.matlab_life_results()
    # Lower error than the matlab implementation for these data:
    npt.assert_(np.median(model_rmse) < np.median(matlab_rmse))
    # And a moderate correlation with the Matlab implementation weights:
    npt.assert_(np.corrcoef(matlab_weights, life_fit.beta)[0, 1] > 0.6)
Example #29
def test_threshold_tissue_classifier():
    """This tests that the thresholdy tissue classifier returns expected
     tissue types.
    """

    tissue_map = np.random.random((4, 4, 4))

    ttc = ThresholdTissueClassifier(tissue_map.astype('float32'), 0.5)

    # test voxel center
    for ind in ndindex(tissue_map.shape):
        pts = np.array(ind, dtype='float64')
        state = ttc.check_point(pts)
        if tissue_map[ind] > 0.5:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)
        else:
            npt.assert_equal(state, TissueTypes.ENDPOINT)

    # test random points in voxel
    inds = [[0, 1.4, 2.2], [0, 2.3, 2.3], [0, 2.2, 1.3], [0, 0.9, 2.2],
            [0, 2.8, 1.1], [0, 1.1, 3.3], [0, 2.1, 1.9], [0, 3.1, 3.1],
            [0, 0.1, 0.1], [0, 0.9, 0.5], [0, 0.9, 0.5], [0, 2.9, 0.1]]
    for pts in inds:
        pts = np.array(pts, dtype='float64')
        state = ttc.check_point(pts)
        res = scipy.ndimage.map_coordinates(
            tissue_map, np.reshape(pts, (3, 1)), order=1, mode='nearest')
        if res > 0.5:
            npt.assert_equal(state, TissueTypes.TRACKPOINT)
        else:
            npt.assert_equal(state, TissueTypes.ENDPOINT)

    # test outside points
    outside_pts = [[100, 100, 100], [0, -1, 1], [0, 10, 2],
                   [0, 0.5, -0.51], [0, -0.51, 0.1]]
    for pts in outside_pts:
        pts = np.array(pts, dtype='float64')
        state = ttc.check_point(pts)
        npt.assert_equal(state, TissueTypes.OUTSIDEIMAGE)
Example #30
def compute_peaks(odfs,
                  sphere,
                  relative_peak_threshold=.5,
                  peak_normalize=1,
                  min_separation_angle=45,
                  max_peak_number=5):
    if odfs.shape[0] == sphere.vertices.shape[0]:
        odfs = np.array(odfs.reshape(odfs.shape[0], -1).T, order='C')
    num_peak_coeffs = max_peak_number * 3
    peaks = np.zeros(odfs.shape[:-1] + (num_peak_coeffs, ))
    for index in ndindex(odfs.shape[:-1]):
        vox_peaks, values, _ = peak_directions(odfs[index], sphere,
                                               float(relative_peak_threshold),
                                               float(min_separation_angle))
        if peak_normalize == 1:
            values /= values[0]
            vox_peaks = vox_peaks * values[:, None]
        vox_peaks = vox_peaks.ravel()
        m = vox_peaks.shape[0]
        if m > num_peak_coeffs:
            m = num_peak_coeffs
        peaks[index][:m] = vox_peaks[:m]
    return peaks
Example #31
def compAdcAndFa(tensMat, bMaskSource=None):
    """ Calcul de l'ADC et de la FA d'une matrice 3D comprenant des
    tenseurs de diffusion à chaque index.

    Paramètres
    ----------
    tensMat:nparray. Matrice MxNxPx6 comprenant les tenseurs
    bMaskSource: Fichier nifti contenant un masque pour lequel l'ADC et la FA
    seront calculées (généralement un masque de cerveau).

    Retour
    ------
    adcMap: nparray. Matrice MxNxP de l'ADC a chaque voxel
    faMap: nparray. Matrice MxNxP de la FA a chaque voxel"""

    # Initialize the result arrays
    adcMap = np.zeros(tensMat.shape[:3])
    faMap = np.zeros(tensMat.shape[:3])

    # Define the iteration indices according to the brain mask
    if bMaskSource is None:
        itIdx = ndindex(tensMat.shape[:3])
    else:
        bMask = nib.load(bMaskSource)
        bMask = np.asanyarray(bMask.dataobj)
        itIdx = bMask.nonzero()
        itIdx = list(zip(itIdx[0], itIdx[1], itIdx[2]))

    # Compute the FA and ADC maps
    for idx in itIdx:
        dLin = tensMat[idx]
        eigv = compLinDTensorEigv(dLin)
        adcMap[idx] = eigv.sum() / 3
        faMap[idx] = np.sqrt(3 / 2 * ((eigv - eigv.mean()) ** 2).sum() /
                             (eigv ** 2).sum())

    return adcMap.astype('float32'), faMap.astype('float32')
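The two maps follow the standard definitions: with $\lambda_1, \lambda_2, \lambda_3$ the tensor eigenvalues and $\bar\lambda$ their mean, the loop computes

$$\mathrm{ADC} = \frac{\lambda_1 + \lambda_2 + \lambda_3}{3}, \qquad \mathrm{FA} = \sqrt{\frac{3}{2}\,\frac{\sum_i (\lambda_i - \bar\lambda)^2}{\sum_i \lambda_i^2}}.$$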
Example #32
    def predict(self, *args, **kwargs):
        """
        Predict for the multi-voxel object using each single-object's
        prediction API, with S0 provided from an array.
        """
        S0 = kwargs.get('S0', np.ones(self.fit_array.shape))
        idx = ndindex(self.fit_array.shape)
        ijk = next(idx)

        def gimme_S0(S0, ijk):
            if isinstance(S0, np.ndarray):
                return S0[ijk]
            else:
                return S0

        kwargs['S0'] = gimme_S0(S0, ijk)
        # If we have a mask, we might have some Nones up front, skip those:
        while self.fit_array[ijk] is None:
            ijk = next(idx)

        if not hasattr(self.fit_array[ijk], 'predict'):
            msg = "This model does not have prediction implemented yet"
            raise NotImplementedError(msg)

        first_pred = self.fit_array[ijk].predict(*args, **kwargs)
        result = np.zeros(self.fit_array.shape + (first_pred.shape[-1], ))
        result[ijk] = first_pred
        for ijk in idx:
            kwargs['S0'] = gimme_S0(S0, ijk)
            # If it's masked, we predict a 0:
            if self.fit_array[ijk] is None:
                result[ijk] *= 0
            else:
                result[ijk] = self.fit_array[ijk].predict(*args, **kwargs)

        return result
Example #34
def test_fit_data():
    fdata, fbval, fbvec = dpd.get_data('small_25')
    gtab = grad.gradient_table(fbval, fbvec)
    ni_data = nib.load(fdata)
    data = np.asanyarray(ni_data.dataobj)
    dtmodel = dti.TensorModel(gtab)
    dtfit = dtmodel.fit(data)
    sphere = dpd.get_sphere()
    peak_idx = dti.quantize_evecs(dtfit.evecs, sphere.vertices)
    eu = edx.EuDX(dtfit.fa.astype('f8'),
                  peak_idx,
                  seeds=list(nd.ndindex(data.shape[:-1])),
                  odf_vertices=sphere.vertices,
                  a_low=0)
    tensor_streamlines = [streamline for streamline in eu]
    life_model = life.FiberModel(gtab)
    life_fit = life_model.fit(data, tensor_streamlines)
    model_error = life_fit.predict() - life_fit.data
    model_rmse = np.sqrt(np.mean(model_error**2, -1))
    matlab_rmse, matlab_weights = dpd.matlab_life_results()
    # Lower error than the matlab implementation for these data:
    npt.assert_(np.median(model_rmse) < np.median(matlab_rmse))
    # And a moderate correlation with the Matlab implementation weights:
    npt.assert_(np.corrcoef(matlab_weights, life_fit.beta)[0, 1] > 0.68)
Example #35
def wls_fit_tensor(gtab, data, Diso=3e-3, mask=None, min_signal=1.0e-6,
                   piterations=3, mdreg=2.7e-3):
    r""" Computes weighted least squares (WLS) fit to calculate self-diffusion
    tensor using a linear regression model [1]_.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    piterations : int, optional
        Number of iterations used to refine the precision of f. Default is set
        to 3 corresponding to a precision of 0.01.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If standard DTI
        diffusion tensor's mean diffusivity is almost near the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. volume fraction will be set to 1 and tissue's diffusion
        parameters are set to zero). Default mdreg is 2.7e-3 $mm^{2}.s^{-1}$
        (corresponding to 90% of the free water diffusion value).

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters
        in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment.

    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = wls_iter(W, data[v], S0[v], min_signal=min_signal,
                              Diso=Diso, piterations=piterations, mdreg=mdreg)
            fw_params[v] = params

    return fw_params
Example #36
def avs_dki_df(gtab, data, mask=None, min_signal=1.0e-6):
    r""" Computes mean diffusion kurtosis

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.

    Returns
    -------
    params : ndarray ([X, Y, Z, ...], 3)
        All parameters estimated from the direct fit DKI model.
        Parameters are ordered as follows:
            1) Direct Mean Diffusivity measure
            2) Direct Mean Kurtosis measure
            3) Direct S0 estimate

    References
    ----------
    Henriques, R.N., Correia, M.M., Interpreting age-related changes based
    on the mean signal diffusion kurtosis. 25th Annual Meeting of ISMRM,
    Honolulu, April 22-28, 2017
    """
    params = np.zeros(data.shape[:-1] + (3, ))

    bmag = int(np.log10(gtab.bvals.max()))
    b = gtab.bvals.copy() / (10**(bmag - 1))  # normalize b units
    b = b.round() * (10**(bmag - 1))
    uniqueb = np.unique(b)
    nb = len(uniqueb)

    B = np.zeros((nb, 3))
    B[:, 0] = -uniqueb
    B[:, 1] = 1.0 / 6.0 * uniqueb**2
    B[:, 2] = np.ones(nb)

    ng = np.zeros(nb)
    for bi in range(nb):
        ng[bi] = np.sum(b == uniqueb[bi])
    ng = np.sqrt(ng)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    index = ndindex(mask.shape)
    sig = np.zeros(nb)
    for v in index:
        if mask[v]:
            for bi in range(nb):
                sig[bi] = np.mean(data[v][b == uniqueb[bi]])

            # Define weights as diag(sqrt(ng) * yn**2)
            W = np.diag(ng * sig**2)

            BTW = np.dot(B.T, W)
            inv_BT_W_B = np.linalg.pinv(np.dot(BTW, B))
            invBTWB_BTW = np.dot(inv_BT_W_B, BTW)
            p = np.dot(invBTWB_BTW, np.log(sig))
            p[1] = p[1] / (p[0]**2)
            p[2] = np.exp(p[2])
            params[v] = p
    return params
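Inside the mask loop, the fit is the weighted least-squares solution of the log-linearized mean-signal DKI model

$$\ln \bar S(b) = -b\,\mathrm{MD} + \tfrac{1}{6}\, b^{2}\,\mathrm{MD}^{2}\,\mathrm{MK} + \ln S_0,$$

solved as $p = (B^\top W B)^{+} B^\top W \ln \bar S$ with $W = \mathrm{diag}(\sqrt{n_g}\, \bar S^{2})$; the two post-processing lines recover (MD, MK, $S_0$) by dividing $p_1$ by $p_0^{2}$ and exponentiating $p_2$.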
Example #37
def wls_fit_msdki(design_matrix, msignal, ng, mask=None,
                  min_signal=MIN_POSITIVE_SIGNAL, return_S0_hat=False):
    r"""
    Fits the mean signal diffusion kurtosis imaging based on a weighted
    least square solution [1]_.

    Parameters
    ----------
    design_matrix : array (nub, 3)
        Design matrix holding the covariants used to solve for the regression
        coefficients of the mean signal diffusion kurtosis model. Note that
        nub is the number of unique b-values
    msignal : ndarray ([X, Y, Z, ..., nub])
        Mean signal along all gradient directions for each unique b-value
        Note that the last dimension should contain the signal means and nub
        is the number of unique b-values.
    ng : ndarray(nub)
        Number of gradient directions used to compute the mean signal for
        all unique b-values
    mask : array
        A boolean array used to mark the coordinates in the data that
        should be analyzed that has the shape data.shape[:-1]
    min_signal : float, optional
        Voxel with mean signal intensities lower than the min positive signal
        are not processed. Default: 0.0001
    return_S0_hat : bool
        If True, also return S0 values for the fit.

    Returns
    -------
    params : array (..., 2)
        Containing the mean signal diffusivity and mean signal kurtosis.
        If `return_S0_hat` is True, the S0 estimates are returned as well.

    References
    ----------
    .. [1] Henriques, R.N., 2018. Advanced Methods for Diffusion MRI Data
           Analysis and their Application to the Healthy Ageing Brain
           (Doctoral thesis). Downing College, University of Cambridge.
           https://doi.org/10.17863/CAM.29356
    """
    params = np.zeros(msignal.shape[:-1] + (3,))

    # Prepare mask
    if mask is None:
        mask = np.ones(msignal.shape[:-1], dtype=bool)
    else:
        if mask.shape != msignal.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    index = ndindex(mask.shape)
    for v in index:
        # Skip if out of mask
        if not mask[v]:
            continue
        # Skip if no signal is present
        if np.mean(msignal[v]) <= min_signal:
            continue
        # Define weights as diag(ng * yn**2)
        W = np.diag(ng * msignal[v]**2)

        # WLS fitting
        BTW = np.dot(design_matrix.T, W)
        inv_BT_W_B = np.linalg.pinv(np.dot(BTW, design_matrix))
        invBTWB_BTW = np.dot(inv_BT_W_B, BTW)
        p = np.dot(invBTWB_BTW, np.log(msignal[v]))

        # Process parameters
        p[1] = p[1] / (p[0]**2)
        p[2] = np.exp(p[2])
        params[v] = p

    if return_S0_hat:
        return params[..., :2], params[..., 2]
    else:
        return params[..., :2]
Example #38
def peaks_from_model(model,
                     data,
                     sphere,
                     relative_peak_threshold,
                     min_separation_angle,
                     mask=None,
                     return_odf=False,
                     return_sh=True,
                     gfa_thr=0,
                     normalize_peaks=False,
                     sh_order=8,
                     sh_basis_type=None,
                     npeaks=5,
                     B=None,
                     invB=None,
                     parallel=False,
                     nbr_processes=None):
    """Fit the model to data and computes peaks and metrics

    Parameters
    ----------
    model : a model instance
        `model` will be used to fit the data.
    data : ndarray
        Diffusion data used to fit the model.
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    relative_peak_threshold : float
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
    min_separation_angle : float in [0, 90]
        The minimum distance between directions. If two peaks are too close
        only the larger of the two is returned.
    mask : array, optional
        If `mask` is provided, voxels that are False in `mask` are skipped and
        no peaks are returned.
    return_odf : bool
        If True, the odfs are returned.
    return_sh : bool
        If True, the odf as spherical harmonics coefficients is returned
    gfa_thr : float
        Voxels with gfa less than `gfa_thr` are skipped, no peaks are returned.
    normalize_peaks : bool
        If true, all peak values are calculated relative to `max(odf)`.
    sh_order : int, optional
        Maximum SH order in the SH fit.  For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients (default 8).
    sh_basis_type : {None, 'tournier07', 'descoteaux07'}
        ``None`` for the default DIPY basis,
        ``tournier07`` for the Tournier 2007 [2]_ basis, and
        ``descoteaux07`` for the Descoteaux 2007 [1]_ basis
        (``None`` defaults to ``descoteaux07``).
    npeaks : int
        Maximum number of peaks found (default 5 peaks).
    B : ndarray, optional
        Matrix that transforms spherical harmonics to spherical function
        ``sf = np.dot(sh, B)``.
    invB : ndarray, optional
        Inverse of B.
    parallel: bool
        If True, use multiprocessing to compute peaks and metric
        (default False). Temporary files are saved in the default temporary
        directory of the system. It can be changed using ``import tempfile``
        and ``tempfile.tempdir = '/path/to/tempdir'``.
    nbr_processes: int
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).

    Returns
    -------
    pam : PeaksAndMetrics
        An object with ``gfa``, ``peak_directions``, ``peak_values``,
        ``peak_indices``, ``odf``, ``shm_coeffs`` as attributes

    References
    ----------
    .. [1] Descoteaux, M., Angelino, E., Fitzgibbons, S. and Deriche, R.
           Regularized, Fast, and Robust Analytical Q-ball Imaging.
           Magn. Reson. Med. 2007;58:497-510.
    .. [2] Tournier J.D., Calamante F. and Connelly A. Robust determination
           of the fibre orientation distribution in diffusion MRI:
           Non-negativity constrained super-resolved spherical deconvolution.
           NeuroImage. 2007;35(4):1459-1472.

    """
    if return_sh and (B is None or invB is None):
        B, invB = sh_to_sf_matrix(sphere,
                                  sh_order,
                                  sh_basis_type,
                                  return_inv=True)

    if parallel:
        # It is mandatory to provide B and invB to the parallel function.
        # Otherwise, a call to np.linalg.pinv is made in a subprocess and
        # makes it time out on some systems.
        # see https://github.com/dipy/dipy/issues/253 for details
        return _peaks_from_model_parallel(
            model, data, sphere, relative_peak_threshold, min_separation_angle,
            mask, return_odf, return_sh, gfa_thr, normalize_peaks, sh_order,
            sh_basis_type, npeaks, B, invB, nbr_processes)

    shape = data.shape[:-1]
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as data.")

    gfa_array = np.zeros(shape)
    qa_array = np.zeros((shape + (npeaks, )))

    peak_dirs = np.zeros((shape + (npeaks, 3)))
    peak_values = np.zeros((shape + (npeaks, )))
    peak_indices = np.zeros((shape + (npeaks, )), dtype='int')
    peak_indices.fill(-1)

    if return_sh:
        n_shm_coeff = (sh_order + 2) * (sh_order + 1) // 2
        shm_coeff = np.zeros((shape + (n_shm_coeff, )))

    if return_odf:
        odf_array = np.zeros((shape + (len(sphere.vertices), )))

    global_max = -np.inf
    for idx in ndindex(shape):
        if not mask[idx]:
            continue

        odf = model.fit(data[idx]).odf(sphere)

        if return_sh:
            shm_coeff[idx] = np.dot(odf, invB)

        if return_odf:
            odf_array[idx] = odf

        gfa_array[idx] = gfa(odf)
        if gfa_array[idx] < gfa_thr:
            global_max = max(global_max, odf.max())
            continue

        # Get peaks of odf
        direction, pk, ind = peak_directions(odf, sphere,
                                             relative_peak_threshold,
                                             min_separation_angle)

        # Calculate peak metrics
        if pk.shape[0] != 0:
            global_max = max(global_max, pk[0])

            n = min(npeaks, pk.shape[0])
            qa_array[idx][:n] = pk[:n] - odf.min()

            peak_dirs[idx][:n] = direction[:n]
            peak_indices[idx][:n] = ind[:n]
            peak_values[idx][:n] = pk[:n]

            if normalize_peaks:
                peak_values[idx][:n] /= pk[0]
                peak_dirs[idx] *= peak_values[idx][:, None]

    qa_array /= global_max

    return _pam_from_attrs(PeaksAndMetrics, sphere, peak_indices, peak_values,
                           peak_dirs, gfa_array, qa_array,
                           shm_coeff if return_sh else None,
                           B if return_sh else None,
                           odf_array if return_odf else None)
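
# A hedged usage sketch of the function above; `csd_model`, `data` and `mask`
# are assumptions and are not defined in the source.
from dipy.data import get_sphere

sphere = get_sphere('symmetric724')
pam = peaks_from_model(model=csd_model,      # hypothetical model instance
                       data=data,            # hypothetical 4D diffusion data
                       sphere=sphere,
                       relative_peak_threshold=.5,
                       min_separation_angle=25,
                       mask=mask,            # hypothetical boolean mask
                       return_sh=True,
                       normalize_peaks=True)
# pam.peak_dirs has shape data.shape[:-1] + (npeaks, 3)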
Exemplo n.º 39
0
def peaks_from_model(model,
                     data,
                     sphere,
                     relative_peak_threshold,
                     min_separation_angle,
                     mask=None,
                     return_odf=False,
                     return_sh=True,
                     gfa_thr=0,
                     normalize_peaks=False,
                     sh_order=8,
                     sh_basis_type=None,
                     npeaks=5,
                     B=None,
                     invB=None,
                     parallel=False,
                     nbr_processes=None):
    """Fits the model to data and computes peaks and metrics

    Parameters
    ----------
    model : a model instance
        `model` will be used to fit the data.
    data : ndarray
        Diffusion data; the last dimension must contain the signal for each
        acquisition.
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    relative_peak_threshold : float
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
    min_separation_angle : float in [0, 90]
        The minimum distance between directions. If two peaks are too close
        only the larger of the two is returned.
    mask : array, optional
        If `mask` is provided, voxels that are False in `mask` are skipped and
        no peaks are returned.
    return_odf : bool
        If True, the odfs are returned.
    return_sh : bool
        If True, the odf as spherical harmonics coefficients is returned
    gfa_thr : float
        Voxels with gfa less than `gfa_thr` are skipped, no peaks are returned.
    normalize_peaks : bool
        If true, all peak values are calculated relative to `max(odf)`.
    sh_order : int, optional
        Maximum SH order in the SH fit.  For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients (default 8).
    sh_basis_type : {None, 'mrtrix', 'fibernav'}
        ``None`` for the default dipy basis which is the fibernav basis,
        ``mrtrix`` for the MRtrix basis, and
        ``fibernav`` for the FiberNavigator basis
    npeaks : int
        Maximum number of peaks found (default 5 peaks).
    B : ndarray, optional
        Matrix that transforms spherical harmonics to spherical function
        ``sf = np.dot(sh, B)``.
    invB : ndarray, optional
        Inverse of B.
    parallel: bool
        If True, use multiprocessing to compute peaks and metric
        (default False). Temporary files are saved in the default temporary
        directory of the system. It can be changed using ``import tempfile``
        and ``tempfile.tempdir = '/path/to/tempdir'``.
    nbr_processes: int
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).

    Returns
    -------
    pam : PeaksAndMetrics
        An object with ``gfa``, ``peak_directions``, ``peak_values``,
        ``peak_indices``, ``odf``, ``shm_coeffs`` as attributes
    """

    if return_sh and (B is None or invB is None):
        B, invB = sh_to_sf_matrix(sphere,
                                  sh_order,
                                  sh_basis_type,
                                  return_inv=True)

    if parallel:
        # It is mandatory to provide B and invB to the parallel function.
        # Otherwise, a call to np.linalg.pinv is made in a subprocess and
        # makes it time out on some systems.
        # see https://github.com/nipy/dipy/issues/253 for details
        return _peaks_from_model_parallel(
            model, data, sphere, relative_peak_threshold, min_separation_angle,
            mask, return_odf, return_sh, gfa_thr, normalize_peaks, sh_order,
            sh_basis_type, npeaks, B, invB, nbr_processes)

    shape = data.shape[:-1]
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as data.")

    gfa_array = np.zeros(shape)
    qa_array = np.zeros((shape + (npeaks, )))

    peak_dirs = np.zeros((shape + (npeaks, 3)))
    peak_values = np.zeros((shape + (npeaks, )))
    peak_indices = np.zeros((shape + (npeaks, )), dtype='int')
    peak_indices.fill(-1)

    if return_sh:
        n_shm_coeff = (sh_order + 2) * (sh_order + 1) // 2
        shm_coeff = np.zeros((shape + (n_shm_coeff, )))

    if return_odf:
        odf_array = np.zeros((shape + (len(sphere.vertices), )))

    global_max = -np.inf
    for idx in ndindex(shape):
        if not mask[idx]:
            continue

        odf = model.fit(data[idx]).odf(sphere)

        if return_sh:
            shm_coeff[idx] = np.dot(odf, invB)

        if return_odf:
            odf_array[idx] = odf

        gfa_array[idx] = gfa(odf)
        if gfa_array[idx] < gfa_thr:
            global_max = max(global_max, odf.max())
            continue

        # Get peaks of odf
        direction, pk, ind = peak_directions(odf, sphere,
                                             relative_peak_threshold,
                                             min_separation_angle)

        # Calculate peak metrics
        if pk.shape[0] != 0:
            global_max = max(global_max, pk[0])

            n = min(npeaks, pk.shape[0])
            qa_array[idx][:n] = pk[:n] - odf.min()

            peak_dirs[idx][:n] = direction[:n]
            peak_indices[idx][:n] = ind[:n]
            peak_values[idx][:n] = pk[:n]

            if normalize_peaks:
                peak_values[idx][:n] /= pk[0]
                peak_dirs[idx] *= peak_values[idx][:, None]

    qa_array /= global_max

    pam = PeaksAndMetrics()
    pam.sphere = sphere
    pam.peak_dirs = peak_dirs
    pam.peak_values = peak_values
    pam.peak_indices = peak_indices
    pam.gfa = gfa_array
    pam.qa = qa_array

    if return_sh:
        pam.shm_coeff = shm_coeff
        pam.B = B
    else:
        pam.shm_coeff = None
        pam.B = None

    if return_odf:
        pam.odf = odf_array
    else:
        pam.odf = None

    return pam
Exemplo n.º 40
0
def odf_sh_to_sharp(odfs_sh,
                    sphere,
                    basis=None,
                    ratio=3 / 15.,
                    sh_order=8,
                    lambda_=1.,
                    tau=0.1,
                    r2_term=False):
    r""" Sharpen odfs using the sharpening deconvolution transform [2]_

    This function can be used to sharpen any smooth ODF spherical function. In
    theory, this should only be used to sharpen QballModel ODFs, but in
    practice, one can play with the deconvolution ratio and sharpen almost any
    ODF-like spherical function. The constrained-regularization is stable and
    will not only sharpen the ODF peaks but also regularize the noisy peaks.

    Parameters
    ----------
    odfs_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``, )
        array of odfs expressed as spherical harmonics coefficients
    sphere : Sphere
        sphere used to build the regularization matrix
    basis : {None, 'tournier07', 'descoteaux07'}
        different spherical harmonic basis:
        ``None`` for the default DIPY basis,
        ``tournier07`` for the Tournier 2007 [4]_ basis, and
        ``descoteaux07`` for the Descoteaux 2007 [3]_ basis
        (``None`` defaults to ``descoteaux07``).
    ratio : float,
        ratio of the smallest vs the largest eigenvalue of the single prolate
        tensor response function (:math:`\frac{\lambda_2}{\lambda_1}`)
    sh_order : int
        maximal SH order of the SH representation
    lambda_ : float
        lambda parameter (see odfdeconv) (default 1.0)
    tau : float
        tau parameter in the L matrix construction (see odfdeconv)
        (default 0.1)
    r2_term : bool
         True if ODF is computed from model that uses the $r^2$ term in the
         integral.  Recall that Tuch's ODF (used in Q-ball Imaging [1]_) and
         the true normalized ODF definition differ from a $r^2$ term in the ODF
         integral. The original Sharpening Deconvolution Transform (SDT)
         technique [2]_ is expecting Tuch's ODF without the $r^2$ (see [3]_ for
         the mathematical details).  Now, this function supports ODF that have
         been computed using the $r^2$ term because the proper analytical
         response function has be derived.  For example, models such as DSI,
         GQI, SHORE, CSA, Tensor, Multi-tensor ODFs, should now be deconvolved
         with the r2_term=True.

    Returns
    -------
    fodf_sh : ndarray
        sharpened odf expressed as spherical harmonics coefficients

    References
    ----------
    .. [1] Tuch, D. MRM 2004. Q-Ball Imaging.
    .. [2] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions
    .. [3] Descoteaux, M., Angelino, E., Fitzgibbons, S. and Deriche, R.
           Regularized, Fast, and Robust Analytical Q-ball Imaging.
           Magn. Reson. Med. 2007;58:497-510.
    .. [4] Tournier J.D., Calamante F. and Connelly A. Robust determination
           of the fibre orientation distribution in diffusion MRI:
           Non-negativity constrained super-resolved spherical deconvolution.
           NeuroImage. 2007;35(4):1459-1472.

    """
    r, theta, phi = cart2sphere(sphere.x, sphere.y, sphere.z)
    real_sym_sh = sph_harm_lookup[basis]

    B_reg, m, n = real_sym_sh(sh_order, theta, phi)
    R, P = forward_sdt_deconv_mat(ratio, n, r2_term=r2_term)

    # scale lambda to account for differences in the number of
    # SH coefficients and number of mapped directions
    lambda_ = lambda_ * R.shape[0] * R[0, 0] / B_reg.shape[0]

    fodf_sh = np.zeros(odfs_sh.shape)

    for index in ndindex(odfs_sh.shape[:-1]):
        fodf_sh[index], num_it = odf_deconv(odfs_sh[index],
                                            R,
                                            B_reg,
                                            lambda_=lambda_,
                                            tau=tau,
                                            r2_term=r2_term)

    return fodf_sh
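
# A brief, hedged usage sketch: `odfs_sh` (SH coefficients from, e.g., a
# Q-ball-type model fit) and `sphere` are assumed to exist here.
fodf_sh = odf_sh_to_sharp(odfs_sh, sphere, basis=None, ratio=3 / 15.,
                          sh_order=8, lambda_=1., tau=0.1, r2_term=True)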
Exemplo n.º 41
0
def test_ndindex():
    x = list(ndindex(1, 2, 3))
    expected = [ix for ix, e in np.ndenumerate(np.zeros((1, 2, 3)))]
    assert_array_equal(x, expected)
Exemplo n.º 42
0
print('PDF.shape (%d, %d, %d, %d, %d)' % PDF.shape)
"""
PDF.shape ``(96, 96, 17, 17, 17)``

We see that even for a single slice this PDF array is close to 345 MBytes, so
we really have to be careful with memory usage when using this function on a
full dataset.

The simple solution is to generate/analyze the ODFs/PDFs by iterating through
each voxel and not store them in memory if that is not necessary.
"""

from dipy.core.ndindex import ndindex

for index in ndindex(dataslice.shape[:2]):
    pdf = dsmodel.fit(dataslice[index]).pdf()
"""
If you really want to save the PDFs of a full dataset to disk, we recommend
using memory maps (``numpy.memmap``); a minimal sketch is shown below. Keep in
mind that even for a dataset of volume size ``(96, 96, 60)`` you will need
about 2.5 GBytes, though less space is needed when reasonable spheres (with
fewer than 1000 vertices) are used.
"""
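"""
As a hedged sketch (not part of the original tutorial), each per-voxel PDF can
be streamed into a ``numpy.memmap`` instead of being kept in memory; the file
name and the ``(17, 17, 17)`` PDF grid are assumptions taken from the shape
printed earlier.
"""

import numpy as np

pdf_mmap = np.memmap('pdfs.dat', dtype=np.float32, mode='w+',
                     shape=dataslice.shape[:2] + (17, 17, 17))
for index in ndindex(dataslice.shape[:2]):
    pdf_mmap[index] = dsmodel.fit(dataslice[index]).pdf()
pdf_mmap.flush()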

"""
Let's now calculate a map of Generalized Fractional Anisotropy (GFA) [Tuch04]_
using the DSI ODFs.
"""

from dipy.reconst.odf import gfa

GFA = gfa(ODF)
Exemplo n.º 43
0
def wls_fit_tensor(gtab, data, Diso=3e-3, mask=None, min_signal=1.0e-6,
                   piterations=3, mdreg=2.7e-3):
    r""" Computes weighted least squares (WLS) fit to calculate self-diffusion
    tensor using a linear regression model [1]_.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    piterations : int, optional
        Number of iterations used to refine the precision of f. Default is set
        to 3 corresponding to a precision of 0.01.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If the standard DTI
        diffusion tensor's mean diffusivity is close to the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. the volume fraction will be set to 1 and the tissue's
        diffusion parameters are set to zero). Default is 2.7e-3
        $mm^{2}.s^{-1}$ (corresponding to 90% of the free water diffusion
        value).

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model parameters
        in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment.

    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = wls_iter(W, data[v], S0[v], min_signal=min_signal,
                              Diso=Diso, piterations=piterations, mdreg=mdreg)
            fw_params[v] = params

    return fw_params
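
# A hedged usage sketch of the fit above; `gtab`, `data` and `mask` are
# assumptions, not objects defined in the source.
fw_params = wls_fit_tensor(gtab, data, mask=mask)
evals = fw_params[..., :3]     # tissue tensor eigenvalues
f = fw_params[..., 12]         # free water volume fraction map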
Exemplo n.º 44
0
def tensor(evals, evecs, scalar_colors=None,
           sphere=None, scale=2.2, norm=True):
    """Plot many tensors as ellipsoids simultaneously.

    Parameters
    ----------
    evals : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
        eigenvalues
    evecs : (3, 3) or (X, 3, 3) or (X, Y, 3, 3) or (X, Y, Z, 3, 3) ndarray
        eigenvectors
    scalar_colors : (3,) or (X, 3) or (X, Y, 3) or (X, Y, Z, 3) ndarray
        RGB colors used to show the tensors
        Default None, color the ellipsoids using ``color_fa``
    sphere : Sphere,
        this sphere will be transformed to the tensor ellipsoid
        Default is None which uses a symmetric sphere with 724 points.
    scale : float,
        distance between ellipsoids.
    norm : boolean,
        Normalize `evals`.

    Returns
    -------
    actor : vtkActor
        Ellipsoids

    Examples
    --------
    >>> from dipy.viz import fvtk
    >>> r = fvtk.ren()
    >>> evals = np.array([1.4, .35, .35]) * 10 ** (-3)
    >>> evecs = np.eye(3)
    >>> from dipy.data import get_sphere
    >>> sphere = get_sphere('symmetric724')
    >>> fvtk.add(r, fvtk.tensor(evals, evecs, sphere=sphere))
    >>> #fvtk.show(r)

    """

    evals = np.asarray(evals)
    if evals.ndim > 4:
        raise ValueError("Wrong shape")
    evals = _makeNd(evals, 4)
    evecs = _makeNd(evecs, 5)

    grid_shape = np.array(evals.shape[:3])

    if sphere is None:
        from dipy.data import get_sphere
        sphere = get_sphere('symmetric724')
    faces = np.asarray(sphere.faces, dtype=int)
    vertices = sphere.vertices

    colors = vtk.vtkUnsignedCharArray()
    colors.SetNumberOfComponents(3)
    colors.SetName("Colors")

    if scalar_colors is None:
        from dipy.reconst.dti import color_fa, fractional_anisotropy
        cfa = color_fa(fractional_anisotropy(evals), evecs)
    else:
        cfa = _makeNd(scalar_colors, 4)

    list_sq = []
    list_cols = []

    for ijk in ndindex(grid_shape):
        ea = evals[ijk]
        if norm:
            ea /= ea.max()
        ea = np.diag(ea.copy())

        ev = evecs[ijk].copy()
        xyz = np.dot(ev, np.dot(ea, vertices.T))

        xyz += scale * (ijk - grid_shape / 2.)[:, None]

        xyz = xyz.T

        list_sq.append(xyz)

        acolor = np.zeros(xyz.shape)
        acolor[:, :] = np.interp(cfa[ijk], [0, 1], [0, 255])
        list_cols.append(acolor.astype('ubyte'))

    points = vtk.vtkPoints()
    triangles = vtk.vtkCellArray()

    for k in range(len(list_sq)):

        xyz = list_sq[k]

        cols = list_cols[k]

        for i in range(xyz.shape[0]):

            points.InsertNextPoint(*xyz[i])
            colors.InsertNextTuple3(*cols[i])

        for j in range(faces.shape[0]):

            triangle = vtk.vtkTriangle()
            triangle.GetPointIds().SetId(0, faces[j, 0] + k * xyz.shape[0])
            triangle.GetPointIds().SetId(1, faces[j, 1] + k * xyz.shape[0])
            triangle.GetPointIds().SetId(2, faces[j, 2] + k * xyz.shape[0])
            triangles.InsertNextCell(triangle)
            del triangle

    polydata = vtk.vtkPolyData()
    polydata.SetPoints(points)
    polydata.SetPolys(triangles)

    polydata.GetPointData().SetScalars(colors)
    polydata.Modified()

    mapper = vtk.vtkPolyDataMapper()
    if major_version <= 5:
        mapper.SetInput(polydata)
    else:
        mapper.SetInputData(polydata)

    actor = vtk.vtkActor()
    actor.SetMapper(mapper)

    return actor
Exemplo n.º 45
0
def dkimicro_prediction(params, gtab, S0=1):
    r""" Signal prediction given the DKI microstructure model parameters.

    Parameters
    ----------
    params : ndarray (x, y, z, 40) or (n, 40)
        All parameters estimated from the diffusion kurtosis microstructure
        model. Parameters are ordered as follows:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) Fifteen elements of the kurtosis tensor
            4) Six elements of the hindered diffusion tensor
            5) Six elements of the restricted diffusion tensor
            6) Axonal water fraction
    gtab : a GradientTable class instance
        The gradient table for this prediction
    S0 : float or ndarray
        The non diffusion-weighted signal in every voxel, or across all
        voxels. Default: 1

    Returns
    -------
    S : (..., N) ndarray
        Simulated signal based on the DKI microstructure model

    Notes
    -----
    1) The predicted signal is given by:
    $S(\theta, b) = S_0 * [f * e^{-b ADC_{r}} + (1-f) * e^{-b ADC_{h}}]$, where
    $ADC_{r}$ and $ADC_{h}$ are the apparent diffusion coefficients of the
    restricted and hindered diffusion compartments for a given direction
    $\theta$, $b$ is the b-value provided in the GradientTable input for that
    direction, and $f$ is the volume fraction of the restricted diffusion
    compartment (also known as the axonal water fraction).

    2) In the original article of the DKI microstructural model [1]_, the
    hindered and restricted tensors were defined as the intra-cellular and
    extra-cellular diffusion compartments respectively.
    """

    # Initialize pred_sig
    pred_sig = np.zeros(params.shape[:-1] + (gtab.bvals.shape[0], ))

    # Define dti design matrix and region to process
    D = dti_design_matrix(gtab)
    evals = params[..., :3]
    mask = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2])

    # Prepare parameters
    f = params[..., 27]
    adce = params[..., 28:34]
    adci = params[..., 34:40]

    if isinstance(S0, np.ndarray):
        S0_vol = S0 * np.ones(params.shape[:-1])
    else:
        S0_vol = S0

    # Process pred_sig for all data voxels
    index = ndindex(evals.shape[:-1])
    for v in index:
        if mask[v]:
            pred_sig[v] = (1. - f[v]) * np.exp(np.dot(D[:, :6], adce[v])) + \
                f[v] * np.exp(np.dot(D[:, :6], adci[v]))

    return pred_sig * S0_vol
Exemplo n.º 46
0
def fwdti_prediction(params, gtab, S0=1, Diso=3.0e-3):
    r""" Signal prediction given the free water DTI model parameters.

    Parameters
    ----------
    params : (..., 13) ndarray
        Model parameters. The last dimension should have the 12 tensor
        parameters (3 eigenvalues, followed by the 3 corresponding
        eigenvectors) and the volume fraction of the free water compartment.
    gtab : a GradientTable class instance
        The gradient table for this prediction
    S0 : float or ndarray
        The non diffusion-weighted signal in every voxel, or across all
        voxels. Default: 1
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.

    Returns
    --------
    S : (..., N) ndarray
        Simulated signal based on the free water DTI model

    Notes
    -----
    The predicted signal is given by:
    $S(\theta, b) = S_0 * [(1-f) * e^{-b ADC} + f * e^{-b D_{iso}}]$, where
    $ADC = \theta Q \theta^T$, $\theta$ is a unit vector pointing at any
    direction on the sphere for which a signal is to be predicted, $b$ is the
    b-value provided in the GradientTable input for that direction, $Q$ is the
    quadratic form of the tensor determined by the input parameters, $f$ is
    the free water diffusion compartment, and $D_{iso}$ is the free water
    diffusivity, which is equal to $3 * 10^{-3} mm^{2}s^{-1}$ [1]_.

    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    evals = params[..., :3]
    evecs = params[..., 3:-1].reshape(params.shape[:-1] + (3, 3))
    f = params[..., 12]
    qform = vec_val_vect(evecs, evals)
    lower_dt = lower_triangular(qform, S0)
    lower_diso = lower_dt.copy()
    lower_diso[..., 0] = lower_diso[..., 2] = lower_diso[..., 5] = Diso
    lower_diso[..., 1] = lower_diso[..., 3] = lower_diso[..., 4] = 0
    D = design_matrix(gtab)

    pred_sig = np.zeros(f.shape + (gtab.bvals.shape[0], ))
    mask = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2])
    index = ndindex(f.shape)
    for v in index:
        if mask[v]:
            pred_sig[v] = (1 - f[v]) * np.exp(np.dot(lower_dt[v], D.T)) + \
                          f[v] * np.exp(np.dot(lower_diso[v], D.T))

    return pred_sig
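
# To make the Notes formula concrete, a minimal numpy sketch of the
# two-compartment prediction for a single direction; all values are invented.
import numpy as np

b = 1000.0       # s/mm^2, hypothetical b-value
adc = 0.7e-3     # mm^2/s, tissue ADC along the direction theta
f = 0.3          # free water volume fraction
Diso = 3.0e-3    # mm^2/s, free water diffusivity
S0 = 1.0

S = S0 * ((1 - f) * np.exp(-b * adc) + f * np.exp(-b * Diso))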
Exemplo n.º 47
0
def test_parzen_densities():
    # Test the computation of the joint intensity distribution
    # using a dense and a sparse set of values
    seed = 1246592
    nbins = 32
    nr = 30
    nc = 35
    ns = 20
    nvals = 50

    for dim in [2, 3]:
        if dim == 2:
            shape = (nr, nc)
            static, moving = create_random_image_pair(shape, nvals, seed)
        else:
            shape = (ns, nr, nc)
            static, moving = create_random_image_pair(shape, nvals, seed)

        # Initialize
        parzen_hist = ParzenJointHistogram(nbins)
        parzen_hist.setup(static, moving)
        # Get distributions computed by dense sampling
        parzen_hist.update_pdfs_dense(static, moving)
        actual_joint_dense = parzen_hist.joint
        actual_mmarginal_dense = parzen_hist.mmarginal
        actual_smarginal_dense = parzen_hist.smarginal

        # Get distributions computed by sparse sampling
        sval = static.reshape(-1)
        mval = moving.reshape(-1)
        parzen_hist.update_pdfs_sparse(sval, mval)
        actual_joint_sparse = parzen_hist.joint
        actual_mmarginal_sparse = parzen_hist.mmarginal
        actual_smarginal_sparse = parzen_hist.smarginal

        # Compute the expected joint distribution with dense sampling
        expected_joint_dense = np.zeros(shape=(nbins, nbins))
        for index in ndindex(shape):
            sv = parzen_hist.bin_normalize_static(static[index])
            mv = parzen_hist.bin_normalize_moving(moving[index])
            sbin = parzen_hist.bin_index(sv)
            # The spline is centered at mv and is evaluated for the whole row
            spline_arg = np.array([i - mv for i in range(nbins)])
            contribution = cubic_spline(spline_arg)
            expected_joint_dense[sbin, :] += contribution

        # Compute the expected joint distribution with sparse sampling
        expected_joint_sparse = np.zeros(shape=(nbins, nbins))
        for index in range(sval.shape[0]):
            sv = parzen_hist.bin_normalize_static(sval[index])
            mv = parzen_hist.bin_normalize_moving(mval[index])
            sbin = parzen_hist.bin_index(sv)
            # The spline is centered at mv and is evaluated for the whole row
            spline_arg = np.array([i - mv for i in range(nbins)])
            contribution = cubic_spline(spline_arg)
            expected_joint_sparse[sbin, :] += contribution

        # Verify joint distributions
        expected_joint_dense /= expected_joint_dense.sum()
        expected_joint_sparse /= expected_joint_sparse.sum()
        assert_array_almost_equal(actual_joint_dense, expected_joint_dense)
        assert_array_almost_equal(actual_joint_sparse, expected_joint_sparse)

        # Verify moving marginals
        expected_mmarginal_dense = expected_joint_dense.sum(0)
        expected_mmarginal_dense /= expected_mmarginal_dense.sum()
        expected_mmarginal_sparse = expected_joint_sparse.sum(0)
        expected_mmarginal_sparse /= expected_mmarginal_sparse.sum()
        assert_array_almost_equal(actual_mmarginal_dense,
                                  expected_mmarginal_dense)
        assert_array_almost_equal(actual_mmarginal_sparse,
                                  expected_mmarginal_sparse)

        # Verify static marginals
        expected_smarginal_dense = expected_joint_dense.sum(1)
        expected_smarginal_dense /= expected_smarginal_dense.sum()
        expected_smarginal_sparse = expected_joint_sparse.sum(1)
        expected_smarginal_sparse /= expected_smarginal_sparse.sum()
        assert_array_almost_equal(actual_smarginal_dense,
                                  expected_smarginal_dense)
        assert_array_almost_equal(actual_smarginal_sparse,
                                  expected_smarginal_sparse)
Exemplo n.º 48
0
def diffusion_components(dki_params,
                         sphere='repulsion100',
                         awf=None,
                         mask=None):
    """ Extracts the restricted and hindered diffusion tensors of well aligned
    fibers from diffusion kurtosis imaging parameters [1]_.

    Parameters
    ----------
    dki_params : ndarray (x, y, z, 27) or (n, 27)
        All parameters estimated from the diffusion kurtosis model.
        Parameters are ordered as follows:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the first,
               second and third coordinates of the eigenvector
            3) Fifteen elements of the kurtosis tensor
    sphere : Sphere class instance, optional
        The sphere providing sample directions to sample the restricted and
        hindered cellular diffusion tensors. For more details see Fieremans
        et al., 2011.
    awf : ndarray (optional)
        Array containing values of the axonal water fraction that has the shape
        dki_params.shape[:-1]. If not given this will be automatically computed
        using :func:`axonal_water_fraction` with the function's default
        precision.
    mask : ndarray (optional)
        A boolean array used to mark the coordinates in the data that should be
        analyzed that has the shape dki_params.shape[:-1]

    Returns
    -------
    edt : ndarray (x, y, z, 6) or (n, 6)
        Parameters of the hindered diffusion tensor.
    idt : ndarray (x, y, z, 6) or (n, 6)
        Parameters of the restricted diffusion tensor.

    Notes
    -----
    In the original article of the DKI microstructural model [1]_, the hindered
    and restricted tensors were defined as the intra-cellular and extra-cellular
    diffusion compartments respectively.

    References
    ----------
    .. [1] Fieremans E, Jensen JH, Helpern JA, 2011. White matter
           characterization with diffusional kurtosis imaging.
           Neuroimage 58(1):177-88. doi: 10.1016/j.neuroimage.2011.06.006
    """
    shape = dki_params.shape[:-1]

    # load gradient directions
    if not isinstance(sphere, dps.Sphere):
        sphere = get_sphere(sphere)

    # select voxels where to apply the single fiber model
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as dki_params.")
        else:
            mask = np.array(mask, dtype=bool, copy=False)

    # check or compute awf values
    if awf is None:
        awf = axonal_water_fraction(dki_params, sphere=sphere, mask=mask)
    else:
        if awf.shape != shape:
            raise ValueError("awf array is not the same shape as dki_params.")

    # Initialize hindered and restricted diffusion tensors
    edt_all = np.zeros(shape + (6, ))
    idt_all = np.zeros(shape + (6, ))

    # Generate matrix that converts apparent diffusion coefficients to tensors
    B = np.zeros((sphere.x.size, 6))
    B[:, 0] = sphere.x * sphere.x  # Bxx
    B[:, 1] = sphere.x * sphere.y * 2.  # Bxy
    B[:, 2] = sphere.y * sphere.y  # Byy
    B[:, 3] = sphere.x * sphere.z * 2.  # Bxz
    B[:, 4] = sphere.y * sphere.z * 2.  # Byz
    B[:, 5] = sphere.z * sphere.z  # Bzz
    pinvB = np.linalg.pinv(B)

    # Compute hindered and restricted diffusion tensors for all voxels
    evals, evecs, kt = split_dki_param(dki_params)
    dt = lower_triangular(vec_val_vect(evecs, evals))
    md = mean_diffusivity(evals)

    index = ndindex(mask.shape)
    for idx in index:
        if not mask[idx]:
            continue
        # sample apparent diffusion and kurtosis values
        di = directional_diffusion(dt[idx], sphere.vertices)
        ki = directional_kurtosis(dt[idx],
                                  md[idx],
                                  kt[idx],
                                  sphere.vertices,
                                  adc=di,
                                  min_kurtosis=0)
        edi = di * (1 + np.sqrt(ki * awf[idx] / (3.0 - 3.0 * awf[idx])))
        edt = np.dot(pinvB, edi)
        edt_all[idx] = edt

        # We only move on if there is an axonal water fraction.
        # Otherwise, remaining params are already zero, so move on
        if awf[idx] == 0:
            continue
        # Convert apparent diffusion and kurtosis values to apparent diffusion
        # values of the hindered and restricted diffusion
        idi = di * (1 - np.sqrt(ki * (1.0 - awf[idx]) / (3.0 * awf[idx])))
        # generate hindered and restricted diffusion tensors
        idt = np.dot(pinvB, idi)
        idt_all[idx] = idt

    return edt_all, idt_all
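
# The conversion matrix B built above maps the six lower-triangular tensor
# elements to apparent diffusion coefficients along the sphere directions. As
# a hedged sanity-check sketch (toy tensor values invented here), a known
# tensor can be projected and recovered with the pseudoinverse:
import numpy as np
from dipy.data import get_sphere

sphere = get_sphere('repulsion100')
B = np.zeros((sphere.x.size, 6))
B[:, 0] = sphere.x * sphere.x       # Bxx
B[:, 1] = sphere.x * sphere.y * 2.  # Bxy
B[:, 2] = sphere.y * sphere.y       # Byy
B[:, 3] = sphere.x * sphere.z * 2.  # Bxz
B[:, 4] = sphere.y * sphere.z * 2.  # Byz
B[:, 5] = sphere.z * sphere.z       # Bzz

# Lower-triangular elements [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz] of a toy tensor
dt = np.array([1.4e-3, 0., 0.35e-3, 0., 0., 0.35e-3])
adc = B.dot(dt)                        # directional diffusivities
dt_recovered = np.linalg.pinv(B).dot(adc)
assert np.allclose(dt, dt_recovered)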
Exemplo n.º 49
0
def nls_fit_tensor(gtab, data, mask=None, Diso=3e-3, mdreg=2.7e-3,
                   min_signal=1.0e-6, f_transform=True, cholesky=False,
                   jac=True):
    """
    Fit the water elimination tensor model using non-linear least-squares.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If the standard DTI
        diffusion tensor's mean diffusivity is close to the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. the volume fraction will be set to 1 and the tissue's
        diffusion parameters are set to zero). Default is 2.7e-3
        $mm^{2}.s^{-1}$ (corresponding to 90% of the free water diffusion
        value).
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    f_transform : bool, optional
        If true, the water volume fraction is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between
        0 and 1.
        Default: True
    cholesky : bool, optional
        If true, Cholesky decomposition is used to ensure that the diffusion
        tensor is positive definite.
        Default: False
    jac : bool
        Use the Jacobian? Default: True

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model
        parameters in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment
    """
    # Analyse compatible input cases
    if jac is True and cholesky is True:
        raise ValueError("Cholesky decomposition is not compatible with jac.")

    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter(W, data[v], S0[v],
                              Diso=Diso, mdreg=mdreg,
                              min_signal=min_signal,
                              f_transform=f_transform, cholesky=cholesky,
                              jac=jac)
            fw_params[v] = params

    return fw_params
Exemplo n.º 50
0
def awf_from_msk(msk, mask=None):
    """
    Computes the axonal water fraction from the mean signal kurtosis
    assuming the 2-compartmental spherical mean technique model [1]_, [2]_

    Parameters
    ----------
    msk : ndarray ([X, Y, Z, ...])
        Mean signal kurtosis (msk)
    mask : ndarray, optional
        A boolean array used to mark the coordinates in the data that should be
        analyzed that has the same shape as the msdki parameters

    Returns
    -------
    smt2f : ndarray ([X, Y, Z, ...])
        ndarray containing the axonal volume fraction estimate.

    Notes
    -----
    Computes the axonal water fraction from the mean signal kurtosis
    MSK using equation 17 of [1]_

    References
    ----------
    .. [1] Neto Henriques R, Jespersen SN, Shemesh N (2019). Microscopic
           anisotropy misestimation in spherical‐mean single diffusion
           encoding MRI. Magnetic Resonance in Medicine (In press).
           doi: 10.1002/mrm.27606
    .. [2] Kaden E, Kelm ND, Carson RP, et al. (2016) Multi‐compartment
           microscopic diffusion imaging. Neuroimage 139:346–359.
    """
    awf = np.zeros(msk.shape)

    # Prepare mask
    if mask is None:
        mask = np.ones(msk.shape, dtype=bool)
    else:
        if mask.shape != msk.shape:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # looping voxels
    index = ndindex(mask.shape)
    for v in index:
        # Skip if out of mask
        if not mask[v]:
            continue

        if msk[v] > 2.4:
            awf[v] = 1
        elif msk[v] < 0:
            awf[v] = 0
        else:
            if np.isnan(msk[v]):
                awf[v] = np.nan
            else:
                mski = msk[v]
                fini = mski / 2.4  # Initial guess based on linear assumption
                awf[v] = opt.fsolve(_msk_from_awf_error, fini, args=(mski,),
                                    fprime=_diff_msk_from_awf, col_deriv=True)

    return awf
Exemplo n.º 51
0
def nls_fit_tensor_bounds(gtab, data, mask=None, Diso=3e-3, mdreg=2.7e-3,
                          min_signal=1.0e-6, bounds=None, jac=True):
    """
    Fit the water elimination tensor model using non-linear least-squares
    with constraints.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If the standard DTI
        diffusion tensor's mean diffusivity is close to the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. the volume fraction will be set to 1 and the tissue's
        diffusion parameters are set to zero). Default is 2.7e-3
        $mm^{2}.s^{-1}$ (corresponding to 90% of the free water diffusion
        value).
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    bounds : 2-tuple of arrays with 14 elements, optional
        Lower and upper bounds on fwdti model variables and the log of
        non-diffusion signal S0. Use np.inf with an appropriate sign to
        disable bounds on all or some variables. When bounds is set to None
        the following default variable bounds are used:
            ([0., -Diso, 0., -Diso, -Diso, 0., 0., np.exp(-10.)],
             [Diso, Diso, Diso, Diso, Diso, Diso, 1., np.exp(10.)])
    jac : bool
        Use the Jacobian? Default: True

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing in the last dimension the free water model
        parameters in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter_bounds(W, data[v], S0[v], Diso=Diso, mdreg=mdreg,
                                     min_signal=min_signal, bounds=bounds,
                                     jac=jac)
            fw_params[v] = params

    return fw_params
Exemplo n.º 52
0
def wls_fit_msdki(design_matrix, msignal, ng, mask=None,
                  min_signal=MIN_POSITIVE_SIGNAL, return_S0_hat=False):
    r"""
    Fits the mean signal diffusion kurtosis imaging model using a weighted
    least squares solution [1]_.

    Parameters
    ----------
    design_matrix : array (nub, 3)
        Design matrix holding the covariants used to solve for the regression
        coefficients of the mean signal diffusion kurtosis model. Note that
        nub is the number of unique b-values
    msignal : ndarray ([X, Y, Z, ..., nub])
        Mean signal along all gradient directions for each unique b-value.
        Note that the last dimension should contain the signal means and nub
        is the number of unique b-values.
    ng : ndarray(nub)
        Number of gradient directions used to compute the mean signal for
        all unique b-values
    mask : array
        A boolean array used to mark the coordinates in the data that
        should be analyzed that has the shape data.shape[:-1]
    min_signal : float, optional
        Voxels with mean signal intensities lower than the minimum positive
        signal are not processed. Default: 0.0001
    return_S0_hat : bool
        If True, also return S0 values for the fit.

    Returns
    -------
    params : array (..., 2)
        Containing the mean signal diffusivity and mean signal kurtosis

    References
    ----------
    .. [1] Henriques, R.N., 2018. Advanced Methods for Diffusion MRI Data
           Analysis and their Application to the Healthy Ageing Brain
           (Doctoral thesis). Downing College, University of Cambridge.
           https://doi.org/10.17863/CAM.29356
    """
    params = np.zeros(msignal.shape[:-1] + (3,))

    # Prepare mask
    if mask is None:
        mask = np.ones(msignal.shape[:-1], dtype=bool)
    else:
        if mask.shape != msignal.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    index = ndindex(mask.shape)
    for v in index:
        # Skip if out of mask
        if not mask[v]:
            continue
        # Skip if no signal is present
        if np.mean(msignal[v]) <= min_signal:
            continue
        # Define weights as diag(ng * yn**2)
        W = np.diag(ng * msignal[v]**2)

        # WLS fitting
        BTW = np.dot(design_matrix.T, W)
        inv_BT_W_B = np.linalg.pinv(np.dot(BTW, design_matrix))
        invBTWB_BTW = np.dot(inv_BT_W_B, BTW)
        p = np.dot(invBTWB_BTW, np.log(msignal[v]))

        # Process parameters
        p[1] = p[1] / (p[0]**2)
        p[2] = np.exp(p[2])
        params[v] = p

    if return_S0_hat:
        return params[..., :2], params[..., 2]
    else:
        return params[..., :2]
Exemplo n.º 53
0
def old_squash(arr, mask=None, fill=0):
    """Try and make a standard array from an object array

    This function takes an object array and attempts to convert it to a more
    useful dtype. If array can be converted to a better dtype, Nones are
    replaced by `fill`. To make the behaviour of this function more clear, here
    are the most common cases:

    1.  `arr` is an array of scalars of type `T`. Returns an array like
        `arr.astype(T)`
    2.  `arr` is an array of arrays. All items in `arr` have the same shape
        `S`. Returns an array with shape `arr.shape + S`.
    3.  `arr` is an array of arrays of different shapes. Returns `arr`.
    4.  Items in `arr` are not ndarrays or scalars. Returns `arr`.

    Parameters
    ----------
    arr : array, dtype=object
        The array to be converted.
    mask : array, dtype=bool, optional
        Where arr has Nones.
    fill : number, optional
        Nones are replaced by fill.

    Returns
    -------
    result : array

    Examples
    --------
    >>> arr = np.empty(3, dtype=object)
    >>> arr.fill(2)
    >>> old_squash(arr)
    array([2, 2, 2])
    >>> arr[0] = None
    >>> old_squash(arr)
    array([0, 2, 2])
    >>> arr.fill(np.ones(2))
    >>> r = old_squash(arr)
    >>> r.shape == (3, 2)
    True
    >>> r.dtype
    dtype('float64')
    """
    if mask is None:
        mask = np.vectorize(lambda x: x is not None)(arr)
    not_none = arr[mask]
    # all None, just return arr
    if not_none.size == 0:
        return arr
    first = not_none[0]
    # If the first item is an ndarray
    if type(first) is np.ndarray:
        shape = first.shape
        try:
            # Check the shapes of all items
            all_same_shape = all(item.shape == shape for item in not_none)
        except AttributeError:
            return arr
        # If items have different shapes just return arr
        if not all_same_shape:
            return arr
        # Find common dtype.  np.result_type can do this more simply, but it is
        # only available for numpy 1.6.0
        dtypes = set(a.dtype for a in not_none)
        tiny_arrs = [np.zeros((1, ), dtype=dt) for dt in dtypes]
        dtype = reduce(np.add, tiny_arrs).dtype
        # Create output array and fill
        result = np.empty(arr.shape + shape, dtype=dtype)
        result.fill(fill)
        for ijk in ndindex(arr.shape):
            if mask[ijk]:
                result[ijk] = arr[ijk]
        return result

    # If the first item is a scalar
    elif np.isscalar(first):
        # first is a scalar, not an ndarray
        all_scalars = all(np.isscalar(item) for item in not_none)
        if not all_scalars:
            return arr
        # See comment about np.result_type above. We sum against the smallest
        # possible type, bool, and let numpy type promotion find the best
        # common type. The values might all be Python scalars so we need to
        # cast to numpy type at the end to be sure of having a dtype.
        dtype = np.asarray(sum(not_none, False)).dtype
        temp = arr.copy()
        temp[~mask] = fill
        return temp.astype(dtype)
    else:
        return arr
Exemplo n.º 54
0
def fwdti_prediction(params, gtab, S0=1, Diso=3.0e-3):
    r""" Signal prediction given the free water DTI model parameters.

    Parameters
    ----------
    params : (..., 13) ndarray
        Model parameters. The last dimension should have the 12 tensor
        parameters (3 eigenvalues, followed by the 3 corresponding
        eigenvectors) and the volume fraction of the free water compartment.
    gtab : a GradientTable class instance
        The gradient table for this prediction
    S0 : float or ndarray
        The non diffusion-weighted signal in every voxel, or across all
        voxels. Default: 1
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.

    Returns
    -------
    S : (..., N) ndarray
        Simulated signal based on the free water DTI model

    Notes
    -----
    The predicted signal is given by:
    $S(\theta, b) = S_0 * [(1-f) * e^{-b ADC} + f * e^{-b D_{iso}}]$, where
    $ADC = \theta Q \theta^T$, $\theta$ is a unit vector pointing at any
    direction on the sphere for which a signal is to be predicted, $b$ is the
    b-value provided in the GradientTable input for that direction, $Q$ is the
    quadratic form of the tensor determined by the input parameters, $f$ is
    the free water diffusion compartment, and $D_{iso}$ is the free water
    diffusivity, which is equal to $3 * 10^{-3} mm^{2}s^{-1}$ [1]_.

    References
    ----------
    .. [1] Hoy, A.R., Koay, C.G., Kecskemeti, S.R., Alexander, A.L., 2014.
           Optimization of a free water elimination two-compartmental model
           for diffusion tensor imaging. NeuroImage 103, 323-333.
           doi: 10.1016/j.neuroimage.2014.09.053
    """
    evals = params[..., :3]
    evecs = params[..., 3:-1].reshape(params.shape[:-1] + (3, 3))
    f = params[..., 12]
    qform = vec_val_vect(evecs, evals)
    lower_dt = lower_triangular(qform, S0)
    lower_diso = lower_dt.copy()
    lower_diso[..., 0] = lower_diso[..., 2] = lower_diso[..., 5] = Diso
    lower_diso[..., 1] = lower_diso[..., 3] = lower_diso[..., 4] = 0
    D = design_matrix(gtab)

    pred_sig = np.zeros(f.shape + (gtab.bvals.shape[0],))
    mask = _positive_evals(evals[..., 0], evals[..., 1], evals[..., 2])
    index = ndindex(f.shape)
    for v in index:
        if mask[v]:
            pred_sig[v] = (1 - f[v]) * np.exp(np.dot(lower_dt[v], D.T)) + \
                          f[v] * np.exp(np.dot(lower_diso[v], D.T))

    return pred_sig
Exemplo n.º 55
0
def test_mattes_densities():
    # Test the computation of the joint intensity distribution
    # using a dense and a sparse set of values
    seed = 1246592
    nbins = 32
    nr = 30
    nc = 35
    ns = 20
    nvals = 50

    for dim in [2, 3]:
        if dim == 2:
            shape = (nr, nc)
            static, moving = create_random_image_pair(shape, nvals, seed)
        else:
            shape = (ns, nr, nc)
            static, moving = create_random_image_pair(shape, nvals, seed)

        # Initialize
        mbase = MattesBase(nbins)
        mbase.setup(static, moving)
        # Get distributions computed by dense sampling
        mbase.update_pdfs_dense(static, moving)
        actual_joint_dense = mbase.joint
        actual_mmarginal_dense = mbase.mmarginal
        actual_smarginal_dense = mbase.smarginal

        # Get distributions computed by sparse sampling
        sval = static.reshape(-1)
        mval = moving.reshape(-1)
        mbase.update_pdfs_sparse(sval, mval)
        actual_joint_sparse = mbase.joint
        actual_mmarginal_sparse = mbase.mmarginal
        actual_smarginal_sparse = mbase.smarginal

        # Compute the expected joint distribution with dense sampling
        expected_joint_dense = np.zeros(shape=(nbins, nbins))
        for index in ndindex(shape):
            sv = mbase.bin_normalize_static(static[index])
            mv = mbase.bin_normalize_moving(moving[index])
            sbin = mbase.bin_index(sv)
            # The spline is centered at mv and is evaluated for the whole row
            spline_arg = np.array([i - mv for i in range(nbins)])
            contribution = cubic_spline(spline_arg)
            expected_joint_dense[sbin, :] += contribution

        # Compute the expected joint distribution with sparse sampling
        expected_joint_sparse = np.zeros(shape=(nbins, nbins))
        for index in range(sval.shape[0]):
            sv = mbase.bin_normalize_static(sval[index])
            mv = mbase.bin_normalize_moving(mval[index])
            sbin = mbase.bin_index(sv)
            # The spline is centered at mv and is evaluated for the whole row
            spline_arg = np.array([i - mv for i in range(nbins)])
            contribution = cubic_spline(spline_arg)
            expected_joint_sparse[sbin, :] += contribution

        # Verify joint distributions
        expected_joint_dense /= expected_joint_dense.sum()
        expected_joint_sparse /= expected_joint_sparse.sum()
        assert_array_almost_equal(actual_joint_dense, expected_joint_dense)
        assert_array_almost_equal(actual_joint_sparse, expected_joint_sparse)

        # Verify moving marginals
        expected_mmarginal_dense = expected_joint_dense.sum(0)
        expected_mmarginal_dense /= expected_mmarginal_dense.sum()
        expected_mmarginal_sparse = expected_joint_sparse.sum(0)
        expected_mmarginal_sparse /= expected_mmarginal_sparse.sum()
        assert_array_almost_equal(actual_mmarginal_dense,
                                  expected_mmarginal_dense)
        assert_array_almost_equal(actual_mmarginal_sparse,
                                  expected_mmarginal_sparse)

        # Verify static marginals
        expected_smarginal_dense = expected_joint_dense.sum(1)
        expected_smarginal_dense /= expected_smarginal_dense.sum()
        expected_smarginal_sparse = expected_joint_sparse.sum(1)
        expected_smarginal_sparse /= expected_smarginal_sparse.sum()
        assert_array_almost_equal(actual_smarginal_dense,
                                  expected_smarginal_dense)
        assert_array_almost_equal(actual_smarginal_sparse,
                                  expected_smarginal_sparse)
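Both expected distributions above are built the same way: a sample does not
increment a single histogram cell; instead it spreads a cubic B-spline kernel
along the moving-intensity axis. A self-contained sketch of that accumulation
follows; cubic_spline here is the standard cubic B-spline with support
[-2, 2], which is assumed to match the kernel used in the test.

import numpy as np

def cubic_spline(x):
    """Standard cubic B-spline kernel, nonzero on (-2, 2)."""
    ax = np.abs(np.asarray(x, dtype=np.float64))
    out = np.zeros_like(ax)
    inner = ax < 1
    outer = (ax >= 1) & (ax < 2)
    out[inner] = (4 - 6 * ax[inner] ** 2 + 3 * ax[inner] ** 3) / 6.0
    out[outer] = (2 - ax[outer]) ** 3 / 6.0
    return out

def parzen_joint_hist(sbins, mvals, nbins):
    """Accumulate spline contributions into a joint histogram (sketch)."""
    joint = np.zeros((nbins, nbins))
    bins = np.arange(nbins)
    for sbin, mv in zip(sbins, mvals):
        # Each sample contributes a full spline-weighted row
        joint[sbin, :] += cubic_spline(bins - mv)
    return joint / joint.sum()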
Exemplo n.º 56
0
def old_squash(arr, mask=None, fill=0):
    """Try and make a standard array from an object array

    This function takes an object array and attempts to convert it to a more
    useful dtype. If the array can be converted to a better dtype, Nones are
    replaced by `fill`. To make the behaviour of this function clearer, here
    are the most common cases:

    1.  `arr` is an array of scalars of type `T`. Returns an array like
        `arr.astype(T)`
    2.  `arr` is an array of arrays. All items in `arr` have the same shape
        `S`. Returns an array with shape `arr.shape + S`.
    3.  `arr` is an array of arrays of different shapes. Returns `arr`.
    4.  Items in `arr` are not ndarrays or scalars. Returns `arr`.

    Parameters
    ----------
    arr : array, dtype=object
        The array to be converted.
    mask : array, dtype=bool, optional
        Where arr has Nones.
    fill : number, optional
        Nones are replaced by fill.

    Returns
    -------
    result : array

    Examples
    --------
    >>> arr = np.empty(3, dtype=object)
    >>> arr.fill(2)
    >>> old_squash(arr)
    array([2, 2, 2])
    >>> arr[0] = None
    >>> old_squash(arr)
    array([0, 2, 2])
    >>> arr.fill(np.ones(2))
    >>> r = old_squash(arr)
    >>> r.shape == (3, 2)
    True
    >>> r.dtype
    dtype('float64')

    """
    if mask is None:
        mask = arr != np.array(None)
    not_none = arr[mask]
    # all None, just return arr
    if not_none.size == 0:
        return arr
    first = not_none[0]
    # If the first item is an ndarray
    if type(first) is np.ndarray:
        shape = first.shape
        try:
            # Check the shapes of all items
            all_same_shape = all(item.shape == shape for item in not_none)
        except AttributeError:
            return arr
        # If items have different shapes just return arr
        if not all_same_shape:
            return arr
        # Find a common dtype.  np.result_type can do this more simply, but it
        # is only available from numpy 1.6.0 onwards
        dtypes = set(a.dtype for a in not_none)
        tiny_arrs = [np.zeros((1,), dtype=dt) for dt in dtypes]
        dtype = reduce(np.add, tiny_arrs).dtype
        # Create output array and fill
        result = np.empty(arr.shape + shape, dtype=dtype)
        result.fill(fill)
        for ijk in ndindex(arr.shape):
            if mask[ijk]:
                result[ijk] = arr[ijk]
        return result

    # If the first item is a scalar
    elif np.isscalar(first):
        "first is not an ndarray"
        all_scalars = all(np.isscalar(item) for item in not_none)
        if not all_scalars:
            return arr
        # See comment about np.result_type above. We sum against the smallest
        # possible type, bool, and let numpy type promotion find the best
        # common type. The values might all be Python scalars so we need to
        # cast to numpy type at the end to be sure of having a dtype.
        dtype = np.asarray(sum(not_none, False)).dtype
        temp = arr.copy()
        temp[~mask] = fill
        return temp.astype(dtype)
    else:
        return arr
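A side note on the dtype-promotion tricks above: both the
reduce(np.add, tiny_arrs) workaround and the sum(not_none, False) cast predate
np.result_type. On numpy >= 1.6 the common dtype can be queried directly,
e.g.:

import numpy as np

a = np.zeros(1, dtype=np.int32)
b = np.zeros(1, dtype=np.float64)
# Equivalent to reduce(np.add, [a, b]).dtype in old_squash
print(np.result_type(a, b))  # float64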
Exemplo n.º 57
0
    data2, affine2 = resample(data, affine,
                              zooms=zooms,
                              new_zooms=(1., 1., 1.))

    mask2, affine2 = resample(mask, affine,
                              zooms=zooms,
                              new_zooms=(1., 1., 1.))

    mask2[mask2 > 0] = 1

    # As these datasets are huge we will use ndindex to apply the mask
    # rather than data2[mask2==0] = np.zeros(data2.shape[-1])
    from dipy.core.ndindex import ndindex

    for index in ndindex(data2.shape[:3]):
        if mask2[index] == 0:
            data2[index] = np.zeros(data2.shape[-1])

    del data, affine, zooms

    print(data2.shape)
    print(affine2)
    print(nib.aff2axcodes(affine2))

    print('>>> Save resampled data, masks and S0...')

    # Save as nii (not nii.gz) to reduce saving and loading time
    fname2 = join(dname, 'dwi_1x1x1.nii')
    nib.save(nib.Nifti1Image(data2, affine2), fname2)
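The voxel-wise loop in this snippet trades speed for memory: a boolean
fancy-index assignment such as data2[mask2 == 0] = 0 would build large
temporaries, while iterating with ndindex touches one voxel at a time. The
same pattern on a small synthetic volume (a sketch with made-up shapes):

import numpy as np
from dipy.core.ndindex import ndindex

data = np.random.rand(16, 16, 8, 65).astype(np.float32)
mask = np.random.rand(16, 16, 8) > 0.5

# Zero out background voxels in place, one voxel at a time
for index in ndindex(data.shape[:3]):
    if mask[index] == 0:
        data[index] = 0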
Exemplo n.º 58
0
def peaks_from_model(model, data, sphere, relative_peak_threshold,
                     min_separation_angle, mask=None, return_odf=False,
                     return_sh=True, gfa_thr=0, normalize_peaks=False,
                     sh_order=8, sh_basis_type=None, npeaks=5, B=None,
                     invB=None, parallel=False, nbr_processes=None):
    """Fit the model to data and computes peaks and metrics

    Parameters
    ----------
    model : a model instance
        `model` will be used to fit the data.
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    relative_peak_threshold : float
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
    min_separation_angle : float in [0, 90]
        The minimum angular distance between directions. If two peaks are too
        close, only the larger of the two is returned.
    mask : array, optional
        If `mask` is provided, voxels that are False in `mask` are skipped and
        no peaks are returned.
    return_odf : bool
        If True, the odfs are returned.
    return_sh : bool
        If True, the odf as spherical harmonics coefficients is returned
    gfa_thr : float
        Voxels with gfa less than `gfa_thr` are skipped, no peaks are returned.
    normalize_peaks : bool
        If True, all peak values are calculated relative to `max(odf)`.
    sh_order : int, optional
        Maximum SH order in the SH fit.  For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients (default 8).
    sh_basis_type : {None, 'mrtrix', 'fibernav'}
        ``None`` for the default dipy basis which is the fibernav basis,
        ``mrtrix`` for the MRtrix basis, and
        ``fibernav`` for the FiberNavigator basis
    npeaks : int
        Maximum number of peaks found (default 5 peaks).
    B : ndarray, optional
        Matrix that transforms spherical harmonics to spherical function
        ``sf = np.dot(sh, B)``.
    invB : ndarray, optional
        Inverse of B.
    parallel : bool
        If True, use multiprocessing to compute peaks and metric
        (default False). Temporary files are saved in the default temporary
        directory of the system. It can be changed using ``import tempfile``
        and ``tempfile.tempdir = '/path/to/tempdir'``.
    nbr_processes : int
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).

    Returns
    -------
    pam : PeaksAndMetrics
        An object with ``gfa``, ``peak_directions``, ``peak_values``,
        ``peak_indices``, ``odf``, ``shm_coeffs`` as attributes
    """
    if return_sh and (B is None or invB is None):
        B, invB = sh_to_sf_matrix(
            sphere, sh_order, sh_basis_type, return_inv=True)

    if parallel:
        # It is mandatory to provide B and invB to the parallel function.
        # Otherwise, a call to np.linalg.pinv is made in a subprocess and
        # makes it timeout on some system.
        # see https://github.com/nipy/dipy/issues/253 for details
        return _peaks_from_model_parallel(model,
                                          data, sphere,
                                          relative_peak_threshold,
                                          min_separation_angle,
                                          mask, return_odf,
                                          return_sh,
                                          gfa_thr,
                                          normalize_peaks,
                                          sh_order,
                                          sh_basis_type,
                                          npeaks,
                                          B,
                                          invB,
                                          nbr_processes)

    shape = data.shape[:-1]
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as data.")

    gfa_array = np.zeros(shape)
    qa_array = np.zeros((shape + (npeaks,)))

    peak_dirs = np.zeros((shape + (npeaks, 3)))
    peak_values = np.zeros((shape + (npeaks,)))
    peak_indices = np.zeros((shape + (npeaks,)), dtype='int')
    peak_indices.fill(-1)

    if return_sh:
        n_shm_coeff = (sh_order + 2) * (sh_order + 1) // 2
        shm_coeff = np.zeros((shape + (n_shm_coeff,)))

    if return_odf:
        odf_array = np.zeros((shape + (len(sphere.vertices),)))

    global_max = -np.inf
    for idx in ndindex(shape):
        if not mask[idx]:
            continue

        odf = model.fit(data[idx]).odf(sphere)

        if return_sh:
            shm_coeff[idx] = np.dot(odf, invB)

        if return_odf:
            odf_array[idx] = odf

        gfa_array[idx] = gfa(odf)
        if gfa_array[idx] < gfa_thr:
            global_max = max(global_max, odf.max())
            continue

        # Get peaks of odf
        direction, pk, ind = peak_directions(odf, sphere,
                                             relative_peak_threshold,
                                             min_separation_angle)

        # Calculate peak metrics
        if pk.shape[0] != 0:
            global_max = max(global_max, pk[0])

            n = min(npeaks, pk.shape[0])
            qa_array[idx][:n] = pk[:n] - odf.min()

            peak_dirs[idx][:n] = direction[:n]
            peak_indices[idx][:n] = ind[:n]
            peak_values[idx][:n] = pk[:n]

            if normalize_peaks:
                peak_values[idx][:n] /= pk[0]
                peak_dirs[idx] *= peak_values[idx][:, None]

    qa_array /= global_max

    return _pam_from_attrs(PeaksAndMetrics,
                           sphere,
                           peak_indices,
                           peak_values,
                           peak_dirs,
                           gfa_array,
                           qa_array,
                           shm_coeff if return_sh else None,
                           B if return_sh else None,
                           odf_array if return_odf else None)
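A usage sketch follows. The model choice, file names and parameters are
illustrative; any dipy reconstruction model with an odf method should work.

import nibabel as nib
from dipy.core.gradients import gradient_table
from dipy.data import get_sphere
from dipy.reconst.shm import CsaOdfModel

img = nib.load('dwi.nii.gz')                      # hypothetical input file
data = img.get_data()
gtab = gradient_table('bvals.txt', 'bvecs.txt')   # hypothetical paths
sphere = get_sphere('symmetric724')

model = CsaOdfModel(gtab, sh_order=8)
pam = peaks_from_model(model, data, sphere,
                       relative_peak_threshold=.5,
                       min_separation_angle=25,
                       normalize_peaks=True)
print(pam.peak_dirs.shape)   # data.shape[:-1] + (5, 3)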
Exemplo n.º 59
0
def nls_fit_tensor(gtab, data, mask=None, Diso=3e-3, mdreg=2.7e-3,
                   min_signal=1.0e-6, f_transform=True, cholesky=False,
                   jac=False, weighting=None, sigma=None):
    """
    Fit the water elimination tensor model using the non-linear least-squares.

    Parameters
    ----------
    gtab : a GradientTable class instance
        The gradient table containing diffusion acquisition parameters.
    data : ndarray ([X, Y, Z, ...], g)
        Data or response variables holding the data. Note that the last
        dimension should contain the data. It makes no copies of data.
    mask : array, optional
        A boolean array used to mark the coordinates in the data that should
        be analyzed that has the shape data.shape[:-1]
    Diso : float, optional
        Value of the free water isotropic diffusion. Default is set to 3e-3
        $mm^{2}.s^{-1}$. Please adjust this value if you are assuming different
        units of diffusion.
    mdreg : float, optional
        DTI's mean diffusivity regularization threshold. If the standard DTI
        diffusion tensor's mean diffusivity is close to the free water
        diffusion value, the diffusion signal is assumed to be only free water
        diffusion (i.e. the volume fraction will be set to 1 and the tissue's
        diffusion parameters are set to zero). Default mdreg is 2.7e-3
        $mm^{2}.s^{-1}$ (corresponding to 90% of the free water diffusion
        value).
    min_signal : float
        The minimum signal value. Needs to be a strictly positive
        number. Default: 1.0e-6.
    f_transform : bool, optional
        If True, the water volume fraction is converted during the convergence
        procedure to ft = arcsin(2*f - 1) + pi/2, ensuring f estimates between
        0 and 1.
        Default: True
    cholesky : bool, optional
        If True, the Cholesky decomposition is used to ensure that the
        diffusion tensor is positive definite.
        Default: False
    jac : bool
        Use the Jacobian? Default: False
    weighting : str, optional
        The weighting scheme to use in considering the
        squared-error. Default behavior is to use uniform weighting. Other
        options: 'sigma', 'gmm'
    sigma : float, optional
        If the 'sigma' weighting scheme is used, a value of sigma needs to be
        provided here. According to [Chang2005]_, a good value to use is
        1.5267 * std(background_noise), where background_noise is estimated
        from some part of the image known to contain no signal (only noise).

    Returns
    -------
    fw_params : ndarray (x, y, z, 13)
        Matrix containing, in the last dimension, the free water model
        parameters in the following order:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the
               first, second and third coordinates of the eigenvector
            3) The volume fraction of the free water compartment
    """
    fw_params = np.zeros(data.shape[:-1] + (13,))
    W = design_matrix(gtab)

    # Prepare mask
    if mask is None:
        mask = np.ones(data.shape[:-1], dtype=bool)
    else:
        if mask.shape != data.shape[:-1]:
            raise ValueError("Mask is not the same shape as data.")
        mask = np.array(mask, dtype=bool, copy=False)

    # Prepare S0
    S0 = np.mean(data[:, :, :, gtab.b0s_mask], axis=-1)

    index = ndindex(mask.shape)
    for v in index:
        if mask[v]:
            params = nls_iter(W, data[v], S0[v], Diso=Diso, mdreg=mdreg,
                              min_signal=min_signal, f_transform=f_transform,
                              cholesky=cholesky, jac=jac, weighting=weighting,
                              sigma=sigma)
            fw_params[v] = params

    return fw_params
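A minimal call sketch, using a synthetic two-shell acquisition (the free water
model needs more than one non-zero b-value); the data here is a random
placeholder, so the estimates are meaningless but the shapes are right:

import numpy as np
from dipy.core.gradients import gradient_table

bvals = np.concatenate([np.zeros(6), 500 * np.ones(30), 1500 * np.ones(30)])
bvecs = np.random.randn(66, 3)
bvecs[:6] = 0
bvecs[6:] /= np.linalg.norm(bvecs[6:], axis=1)[:, None]
gtab = gradient_table(bvals, bvecs)

data = np.random.rand(5, 5, 5, 66)   # placeholder diffusion data
fw_params = nls_fit_tensor(gtab, data)
f = fw_params[..., 12]               # free water volume fraction map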
Exemplo n.º 60
0
def odf_sh_to_sharp(odfs_sh, sphere, basis=None, ratio=3 / 15., sh_order=8,
                    lambda_=1., tau=0.1, r2_term=False):
    r""" Sharpen odfs using the sharpening deconvolution transform [2]_

    This function can be used to sharpen any smooth ODF spherical function. In
    theory, this should only be used to sharpen QballModel ODFs, but in
    practice, one can play with the deconvolution ratio and sharpen almost any
    ODF-like spherical function. The constrained-regularization is stable and
    will not only sharpen the ODF peaks but also regularize the noisy peaks.

    Parameters
    ----------
    odfs_sh : ndarray (``(sh_order + 1)*(sh_order + 2)/2``, )
        array of odfs expressed as spherical harmonics coefficients
    sphere : Sphere
        sphere used to build the regularization matrix
    basis : {None, 'mrtrix', 'fibernav'}
        different spherical harmonic bases; ``None`` defaults to the fibernav
        basis.
    ratio : float
        ratio of the smallest vs the largest eigenvalue of the single prolate
        tensor response function (:math:`\frac{\lambda_2}{\lambda_1}`)
    sh_order : int
        maximal SH order of the SH representation
    lambda_ : float
        lambda parameter (see odfdeconv) (default 1.0)
    tau : float
        tau parameter in the L matrix construction (see odfdeconv)
        (default 0.1)
    r2_term : bool
         True if the ODF is computed from a model that uses the $r^2$ term in
         the integral.  Recall that Tuch's ODF (used in Q-ball Imaging [1]_)
         and the true normalized ODF definition differ by an $r^2$ term in the
         ODF integral.  The original Sharpening Deconvolution Transform (SDT)
         technique [2]_ expects Tuch's ODF without the $r^2$ term (see [3]_
         for the mathematical details).  This function now also supports ODFs
         that have been computed using the $r^2$ term, because the proper
         analytical response function has been derived; for example, ODFs from
         models such as DSI, GQI, SHORE, CSA, Tensor and Multi-tensor should
         be deconvolved with ``r2_term=True``.

    Returns
    -------
    fodf_sh : ndarray
        sharpened odf expressed as spherical harmonics coefficients

    References
    ----------
    .. [1] Tuch, D. MRM 2004. Q-Ball Imaging.
    .. [2] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
           Probabilistic Tractography Based on Complex Fibre Orientation
           Distributions
    .. [3] Descoteaux, M, et al. MRM 2007. Fast, Regularized and Analytical
           Q-Ball Imaging
    """
    r, theta, phi = cart2sphere(sphere.x, sphere.y, sphere.z)
    real_sym_sh = sph_harm_lookup[basis]

    B_reg, m, n = real_sym_sh(sh_order, theta, phi)
    R, P = forward_sdt_deconv_mat(ratio, n, r2_term=r2_term)

    # scale lambda to account for differences in the number of
    # SH coefficients and number of mapped directions
    lambda_ = lambda_ * R.shape[0] * R[0, 0] / B_reg.shape[0]

    fodf_sh = np.zeros(odfs_sh.shape)

    for index in ndindex(odfs_sh.shape[:-1]):
        fodf_sh[index], num_it = odf_deconv(odfs_sh[index], R, B_reg,
                                            lambda_=lambda_, tau=tau,
                                            r2_term=r2_term)

    return fodf_sh
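And a closing usage sketch, with a random SH volume standing in for real
Q-ball coefficients:

import numpy as np
from dipy.data import get_sphere

sphere = get_sphere('symmetric724')
n_coeff = (8 + 1) * (8 + 2) // 2            # 45 coefficients at sh_order=8
odfs_sh = np.random.rand(3, 3, 3, n_coeff)  # placeholder SH coefficients

fodf_sh = odf_sh_to_sharp(odfs_sh, sphere, basis=None, ratio=3 / 15.,
                          sh_order=8, lambda_=1., tau=0.1)
print(fodf_sh.shape)                        # same shape as odfs_sh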