Example 1
def test_sh_to_sf_matrix():
    sphere = Sphere(xyz=hemi_icosahedron.vertices)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",
                                message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)

        B1, invB1 = sh_to_sf_matrix(sphere)

        B2, m, n = real_sh_descoteaux(4, sphere.theta, sphere.phi)

    invB2 = smooth_pinv(B2, L=np.zeros_like(n))

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",
                                message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)

        B3 = sh_to_sf_matrix(sphere, return_inv=False)

    assert_array_almost_equal(B1, B2.T)
    assert_array_almost_equal(invB1, invB2.T)
    assert_array_almost_equal(B3, B1)
    assert_raises(ValueError, sh_to_sf_matrix, sphere, basis_type="")
Example 2
def test_sh_to_sf_matrix():
    sphere = Sphere(xyz=hemi_icosahedron.vertices)
    B1, invB1 = sh_to_sf_matrix(sphere)
    B2, m, n = real_sym_sh_basis(4, sphere.theta, sphere.phi)
    invB2 = smooth_pinv(B2, L=np.zeros_like(n))
    B3 = sh_to_sf_matrix(sphere, return_inv=False)

    assert_array_almost_equal(B1, B2.T)
    assert_array_almost_equal(invB1, invB2.T)
    assert_array_almost_equal(B3, B1)
    assert_raises(ValueError, sh_to_sf_matrix, sphere, basis_type="")
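These two tests pin down the shapes and roles of the matrices returned by sh_to_sf_matrix: B maps SH coefficients to samples on the sphere, invB maps samples back to SH. Below is a minimal round-trip sketch (the sphere name and order are illustrative; exact keyword names can vary slightly across DIPY versions):

import numpy as np
from dipy.data import get_sphere
from dipy.reconst.shm import sh_to_sf_matrix

sphere = get_sphere('repulsion100')

# B: (n_coeffs, n_vertices), invB: (n_vertices, n_coeffs)
B, invB = sh_to_sf_matrix(sphere, 4)

sh = np.zeros(15)            # 15 coefficients for a symmetric order-4 basis
sh[0] = 1.0                  # isotropic ODF
sf = np.dot(sh, B)           # project SH coefficients onto the sphere
sh_back = np.dot(sf, invB)   # least-squares projection back to SH
assert np.allclose(sh, sh_back)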
Example 3
def create_odf_slicer(sh_fodf, mask, sphere, nb_subdivide, sh_order, sh_basis,
                      full_basis, orientation, scale, radial_scale, norm,
                      colormap, slice_index):
    """
    Create an ODF slicer actor displaying a fODF slice. The input volume is a
    3-dimensional grid containing the SH coefficients of the fODF for each
    voxel, with the grid having a size of 1 along the axis corresponding to
    the selected orientation.
    """
    # Subdivide the spheres if nb_subdivide is provided
    if nb_subdivide is not None:
        sphere = sphere.subdivide(nb_subdivide)

    # SH coefficients to SF coefficients matrix
    B_mat = sh_to_sf_matrix(sphere,
                            sh_order,
                            sh_basis,
                            full_basis,
                            return_inv=False)

    odf_actor = actor.odf_slicer(sh_fodf,
                                 mask=mask,
                                 norm=norm,
                                 radial_scale=radial_scale,
                                 sphere=sphere,
                                 colormap=colormap,
                                 scale=scale,
                                 B_matrix=B_mat)
    set_display_extent(odf_actor, orientation, sh_fodf.shape[:3], slice_index)

    return odf_actor
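A hypothetical call to this helper with placeholder data; it assumes the actor and set_display_extent helpers imported by the original script are available, and a DIPY version that accepts full_basis positionally, as the function above does:

import numpy as np
from dipy.data import get_sphere

# Dummy single-slice fODF volume: 15 coefficients = symmetric SH basis, order 4.
sh_fodf = np.zeros((10, 10, 1, 15))
sh_fodf[..., 0] = 1.0

odf_actor = create_odf_slicer(sh_fodf, mask=None,
                              sphere=get_sphere('symmetric724'),
                              nb_subdivide=None, sh_order=4,
                              sh_basis='descoteaux07', full_basis=False,
                              orientation='axial', scale=0.5,
                              radial_scale=True, norm=False, colormap=None,
                              slice_index=0)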
Example 4
    def __init__(self,
                 odf_dataset,
                 basis,
                 sf_threshold,
                 sf_threshold_init,
                 theta,
                 dipy_sphere='symmetric724'):
        self.sf_threshold = sf_threshold
        self.sf_threshold_init = sf_threshold_init
        self.theta = theta

        self.vertices = dipy.data.get_sphere(dipy_sphere).vertices
        self.dirs = np.zeros(len(self.vertices), dtype=np.ndarray)
        for i in range(len(self.vertices)):
            self.dirs[i] = TrackingDirection(self.vertices[i], i)
        self.maxima_neighbours = self.get_direction_neighbours(np.pi / 16.)
        self.tracking_neighbours = self.get_direction_neighbours(self.theta)
        self.dataset = odf_dataset
        self.basis = basis

        if 'symmetric' not in dipy_sphere:
            raise ValueError('Sphere must be symmetric. Call to '
                             'get_opposite_direction will fail.')

        sphere = dipy.data.get_sphere(dipy_sphere)
        sh_order = order_from_ncoef(self.dataset.data.shape[-1])
        self.B = sh_to_sf_matrix(sphere,
                                 sh_order,
                                 self.basis,
                                 smooth=0.006,
                                 return_inv=False)
Example 5
    def fod_sh(self, sh_order=8, basis_type=None):
        """
        Returns the spherical harmonics coefficients of the Fiber Orientation
        Distribution (FOD) if it is available. Uses a 724-point spherical
        tessellation to do the spherical harmonics transform.

        Parameters
        ----------
        sh_order : integer,
            the maximum spherical harmonics order of the coefficient expansion.
        basis_type : string,
            type of spherical harmonics basis to use for the expansion, see
            sh_to_sf_matrix for more info.

        Returns
        -------
        fods_sh : array of size (Ndata, Ncoefficients),
            spherical harmonics coefficients of the FODs, scaled by volume
            fraction.
        """
        if not self.model.fod_available:
            msg = ('FODs not available for current model.')
            raise ValueError(msg)
        sphere = get_sphere(name='repulsion724')
        vertices = sphere.vertices
        _, inv_sh_matrix = sh_to_sf_matrix(sphere,
                                           sh_order,
                                           basis_type=basis_type,
                                           return_inv=True)
        fods_sf = self.fod(vertices)

        dataset_shape = self.fitted_parameters_vector.shape[:-1]
        number_coef_used = int((sh_order + 2) * (sh_order + 1) // 2)
        fods_sh = np.zeros(np.r_[dataset_shape, number_coef_used])
        mask_pos = np.where(self.mask)
        for pos in zip(*mask_pos):
            fods_sh[pos] = np.dot(inv_sh_matrix.T, fods_sf[pos])
        return fods_sh
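The projection at the end of fod_sh maps sampled SF values back to SH coefficients through the transposed inverse matrix. A shape-only sketch of that step (order and sphere are illustrative):

import numpy as np
from dipy.data import get_sphere
from dipy.reconst.shm import sh_to_sf_matrix

sphere = get_sphere('repulsion724')
_, inv_B = sh_to_sf_matrix(sphere, 8, return_inv=True)   # inv_B: (724, 45)

sf = np.ones(len(sphere.vertices))     # one amplitude per sphere direction
sh = np.dot(inv_B.T, sf)               # 45 SH coefficients for order 8
assert sh.shape == ((8 + 2) * (8 + 1) // 2,)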
Example 6
    def __init__(self,
                 odf_dataset,
                 basis,
                 sf_threshold,
                 sf_threshold_init,
                 theta,
                 dipy_sphere='symmetric724',
                 min_separation_angle=np.pi / 16.):
        super().__init__(odf_dataset, theta, dipy_sphere)

        self.sf_threshold = sf_threshold
        self.sf_threshold_init = sf_threshold_init
        sh_order = order_from_ncoef(self.dataset.data.shape[-1])
        self.basis = basis
        self.B = sh_to_sf_matrix(self.sphere,
                                 sh_order,
                                 self.basis,
                                 smooth=0.006,
                                 return_inv=False)

        # For deterministic tracking:
        self.maxima_neighbours = self._get_sphere_neighbours(
            min_separation_angle)
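Both trackers infer the SH order from the number of coefficients along the last axis of the ODF dataset. A quick check of that relationship for the symmetric basis, using DIPY's order_from_ncoef:

from dipy.reconst.shm import order_from_ncoef

for order in (2, 4, 6, 8):
    ncoef = (order + 1) * (order + 2) // 2   # 6, 15, 28, 45
    assert order_from_ncoef(ncoef) == order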
Example 7
def peaks_from_sh(shm_coeff,
                  sphere,
                  mask=None,
                  relative_peak_threshold=0.5,
                  absolute_threshold=0,
                  min_separation_angle=25,
                  normalize_peaks=False,
                  npeaks=5,
                  sh_basis_type='descoteaux07',
                  nbr_processes=None):
    """Computes peaks from given spherical harmonic coefficients

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    relative_peak_threshold : float, optional
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
        Default: 0.5
    absolute_threshold : float, optional
        Absolute threshold on fODF amplitude. This value should be set to
        approximately 1.5 to 2 times the maximum fODF amplitude in isotropic
        voxels (ex. ventricles). `scil_compute_fodf_max_in_ventricles.py`
        can be used to find the maximal value.
        Default: 0
    min_separation_angle : float in [0, 90], optional
        The minimum distance between directions. If two peaks are too close
        only the larger of the two is returned.
        Default: 25
    normalize_peaks : bool, optional
        If true, all peak values are calculated relative to `max(odf)`.
    npeaks : int, optional
        Maximum number of peaks found (default 5 peaks).
    sh_basis_type : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    tuple of np.ndarray
        peak_dirs, peak_values, peak_indices
    """
    sh_order = order_from_ncoef(shm_coeff.shape[-1])
    B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type)

    data_shape = shm_coeff.shape
    if mask is None:
        mask = np.sum(shm_coeff, axis=3).astype(bool)

    nbr_processes = multiprocessing.cpu_count() \
        if nbr_processes is None or nbr_processes < 0 else nbr_processes

    # Ravel the first 3 dimensions while keeping the 4th intact, like a list of
    # 1D time series voxels. Then separate it in chunks of len(nbr_processes).
    shm_coeff = shm_coeff[mask].reshape(
        (np.count_nonzero(mask), data_shape[3]))
    chunks = np.array_split(shm_coeff, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        peaks_from_sh_parallel,
        zip(chunks, itertools.repeat(B), itertools.repeat(sphere),
            itertools.repeat(relative_peak_threshold),
            itertools.repeat(absolute_threshold),
            itertools.repeat(min_separation_angle), itertools.repeat(npeaks),
            itertools.repeat(normalize_peaks), np.arange(len(chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunk together in the original shape.
    peak_dirs_array = np.zeros(data_shape[0:3] + (npeaks, 3))
    peak_values_array = np.zeros(data_shape[0:3] + (npeaks, ))
    peak_indices_array = np.zeros(data_shape[0:3] + (npeaks, ))

    # tmp arrays are necessary to avoid inserting data into the returned
    # variable rather than the original array
    tmp_peak_dirs_array = np.zeros((np.count_nonzero(mask), npeaks, 3))
    tmp_peak_values_array = np.zeros((np.count_nonzero(mask), npeaks))
    tmp_peak_indices_array = np.zeros((np.count_nonzero(mask), npeaks))
    for i, peak_dirs, peak_values, peak_indices in results:
        tmp_peak_dirs_array[chunk_len[i]:chunk_len[i + 1], :, :] = peak_dirs
        tmp_peak_values_array[chunk_len[i]:chunk_len[i + 1], :] = peak_values
        tmp_peak_indices_array[chunk_len[i]:chunk_len[i + 1], :] = peak_indices

    peak_dirs_array[mask] = tmp_peak_dirs_array
    peak_values_array[mask] = tmp_peak_values_array
    peak_indices_array[mask] = tmp_peak_indices_array

    return peak_dirs_array, peak_values_array, peak_indices_array
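An illustrative call on a tiny synthetic volume; the threshold values are placeholders, and the worker peaks_from_sh_parallel is assumed to be importable from the same module:

import numpy as np
from dipy.data import get_sphere

fodf_sh = np.zeros((5, 5, 5, 45))   # symmetric SH basis, order 8
fodf_sh[..., 0] = 1.0               # isotropic fODF everywhere

peak_dirs, peak_values, peak_indices = peaks_from_sh(
    fodf_sh, get_sphere('repulsion724'),
    relative_peak_threshold=0.5, absolute_threshold=0.1,
    min_separation_angle=25, npeaks=5, nbr_processes=1)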
Example 8
def local_asym_filtering(in_sh,
                         sh_order=8,
                         sh_basis='descoteaux07',
                         in_full_basis=False,
                         out_full_basis=True,
                         dot_sharpness=1.0,
                         sphere_str='repulsion724',
                         sigma=1.0):
    """Average the SH projected on a sphere using a first-neighbor gaussian
    blur and a dot product weight between sphere directions and the direction
    to neighborhood voxels, forcing to 0 negative values and thus performing
    asymmetric hemisphere-aware filtering.

    Parameters
    ----------
    in_sh: ndarray (x, y, z, n_coeffs)
        Input SH coefficients array
    sh_order: int, optional
        Maximum order of the SH series.
    sh_basis: {'descoteaux07', 'tournier07'}, optional
        SH basis of the input signal.
    in_full_basis: bool, optional
        True if the input is in full SH basis.
    out_full_basis: bool, optional
        If True, save output SH using full SH basis.
    dot_sharpness: float, optional
        Exponent of the dot product. When set to 0.0, directions
        are not weighted by the dot product.
    sphere_str: str, optional
        Name of the sphere used to project SH coefficients to SF.
    sigma: float, optional
        Sigma for the Gaussian.

    Returns
    -------
    out_sh: ndarray (x, y, z, n_coeffs)
        Filtered signal as SH coefficients.
    """
    # Load the sphere used for projection of SH
    sphere = get_sphere(sphere_str)

    # Normalized filter for each sf direction
    weights = _get_weights(sphere, dot_sharpness, sigma)

    nb_sf = len(sphere.vertices)
    mean_sf = np.zeros(np.append(in_sh.shape[:-1], nb_sf))
    B = sh_to_sf_matrix(sphere,
                        sh_order=sh_order,
                        basis_type=sh_basis,
                        return_inv=False,
                        full_basis=in_full_basis)

    # We want a B matrix projecting onto an inverted sphere to get the SF on
    # the opposite hemisphere for a given vertex
    neg_B = sh_to_sf_matrix(Sphere(xyz=-sphere.vertices),
                            sh_order=sh_order,
                            basis_type=sh_basis,
                            return_inv=False,
                            full_basis=in_full_basis)

    # Apply the filter to each sphere vertex
    for sf_i in range(nb_sf):
        w_filter = weights[..., sf_i]

        # Calculate contribution of center voxel
        current_sf = np.dot(in_sh, B[:, sf_i])
        mean_sf[..., sf_i] = w_filter[1, 1, 1] * current_sf

        # Add contributions of neighbors using opposite hemispheres
        current_sf = np.dot(in_sh, neg_B[:, sf_i])
        w_filter[1, 1, 1] = 0.0
        mean_sf[..., sf_i] += correlate(current_sf, w_filter, mode="constant")

    # Convert back to SH coefficients
    _, B_inv = sh_to_sf_matrix(sphere,
                               sh_order=sh_order,
                               basis_type=sh_basis,
                               full_basis=out_full_basis)

    out_sh = np.array([np.dot(i, B_inv) for i in mean_sf], dtype=in_sh.dtype)
    return out_sh
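An illustrative call on a small random volume in the symmetric order-8 basis; with out_full_basis=True the final projection above returns 81 coefficients per voxel (the full even-and-odd order-8 basis):

import numpy as np

rng = np.random.default_rng(0)
in_sh = rng.normal(size=(6, 6, 6, 45))   # symmetric SH basis, order 8

out_sh = local_asym_filtering(in_sh, sh_order=8, sh_basis='descoteaux07',
                              in_full_basis=False, out_full_basis=True,
                              sphere_str='repulsion724', sigma=1.0)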
Example 9
# Here, we fetch and load the fiber ODF volume to display. The ODFs are
# expressed as spherical harmonics (SH) coefficients in a 3D grid.
fetch_viz_dmri()
fetch_viz_icons()

fodf_img = nib.load(read_viz_dmri('fodf.nii.gz'))
sh = fodf_img.get_fdata()
affine = fodf_img.affine
grid_shape = sh.shape[:-1]

###############################################################################
# We then define a low resolution sphere used to visualize SH coefficients
# as spherical functions (SF) as well as a matrix `B_low` to project SH
# onto the sphere.
sphere_low = get_sphere('repulsion100')
B_low = sh_to_sf_matrix(sphere_low, 8, return_inv=False)

###############################################################################
# Now, we create a slicer for each orientation to display a slice in
# the middle of the volume and we add them to a `scene`.

# Change these values to test various parameters combinations.
scale = 0.5
norm = False
colormap = None
radial_scale = True
opacity = 1.0
global_cm = False

# ODF slicer for axial slice
odf_actor_z = actor.odf_slicer(sh, affine=affine, sphere=sphere_low,
Example 10
def peaks_from_model(model,
                     data,
                     sphere,
                     relative_peak_threshold,
                     min_separation_angle,
                     mask=None,
                     return_odf=False,
                     return_sh=True,
                     gfa_thr=0,
                     normalize_peaks=False,
                     sh_order=8,
                     sh_basis_type=None,
                     npeaks=5,
                     B=None,
                     invB=None,
                     parallel=False,
                     nbr_processes=None):
    """Fit the model to data and computes peaks and metrics

    Parameters
    ----------
    model : a model instance
        `model` will be used to fit the data.
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    relative_peak_threshold : float
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
    min_separation_angle : float in [0, 90]
        The minimum distance between directions. If two peaks are too close
        only the larger of the two is returned.
    mask : array, optional
        If `mask` is provided, voxels that are False in `mask` are skipped and
        no peaks are returned.
    return_odf : bool
        If True, the odfs are returned.
    return_sh : bool
        If True, the odf as spherical harmonics coefficients is returned
    gfa_thr : float
        Voxels with gfa less than `gfa_thr` are skipped, no peaks are returned.
    normalize_peaks : bool
        If true, all peak values are calculated relative to `max(odf)`.
    sh_order : int, optional
        Maximum SH order in the SH fit.  For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients (default 8).
    sh_basis_type : {None, 'tournier07', 'descoteaux07'}
        ``None`` for the default DIPY basis,
        ``tournier07`` for the Tournier 2007 [2]_ basis, and
        ``descoteaux07`` for the Descoteaux 2007 [1]_ basis
        (``None`` defaults to ``descoteaux07``).
    sh_smooth : float, optional
        Lambda-regularization in the SH fit (default 0.0).
    npeaks : int
        Maximum number of peaks found (default 5 peaks).
    B : ndarray, optional
        Matrix that transforms spherical harmonics to spherical function
        ``sf = np.dot(sh, B)``.
    invB : ndarray, optional
        Inverse of B.
    parallel: bool
        If True, use multiprocessing to compute peaks and metric
        (default False). Temporary files are saved in the default temporary
        directory of the system. It can be changed using ``import tempfile``
        and ``tempfile.tempdir = '/path/to/tempdir'``.
    nbr_processes: int
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).

    Returns
    -------
    pam : PeaksAndMetrics
        An object with ``gfa``, ``peak_directions``, ``peak_values``,
        ``peak_indices``, ``odf``, ``shm_coeffs`` as attributes

    References
    ----------
    .. [1] Descoteaux, M., Angelino, E., Fitzgibbons, S. and Deriche, R.
           Regularized, Fast, and Robust Analytical Q-ball Imaging.
           Magn. Reson. Med. 2007;58:497-510.
    .. [2] Tournier J.D., Calamante F. and Connelly A. Robust determination
           of the fibre orientation distribution in diffusion MRI:
           Non-negativity constrained super-resolved spherical deconvolution.
           NeuroImage. 2007;35(4):1459-1472.

    """
    if return_sh and (B is None or invB is None):
        B, invB = sh_to_sf_matrix(sphere,
                                  sh_order,
                                  sh_basis_type,
                                  return_inv=True)

    if parallel:
        # It is mandatory to provide B and invB to the parallel function.
        # Otherwise, a call to np.linalg.pinv is made in a subprocess and
        # makes it timeout on some system.
        # see https://github.com/dipy/dipy/issues/253 for details
        return _peaks_from_model_parallel(
            model, data, sphere, relative_peak_threshold, min_separation_angle,
            mask, return_odf, return_sh, gfa_thr, normalize_peaks, sh_order,
            sh_basis_type, npeaks, B, invB, nbr_processes)

    shape = data.shape[:-1]
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as data.")

    gfa_array = np.zeros(shape)
    qa_array = np.zeros((shape + (npeaks, )))

    peak_dirs = np.zeros((shape + (npeaks, 3)))
    peak_values = np.zeros((shape + (npeaks, )))
    peak_indices = np.zeros((shape + (npeaks, )), dtype='int')
    peak_indices.fill(-1)

    if return_sh:
        n_shm_coeff = (sh_order + 2) * (sh_order + 1) // 2
        shm_coeff = np.zeros((shape + (n_shm_coeff, )))

    if return_odf:
        odf_array = np.zeros((shape + (len(sphere.vertices), )))

    global_max = -np.inf
    for idx in ndindex(shape):
        if not mask[idx]:
            continue

        odf = model.fit(data[idx]).odf(sphere)

        if return_sh:
            shm_coeff[idx] = np.dot(odf, invB)

        if return_odf:
            odf_array[idx] = odf

        gfa_array[idx] = gfa(odf)
        if gfa_array[idx] < gfa_thr:
            global_max = max(global_max, odf.max())
            continue

        # Get peaks of odf
        direction, pk, ind = peak_directions(odf, sphere,
                                             relative_peak_threshold,
                                             min_separation_angle)

        # Calculate peak metrics
        if pk.shape[0] != 0:
            global_max = max(global_max, pk[0])

            n = min(npeaks, pk.shape[0])
            qa_array[idx][:n] = pk[:n] - odf.min()

            peak_dirs[idx][:n] = direction[:n]
            peak_indices[idx][:n] = ind[:n]
            peak_values[idx][:n] = pk[:n]

            if normalize_peaks:
                peak_values[idx][:n] /= pk[0]
                peak_dirs[idx] *= peak_values[idx][:, None]

    qa_array /= global_max

    return _pam_from_attrs(PeaksAndMetrics, sphere, peak_indices, peak_values,
                           peak_dirs, gfa_array, qa_array,
                           shm_coeff if return_sh else None,
                           B if return_sh else None,
                           odf_array if return_odf else None)
Example 11
def convert_sh_to_sf(shm_coeff,
                     sphere,
                     mask=None,
                     dtype="float32",
                     input_basis='descoteaux07',
                     input_full_basis=False,
                     nbr_processes=multiprocessing.cpu_count()):
    """Converts spherical harmonic coefficients to an SF sphere

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    dtype : str
        Datatype to use for computation and output array.
        Either `float32` or `float64`. Default: `float32`
    input_basis : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    input_full_basis : bool
        If True, use a full SH basis (even and odd orders) for the input SH
        coefficients.
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    shm_coeff_array : np.ndarray
        Spherical harmonic coefficients in the desired basis.
    """
    assert dtype in ["float32", "float64"], "Only `float32` and `float64` " \
                                            "should be used."

    sh_order = order_from_ncoef(shm_coeff.shape[-1],
                                full_basis=input_full_basis)
    B_in, _ = sh_to_sf_matrix(sphere,
                              sh_order,
                              basis_type=input_basis,
                              full_basis=input_full_basis)
    B_in = B_in.astype(dtype)

    data_shape = shm_coeff.shape
    if mask is None:
        mask = np.sum(shm_coeff, axis=3).astype(bool)

    # Ravel the first 3 dimensions while keeping the 4th intact, like a list of
    # 1D time series voxels. Then separate it in chunks of len(nbr_processes).
    shm_coeff = shm_coeff[mask].reshape(
        (np.count_nonzero(mask), data_shape[3]))
    shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        convert_sh_to_sf_parallel,
        zip(shm_coeff_chunks, itertools.repeat(B_in),
            itertools.repeat(len(sphere.vertices)),
            np.arange(len(shm_coeff_chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunk together in the original shape.
    new_shape = data_shape[:3] + (len(sphere.vertices), )
    sf_array = np.zeros(new_shape, dtype=dtype)
    tmp_sf_array = np.zeros((np.count_nonzero(mask), new_shape[3]),
                            dtype=dtype)
    for i, new_sf in results:
        tmp_sf_array[chunk_len[i]:chunk_len[i + 1], :] = new_sf

    sf_array[mask] = tmp_sf_array

    return sf_array
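An illustrative call converting an order-6 symmetric SH volume to SF amplitudes on a 100-direction sphere; convert_sh_to_sf_parallel is assumed to be importable from the same module:

import numpy as np
from dipy.data import get_sphere

sh = np.zeros((4, 4, 4, 28), dtype=np.float32)   # 28 coefficients = order 6
sh[..., 0] = 1.0

sf = convert_sh_to_sf(sh, get_sphere('repulsion100'),
                      dtype="float32", nbr_processes=1)
# sf has shape (4, 4, 4, 100): one amplitude per sphere direction.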
Example 12
def peaks_from_model(model,
                     data,
                     sphere,
                     relative_peak_threshold,
                     min_separation_angle,
                     mask=None,
                     return_odf=False,
                     return_sh=True,
                     gfa_thr=0,
                     normalize_peaks=False,
                     sh_order=8,
                     sh_basis_type=None,
                     npeaks=5,
                     B=None,
                     invB=None,
                     parallel=False,
                     nbr_processes=None):
    """Fits the model to data and computes peaks and metrics

    Parameters
    ----------
    model : a model instance
        `model` will be used to fit the data.
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    relative_peak_threshold : float
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
    min_separation_angle : float in [0, 90]
        The minimum distance between directions. If two peaks are too close
        only the larger of the two is returned.
    mask : array, optional
        If `mask` is provided, voxels that are False in `mask` are skipped and
        no peaks are returned.
    return_odf : bool
        If True, the odfs are returned.
    return_sh : bool
        If True, the odf as spherical harmonics coefficients is returned
    gfa_thr : float
        Voxels with gfa less than `gfa_thr` are skipped, no peaks are returned.
    normalize_peaks : bool
        If true, all peak values are calculated relative to `max(odf)`.
    sh_order : int, optional
        Maximum SH order in the SH fit.  For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients (default 8).
    sh_basis_type : {None, 'mrtrix', 'fibernav'}
        ``None`` for the default dipy basis which is the fibernav basis,
        ``mrtrix`` for the MRtrix basis, and
        ``fibernav`` for the FiberNavigator basis
    sh_smooth : float, optional
        Lambda-regularization in the SH fit (default 0.0).
    npeaks : int
        Maximum number of peaks found (default 5 peaks).
    B : ndarray, optional
        Matrix that transforms spherical harmonics to spherical function
        ``sf = np.dot(sh, B)``.
    invB : ndarray, optional
        Inverse of B.
    parallel: bool
        If True, use multiprocessing to compute peaks and metric
        (default False). Temporary files are saved in the default temporary
        directory of the system. It can be changed using ``import tempfile``
        and ``tempfile.tempdir = '/path/to/tempdir'``.
    nbr_processes: int
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).

    Returns
    -------
    pam : PeaksAndMetrics
        An object with ``gfa``, ``peak_directions``, ``peak_values``,
        ``peak_indices``, ``odf``, ``shm_coeffs`` as attributes
    """

    if return_sh and (B is None or invB is None):
        B, invB = sh_to_sf_matrix(sphere,
                                  sh_order,
                                  sh_basis_type,
                                  return_inv=True)

    if parallel:
        # It is mandatory to provide B and invB to the parallel function.
        # Otherwise, a call to np.linalg.pinv is made in a subprocess and
        # makes it timeout on some system.
        # see https://github.com/nipy/dipy/issues/253 for details
        return _peaks_from_model_parallel(
            model, data, sphere, relative_peak_threshold, min_separation_angle,
            mask, return_odf, return_sh, gfa_thr, normalize_peaks, sh_order,
            sh_basis_type, npeaks, B, invB, nbr_processes)

    shape = data.shape[:-1]
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as data.")

    gfa_array = np.zeros(shape)
    qa_array = np.zeros((shape + (npeaks, )))

    peak_dirs = np.zeros((shape + (npeaks, 3)))
    peak_values = np.zeros((shape + (npeaks, )))
    peak_indices = np.zeros((shape + (npeaks, )), dtype='int')
    peak_indices.fill(-1)

    if return_sh:
        n_shm_coeff = (sh_order + 2) * (sh_order + 1) // 2
        shm_coeff = np.zeros((shape + (n_shm_coeff, )))

    if return_odf:
        odf_array = np.zeros((shape + (len(sphere.vertices), )))

    global_max = -np.inf
    for idx in ndindex(shape):
        if not mask[idx]:
            continue

        odf = model.fit(data[idx]).odf(sphere)

        if return_sh:
            shm_coeff[idx] = np.dot(odf, invB)

        if return_odf:
            odf_array[idx] = odf

        gfa_array[idx] = gfa(odf)
        if gfa_array[idx] < gfa_thr:
            global_max = max(global_max, odf.max())
            continue

        # Get peaks of odf
        direction, pk, ind = peak_directions(odf, sphere,
                                             relative_peak_threshold,
                                             min_separation_angle)

        # Calculate peak metrics
        if pk.shape[0] != 0:
            global_max = max(global_max, pk[0])

            n = min(npeaks, pk.shape[0])
            qa_array[idx][:n] = pk[:n] - odf.min()

            peak_dirs[idx][:n] = direction[:n]
            peak_indices[idx][:n] = ind[:n]
            peak_values[idx][:n] = pk[:n]

            if normalize_peaks:
                peak_values[idx][:n] /= pk[0]
                peak_dirs[idx] *= peak_values[idx][:, None]

    qa_array /= global_max

    pam = PeaksAndMetrics()
    pam.sphere = sphere
    pam.peak_dirs = peak_dirs
    pam.peak_values = peak_values
    pam.peak_indices = peak_indices
    pam.gfa = gfa_array
    pam.qa = qa_array

    if return_sh:
        pam.shm_coeff = shm_coeff
        pam.B = B
    else:
        pam.shm_coeff = None
        pam.B = None

    if return_odf:
        pam.odf = odf_array
    else:
        pam.odf = None

    return pam
Example 13
    def track(self):
        """
        GPU streamlines generator yielding streamlines with corresponding
        seed positions one by one.
        """
        t0 = perf_counter()

        # Load the sphere
        sphere = get_sphere('symmetric724')

        # Convert theta to cos(theta)
        max_cos_theta = np.cos(np.deg2rad(self.theta))

        cl_kernel = CLKernel('track', 'tracking', 'local_tracking.cl')

        # Set tracking parameters
        # TODO: Add relative sf_threshold parameter.
        cl_kernel.set_define('IM_X_DIM', self.sh.shape[0])
        cl_kernel.set_define('IM_Y_DIM', self.sh.shape[1])
        cl_kernel.set_define('IM_Z_DIM', self.sh.shape[2])
        cl_kernel.set_define('IM_N_COEFFS', self.sh.shape[3])
        cl_kernel.set_define('N_DIRS', len(sphere.vertices))

        cl_kernel.set_define('N_THETAS', len(self.theta))
        cl_kernel.set_define('STEP_SIZE', '{}f'.format(self.step_size))
        cl_kernel.set_define('MAX_LENGTH', self.max_strl_points)
        cl_kernel.set_define('FORWARD_ONLY',
                             'true' if self.forward_only else 'false')

        # Create CL program
        n_input_params = 7
        n_output_params = 2
        cl_manager = CLManager(cl_kernel, n_input_params, n_output_params)

        # Input buffers
        # Constant input buffers
        cl_manager.add_input_buffer(0, self.sh)
        cl_manager.add_input_buffer(1, sphere.vertices)

        sh_order = find_order_from_nb_coeff(self.sh)
        B_mat = sh_to_sf_matrix(sphere,
                                sh_order,
                                self.sh_basis,
                                return_inv=False)
        cl_manager.add_input_buffer(2, B_mat)
        cl_manager.add_input_buffer(3, self.mask.astype(np.float32))

        cl_manager.add_input_buffer(6, max_cos_theta)

        logging.debug(
            'Initialized OpenCL program in {:.2f}s.'.format(perf_counter() -
                                                            t0))

        # Generate streamlines in batches
        t0 = perf_counter()
        nb_processed_streamlines = 0
        nb_valid_streamlines = 0
        for seed_batch in self.seed_batches:
            # Generate random values for sf sampling
            # TODO: Implement random number generator directly
            #       on the GPU to generate values on-the-fly.
            rand_vals = self.rng.uniform(
                0.0, 1.0, (len(seed_batch), self.max_strl_points))

            # Update buffers
            cl_manager.add_input_buffer(4, seed_batch)
            cl_manager.add_input_buffer(5, rand_vals)

            # output streamlines buffer
            cl_manager.add_output_buffer(
                0, (len(seed_batch), self.max_strl_points, 3))
            # output streamlines length buffer
            cl_manager.add_output_buffer(1, (len(seed_batch), 1))

            # Run the kernel
            tracks, n_points = cl_manager.run((len(seed_batch), 1, 1))
            n_points = n_points.squeeze().astype(np.int16)
            for (strl, seed, n_pts) in zip(tracks, seed_batch, n_points):
                if n_pts >= self.min_strl_points:
                    strl = strl[:n_pts]
                    nb_valid_streamlines += 1

                    # output is yielded so that we can use lazy tractogram.
                    yield strl, seed

            # per-batch logging information
            nb_processed_streamlines += len(seed_batch)
            logging.info('{0:>8}/{1} streamlines generated'.format(
                nb_processed_streamlines, self.n_seeds))

        logging.info('Tracked {0} streamlines in {1:.2f}s.'.format(
            nb_valid_streamlines,
            perf_counter() - t0))
Example 14
def peaks_from_model(model, data, sphere, relative_peak_threshold,
                     min_separation_angle, mask=None, return_odf=False,
                     return_sh=True, gfa_thr=0, normalize_peaks=False,
                     sh_order=8, sh_basis_type=None, npeaks=5, B=None,
                     invB=None, parallel=False, nbr_processes=None):
    """Fit the model to data and computes peaks and metrics

    Parameters
    ----------
    model : a model instance
        `model` will be used to fit the data.
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    relative_peak_threshold : float
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
    min_separation_angle : float in [0, 90]
        The minimum distance between directions. If two peaks are too close
        only the larger of the two is returned.
    mask : array, optional
        If `mask` is provided, voxels that are False in `mask` are skipped and
        no peaks are returned.
    return_odf : bool
        If True, the odfs are returned.
    return_sh : bool
        If True, the odf as spherical harmonics coefficients is returned
    gfa_thr : float
        Voxels with gfa less than `gfa_thr` are skipped, no peaks are returned.
    normalize_peaks : bool
        If true, all peak values are calculated relative to `max(odf)`.
    sh_order : int, optional
        Maximum SH order in the SH fit.  For `sh_order`, there will be
        ``(sh_order + 1) * (sh_order + 2) / 2`` SH coefficients (default 8).
    sh_basis_type : {None, 'mrtrix', 'fibernav'}
        ``None`` for the default dipy basis which is the fibernav basis,
        ``mrtrix`` for the MRtrix basis, and
        ``fibernav`` for the FiberNavigator basis
    sh_smooth : float, optional
        Lambda-regularization in the SH fit (default 0.0).
    npeaks : int
        Maximum number of peaks found (default 5 peaks).
    B : ndarray, optional
        Matrix that transforms spherical harmonics to spherical function
        ``sf = np.dot(sh, B)``.
    invB : ndarray, optional
        Inverse of B.
    parallel: bool
        If True, use multiprocessing to compute peaks and metric
        (default False). Temporary files are saved in the default temporary
        directory of the system. It can be changed using ``import tempfile``
        and ``tempfile.tempdir = '/path/to/tempdir'``.
    nbr_processes: int
        If `parallel` is True, the number of subprocesses to use
        (default multiprocessing.cpu_count()).

    Returns
    -------
    pam : PeaksAndMetrics
        An object with ``gfa``, ``peak_directions``, ``peak_values``,
        ``peak_indices``, ``odf``, ``shm_coeffs`` as attributes
    """
    if return_sh and (B is None or invB is None):
        B, invB = sh_to_sf_matrix(
            sphere, sh_order, sh_basis_type, return_inv=True)

    if parallel:
        # It is mandatory to provide B and invB to the parallel function.
        # Otherwise, a call to np.linalg.pinv is made in a subprocess and
        # makes it timeout on some system.
        # see https://github.com/nipy/dipy/issues/253 for details
        return _peaks_from_model_parallel(model,
                                          data, sphere,
                                          relative_peak_threshold,
                                          min_separation_angle,
                                          mask, return_odf,
                                          return_sh,
                                          gfa_thr,
                                          normalize_peaks,
                                          sh_order,
                                          sh_basis_type,
                                          npeaks,
                                          B,
                                          invB,
                                          nbr_processes)

    shape = data.shape[:-1]
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as data.")

    gfa_array = np.zeros(shape)
    qa_array = np.zeros((shape + (npeaks,)))

    peak_dirs = np.zeros((shape + (npeaks, 3)))
    peak_values = np.zeros((shape + (npeaks,)))
    peak_indices = np.zeros((shape + (npeaks,)), dtype='int')
    peak_indices.fill(-1)

    if return_sh:
        n_shm_coeff = (sh_order + 2) * (sh_order + 1) // 2
        shm_coeff = np.zeros((shape + (n_shm_coeff,)))

    if return_odf:
        odf_array = np.zeros((shape + (len(sphere.vertices),)))

    global_max = -np.inf
    for idx in ndindex(shape):
        if not mask[idx]:
            continue

        odf = model.fit(data[idx]).odf(sphere)

        if return_sh:
            shm_coeff[idx] = np.dot(odf, invB)

        if return_odf:
            odf_array[idx] = odf

        gfa_array[idx] = gfa(odf)
        if gfa_array[idx] < gfa_thr:
            global_max = max(global_max, odf.max())
            continue

        # Get peaks of odf
        direction, pk, ind = peak_directions(odf, sphere,
                                             relative_peak_threshold,
                                             min_separation_angle)

        # Calculate peak metrics
        if pk.shape[0] != 0:
            global_max = max(global_max, pk[0])

            n = min(npeaks, pk.shape[0])
            qa_array[idx][:n] = pk[:n] - odf.min()

            peak_dirs[idx][:n] = direction[:n]
            peak_indices[idx][:n] = ind[:n]
            peak_values[idx][:n] = pk[:n]

            if normalize_peaks:
                peak_values[idx][:n] /= pk[0]
                peak_dirs[idx] *= peak_values[idx][:, None]

    qa_array /= global_max

    return _pam_from_attrs(PeaksAndMetrics,
                           sphere,
                           peak_indices,
                           peak_values,
                           peak_dirs,
                           gfa_array,
                           qa_array,
                           shm_coeff if return_sh else None,
                           B if return_sh else None,
                           odf_array if return_odf else None)
Example 15
def bingham_fit_sh(sh,
                   max_lobes=5,
                   abs_th=0.,
                   rel_th=0.,
                   min_sep_angle=25.,
                   max_fit_angle=15,
                   mask=None,
                   nbr_processes=None):
    """
    Approximate SH field by fitting Bingham distributions to
    up to ``max_lobes`` lobes per voxel, sorted in descending order
    by the amplitude of their peak direction.

    Parameters
    ----------
    sh: ndarray (X, Y, Z, ncoeffs)
        SH coefficients array.
    max_lobes: unsigned int, optional
        Maximum number of lobes to fit per voxel.
    abs_th: float, optional
        Absolute threshold for peak extraction.
    rel_th: float, optional
        Relative threshold for peak extraction in the range [0, 1].
    min_sep_angle: float, optional
        Minimum separation angle between two adjacent peaks in degrees.
    max_fit_angle: float, optional
        The maximum distance in degrees around a peak direction for
        fitting the Bingham function.
    mask: ndarray (X, Y, Z), optional
        Mask to apply to the data.
    nbr_processes: unsigned int, optional
        The number of processes to use. If None, then
        ``multiprocessing.cpu_count()`` processes are used.

    Returns
    -------
    out: ndarray (X, Y, Z, max_lobes*9)
        Bingham functions array.
    """
    order, full_basis = get_sh_order_and_fullness(sh.shape[-1])
    shape = sh.shape

    sphere = get_sphere('symmetric724').subdivide(2)
    B_mat = sh_to_sf_matrix(sphere,
                            order,
                            full_basis=full_basis,
                            return_inv=False)

    nbr_processes = multiprocessing.cpu_count()\
        if nbr_processes is None \
        or nbr_processes < 0 \
        or nbr_processes > multiprocessing.cpu_count() \
        else nbr_processes

    if mask is not None:
        sh = sh[mask]
    else:
        sh = sh.reshape((-1, shape[-1]))

    sh = np.array_split(sh, nbr_processes)
    pool = multiprocessing.Pool(nbr_processes)
    out = pool.map(
        _bingham_fit_sh_chunk,
        zip(sh, itertools.repeat(B_mat), itertools.repeat(sphere),
            itertools.repeat(abs_th), itertools.repeat(min_sep_angle),
            itertools.repeat(rel_th), itertools.repeat(max_lobes),
            itertools.repeat(max_fit_angle)))
    pool.close()
    pool.join()

    out = np.concatenate(out, axis=0)
    if mask is not None:
        bingham = np.zeros(shape[:3] + (max_lobes, NB_PARAMS))
        bingham[mask] = out
        return bingham

    out = out.reshape(shape[:3] + (max_lobes, NB_PARAMS))
    return out
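An illustrative call on a tiny fODF volume; the thresholds are placeholders, and NB_PARAMS and the chunk worker are assumed to be defined in the same module as above:

import numpy as np

sh = np.zeros((3, 3, 3, 45))   # symmetric SH basis, order 8
sh[..., 0] = 1.0

bingham = bingham_fit_sh(sh, max_lobes=3, abs_th=0., rel_th=0.1,
                         min_sep_angle=25., nbr_processes=1)
# With mask=None the result is reshaped to (3, 3, 3, max_lobes, NB_PARAMS).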
Example 16
def test_odf_slicer(interactive=False):
    # Prepare our data
    sphere = get_sphere('repulsion100')
    shape = (11, 11, 11, sphere.vertices.shape[0])
    odfs = np.ones(shape)

    affine = np.array([[2.0, 0.0, 0.0, 3.0],
                       [0.0, 2.0, 0.0, 3.0],
                       [0.0, 0.0, 2.0, 1.0],
                       [0.0, 0.0, 0.0, 1.0]])
    mask = np.ones(odfs.shape[:3], bool)
    mask[:4, :4, :4] = False

    # Test that affine and mask work
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, affine=affine, mask=mask,
                                 scale=.25, colormap='blues')

    k = 2
    I, J, _ = odfs.shape[:3]
    odf_actor.display_extent(0, I - 1, 0, J - 1, k, k)

    scene = window.Scene()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()

    if interactive:
        window.show(scene, reset_camera=False)

    arr = window.snapshot(scene)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 11 * 11 - 16)

    # Test that global colormap works
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask, scale=.25,
                                 colormap='blues', norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that the most basic odf_slicer instantiation works
    odf_actor = actor.odf_slicer(odfs)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that odf_slicer.display works properly
    scene.clear()
    scene.add(odf_actor)
    scene.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        if interactive:
            window.show(scene)
    for j in range(11):
        odf_actor.display(None, j, None)
        if interactive:
            window.show(scene)

    # With mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=mask,
                                 scale=.25, colormap='blues',
                                 norm=False, global_cm=True)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # global_cm=True with colormap=None should raise an error
    npt.assert_raises(IOError, actor.odf_slicer, odfs, sphere=sphere,
                      mask=None, scale=.25, colormap=None, norm=False,
                      global_cm=True)

    # Dimension mismatch between sphere vertices and number
    # of SF coefficients will raise an error.
    npt.assert_raises(ValueError, actor.odf_slicer, odfs, mask=None,
                      sphere=get_sphere('repulsion200'), scale=.25)

    # colormap=None and global_cm=False results in directionally encoded colors
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, mask=None,
                                 scale=.25, colormap=None,
                                 norm=False, global_cm=False)
    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that SH coefficients input works
    B = sh_to_sf_matrix(sphere, sh_order=4, return_inv=False)
    odfs = np.zeros((11, 11, 11, B.shape[0]))
    odfs[..., 0] = 1.0
    odf_actor = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B)

    scene.clear()
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Dimension mismatch between sphere vertices and dimension of
    # B matrix will raise an error.
    npt.assert_raises(ValueError, actor.odf_slicer, odfs, mask=None,
                      sphere=get_sphere('repulsion200'))

    # Test that constant colormap color works. Also test that sphere
    # normals are oriented correctly. Will show purple spheres with
    # a white contour.
    odf_contour = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B,
                                   colormap=(255, 255, 255))
    odf_contour.GetProperty().SetAmbient(1.0)
    odf_contour.GetProperty().SetFrontfaceCulling(True)

    odf_actor = actor.odf_slicer(odfs, sphere=sphere, B_matrix=B,
                                 colormap=(255, 0, 255), scale=0.4)
    scene.clear()
    scene.add(odf_contour)
    scene.add(odf_actor)
    scene.reset_camera()
    scene.reset_clipping_range()
    if interactive:
        window.show(scene)

    # Test that we can change the sphere on an active actor
    new_sphere = get_sphere('symmetric362')
    new_B = sh_to_sf_matrix(new_sphere, sh_order=4, return_inv=False)
    odf_actor.update_sphere(new_sphere.vertices, new_sphere.faces, new_B)
    if interactive:
        window.show(scene)

    del odf_actor
    del odfs
Example 17
def angle_aware_bilateral_filtering_gpu(in_sh,
                                        sh_order=8,
                                        sh_basis='descoteaux07',
                                        in_full_basis=False,
                                        sphere_str='repulsion724',
                                        sigma_spatial=1.0,
                                        sigma_angular=1.0,
                                        sigma_range=0.5):
    """
    Angle-aware bilateral filtering using OpenCL for GPU computing.

    Parameters
    ----------
    in_sh: ndarray (x, y, z, ncoeffs)
        Input SH volume.
    sh_order: int, optional
        Maximum SH order of input volume.
    sh_basis: str, optional
        Name of SH basis used.
    in_full_basis: bool, optional
        True if input is expressed in full SH basis.
    sphere_str: str, optional
        Name of the DIPY sphere to use for sh to sf projection.
    sigma_spatial: float, optional
        Standard deviation for spatial filter.
    sigma_angular: float, optional
        Standard deviation for angular filter.
    sigma_range: float, optional
        Standard deviation for range filter.

    Returns
    -------
    out_sh: ndarray (x, y, z, ncoeffs)
        Output SH coefficient array in full SH basis.
    """
    s_weights = _get_spatial_weights(sigma_spatial)
    h_half_width = len(s_weights) // 2

    sphere = get_sphere(sphere_str)
    a_weights = _get_angular_weights(s_weights.shape, sphere, sigma_angular)

    h_weights = s_weights[..., None] * a_weights
    h_weights /= np.sum(h_weights, axis=(0, 1, 2))

    sh_to_sf_mat = sh_to_sf_matrix(sphere,
                                   sh_order=sh_order,
                                   basis_type=sh_basis,
                                   full_basis=in_full_basis,
                                   return_inv=False)

    _, sf_to_sh_mat = sh_to_sf_matrix(sphere,
                                      sh_order=sh_order,
                                      basis_type=sh_basis,
                                      full_basis=True,
                                      return_inv=True)

    out_n_coeffs = sf_to_sh_mat.shape[1]
    n_dirs = len(sphere.vertices)
    volume_shape = in_sh.shape
    in_sh = np.pad(in_sh,
                   ((h_half_width, h_half_width), (h_half_width, h_half_width),
                    (h_half_width, h_half_width), (0, 0)))

    cl_kernel = CLKernel('correlate', 'denoise', 'angle_aware_bilateral.cl')
    cl_kernel.set_define('IM_X_DIM', volume_shape[0])
    cl_kernel.set_define('IM_Y_DIM', volume_shape[1])
    cl_kernel.set_define('IM_Z_DIM', volume_shape[2])

    cl_kernel.set_define('H_X_DIM', h_weights.shape[0])
    cl_kernel.set_define('H_Y_DIM', h_weights.shape[1])
    cl_kernel.set_define('H_Z_DIM', h_weights.shape[2])

    cl_kernel.set_define('SIGMA_RANGE', float(sigma_range))

    cl_kernel.set_define('IN_N_COEFFS', volume_shape[-1])
    cl_kernel.set_define('OUT_N_COEFFS', out_n_coeffs)
    cl_kernel.set_define('N_DIRS', n_dirs)

    cl_manager = CLManager(cl_kernel)
    cl_manager.add_input_buffer(in_sh)
    cl_manager.add_input_buffer(h_weights)
    cl_manager.add_input_buffer(sh_to_sf_mat)
    cl_manager.add_input_buffer(sf_to_sh_mat)

    cl_manager.add_output_buffer(volume_shape[:3] + (out_n_coeffs, ),
                                 np.float32)

    outputs = cl_manager.run(volume_shape[:3])
    return outputs[0]
Example 18
def angle_aware_bilateral_filtering_cpu(in_sh,
                                        sh_order=8,
                                        sh_basis='descoteaux07',
                                        in_full_basis=False,
                                        sphere_str='repulsion724',
                                        sigma_spatial=1.0,
                                        sigma_angular=1.0,
                                        sigma_range=0.5,
                                        nbr_processes=1):
    """
    Angle-aware bilateral filtering on the CPU
    (optionally using multiple threads).

    Parameters
    ----------
    in_sh: ndarray (x, y, z, ncoeffs)
        Input SH volume.
    sh_order: int, optional
        Maximum SH order of input volume.
    sh_basis: str, optional
        Name of SH basis used.
    in_full_basis: bool, optional
        True if input is expressed in full SH basis.
    sphere_str: str, optional
        Name of the DIPY sphere to use for sh to sf projection.
    sigma_spatial: float, optional
        Standard deviation for spatial filter.
    sigma_angular: float, optional
        Standard deviation for angular filter.
    sigma_range: float, optional
        Standard deviation for range filter.
    nbr_processes: int, optional
        Number of processes to use.

    Returns
    -------
    out_sh: ndarray (x, y, z, ncoeffs)
        Output SH coefficient array in full SH basis.
    """
    # Load the sphere used for projection of SH
    sphere = get_sphere(sphere_str)

    # Normalized filter for each sf direction
    s_weights = _get_spatial_weights(sigma_spatial)
    a_weights = _get_angular_weights(s_weights.shape, sphere, sigma_angular)

    weights = s_weights[..., None] * a_weights
    weights /= np.sum(weights, axis=(0, 1, 2))

    nb_sf = len(sphere.vertices)
    B = sh_to_sf_matrix(sphere,
                        sh_order=sh_order,
                        basis_type=sh_basis,
                        return_inv=False,
                        full_basis=in_full_basis)

    if nbr_processes > 1:
        # Apply the filter to each sphere vertex in parallel
        pool = multiprocessing.Pool(nbr_processes)

        # divide the sphere directions among the processes
        base_chunk_size = int(nb_sf / nbr_processes + 0.5)
        first_ids = np.arange(0, nb_sf, base_chunk_size)
        residuals = nb_sf - first_ids
        chunk_sizes = np.where(residuals < base_chunk_size, residuals,
                               base_chunk_size)
        res = pool.map(
            _process_subset_directions,
            zip(itertools.repeat(weights), itertools.repeat(in_sh),
                first_ids, chunk_sizes, itertools.repeat(B),
                itertools.repeat(sigma_range)))
        pool.close()
        pool.join()

        # Patch chunks together.
        mean_sf = np.concatenate(res, axis=-1)
    else:
        args = [weights, in_sh, 0, nb_sf, B, sigma_range]
        mean_sf = _process_subset_directions(args)

    # Convert back to SH coefficients
    _, B_inv = sh_to_sf_matrix(sphere,
                               sh_order=sh_order,
                               basis_type=sh_basis,
                               full_basis=True)
    out_sh = np.array([np.dot(i, B_inv) for i in mean_sf], dtype=in_sh.dtype)
    # By default, return only asymmetric SH
    return out_sh
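An illustrative single-process call on a small random volume in the symmetric order-8 basis; as in the function above, the output comes back in the full order-8 basis (81 coefficients per voxel):

import numpy as np

rng = np.random.default_rng(0)
in_sh = rng.normal(size=(5, 5, 5, 45)).astype(np.float32)   # order 8, symmetric

out_sh = angle_aware_bilateral_filtering_cpu(in_sh, sh_order=8,
                                             sh_basis='descoteaux07',
                                             in_full_basis=False,
                                             sphere_str='repulsion724',
                                             nbr_processes=1)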
Example 19
def create_odf_slicer(sh_fodf, orientation, slice_index, mask, sphere,
                      nb_subdivide, sh_order, sh_basis, full_basis, scale,
                      radial_scale, norm, colormap):
    """
    Create an ODF slicer actor displaying a fODF slice. The input volume is a
    3-dimensional grid containing the SH coefficients of the fODF for each
    voxel, with the grid having a size of 1 along the axis corresponding to
    the selected orientation.

    Parameters
    ----------
    sh_fodf : np.ndarray
        Spherical harmonics of fODF data.
    orientation : str
        Name of the axis to visualize. Choices are axial, coronal and sagittal.
    slice_index : int
        Index of the slice to visualize along the chosen orientation.
    mask : np.ndarray, optional
        Only the data inside the mask will be displayed. Defaults to None.
    sphere: DIPY Sphere
        Sphere used for visualization.
    nb_subdivide : int
        Number of subdivisions for given sphere. If None, uses the given sphere
        as is.
    sh_order : int
        Maximum spherical harmonics order.
    sh_basis : str
        Type of basis for the spherical harmonics.
    full_basis : bool
        Boolean indicating if the basis is full or not.
    scale : float
        Scaling factor for FODF.
    radial_scale : bool
        If True, enables radial scale for ODF slicer.
    norm : bool
        If True, enables normalization of ODF slicer.
    colormap : str
        Colormap for the ODF slicer. If None, an RGB colormap is used.

    Returns
    -------
    odf_actor : actor.odf_slicer
        Fury object containing the odf information.
    """
    # Subdivide the sphere if nb_subdivide is provided
    if nb_subdivide is not None:
        sphere = sphere.subdivide(nb_subdivide)

    # SH coefficients to SF coefficients matrix
    B_mat = sh_to_sf_matrix(sphere,
                            sh_order,
                            sh_basis,
                            full_basis,
                            return_inv=False)

    odf_actor = actor.odf_slicer(sh_fodf,
                                 mask=mask,
                                 norm=norm,
                                 radial_scale=radial_scale,
                                 sphere=sphere,
                                 colormap=colormap,
                                 scale=scale,
                                 B_matrix=B_mat)
    set_display_extent(odf_actor, orientation, sh_fodf.shape[:3], slice_index)

    return odf_actor
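
# Small standalone sketch (not part of the snippet above) of the subdivision
# step used in create_odf_slicer: Sphere.subdivide() splits every triangular
# face into four, so each call yields a denser sphere and smoother ODF glyphs.
from dipy.data import get_sphere

sphere = get_sphere('symmetric724')
print(len(sphere.vertices))                      # 724 vertices
finer = sphere.subdivide(1)
print(len(finer.vertices), len(finer.faces))     # denser mesh, 4x as many faces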
Esempio n. 20
0
def maps_from_sh(shm_coeff,
                 peak_dirs,
                 peak_values,
                 peak_indices,
                 sphere,
                 mask=None,
                 gfa_thr=0,
                 sh_basis_type='descoteaux07',
                 nbr_processes=None):
    """Computes maps from given SH coefficients and peaks

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    peak_dirs : np.ndarray
        Peak directions
    peak_values : np.ndarray
        Peak values
    peak_indices : np.ndarray
        Peak indices
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    gfa_thr : float, optional
        Voxels with gfa less than `gfa_thr` are skipped for all metrics, except
        `rgb_map`.
        Default: 0
    sh_basis_type : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    tuple of np.ndarray
        nufo_map, afd_max, afd_sum, rgb_map, gfa, qa
    """
    sh_order = order_from_ncoef(shm_coeff.shape[-1])
    B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type)

    data_shape = shm_coeff.shape
    if mask is None:
        mask = np.sum(shm_coeff, axis=3).astype(bool)

    if nbr_processes is None or nbr_processes < 0:
        nbr_processes = multiprocessing.cpu_count()

    npeaks = peak_values.shape[3]
    # Ravel the first 3 dimensions while keeping the 4th intact, like a list of
    # per-voxel 1D coefficient vectors. Then split it into nbr_processes chunks.
    shm_coeff = shm_coeff[mask].reshape(
        (np.count_nonzero(mask), data_shape[3]))
    peak_dirs = peak_dirs[mask].reshape((np.count_nonzero(mask), npeaks, 3))
    peak_values = peak_values[mask].reshape((np.count_nonzero(mask), npeaks))
    peak_indices = peak_indices[mask].reshape((np.count_nonzero(mask), npeaks))
    shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes)
    peak_dirs_chunks = np.array_split(peak_dirs, nbr_processes)
    peak_values_chunks = np.array_split(peak_values, nbr_processes)
    peak_indices_chunks = np.array_split(peak_indices, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        maps_from_sh_parallel,
        zip(shm_coeff_chunks, peak_dirs_chunks, peak_values_chunks,
            peak_indices_chunks, itertools.repeat(B), itertools.repeat(sphere),
            itertools.repeat(gfa_thr), np.arange(len(shm_coeff_chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunks into the original shape.
    nufo_map_array = np.zeros(data_shape[0:3])
    afd_max_array = np.zeros(data_shape[0:3])
    afd_sum_array = np.zeros(data_shape[0:3])
    rgb_map_array = np.zeros(data_shape[0:3] + (3, ))
    gfa_map_array = np.zeros(data_shape[0:3])
    qa_map_array = np.zeros(data_shape[0:3] + (npeaks, ))

    # Temporary arrays are necessary because fancy indexing (array[mask])
    # returns a copy; assigning through it would not modify the original array.
    tmp_nufo_map_array = np.zeros((np.count_nonzero(mask)))
    tmp_afd_max_array = np.zeros((np.count_nonzero(mask)))
    tmp_afd_sum_array = np.zeros((np.count_nonzero(mask)))
    tmp_rgb_map_array = np.zeros((np.count_nonzero(mask), 3))
    tmp_gfa_map_array = np.zeros((np.count_nonzero(mask)))
    tmp_qa_map_array = np.zeros((np.count_nonzero(mask), npeaks))

    all_time_max_odf = -np.inf
    all_time_global_max = -np.inf
    for i, nufo_map, afd_max, afd_sum, rgb_map, gfa_map, qa_map, \
        max_odf, global_max in results:
        all_time_max_odf = max(all_time_max_odf, max_odf)
        all_time_global_max = max(all_time_global_max, global_max)

        tmp_nufo_map_array[chunk_len[i]:chunk_len[i + 1]] = nufo_map
        tmp_afd_max_array[chunk_len[i]:chunk_len[i + 1]] = afd_max
        tmp_afd_sum_array[chunk_len[i]:chunk_len[i + 1]] = afd_sum
        tmp_rgb_map_array[chunk_len[i]:chunk_len[i + 1], :] = rgb_map
        tmp_gfa_map_array[chunk_len[i]:chunk_len[i + 1]] = gfa_map
        tmp_qa_map_array[chunk_len[i]:chunk_len[i + 1], :] = qa_map

    nufo_map_array[mask] = tmp_nufo_map_array
    afd_max_array[mask] = tmp_afd_max_array
    afd_sum_array[mask] = tmp_afd_sum_array
    rgb_map_array[mask] = tmp_rgb_map_array
    gfa_map_array[mask] = tmp_gfa_map_array
    qa_map_array[mask] = tmp_qa_map_array

    rgb_map_array /= all_time_max_odf
    rgb_map_array *= 255
    qa_map_array /= all_time_global_max

    afd_unique = np.unique(afd_max_array)
    if np.array_equal(np.array([0, 1]), afd_unique) \
            or np.array_equal(np.array([1]), afd_unique):
        logging.warning('All AFD_max values are 1. The peaks seem normalized.')

    return (nufo_map_array, afd_max_array, afd_sum_array, rgb_map_array,
            gfa_map_array, qa_map_array)
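
# Toy-sized standalone sketch (not part of the snippet above) of the
# mask/ravel/chunk/reassemble pattern used in maps_from_sh, without
# multiprocessing: voxels inside the mask are flattened to a
# (n_voxels, n_coeffs) array, split into chunks, processed, and the results
# written back into the 3D grid.
import numpy as np

data = np.random.rand(4, 4, 4, 6)                  # e.g. 6 SH coefficients/voxel
mask = np.sum(data, axis=3).astype(bool)

flat = data[mask].reshape((np.count_nonzero(mask), data.shape[3]))
chunks = np.array_split(flat, 3)
chunk_len = np.cumsum([0] + [len(c) for c in chunks])

tmp = np.zeros(np.count_nonzero(mask))
for i, chunk in enumerate(chunks):
    tmp[chunk_len[i]:chunk_len[i + 1]] = chunk.mean(axis=1)   # stand-in metric

out = np.zeros(data.shape[0:3])
out[mask] = tmp
print(out.shape)   # (4, 4, 4), zeros outside the mask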
Esempio n. 21
0
    def __init__(self,
                 dataset,
                 step_size,
                 rk_order,
                 algo,
                 basis,
                 sf_threshold,
                 sf_threshold_init,
                 theta,
                 dipy_sphere='symmetric724',
                 min_separation_angle=np.pi / 16.):
        """

        Parameters
        ----------
        dataset: scilpy.image.datasets.DataVolume
            Trackable Dataset object.
        step_size: float
            The step size for tracking.
        rk_order: int
            Order for the Runge Kutta integration.
        algo: string
            Tracking algorithm. One of 'det' (deterministic) or 'prob'
            (probabilistic).
        basis: string
            SH basis name. One of 'tournier07' or 'descoteaux07'.
        sf_threshold: float
            Threshold on spherical function (SF).
        sf_threshold_init: float
            Threshold on spherical function when initializing a new streamline.
        theta: float
            Maximum angle (radians) between two steps.
        dipy_sphere: string, optional
            Name of the DIPY sphere object to use for evaluating SH. Can't be
            None.
        min_separation_angle: float, optional
            Minimum separation angle (in radians) for peaks extraction. Used
            for deterministic tracking. A candidate direction is a maximum if
            its SF value is greater than all other SF values in its
            neighbourhood, where the neighbourhood includes all the sphere
            directions located at most `min_separation_angle` from the
            candidate direction.
        """
        super().__init__(dataset, step_size, rk_order, dipy_sphere)

        # Propagation params
        self.theta = theta
        if algo not in ['det', 'prob']:
            raise ValueError("ODFPropagator algo should be 'det' or 'prob'.")
        self.algo = algo
        self.tracking_neighbours = get_sphere_neighbours(
            self.sphere, self.theta)
        # For deterministic tracking:
        self.maxima_neighbours = get_sphere_neighbours(self.sphere,
                                                       min_separation_angle)

        # ODF params
        self.sf_threshold = sf_threshold
        self.sf_threshold_init = sf_threshold_init
        sh_order, full_basis =\
            get_sh_order_and_fullness(self.dataset.data.shape[-1])
        self.basis = basis
        self.B = sh_to_sf_matrix(self.sphere,
                                 sh_order,
                                 self.basis,
                                 smooth=0.006,
                                 return_inv=False,
                                 full_basis=full_basis)
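
# Rough standalone sketch of the idea behind get_sh_order_and_fullness (this
# is not the scilpy implementation, only an illustration): a symmetric SH
# basis of order n has (n + 1)(n + 2) / 2 coefficients, while a full
# (symmetric + antisymmetric) basis has (n + 1) ** 2.
def guess_order_and_fullness(ncoeffs):
    for order in range(0, 17, 2):
        if ncoeffs == (order + 1) * (order + 2) // 2:
            return order, False     # symmetric basis
        if ncoeffs == (order + 1) ** 2:
            return order, True      # full basis
    raise ValueError("Invalid number of SH coefficients: {}".format(ncoeffs))

print(guess_order_and_fullness(45))   # (8, False)
print(guess_order_and_fullness(81))   # (8, True)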
Esempio n. 22
0
def convert_sh_basis(shm_coeff,
                     sphere,
                     mask=None,
                     input_basis='descoteaux07',
                     nbr_processes=None):
    """Converts spherical harmonic coefficients between two bases

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    input_basis : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    shm_coeff_array : np.ndarray
        Spherical harmonic coefficients in the desired basis.
    """
    output_basis = 'descoteaux07' if input_basis == 'tournier07' else 'tournier07'

    sh_order = order_from_ncoef(shm_coeff.shape[-1])
    B_in, _ = sh_to_sf_matrix(sphere, sh_order, input_basis)
    _, invB_out = sh_to_sf_matrix(sphere, sh_order, output_basis)

    data_shape = shm_coeff.shape
    if mask is None:
        mask = np.sum(shm_coeff, axis=3).astype(bool)

    if nbr_processes is None or nbr_processes < 0:
        nbr_processes = multiprocessing.cpu_count()

    # Ravel the first 3 dimensions while keeping the 4th intact, like a list of
    # per-voxel 1D coefficient vectors. Then split it into nbr_processes chunks.
    shm_coeff = shm_coeff[mask].reshape(
        (np.count_nonzero(mask), data_shape[3]))
    shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        convert_sh_basis_parallel,
        zip(shm_coeff_chunks, itertools.repeat(B_in),
            itertools.repeat(invB_out), np.arange(len(shm_coeff_chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunks into the original shape.
    shm_coeff_array = np.zeros(data_shape)
    tmp_shm_coeff_array = np.zeros((np.count_nonzero(mask), data_shape[3]))
    for i, new_shm_coeff in results:
        tmp_shm_coeff_array[chunk_len[i]:chunk_len[i + 1], :] = new_shm_coeff

    shm_coeff_array[mask] = tmp_shm_coeff_array

    return shm_coeff_array
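
# Per-voxel view of the conversion performed above (standalone sketch, assuming
# a single order-8 symmetric SH vector in the 'descoteaux07' basis): evaluate
# the signal on the sphere with the input basis, then refit it with the
# pseudo-inverse of the output ('tournier07') basis.
import numpy as np
from dipy.data import get_sphere
from dipy.reconst.shm import sh_to_sf_matrix

sphere = get_sphere('symmetric724')
B_in, _ = sh_to_sf_matrix(sphere, sh_order=8, basis_type='descoteaux07')
_, invB_out = sh_to_sf_matrix(sphere, sh_order=8, basis_type='tournier07')

sh_descoteaux = np.random.rand(45)
sf = np.dot(sh_descoteaux, B_in)        # evaluate on the sphere directions
sh_tournier = np.dot(sf, invB_out)      # refit in the tournier07 basis
print(sh_tournier.shape)                # (45,)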