Example #1
def test_order_from_ncoeff():
    # Just try some out:
    for sh_order in [2, 4, 6, 8, 12, 24]:
        m, n = sph_harm_ind_list(sh_order)
        n_coef = m.shape[0]
        assert_equal(order_from_ncoef(n_coef), sh_order)

        # Try out full basis
        m_full, n_full = sph_harm_ind_list(sh_order, True)
        n_coef_full = m_full.shape[0]
        assert_equal(order_from_ncoef(n_coef_full, True), sh_order)
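
The test leans on the standard coefficient-count formulas: a symmetric basis of
order L has (L + 1)(L + 2) / 2 coefficients, while a full (even and odd) basis
has (L + 1)**2. A quick standalone sanity check of that relationship, written
as a sketch against dipy's order_from_ncoef (assumed to be the helper imported
by these examples):

from dipy.reconst.shm import order_from_ncoef

for L in (2, 4, 6, 8):
    n_sym = (L + 1) * (L + 2) // 2       # symmetric (even-order) basis
    n_full = (L + 1) ** 2                # full basis (even and odd orders)
    assert order_from_ncoef(n_sym) == L
    assert order_from_ncoef(n_full, full_basis=True) == L
    print(L, n_sym, n_full)              # e.g. 8 -> 45 symmetric, 81 full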
Example #2
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    assert_inputs_exist(parser, args.in_sh, optional=args.mask)

    # Load data
    sh_img = nib.load(args.in_sh)
    sh = sh_img.get_fdata(dtype=np.float32)
    mask = None
    if args.mask:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    # Precompute output filenames to check if they exist
    sh_order = order_from_ncoef(sh.shape[-1], full_basis=args.full_basis)
    _, order_ids = sph_harm_ind_list(sh_order, full_basis=args.full_basis)
    orders = sorted(np.unique(order_ids))
    output_fnames = ["{}{}.nii.gz".format(args.out_prefix, i) for i in orders]
    assert_outputs_exist(parser, args, output_fnames)

    # Compute RISH features
    rish, final_orders = compute_rish(sh, mask, full_basis=args.full_basis)

    # Make sure the precomputed orders match the orders returned
    assert np.all(orders == np.array(final_orders))

    # Save each RISH feature as a separate file
    for i, fname in enumerate(output_fnames):
        nib.save(nib.Nifti1Image(rish[..., i], sh_img.affine), fname)
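
To illustrate the filename logic above with hypothetical names (the real prefix
comes from --out_prefix): a 28-coefficient symmetric-basis input corresponds to
order 6, so one RISH map per even order 0, 2, 4 and 6 would be written.

import numpy as np
from dipy.reconst.shm import order_from_ncoef, sph_harm_ind_list

n_coefs = 28                                         # e.g. sh.shape[-1]
sh_order = order_from_ncoef(n_coefs, full_basis=False)
_, order_ids = sph_harm_ind_list(sh_order, full_basis=False)
orders = sorted(np.unique(order_ids))
print(["rish_{}.nii.gz".format(i) for i in orders])
# ['rish_0.nii.gz', 'rish_2.nii.gz', 'rish_4.nii.gz', 'rish_6.nii.gz']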
Example #3
    def __init__(self,
                 odf_dataset,
                 basis,
                 sf_threshold,
                 sf_threshold_init,
                 theta,
                 dipy_sphere='symmetric724'):
        self.sf_threshold = sf_threshold
        self.sf_threshold_init = sf_threshold_init
        self.theta = theta

        self.vertices = dipy.data.get_sphere(dipy_sphere).vertices
        self.dirs = np.zeros(len(self.vertices), dtype=np.ndarray)
        for i in range(len(self.vertices)):
            self.dirs[i] = TrackingDirection(self.vertices[i], i)
        self.maxima_neighbours = self.get_direction_neighbours(np.pi / 16.)
        self.tracking_neighbours = self.get_direction_neighbours(self.theta)
        self.dataset = odf_dataset
        self.basis = basis

        if 'symmetric' not in dipy_sphere:
            raise ValueError('Sphere must be symmetric. Call to '
                             'get_opposite_direction will fail.')

        sphere = dipy.data.get_sphere(dipy_sphere)
        sh_order = order_from_ncoef(self.dataset.data.shape[-1])
        self.B = sh_to_sf_matrix(sphere,
                                 sh_order,
                                 self.basis,
                                 smooth=0.006,
                                 return_inv=False)
Example #4
def sh_to_dwi(data_sh, gtab, mask=None, add_b0=True, smooth=0.006):
    sh_order = order_from_ncoef(data_sh.shape[-1])

    B, invB = get_B_matrix(gtab, sh_order, smooth=smooth)

    mini = .001
    maxi = .999

    if torch.is_tensor(data_sh):
        B = torch.FloatTensor(B).to(data_sh.device)
        data_dwi = torch.einsum("...i,ij", data_sh, B.T).clamp(mini, maxi)

        if add_b0:
            b0_like = torch.ones(*data_dwi.shape[:-1], gtab.b0s_mask.sum())
            b0_like = b0_like.to(data_dwi.device)
            data_dwi = torch.cat([b0_like, data_dwi], dim=-1)
    else:
        data_dwi = np.einsum("...i,ij", data_sh, B.T).clip(mini, maxi)

        if add_b0:
            shape = tuple(data_dwi.shape[:-1]) + (gtab.b0s_mask.sum(), )
            b0_like = np.ones(shape)
            data_dwi = np.concatenate([b0_like, data_dwi], axis=-1)

    if mask is not None:
        data_dwi *= mask

    return data_dwi
Example #5
def print_peaks(sh_signal, mask=None):
    if has_fury:
        data_small = sh_signal[:, :, 50:51]
        ren = window.Renderer()

        sh_order = order_from_ncoef(data_small.shape[-1])
        theta = default_sphere.theta
        phi = default_sphere.phi
        sh_params = SIGNAL_PARAMETERS['processing_params']['sh_params']
        basis_type = sh_params['basis_type']
        sph_harm_basis = sph_harm_lookup.get(basis_type)
        sampling_matrix, m, n = sph_harm_basis(sh_order, theta, phi)
        odfs = np.dot(data_small, sampling_matrix.T)

        odfs = np.clip(odfs, 0, np.max(odfs, -1)[..., None])
        odfs_actor = actor.odf_slicer(odfs,
                                      sphere=default_sphere,
                                      colormap='plasma',
                                      scale=0.4)
        odfs_actor.display(z=0)

        ren.add(odfs_actor)
        print('Saving illustration as csa_odfs.png')
        window.record(ren,
                      n_frames=1,
                      out_path='csa_odfs.png',
                      size=(600, 600))
        window.show(ren)
Example #6
def test_order_from_ncoeff():
    """

    """
    # Just try some out:
    for sh_order in [2, 4, 6, 8, 12, 24]:
        m, n = sph_harm_ind_list(sh_order)
        n_coef = m.shape[0]
        npt.assert_equal(order_from_ncoef(n_coef), sh_order)
Example #7
    def __init__(self, shcoeff, sphere, basis_type):
        self.shcoeff = shcoeff
        self.sphere = sphere
        sh_order = order_from_ncoef(shcoeff.shape[-1])
        try:
            basis = sph_harm_lookup[basis_type]
        except KeyError:
            raise ValueError("%s is not a known basis type." % basis_type)
        self._B, m, n = basis(sh_order, sphere.theta, sphere.phi)
Example #8
def test_order_from_ncoeff():
    """

    """
    # Just try some out:
    for sh_order in [2, 4, 6, 8, 12, 24]:
        m, n = sph_harm_ind_list(sh_order)
        n_coef = m.shape[0]
        assert_equal(order_from_ncoef(n_coef), sh_order)
Example #9
def sym_shm_sample_grad(sample, gradients):
    ncoef = sample.shape[1]
    m_gradients = gradients.shape[1]
    sh_order = int(order_from_ncoef(ncoef))
    G = sym_shm_grad(sh_order)
    G = G.reshape((2, -1, ncoef))
    x, y, z = gradients
    _, theta, phi = cart2sphere(x, y, z)
    gradsample = shm_sample(sh_order + 1, theta, phi)
    return np.einsum('jl,tlk->jtk', gradsample, G)
Example #10
def main():
    parser = _build_arg_parser()
    args = _parse_args(parser)
    data, grid_shape = _get_data_from_inputs(args)
    sph = get_sphere(args.sphere)
    sh_order = order_from_ncoef(data['fodf'].shape[-1], args.full_basis)
    if not validate_order(sh_order, data['fodf'].shape[-1], args.full_basis):
        parser.error('Invalid number of coefficients for fODF. '
                     'Use --full_basis if your input is in '
                     'full SH basis.')

    actors = []

    # Retrieve the mask if supplied
    if 'mask' in data:
        mask = data['mask']
    else:
        mask = None

    # Instantiate the ODF slicer actor
    odf_actor = create_odf_slicer(data['fodf'], mask, sph, args.sph_subdivide,
                                  sh_order, args.sh_basis, args.full_basis,
                                  args.axis_name, args.scale,
                                  not args.radial_scale_off, not args.norm_off,
                                  args.colormap)
    actors.append(odf_actor)

    # Instantiate a texture slicer actor if a background image is supplied
    if 'bg' in data:
        bg_actor = create_texture_slicer(data['bg'], args.bg_range,
                                         args.axis_name, args.bg_opacity,
                                         args.bg_offset, args.bg_interpolation)
        actors.append(bg_actor)

    # Instantiate a peaks slicer actor if peaks are supplied
    if 'peaks' in data:
        peaks_values = None
        if 'peaks_values' in data:
            peaks_values = data['peaks_values']
        else:
            peaks_values =\
                np.ones(data['peaks'].shape[:-1]) * args.peaks_length
        peaks_actor = create_peaks_slicer(data['peaks'], args.axis_name,
                                          peaks_values, mask, args.peaks_color,
                                          args.peaks_width)

        actors.append(peaks_actor)

    # Prepare and display the scene
    scene = create_scene(actors, args.axis_name, grid_shape)
    render_scene(scene, args.win_dims, args.interactor, args.output,
                 args.silent)
Example #11
def compute_asymmetry_map(sh_coeffs):
    order = order_from_ncoef(sh_coeffs.shape[-1], full_basis=True)
    _, l_list = sph_harm_ind_list(order, full_basis=True)

    sign = np.power(-1.0, l_list)
    sign = np.reshape(sign, (1, 1, 1, len(l_list)))
    sh_squared = sh_coeffs**2
    mask = sh_squared.sum(axis=-1) > 0.

    asym_map = np.zeros(sh_coeffs.shape[:-1])
    asym_map[mask] = np.sum(sh_squared * sign, axis=-1)[mask] / \
        np.sum(sh_squared, axis=-1)[mask]

    asym_map = np.sqrt(1 - asym_map**2) * mask

    return asym_map
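
Written out, the per-voxel quantity computed above from full-basis coefficients
c_{l,m} of degree l is (reconstructed from the code, not quoted from a paper):

\[
\mathrm{asym} = \sqrt{1 - \left( \frac{\sum_{l,m} (-1)^{l} c_{l,m}^{2}}
                                      {\sum_{l,m} c_{l,m}^{2}} \right)^{2}}
\]

The inner ratio is +1 or -1 when the energy sits purely in even or purely in
odd degrees (asym = 0), and 0 when even and odd energy are balanced (asym = 1).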
Example #12
    def _init_shm_coefficient(self, sh_basis_type=None):
        print("Initialising spherical harmonics")
        self.dti_model = dti.TensorModel(self.dataset.gtab, fit_method='LS')

        peaks = peaks_from_model(model=self.dti_model,
                                 data=self.dataset.dwi,
                                 sphere=self.sphere,
                                 relative_peak_threshold=.2,
                                 min_separation_angle=25,
                                 mask=self.dataset.binary_mask,
                                 npeaks=2)

        self.sh_coefficient = peaks.shm_coeff
        sh_order = order_from_ncoef(self.sh_coefficient.shape[-1])
        try:
            basis = sph_harm_lookup[sh_basis_type]
        except KeyError:
            raise ValueError("%s is not a known basis type." % sh_basis_type)

        self._B, m, n = basis(sh_order, self.sphere.theta, self.sphere.phi)
Example #13
    def __init__(self,
                 odf_dataset,
                 basis,
                 sf_threshold,
                 sf_threshold_init,
                 theta,
                 dipy_sphere='symmetric724',
                 min_separation_angle=np.pi / 16.):
        super().__init__(odf_dataset, theta, dipy_sphere)

        self.sf_threshold = sf_threshold
        self.sf_threshold_init = sf_threshold_init
        sh_order = order_from_ncoef(self.dataset.data.shape[-1])
        self.basis = basis
        self.B = sh_to_sf_matrix(self.sphere,
                                 sh_order,
                                 self.basis,
                                 smooth=0.006,
                                 return_inv=False)

        # For deterministic tracking:
        self.maxima_neighbours = self._get_sphere_neighbours(
            min_separation_angle)
Example #14
def csd_predict(sh_coeff, gtab, response=None, S0=1, R=None):
    """
    Compute a signal prediction given spherical harmonic coefficients and
    (optionally) a response function for the provided GradientTable class
    instance

    Parameters
    ----------
    sh_coeff : ndarray
       Spherical harmonic coefficients

    gtab : GradientTable class instance

    response : tuple
        A tuple with two elements. The first is the eigenvalues as a (3,)
        ndarray and the second is the signal value for the response
        function without diffusion weighting.
        Default: (np.array([0.0015, 0.0003, 0.0003]), 1)

    S0 : ndarray or float
        The non-diffusion-weighted signal value.

    R : ndarray
        Optionally, provide an R matrix. If not provided, it is calculated
        from the gtab, response function, etc.

    Returns
    -------
    pred_sig : ndarray
        The signal predicted from the provided SH coefficients for a
        measurement with the provided GradientTable. The last dimension of the
        resulting array is the same as the number of bvals/bvecs in the
        GradientTable. The first dimensions have shape: `sh_coeff.shape[:-1]`.
    """
    n_coeff = sh_coeff.shape[-1]
    sh_order = order_from_ncoef(n_coeff)
    x, y, z = gtab.gradients[~gtab.b0s_mask].T
    r, theta, phi = cart2sphere(x, y, z)
    SH_basis, m, n = real_sym_sh_basis(sh_order, theta, phi)
    if R is None:
        # for the gradient sphere
        B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])

        if response is None:
            response = (np.array([0.0015, 0.0003, 0.0003]), 1)

        S_r = estimate_response(gtab, response[0], response[1])
        r_sh = np.linalg.lstsq(B_dwi, S_r[~gtab.b0s_mask])[0]
        r_rh = sh_to_rh(r_sh, m, n)
        R = forward_sdeconv_mat(r_rh, n)

    predict_matrix = np.dot(SH_basis, R)

    if np.iterable(S0):
        # If it's an array, we need to give it one more dimension:
        S0 = S0[..., None]

    # This is the key operation: convolve and multiply by S0:
    pre_pred_sig = S0 * np.dot(predict_matrix, sh_coeff)

    # Now put everything in its right place:
    pred_sig = np.zeros(pre_pred_sig.shape[:-1] + (gtab.bvals.shape[0],))
    pred_sig[..., ~gtab.b0s_mask] = pre_pred_sig
    pred_sig[..., gtab.b0s_mask] = S0

    return pred_sig
Example #15
def convert_sh_to_sf(shm_coeff,
                     sphere,
                     mask=None,
                     dtype="float32",
                     input_basis='descoteaux07',
                     input_full_basis=False,
                     nbr_processes=multiprocessing.cpu_count()):
    """Converts spherical harmonic coefficients to an SF sphere

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    dtype : str
        Datatype to use for computation and output array.
        Either `float32` or `float64`. Default: `float32`
    input_basis : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    input_full_basis : bool
        If True, use a full SH basis (even and odd orders) for the input SH
        coefficients.
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    sf_array : np.ndarray
        Spherical function (SF) values sampled on `sphere`, with one value
        per sphere vertex in the last dimension.
    """
    assert dtype in ["float32", "float64"], "Only `float32` and `float64` " \
                                            "should be used."

    sh_order = order_from_ncoef(shm_coeff.shape[-1],
                                full_basis=input_full_basis)
    B_in, _ = sh_to_sf_matrix(sphere,
                              sh_order,
                              basis_type=input_basis,
                              full_basis=input_full_basis)
    B_in = B_in.astype(dtype)

    data_shape = shm_coeff.shape
    if mask is None:
        mask = np.sum(shm_coeff, axis=3).astype(bool)

    # Ravel the first 3 dimensions while keeping the 4th intact, like a list
    # of 1D signals (one per voxel). Then split it into nbr_processes chunks.
    shm_coeff = shm_coeff[mask].reshape(
        (np.count_nonzero(mask), data_shape[3]))
    shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        convert_sh_to_sf_parallel,
        zip(shm_coeff_chunks, itertools.repeat(B_in),
            itertools.repeat(len(sphere.vertices)),
            np.arange(len(shm_coeff_chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunks into an array of the original shape.
    new_shape = data_shape[:3] + (len(sphere.vertices), )
    sf_array = np.zeros(new_shape, dtype=dtype)
    tmp_sf_array = np.zeros((np.count_nonzero(mask), new_shape[3]),
                            dtype=dtype)
    for i, new_sf in results:
        tmp_sf_array[chunk_len[i]:chunk_len[i + 1], :] = new_sf

    sf_array[mask] = tmp_sf_array

    return sf_array
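
Per voxel, the conversion above is simply a projection of the SH coefficients
onto the sphere through B_in; the chunked work itself is delegated to
convert_sh_to_sf_parallel, which is not shown here. A minimal single-voxel
sketch of that projection using dipy directly (an illustration, not the
parallel helper):

import numpy as np
from dipy.data import get_sphere
from dipy.reconst.shm import sh_to_sf_matrix

sphere = get_sphere('symmetric724')
sh_voxel = np.random.rand(15).astype("float32")      # order 4, symmetric basis
B_in, _ = sh_to_sf_matrix(sphere, 4, basis_type='descoteaux07')
sf_voxel = np.dot(sh_voxel, B_in.astype("float32"))  # one amplitude per vertex
print(sf_voxel.shape)                                # (724,)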
Example #16
def convert_sh_basis(shm_coeff,
                     sphere,
                     mask=None,
                     input_basis='descoteaux07',
                     nbr_processes=None):
    """Converts spherical harmonic coefficients between two bases

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    input_basis : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    shm_coeff_array : np.ndarray
        Spherical harmonic coefficients in the desired basis.
    """
    output_basis = 'descoteaux07' if input_basis == 'tournier07' else 'tournier07'

    sh_order = order_from_ncoef(shm_coeff.shape[-1])
    B_in, _ = sh_to_sf_matrix(sphere, sh_order, input_basis)
    _, invB_out = sh_to_sf_matrix(sphere, sh_order, output_basis)

    data_shape = shm_coeff.shape
    if mask is None:
        mask = np.sum(shm_coeff, axis=3).astype(bool)

    if nbr_processes is None or nbr_processes < 0:
        nbr_processes = multiprocessing.cpu_count()

    # Ravel the first 3 dimensions while keeping the 4th intact, like a list
    # of 1D signals (one per voxel). Then split it into nbr_processes chunks.
    shm_coeff = shm_coeff[mask].reshape(
        (np.count_nonzero(mask), data_shape[3]))
    shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        convert_sh_basis_parallel,
        zip(shm_coeff_chunks, itertools.repeat(B_in),
            itertools.repeat(invB_out), np.arange(len(shm_coeff_chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunks into an array of the original shape.
    shm_coeff_array = np.zeros(data_shape)
    tmp_shm_coeff_array = np.zeros((np.count_nonzero(mask), data_shape[3]))
    for i, new_shm_coeff in results:
        tmp_shm_coeff_array[chunk_len[i]:chunk_len[i + 1], :] = new_shm_coeff

    shm_coeff_array[mask] = tmp_shm_coeff_array

    return shm_coeff_array
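
For a single voxel, the conversion amounts to evaluating the coefficients on
the sphere in the input basis and refitting them in the output basis; the
actual per-chunk computation lives in convert_sh_basis_parallel (not shown).
A rough dipy-only sketch of that idea:

import numpy as np
from dipy.data import get_sphere
from dipy.reconst.shm import sh_to_sf_matrix

sphere = get_sphere('symmetric724')
sh_in = np.random.rand(28)                              # order 6, descoteaux07
B_in, _ = sh_to_sf_matrix(sphere, 6, 'descoteaux07')    # SH -> SF (input basis)
_, invB_out = sh_to_sf_matrix(sphere, 6, 'tournier07')  # SF -> SH (output basis)
sh_out = np.dot(np.dot(sh_in, B_in), invB_out)
print(sh_out.shape)                                     # (28,)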
Example #17
def maps_from_sh(shm_coeff,
                 peak_dirs,
                 peak_values,
                 peak_indices,
                 sphere,
                 mask=None,
                 gfa_thr=0,
                 sh_basis_type='descoteaux07',
                 nbr_processes=None):
    """Computes maps from given SH coefficients and peaks

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    peak_dirs : np.ndarray
        Peak directions
    peak_values : np.ndarray
        Peak values
    peak_indices : np.ndarray
        Peak indices
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    gfa_thr : float, optional
        Voxels with gfa less than `gfa_thr` are skipped for all metrics, except
        `rgb_map`.
        Default: 0
    sh_basis_type : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    tuple of np.ndarray
        nufo_map, afd_max, afd_sum, rgb_map, gfa, qa
    """
    sh_order = order_from_ncoef(shm_coeff.shape[-1])
    B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type)

    data_shape = shm_coeff.shape
    if mask is None:
        mask = np.sum(shm_coeff, axis=3).astype(bool)

    if nbr_processes is None or nbr_processes < 0:
        nbr_processes = multiprocessing.cpu_count()

    npeaks = peak_values.shape[3]
    # Ravel the first 3 dimensions while keeping the 4th intact, like a list
    # of 1D signals (one per voxel). Then split it into nbr_processes chunks.
    shm_coeff = shm_coeff[mask].reshape(
        (np.count_nonzero(mask), data_shape[3]))
    peak_dirs = peak_dirs[mask].reshape((np.count_nonzero(mask), npeaks, 3))
    peak_values = peak_values[mask].reshape((np.count_nonzero(mask), npeaks))
    peak_indices = peak_indices[mask].reshape((np.count_nonzero(mask), npeaks))
    shm_coeff_chunks = np.array_split(shm_coeff, nbr_processes)
    peak_dirs_chunks = np.array_split(peak_dirs, nbr_processes)
    peak_values_chunks = np.array_split(peak_values, nbr_processes)
    peak_indices_chunks = np.array_split(peak_indices, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in shm_coeff_chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        maps_from_sh_parallel,
        zip(shm_coeff_chunks, peak_dirs_chunks, peak_values_chunks,
            peak_indices_chunks, itertools.repeat(B), itertools.repeat(sphere),
            itertools.repeat(gfa_thr), np.arange(len(shm_coeff_chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunks into an array of the original shape.
    nufo_map_array = np.zeros(data_shape[0:3])
    afd_max_array = np.zeros(data_shape[0:3])
    afd_sum_array = np.zeros(data_shape[0:3])
    rgb_map_array = np.zeros(data_shape[0:3] + (3, ))
    gfa_map_array = np.zeros(data_shape[0:3])
    qa_map_array = np.zeros(data_shape[0:3] + (npeaks, ))

    # tmp arrays are necessary to avoid inserting data into the returned
    # variables rather than the original arrays
    tmp_nufo_map_array = np.zeros((np.count_nonzero(mask)))
    tmp_afd_max_array = np.zeros((np.count_nonzero(mask)))
    tmp_afd_sum_array = np.zeros((np.count_nonzero(mask)))
    tmp_rgb_map_array = np.zeros((np.count_nonzero(mask), 3))
    tmp_gfa_map_array = np.zeros((np.count_nonzero(mask)))
    tmp_qa_map_array = np.zeros((np.count_nonzero(mask), npeaks))

    all_time_max_odf = -np.inf
    all_time_global_max = -np.inf
    for i, nufo_map, afd_max, afd_sum, rgb_map, gfa_map, qa_map, \
        max_odf, global_max in results:
        all_time_max_odf = max(all_time_max_odf, max_odf)
        all_time_global_max = max(all_time_global_max, global_max)

        tmp_nufo_map_array[chunk_len[i]:chunk_len[i + 1]] = nufo_map
        tmp_afd_max_array[chunk_len[i]:chunk_len[i + 1]] = afd_max
        tmp_afd_sum_array[chunk_len[i]:chunk_len[i + 1]] = afd_sum
        tmp_rgb_map_array[chunk_len[i]:chunk_len[i + 1], :] = rgb_map
        tmp_gfa_map_array[chunk_len[i]:chunk_len[i + 1]] = gfa_map
        tmp_qa_map_array[chunk_len[i]:chunk_len[i + 1], :] = qa_map

    nufo_map_array[mask] = tmp_nufo_map_array
    afd_max_array[mask] = tmp_afd_max_array
    afd_sum_array[mask] = tmp_afd_sum_array
    rgb_map_array[mask] = tmp_rgb_map_array
    gfa_map_array[mask] = tmp_gfa_map_array
    qa_map_array[mask] = tmp_qa_map_array

    rgb_map_array /= all_time_max_odf
    rgb_map_array *= 255
    qa_map_array /= all_time_global_max

    afd_unique = np.unique(afd_max_array)
    if np.array_equal(np.array([0, 1]), afd_unique) \
            or np.array_equal(np.array([1]), afd_unique):
        logging.warning('All AFD_max values are 1. The peaks seem normalized.')

    return (nufo_map_array, afd_max_array, afd_sum_array, rgb_map_array,
            gfa_map_array, qa_map_array)
Example #18
def peaks_from_sh(shm_coeff,
                  sphere,
                  mask=None,
                  relative_peak_threshold=0.5,
                  absolute_threshold=0,
                  min_separation_angle=25,
                  normalize_peaks=False,
                  npeaks=5,
                  sh_basis_type='descoteaux07',
                  nbr_processes=None):
    """Computes peaks from given spherical harmonic coefficients

    Parameters
    ----------
    shm_coeff : np.ndarray
        Spherical harmonic coefficients
    sphere : Sphere
        The Sphere providing discrete directions for evaluation.
    mask : np.ndarray, optional
        If `mask` is provided, only the data inside the mask will be
        used for computations.
    relative_peak_threshold : float, optional
        Only return peaks greater than ``relative_peak_threshold * m`` where m
        is the largest peak.
        Default: 0.5
    absolute_threshold : float, optional
        Absolute threshold on fODF amplitude. This value should be set to
        approximately 1.5 to 2 times the maximum fODF amplitude in isotropic
        voxels (e.g., ventricles). `scil_compute_fodf_max_in_ventricles.py`
        can be used to find the maximal value.
        Default: 0
    min_separation_angle : float in [0, 90], optional
        The minimum distance between directions. If two peaks are too close
        only the larger of the two is returned.
        Default: 25
    normalize_peaks : bool, optional
        If true, all peak values are calculated relative to `max(odf)`.
    npeaks : int, optional
        Maximum number of peaks found (default 5 peaks).
    sh_basis_type : str, optional
        Type of spherical harmonic basis used for `shm_coeff`. Either
        `descoteaux07` or `tournier07`.
        Default: `descoteaux07`
    nbr_processes: int, optional
        The number of subprocesses to use.
        Default: multiprocessing.cpu_count()

    Returns
    -------
    tuple of np.ndarray
        peak_dirs, peak_values, peak_indices
    """
    sh_order = order_from_ncoef(shm_coeff.shape[-1])
    B, _ = sh_to_sf_matrix(sphere, sh_order, sh_basis_type)

    data_shape = shm_coeff.shape
    if mask is None:
        mask = np.sum(shm_coeff, axis=3).astype(bool)

    if nbr_processes is None or nbr_processes < 0:
        nbr_processes = multiprocessing.cpu_count()

    # Ravel the first 3 dimensions while keeping the 4th intact, like a list
    # of 1D signals (one per voxel). Then split it into nbr_processes chunks.
    shm_coeff = shm_coeff[mask].reshape(
        (np.count_nonzero(mask), data_shape[3]))
    chunks = np.array_split(shm_coeff, nbr_processes)
    chunk_len = np.cumsum([0] + [len(c) for c in chunks])

    pool = multiprocessing.Pool(nbr_processes)
    results = pool.map(
        peaks_from_sh_parallel,
        zip(chunks, itertools.repeat(B), itertools.repeat(sphere),
            itertools.repeat(relative_peak_threshold),
            itertools.repeat(absolute_threshold),
            itertools.repeat(min_separation_angle), itertools.repeat(npeaks),
            itertools.repeat(normalize_peaks), np.arange(len(chunks))))
    pool.close()
    pool.join()

    # Re-assemble the chunks into an array of the original shape.
    peak_dirs_array = np.zeros(data_shape[0:3] + (npeaks, 3))
    peak_values_array = np.zeros(data_shape[0:3] + (npeaks, ))
    peak_indices_array = np.zeros(data_shape[0:3] + (npeaks, ))

    # tmp arrays are necessary to avoid inserting data into the returned
    # variables rather than the original arrays
    tmp_peak_dirs_array = np.zeros((np.count_nonzero(mask), npeaks, 3))
    tmp_peak_values_array = np.zeros((np.count_nonzero(mask), npeaks))
    tmp_peak_indices_array = np.zeros((np.count_nonzero(mask), npeaks))
    for i, peak_dirs, peak_values, peak_indices in results:
        tmp_peak_dirs_array[chunk_len[i]:chunk_len[i + 1], :, :] = peak_dirs
        tmp_peak_values_array[chunk_len[i]:chunk_len[i + 1], :] = peak_values
        tmp_peak_indices_array[chunk_len[i]:chunk_len[i + 1], :] = peak_indices

    peak_dirs_array[mask] = tmp_peak_dirs_array
    peak_values_array[mask] = tmp_peak_values_array
    peak_indices_array[mask] = tmp_peak_indices_array

    return peak_dirs_array, peak_values_array, peak_indices_array
Example #19
def compute_rish(sh, mask=None, full_basis=False):
    """Compute the RISH (Rotationally Invariant Spherical Harmonics) features
    of the SH signal [1]. Each RISH feature map is the total energy of its
    associated order. Mathematically, it is the sum of the squared SH
    coefficients of the SH order.

    Parameters
    ----------
    sh : np.ndarray object
        Array of the SH coefficients
    mask: np.ndarray object, optional
        Binary mask. Only data inside the mask will be used for computation.
    full_basis: bool, optional
        True when coefficients are for a full SH basis.

    Returns
    -------
    rish : np.ndarray with shape (x,y,z,n_orders)
        The RISH features of the input SH, with one channel per SH order.
    orders : list(int)
        The SH order of each RISH feature in the last channel of `rish`.

    References
    ----------
    [1] Mirzaalian, Hengameh, et al. "Harmonizing diffusion MRI data across
        multiple sites and scanners." MICCAI 2015.
        https://scholar.harvard.edu/files/hengameh/files/miccai2015.pdf
    """
    # Guess SH order
    sh_order = order_from_ncoef(sh.shape[-1], full_basis=full_basis)

    # Get degree / order for all indices
    degree_ids, order_ids = sph_harm_ind_list(sh_order, full_basis=full_basis)

    # Apply mask to input
    if mask is not None:
        sh = sh * mask[..., None]

    # Get number of indices per order (e.g. for order 6, sym. : [1,5,9,13])
    step = 1 if full_basis else 2
    n_indices_per_order = np.bincount(order_ids)[::step]

    # Get start index of each order (e.g. for order 6 : [0,1,6,15])
    order_positions = np.concatenate([[0],
                                      np.cumsum(n_indices_per_order)])[:-1]

    # Get paired indices for np.add.reduceat, specifying where to reduce.
    # The final end boundary is omitted; np.add.reduceat reduces the last
    # slice up to the end of the array
    # (e.g. for order 6 : [0,1, 1,6, 6,15, 15,])
    reduce_indices = np.repeat(order_positions, 2)[1:]

    # Compute the sum of squared coefficients using numpy's `reduceat`
    squared_sh = np.square(sh)
    rish = np.add.reduceat(squared_sh, reduce_indices, axis=-1)[..., ::2]

    # Apply mask
    if mask is not None:
        rish *= mask[..., None]

    orders = sorted(np.unique(order_ids))

    return rish, orders
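
A minimal usage sketch on synthetic data (assuming the compute_rish defined
above is in scope): 28 symmetric-basis coefficients correspond to SH order 6,
so one feature map is returned per even order 0, 2, 4 and 6.

import numpy as np

sh = np.random.rand(5, 5, 5, 28).astype(np.float32)
rish, orders = compute_rish(sh)
print(rish.shape, [int(o) for o in orders])   # (5, 5, 5, 4) [0, 2, 4, 6]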