Code Example #1
File: csd_cvxpy.py  Project: gabknight/dmipy
    def __init__(self,
                 acquisition_scheme,
                 model,
                 x0_vector=None,
                 sh_order=8,
                 unity_constraint=True,
                 lambda_lb=0.):
        self.model = model
        self.acquisition_scheme = acquisition_scheme
        self.sh_order = sh_order
        self.Ncoef = int((sh_order + 2) * (sh_order + 1) // 2)
        self.Nmodels = len(self.model.models)
        self.lambda_lb = lambda_lb
        self.unity_constraint = unity_constraint
        self.sphere_jacobian = 2 * np.sqrt(np.pi)

        sphere = get_sphere('symmetric724')
        hemisphere = HemiSphere(phi=sphere.phi, theta=sphere.theta)
        self.L_positivity = real_sym_sh_mrtrix(self.sh_order, hemisphere.theta,
                                               hemisphere.phi)[0]

        x0_single_voxel = np.reshape(x0_vector, (-1, x0_vector.shape[-1]))[0]
        if np.all(np.isnan(x0_single_voxel)):
            self.single_convolution_kernel = True
            self.A = self._construct_convolution_kernel(x0_single_voxel)
        else:
            self.single_convolution_kernel = False

        self.Ncoef_total = 0
        vf_array = []

        if self.model.volume_fractions_fixed:
            self.sh_start = 0
            self.Ncoef_total = self.Ncoef
            self.vf_indices = np.array([0])
        else:
            for model in self.model.models:
                if 'orientation' in model.parameter_types.values():
                    self.sh_start = self.Ncoef_total
                    sh_model = np.zeros(self.Ncoef)
                    sh_model[0] = 1
                    vf_array.append(sh_model)
                    self.Ncoef_total += self.Ncoef
                else:
                    vf_array.append(1)
                    self.Ncoef_total += 1
            self.vf_indices = np.where(np.hstack(vf_array))[0]

        sh_l = sph_harm_ind_list(sh_order)[1]
        lb_weights = sh_l**2 * (sh_l + 1)**2  # laplace-beltrami
        if self.model.volume_fractions_fixed:
            self.R_smoothness = np.diag(lb_weights)
        else:
            diagonal = np.zeros(self.Ncoef_total)
            diagonal[self.sh_start:self.sh_start + self.Ncoef] = lb_weights
            self.R_smoothness = np.diag(diagonal)
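A minimal sketch (not part of the listing above; it assumes numpy and dipy are installed) of two quantities the constructor relies on: the number of even-order real spherical harmonics coefficients, (sh_order + 1)(sh_order + 2)/2, and the Laplace-Beltrami smoothness weights l**2 * (l + 1)**2 per coefficient.

import numpy as np
from dipy.reconst.shm import sph_harm_ind_list

sh_order = 8
n_coef = (sh_order + 1) * (sh_order + 2) // 2   # 45 coefficients for lmax = 8
sh_l = sph_harm_ind_list(sh_order)[1]           # degree l of every coefficient
lb_weights = sh_l ** 2 * (sh_l + 1) ** 2        # Laplace-Beltrami penalty weights
assert sh_l.size == n_coef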
Code Example #2
    def __init__(self,
                 acquisition_scheme,
                 model,
                 x0_vector=None,
                 sh_order=8,
                 lambda_pos=1.,
                 lambda_lb=5e-4,
                 tau=0.1,
                 max_iter=50,
                 unity_constraint=True,
                 init_sh_order=4):
        self.model = model
        self.acquisition_scheme = acquisition_scheme
        self.sh_order = sh_order
        self.Ncoef = int((sh_order + 2) * (sh_order + 1) // 2)
        self.Ncoef4 = int((init_sh_order + 2) * (init_sh_order + 1) // 2)
        self.Nmodels = len(self.model.models)
        self.lambda_pos = lambda_pos
        self.lambda_lb = lambda_lb
        self.tau = tau
        self.max_iter = max_iter
        self.unity_constraint = unity_constraint
        self.sphere_jacobian = 1 / (2 * np.sqrt(np.pi))

        # step 1: prepare positivity grid on sphere
        sphere = get_sphere('symmetric724')
        hemisphere = HemiSphere(phi=sphere.phi, theta=sphere.theta)
        self.L_positivity = real_sym_sh_mrtrix(self.sh_order, hemisphere.theta,
                                               hemisphere.phi)[0]

        sh_l = sph_harm_ind_list(sh_order)[1]
        self.R_smoothness = np.diag(sh_l**2 * (sh_l + 1)**2)

        # check if there is only one model. If so, precompute rh array.
        if self.model.volume_fractions_fixed:
            x0_single_voxel = np.reshape(x0_vector,
                                         (-1, x0_vector.shape[-1]))[0]
            if np.all(np.isnan(x0_single_voxel)):
                self.single_convolution_kernel = True
                parameters_dict = self.model.parameter_vector_to_parameters(
                    x0_single_voxel)
                self.A = self.model._construct_convolution_kernel(
                    **parameters_dict)
                self.AT_A = np.dot(self.A.T, self.A)
            else:
                self.single_convolution_kernel = False
        else:
            msg = "This CSD optimizer cannot estimate volume fractions."
            raise ValueError(msg)
Code Example #3
File: cvxpy_fod.py  Project: Brainconnect/dmipy
    def __init__(self,
                 acquisition_scheme,
                 model,
                 x0_vector=None,
                 sh_order=8,
                 unity_constraint=True):
        self.model = model
        self.acquisition_scheme = acquisition_scheme
        self.sh_order = sh_order
        self.Ncoef = int((sh_order + 2) * (sh_order + 1) // 2)
        self.Nmodels = len(self.model.models)
        self.unity_constraint = unity_constraint

        # step 1: prepare positivity grid on sphere
        sphere = get_sphere('symmetric724')
        hemisphere = HemiSphere(phi=sphere.phi, theta=sphere.theta)
        self.sh_matrix_positivity = real_sym_sh_mrtrix(self.sh_order,
                                                       hemisphere.theta,
                                                       hemisphere.phi)[0]

        # check if there is only one model. If so, precompute rh array.
        if self.Nmodels == 1:
            # also needs to check if x0 is not changing.
            self.prepared_computation = True
            x0_shape = x0_vector.shape
            x0_dummy = np.reshape(x0_vector, (-1, x0_shape[-1]))[0]
            rh_matrix = self.recover_rotational_harmonics(x0_dummy)[0]
            self.prepared_rh_coef = np.zeros([rh_matrix.shape[0], self.Ncoef])
            for i, shell_index in enumerate(
                    acquisition_scheme.unique_dwi_indices):
                rh_order = int(acquisition_scheme.shell_sh_orders[shell_index])
                prepared_rh_shell = self.prepare_rotational_harmonics(
                    rh_matrix[i], rh_order)
                if sh_order >= rh_order:
                    self.prepared_rh_coef[i, :len(prepared_rh_shell)] = (
                        prepared_rh_shell)
                else:
                    self.prepared_rh_coef[i] = prepared_rh_shell[:self.Ncoef]
        else:
            self.prepared_computation = False
Code Example #4
File: distributions.py  Project: gabknight/dmipy
from __future__ import division
import pkg_resources
from os.path import join

import numpy as np
from scipy import stats
from scipy import special
from dipy.reconst.shm import real_sym_sh_mrtrix

from dmipy.utils import utils
from scipy import interpolate
from dmipy.core.modeling_framework import ModelProperties
from dipy.utils.optpkg import optional_package
from dipy.data import get_sphere, HemiSphere
sphere = get_sphere('symmetric724')
hemisphere = HemiSphere(phi=sphere.phi, theta=sphere.theta)

numba, have_numba, _ = optional_package("numba")

GRADIENT_TABLES_PATH = pkg_resources.resource_filename('dmipy',
                                                       'data/gradient_tables')
SIGNAL_MODELS_PATH = pkg_resources.resource_filename('dmipy', 'signal_models')
DATA_PATH = pkg_resources.resource_filename('dmipy', 'data')
SPHERE_CARTESIAN = np.loadtxt(join(GRADIENT_TABLES_PATH,
                                   'sphere_with_cap.txt'))
SPHERE_SPHERICAL = utils.cart2sphere(SPHERE_CARTESIAN)
log_bingham_normalization_splinefit = np.load(
    join(DATA_PATH, "bingham_normalization_splinefit.npz"),
    encoding='bytes')['arr_0']

inverse_sh_matrix_kernel = {
Code Example #5
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    if args.isVerbose:
        logging.basicConfig(level=logging.DEBUG)

    assert_inputs_exist(parser, [
        args.sh_file, args.seed_file, args.map_include_file,
        args.map_exclude_file
    ])
    assert_outputs_exist(parser, args, [args.output_file])

    if not nib.streamlines.is_supported(args.output_file):
        parser.error('Invalid output streamline file format (must be trk or ' +
                     'tck): {0}'.format(args.output_file))

    if not args.min_length > 0:
        parser.error('minL must be > 0, {}mm was provided.'.format(
            args.min_length))
    if args.max_length < args.min_length:
        parser.error(
            'maxL must be greater than minL (minL={}mm, maxL={}mm).'.format(
                args.min_length, args.max_length))

    if args.compress:
        if args.compress < 0.001 or args.compress > 1:
            logging.warning(
                'You are using an error rate of {}.\nWe recommend setting it '
                'between 0.001 and 1.\n0.001 will do almost nothing to the '
                'tracts while 1 will highly compress/linearize the tracts'.
                format(args.compress))

    if args.particles <= 0:
        parser.error('--particles must be >= 1.')

    if args.back_tracking <= 0:
        parser.error('PFT backtracking distance must be > 0.')

    if args.forward_tracking <= 0:
        parser.error('PFT forward tracking distance must be > 0.')

    if args.npv and args.npv <= 0:
        parser.error('Number of seeds per voxel must be > 0.')

    if args.nt and args.nt <= 0:
        parser.error('Total number of seeds must be > 0.')

    fodf_sh_img = nib.load(args.sh_file)
    if not np.allclose(np.mean(fodf_sh_img.header.get_zooms()[:3]),
                       fodf_sh_img.header.get_zooms()[0],
                       atol=1.e-3):
        parser.error(
            'SH file is not isotropic. Tracking cannot be run robustly.')

    tracking_sphere = HemiSphere.from_sphere(get_sphere('repulsion724'))

    # Check if sphere is unit, since we couldn't find such check in Dipy.
    if not np.allclose(np.linalg.norm(tracking_sphere.vertices, axis=1), 1.):
        raise RuntimeError('Tracking sphere should be unit normed.')

    sh_basis = args.sh_basis

    if args.algo == 'det':
        dgklass = DeterministicMaximumDirectionGetter
    else:
        dgklass = ProbabilisticDirectionGetter

    theta = get_theta(args.theta, args.algo)

    # Reminder for the future:
    # pmf_threshold == clip pmf under this
    # relative_peak_threshold is for initial directions filtering
    # min_separation_angle is the initial separation angle for peak extraction
    dg = dgklass.from_shcoeff(fodf_sh_img.get_data().astype(np.double),
                              max_angle=theta,
                              sphere=tracking_sphere,
                              basis_type=sh_basis,
                              pmf_threshold=args.sf_threshold,
                              relative_peak_threshold=args.sf_threshold_init)

    map_include_img = nib.load(args.map_include_file)
    map_exclude_img = nib.load(args.map_exclude_file)
    voxel_size = np.average(map_include_img.get_header()['pixdim'][1:4])

    tissue_classifier = None
    if not args.act:
        tissue_classifier = CmcTissueClassifier(map_include_img.get_data(),
                                                map_exclude_img.get_data(),
                                                step_size=args.step_size,
                                                average_voxel_size=voxel_size)
    else:
        tissue_classifier = ActTissueClassifier(map_include_img.get_data(),
                                                map_exclude_img.get_data())

    if args.npv:
        nb_seeds = args.npv
        seed_per_vox = True
    elif args.nt:
        nb_seeds = args.nt
        seed_per_vox = False
    else:
        nb_seeds = 1
        seed_per_vox = True

    voxel_size = fodf_sh_img.header.get_zooms()[0]
    vox_step_size = args.step_size / voxel_size
    seed_img = nib.load(args.seed_file)
    seeds = track_utils.random_seeds_from_mask(
        seed_img.get_data(),
        seeds_count=nb_seeds,
        seed_count_per_voxel=seed_per_vox,
        random_seed=args.seed)

    # Note that max steps is used once for the forward pass, and
    # once for the backwards. This doesn't, in fact, control the real
    # max length
    max_steps = int(args.max_length / args.step_size) + 1
    pft_streamlines = ParticleFilteringTracking(
        dg,
        tissue_classifier,
        seeds,
        np.eye(4),
        max_cross=1,
        step_size=vox_step_size,
        maxlen=max_steps,
        pft_back_tracking_dist=args.back_tracking,
        pft_front_tracking_dist=args.forward_tracking,
        particle_count=args.particles,
        return_all=args.keep_all,
        random_seed=args.seed)

    scaled_min_length = args.min_length / voxel_size
    scaled_max_length = args.max_length / voxel_size
    filtered_streamlines = (
        s for s in pft_streamlines
        if scaled_min_length <= length(s) <= scaled_max_length)
    if args.compress:
        filtered_streamlines = (compress_streamlines(s, args.compress)
                                for s in filtered_streamlines)

    tractogram = LazyTractogram(lambda: filtered_streamlines,
                                affine_to_rasmm=seed_img.affine)

    filetype = nib.streamlines.detect_format(args.output_file)
    header = create_header_from_anat(seed_img, base_filetype=filetype)

    # Use generator to save the streamlines on-the-fly
    nib.streamlines.save(tractogram, args.output_file, header=header)
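A small worked example (values are illustrative, not taken from the script) of the unit conversions the script performs: the step size is rescaled from millimetres to voxels before tracking, and the maximum number of steps is derived from the maximum streamline length, then applied once forward and once backward.

step_size_mm = 0.5     # tracking step size in mm
voxel_size_mm = 2.0    # isotropic voxel size in mm
max_length_mm = 200.0  # maximum streamline length in mm

vox_step_size = step_size_mm / voxel_size_mm       # 0.25 voxels per step
max_steps = int(max_length_mm / step_size_mm) + 1  # 401 steps per direction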
Code Example #6
def white_matter_response_tournier13(acquisition_scheme,
                                     data,
                                     max_iter=5,
                                     sh_order=10,
                                     N_candidate_voxels=300,
                                     peak_ratio_setting='mrtrix'):
    """
    Iterative model-free white matter response function estimation according to
    [1]_. Quoting the paper, the steps are the following:

    - 1) The 300 brain voxels with the highest FA were identified within a
        brain mask (eroded by three voxels to remove any noisy voxels at the
        brain edges).
    - 2) The single-fibre 'response function' was estimated within these
        voxels, and used to compute the fibre orientation distribution (FOD)
        employing constrained spherical deconvolution (CSD) up to lmax = 10.
    - 3) Within each voxel, a peak-finding procedure was used to identify the
        two largest FOD peaks, and their amplitude ratio was computed.
    - 4) The 300 voxels with the lowest second to first peak amplitude ratios
        were identified, and used as the current estimate of the set of
        'single-fibre' voxels. It should be noted that these voxels were not
        required to be a subset of the original set of 'single-fibre' voxels.
    - 5) To ensure minimal bias from the initial estimate of the 'response
        function', steps (2) to (4) were re-iterated until convergence (no
        difference in the set of 'single-fibre' voxels). It should be noted
        that, in practice, convergence was achieved within a single iteration
        in all cases.

    Parameters
    ----------
    acquisition_scheme : DmipyAcquisitionScheme instance,
        An acquisition scheme that has been instantiated using dMipy.
    data : NDarray,
        Measured diffusion signal array.
    max_iter : Positive integer,
        Defines the maximum number of iterations used to estimate the
        single-fibre response kernel.
    sh_order : Positive even integer,
        Maximum spherical harmonics order to be used in the FOD estimation for
        the single-fibre response kernel.
    N_candidate_voxels : integer,
        Number of voxels to be included in the final white matter response
        estimation. Default is 300 following [1]_.
    peak_ratio_setting : string,
        Can be either 'ratio' or 'mrtrix'. With 'ratio' the value compared
        between peaks is simply peak2 / peak1; with 'mrtrix' the more
        elaborate 1 / sqrt(peak1 * (1 - peak2 / peak1)) ** 2 is used instead,
        to avoid favouring small but low-SNR FODs [2]_.

    Returns
    -------
    S0_wm : positive float,
        Estimated S0 tissue response value.
    TR2_wm_model : Dmipy Anisotropic ModelFree Model
        ModelFree representation of white matter response.
    selected_indices : array of size (N_candidate_voxels,),
        indices of selected voxels for white matter response.

    References
    ----------
    .. [1] Tournier, J-Donald, Fernando Calamante, and Alan Connelly.
        "Determination of the appropriate b value and number of gradient
        directions for high-angular-resolution diffusion-weighted imaging."
        NMR in Biomedicine 26.12 (2013): 1775-1786.
    .. [2] MRtrix 3.0 readthedocs
    """
    data_shape = np.atleast_2d(data).shape
    N_voxels = int(np.prod(data_shape[:-1]))
    if N_voxels < N_candidate_voxels:
        msg = "The parameter N_candidate_voxels is set to {} but only ".format(
            N_candidate_voxels)
        msg += "{} voxels are given. N_candidate_voxels".format(N_voxels)
        msg += " reset to number of voxels given."
        print(msg)
        N_candidate_voxels = N_voxels

    ratio_settings = ['ratio', 'mrtrix']
    if peak_ratio_setting not in ratio_settings:
        msg = 'peak_ratio_setting must be in {}'.format(ratio_settings)
        raise ValueError(msg)

    if data.ndim == 4:
        # calculate brain mask on 4D data (x, y, z, DWI)
        b0_mask, mask = median_otsu(data, 2, 1)
        # needs to be eroded 3 times.
        mask_eroded = binary_erosion(mask, iterations=3)
        data_to_fit = data[mask_eroded]
    else:
        # can't calculate a brain mask on non-4D data,
        # so assume the data was already prepared.
        data_to_fit = data.reshape([-1, data_shape[-1]])

    gtab = gtab_dmipy2dipy(acquisition_scheme)
    tenmod = dti.TensorModel(gtab)
    tenfit = tenmod.fit(data_to_fit)
    fa = tenfit.fa

    # selected based on FA
    selected_indices = np.argsort(fa)[-N_candidate_voxels:]
    sphere = get_sphere('symmetric724')
    hemisphere = HemiSphere(theta=sphere.theta, phi=sphere.phi)
    # iterate until convergence
    it = 0
    while True:
        print('Tournier13 white matter response iteration {}'.format(it + 1))
        selected_data = data_to_fit[selected_indices]

        S0_wm, TR2_wm_model = estimate_TR2_anisotropic_tissue_response_model(
            acquisition_scheme, selected_data)
        sh_model = MultiCompartmentSphericalHarmonicsModel([TR2_wm_model],
                                                           sh_order=sh_order)
        sh_fit = sh_model.fit(acquisition_scheme,
                              data_to_fit,
                              solver='csd_tournier07',
                              use_parallel_processing=False,
                              lambda_lb=0.)
        peaks, values, indices = sh_fit.peaks_directions(
            hemisphere, max_peaks=2, relative_peak_threshold=0.)
        if peak_ratio_setting == 'ratio':
            ratio = values[..., 1] / values[..., 0]
        elif peak_ratio_setting == 'mrtrix':
            ratio = 1. / np.sqrt(values[..., 0] *
                                 (1 - values[..., 1] / values[..., 0]))**2
        selected_indices_old = selected_indices
        selected_indices = np.argsort(ratio)[:N_candidate_voxels]
        percentage_overlap = 100 * float(
            len(np.intersect1d(selected_indices,
                               selected_indices_old))) / N_candidate_voxels
        print('{:.1f} percent candidate voxel overlap.'.format(
            percentage_overlap))
        if percentage_overlap == 100.:
            print('White matter response converged')
            break
        it += 1
        if it > max_iter:
            print('Maximum iterations reached without convergence')
            break
    return S0_wm, TR2_wm_model, selected_indices
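A hedged usage sketch for the function above; scheme and dwi_data are hypothetical placeholders for a DmipyAcquisitionScheme instance and a 4D (x, y, z, DWI) data array, not objects defined in this listing.

S0_wm, wm_response_model, selected_voxels = white_matter_response_tournier13(
    scheme,        # placeholder DmipyAcquisitionScheme
    dwi_data,      # placeholder 4D diffusion-weighted data array
    max_iter=5,
    sh_order=10,
    N_candidate_voxels=300,
    peak_ratio_setting='mrtrix')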