Example 1
def test_multivox_dsi():
    data, gtab = dsi_voxels()
    DS = DiffusionSpectrumModel(gtab)

    DSfit = DS.fit(data)
    PDF = DSfit.pdf()
    assert_equal(data.shape[:-1] + (17, 17, 17), PDF.shape)
    assert_equal(np.alltrue(np.isreal(PDF)), True)
Example 2
def gqi(training, category, snr, denoised, odeconv, tv, method, weight=0.1, sl=3.):

    data, affine, gtab, mask, evals, S0, prefix = prepare(training,
                                                          category,
                                                          snr,
                                                          denoised,
                                                          odeconv,
                                                          tv,
                                                          method)

    model = GeneralizedQSamplingModel(gtab,
                                      method='gqi2',
                                      sampling_length=sl,
                                      normalize_peaks=False)

    fit = model.fit(data, mask)

    sphere = get_sphere('symmetric724')

    odf = fit.odf(sphere)

    if odeconv:

        odf_sh = sf_to_sh(odf, sphere, sh_order=8,
                          basis_type='mrtrix')

        # nib.save(nib.Nifti1Image(odf_sh, affine), model_tag + 'odf_sh.nii.gz')

        reg_sphere = get_sphere('symmetric724')

        fodf_sh = odf_sh_to_sharp(odf_sh,
                                  reg_sphere, basis='mrtrix', ratio=3.8 / 16.6,
                                  sh_order=8, lambda_=1., tau=1.)

        # nib.save(nib.Nifti1Image(fodf_sh, affine), model_tag + 'fodf_sh.nii.gz')

        fodf_sh[np.isnan(fodf_sh)] = 0

        r, theta, phi = cart2sphere(sphere.x, sphere.y, sphere.z)
        B_regul, m, n = real_sph_harm_mrtrix(8, theta[:, None], phi[:, None])

        fodf = np.dot(fodf_sh, B_regul.T)

        odf = fodf

    if tv:

        odf = tv_denoise_4d(odf, weight=weight)

    save_odfs_peaks(training, odf, affine, sphere, dres, prefix)
Example 3
def test_diffusivities():
    psphere = get_sphere('symmetric362')
    bvecs = np.concatenate(([[0, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = grad.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))
    mevecs = [np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
              np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])]
    S = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)

    dm = dti.TensorModel(gtab, 'LS')
    dmfit = dm.fit(S)

    md = mean_diffusivity(dmfit.evals)
    Trace = trace(dmfit.evals)
    rd = radial_diffusivity(dmfit.evals)
    ad = axial_diffusivity(dmfit.evals)
    lin = linearity(dmfit.evals)
    plan = planarity(dmfit.evals)
    spher = sphericity(dmfit.evals)

    assert_almost_equal(md, (0.0015 + 0.0003 + 0.0001) / 3)
    assert_almost_equal(Trace, (0.0015 + 0.0003 + 0.0001))
    assert_almost_equal(ad, 0.0015)
    assert_almost_equal(rd, (0.0003 + 0.0001) / 2)
    assert_almost_equal(lin, (0.0015 - 0.0003) / Trace)
    assert_almost_equal(plan, 2 * (0.0003 - 0.0001) / Trace)
    assert_almost_equal(spher, (3 * 0.0001) / Trace)
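As a sanity check on the shape measures asserted above: linearity, planarity, and sphericity partition the tensor shape and therefore must sum to one. A minimal standalone check (plain numpy, independent of dipy):

import numpy as np

# Eigenvalues used in the test above, in descending order
evals = np.array([0.0015, 0.0003, 0.0001])
trace = evals.sum()

lin = (evals[0] - evals[1]) / trace       # linearity
plan = 2 * (evals[1] - evals[2]) / trace  # planarity
spher = 3 * evals[2] / trace              # sphericity

assert np.isclose(lin + plan + spher, 1.0)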
Example 4
def sdt(training, category, snr, denoised, odeconv, tv, method, weight=0.1):

    data, affine, gtab, mask, evals, S0, prefix = prepare(training,
                                                          category,
                                                          snr,
                                                          denoised,
                                                          odeconv,
                                                          tv,
                                                          method)

    if category == 'dti':
        csdt_model = ConstrainedSDTModel(gtab, ratio=evals[1] / evals[0],
                                         sh_order=6)
    elif category == 'hardi':
        csdt_model = ConstrainedSDTModel(gtab, ratio=evals[1] / evals[0],
                                         sh_order=8)

    csdt_fit = csdt_model.fit(data, mask)

    sphere = get_sphere('symmetric724')

    odf = csdt_fit.odf(sphere)

    if tv:

        odf = tv_denoise_4d(odf, weight=weight)

    save_odfs_peaks(training, odf, affine, sphere, dres, prefix)
Example 5
    def __init__(self, gtab, evals=[0.001, 0, 0], sphere=None):
        """
        Initialize a signal maker

        Parameters
        ----------
        gtab : GradientTable class instance
            The gradient table on which the signal is calculated.
        evals : list of 3 items
            The eigenvalues of the canonical tensor to use in calculating the
            signal.
        sphere : `dipy.core.Sphere` class instance, optional
            The discrete sphere to use as an approximation for the continuous
            sphere on which the signal is represented. If a `dipy.core.Sphere`
            class instance is provided, we will use it. Default: the
            :mod:`dipy.data` symmetric sphere with 724 vertices.
        """
        if sphere is None:
            self.sphere = dpd.get_sphere('symmetric724')
        else:
            self.sphere = sphere

        self.gtab = gtab
        self.evals = evals
        # Initialize an empty dict to fill with signals for each of the sphere
        # vertices:
        self.signal = np.empty((self.sphere.vertices.shape[0],
                                np.sum(~gtab.b0s_mask)))
        # We'll need to keep track of what we've already calculated:
        self._calculated = []
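A minimal sketch of how the cache initialized above might be filled lazily. `calc_signal` here is a hypothetical helper, not part of the original class; `single_tensor` and `all_tensor_evecs` are the dipy.sims.voxel simulation functions used elsewhere in these examples:

from dipy.sims.voxel import all_tensor_evecs, single_tensor

def calc_signal(self, vertex_idx):
    # Hypothetical method: simulate the canonical-tensor signal oriented
    # along one sphere vertex, caching it so each vertex is computed once.
    if vertex_idx not in self._calculated:
        evecs = all_tensor_evecs(self.sphere.vertices[vertex_idx])
        sig = single_tensor(self.gtab, 100., self.evals, evecs, snr=None)
        self.signal[vertex_idx] = sig[~self.gtab.b0s_mask]
        self._calculated.append(vertex_idx)
    return self.signal[vertex_idx]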
Example 6
    def _run_interface(self, runtime):
        from dipy.reconst import shm
        from dipy.data import get_sphere
        from dipy.reconst.peaks import peaks_from_model

        gtab = self._get_gradient_table()

        img = nb.load(self.inputs.in_file)
        data = img.get_data()
        affine = img.affine
        mask = None
        if isdefined(self.inputs.mask_file):
            mask = nb.load(self.inputs.mask_file).get_data()

        # Fit it
        model = shm.QballModel(gtab, 8)
        sphere = get_sphere('symmetric724')
        peaks = peaks_from_model(
            model=model,
            data=data,
            relative_peak_threshold=.5,
            min_separation_angle=25,
            sphere=sphere,
            mask=mask)
        apm = shm.anisotropic_power(peaks.shm_coeff)
        out_file = self._gen_filename('apm')
        nb.Nifti1Image(apm.astype("float32"), affine).to_filename(out_file)
        IFLOGGER.info('APM qball image saved as %s', out_file)

        return runtime
Example 7
def test_sfm():
    fdata, fbvals, fbvecs = dpd.get_data()
    data = nib.load(fdata).get_data()
    gtab = grad.gradient_table(fbvals, fbvecs)
    sfmodel = sfm.SparseFascicleModel(gtab)
    sffit1 = sfmodel.fit(data[0, 0, 0])
    sphere = dpd.get_sphere("symmetric642")
    odf1 = sffit1.odf(sphere)
    pred1 = sffit1.predict(gtab)
    mask = np.ones(data.shape[:-1])
    sffit2 = sfmodel.fit(data, mask)
    pred2 = sffit2.predict(gtab)
    odf2 = sffit2.odf(sphere)
    sffit3 = sfmodel.fit(data)
    pred3 = sffit3.predict(gtab)
    odf3 = sffit3.odf(sphere)
    npt.assert_almost_equal(pred3, pred2, decimal=2)
    npt.assert_almost_equal(pred3[0, 0, 0], pred1, decimal=2)
    npt.assert_almost_equal(odf3[0, 0, 0], odf1, decimal=2)
    npt.assert_almost_equal(odf3[0, 0, 0], odf2[0, 0, 0], decimal=2)

    # Fit zeros and you will get back zeros
    npt.assert_almost_equal(sfmodel.fit(np.zeros(data[0, 0, 0].shape)).beta,
                            np.zeros(sfmodel.design_matrix[0].shape[-1]))
Example 8
def test_eudx_bad_seed():
    """Test passing a bad seed to eudx"""
    fimg, fbvals, fbvecs = get_data('small_101D')

    img = ni.load(fimg)
    affine = img.affine
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    ind = quantize_evecs(ten.evecs)

    sphere = get_sphere('symmetric724')
    seed = [1000000., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    assert_raises(ValueError, list, eu)

    print(data.shape)
    seed = [1., 5., 8.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    track = list(eu)

    seed = [-1., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    assert_raises(ValueError, list, eu)
Example 9
def test_eudx_boundaries():
    """
    This test checks that the tracking will exclude seeds in both directions.
    Here we create a volume of shape (50, 60, 40) and we will add 2 seeds
    exactly at the volume's boundaries (49, 0, 0) and (0, 0, 0). Those should
    not generate any streamlines as EuDX does not interpolate on the boundary
    voxels. We also add 3 seeds not in the boundaries which should generate
    streamlines without a problem.
    """

    fa = np.ones((50, 60, 40))
    ind = np.zeros(fa.shape)
    sphere = get_sphere('repulsion724')

    seed = [49., 0, 0]
    seed2 = [0., 0, 0]
    seed3 = [48., 0, 0]
    seed4 = [1., 0, 0]
    seed5 = [5., 5, 5]

    eu = EuDX(a=fa, ind=ind, seeds=[seed, seed2, seed3, seed4, seed5],
              odf_vertices=sphere.vertices, a_low=.2,
              total_weight=0.)
    track = list(eu)

    assert_equal(len(track), 3)
Example 10
def test_multib0_dsi():
    data, gtab = dsi_voxels()
    # Create a new data-set with a b0 measurement:
    new_data = np.concatenate([data, data[..., 0, None]], -1)
    new_bvecs = np.concatenate([gtab.bvecs, np.zeros((1, 3))])
    new_bvals = np.concatenate([gtab.bvals, [0]])
    new_gtab = gradient_table(new_bvals, new_bvecs)
    ds = DiffusionSpectrumModel(new_gtab)
    sphere = get_sphere('repulsion724')
    dsfit = ds.fit(new_data)
    pdf = dsfit.pdf()
    dsfit.odf(sphere)
    assert_equal(new_data.shape[:-1] + (17, 17, 17), pdf.shape)
    assert_equal(np.alltrue(np.isreal(pdf)), True)

    # And again, with one more b0 measurement (two in total):
    new_data = np.concatenate([new_data, data[..., 0, None]], -1)
    new_bvecs = np.concatenate([new_gtab.bvecs, np.zeros((1, 3))])
    new_bvals = np.concatenate([new_gtab.bvals, [0]])
    new_gtab = gradient_table(new_bvals, new_bvecs)
    ds = DiffusionSpectrumModel(new_gtab)
    dsfit = ds.fit(new_data)
    pdf = dsfit.pdf()
    dsfit.odf(sphere)
    assert_equal(new_data.shape[:-1] + (17, 17, 17), pdf.shape)
    assert_equal(np.alltrue(np.isreal(pdf)), True)
Example 11
def test_dti_xval():
    """
    Test k-fold cross-validation
    """
    data = nib.load(fdata).get_data()
    gtab = gt.gradient_table(fbval, fbvec)
    dm = dti.TensorModel(gtab, "LS")
    # The data has 102 directions, so will not divide neatly into 10 bits
    npt.assert_raises(ValueError, xval.kfold_xval, dm, data, 10)

    # But we can do this with 2 folds:
    kf_xval = xval.kfold_xval(dm, data, 2)

    # In simulation with no noise, COD should be perfect:
    psphere = dpd.get_sphere("symmetric362")
    bvecs = np.concatenate(([[0, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = gt.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))
    mevecs = [np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
              np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])]
    S = sims.single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)

    dm = dti.TensorModel(gtab, "LS")
    kf_xval = xval.kfold_xval(dm, S, 2)
    cod = xval.coeff_of_determination(S, kf_xval)
    npt.assert_array_almost_equal(cod, np.ones(kf_xval.shape[:-1]) * 100)

    # Test with 2D data for use of a mask
    S = np.array([[S, S], [S, S]])
    mask = np.ones(S.shape[:-1], dtype=bool)
    mask[1, 1] = 0
    kf_xval = xval.kfold_xval(dm, S, 2, mask=mask)
    cod2d = xval.coeff_of_determination(S, kf_xval)
    npt.assert_array_almost_equal(np.round(cod2d[0, 0]), cod)
Example 12
def test_r2_term_odf_sharp():
    SNR = None
    S0 = 1
    angle = 75

    _, fbvals, fbvecs = get_data('small_64D')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)

    sphere = get_sphere('symmetric724')
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))

    S, sticks = multi_tensor(gtab, mevals, S0, angles=[(0, 0), (angle, 0)],
                             fractions=[50, 50], snr=SNR)

    mevecs = [all_tensor_evecs(sticks[0]).T,
              all_tensor_evecs(sticks[1]).T]

    odf_gt = multi_tensor_odf(sphere.vertices, [0.5, 0.5], mevals, mevecs)
    odfs_sh = sf_to_sh(odf_gt, sphere, sh_order=8, basis_type=None)
    fodf_sh = odf_sh_to_sharp(odfs_sh, sphere, basis=None, ratio=3 / 15.,
                              sh_order=8, lambda_=1., tau=0.1, r2_term=True)
    fodf = sh_to_sf(fodf_sh, sphere, sh_order=8, basis_type=None)

    directions_gt, _, _ = peak_directions(odf_gt, sphere)
    directions, _, _ = peak_directions(fodf, sphere)

    ang_sim = angular_similarity(directions_gt, directions)
    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions.shape[0], 2)
Example 13
File: sfm.py Project: qytian/dipy
    def __init__(self, gtab, sphere=None, response=[0.0015, 0.0005, 0.0005],
                 solver='ElasticNet', l1_ratio=0.5, alpha=0.001):
        """
        Initialize a Sparse Fascicle Model

        Parameters
        ----------
        gtab : GradientTable class instance
        sphere : Sphere class instance, optional
            A sphere on which coefficients will be estimated. Default: a
            symmetric sphere with 362 points (from :mod:`dipy.data`).
        response : (3,) array-like, optional
            The eigenvalues of a canonical tensor to be used as the response
            function of single-fascicle signals.
            Default: [0.0015, 0.0005, 0.0005]

        solver : string or SKLearnLinearSolver object, optional
            This will determine the algorithm used to solve the set of linear
            equations underlying this model. If it is a string it needs to be
            one of the following: {'ElasticNet', 'NNLS'}. Otherwise, it can be
            an object that inherits from `dipy.optimize.SKLearnLinearSolver`.
            Default: 'ElasticNet'.

        l1_ratio : float, optional
            Sets the balance between L1 and L2 regularization in ElasticNet
            [Zou2005]_. Default: 0.5
        alpha : float, optional
            Sets the balance between least-squares error and L1/L2
            regularization in ElasticNet [Zou2005]_. Default: 0.001

        Notes
        -----
        This is an implementation of the SFM, described in [Rokem2014]_.

        .. [Rokem2014] Ariel Rokem, Jason D. Yeatman, Franco Pestilli, Kendrick
           N. Kay, Aviv Mezer, Stefan van der Walt, Brian A. Wandell
           (2014). Evaluating the accuracy of diffusion MRI models in white
           matter. http://arxiv.org/abs/1411.0721

        .. [Zou2005] Zou H, Hastie T (2005). Regularization and variable
           selection via the elastic net. J R Stat Soc B:301-320
        """
        ReconstModel.__init__(self, gtab)
        if sphere is None:
            sphere = dpd.get_sphere()
        self.sphere = sphere
        self.response = np.asarray(response)

        if solver == 'ElasticNet':
            self.solver = lm.ElasticNet(l1_ratio=l1_ratio, alpha=alpha,
                                        positive=True, warm_start=True)
        elif solver == 'NNLS' or solver == 'nnls':
            self.solver = opt.NonNegativeLeastSquares()
        elif isinstance(solver, opt.SKLearnLinearSolver):
            self.solver = solver
        else:
            e_s = "The `solver` key-word argument needs to be: "
            e_s += "'ElasticNet', 'NNLS', or a "
            e_s += "`dipy.optimize.SKLearnLinearSolver` object"
            raise ValueError(e_s)
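A short usage sketch of the solver selection documented above, reusing the data helpers shown in the other examples (assumes `dpd.get_data()` returns file paths as in Example 7):

import dipy.core.gradients as grad
import dipy.data as dpd
import dipy.reconst.sfm as sfm

fdata, fbvals, fbvecs = dpd.get_data()
gtab = grad.gradient_table(fbvals, fbvecs)

# ElasticNet (the default) adds L1/L2 regularization controlled by
# l1_ratio and alpha; NNLS is plain non-negative least squares.
model_en = sfm.SparseFascicleModel(gtab, solver='ElasticNet',
                                   l1_ratio=0.5, alpha=0.001)
model_nnls = sfm.SparseFascicleModel(gtab, solver='NNLS')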
Example 14
def track(model, data, mask=None, sphere=None, step_size=1, angle_limit=20,
          seeds=None, density=[2, 2, 2], voxel_size=[1, 1, 1]):
    """
    Interface for tracking based on fiber ODF models.

    `model` needs to have a `fit` method, such that model.fit(data).odf(sphere)
    is a legitimate ODF; that is, it has dimensions (x, y, z, n_vertices),
    where n_vertices refers to the vertices of the provided sphere.
    """

    # If no sphere is provided, we will use the dipy symmetric sphere with
    # 724 vertices. That should be enough.
    if sphere is None:
        sphere = dpd.get_sphere('symmetric724')

    stepper = dpt.FixedSizeStepper(step_size)
    interpolator = dpt.NearestNeighborInterpolator(data, voxel_size)

    if seeds is None:
        seeds = dpu.seeds_from_mask(mask, density, voxel_size)

    pwt = dpt.ProbabilisticOdfWeightedTracker(model, interpolator, mask,
                                              stepper, angle_limit, seeds,
                                              sphere)

    pwt_streamlines = list(pwt)

    fibers = []
    for f in pwt_streamlines:
        fibers.append(ozf.Fiber(f))
    return fibers
Example 15
def constrained_spherical_deconvolution(dir_src, dir_out, verbose=False):

    # Load data
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi = pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    #fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')
    fmask = pjoin(dir_src, 'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    sphere = get_sphere('symmetric724')

    response, ratio = auto_response(gtab, data, roi_radius=par_ar_radius,
                                    fa_thr=par_ar_fa_th)
    # print('Response function', response)

    # Model fitting
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd_model.fit(data, mask=mask)

    # Saving Spherical Harmonic Coefficient
    out_peaks = 'sh_' + par_b_tag + '_' + par_dim_tag + '.nii.gz'
    save_nifti(pjoin(dir_out, out_peaks), csd_fit.shm_coeff, affine)
Example 16
def test_csd_xval():
    # First, let's see that it works with some data:
    data = nib.load(fdata).get_data()[1:3, 1:3, 1:3]  # Make it *small*
    gtab = gt.gradient_table(fbval, fbvec)
    S0 = np.mean(data[..., gtab.b0s_mask])
    response = ([0.0015, 0.0003, 0.0001], S0)
    csdm = csd.ConstrainedSphericalDeconvModel(gtab, response)
    kf_xval = xval.kfold_xval(csdm, data, 2, response, sh_order=2)

    # In simulation, it should work rather well (high COD):
    psphere = dpd.get_sphere('symmetric362')
    bvecs = np.concatenate(([[0, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = gt.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))
    mevecs = [np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
              np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])]
    S0 = 100
    S = sims.single_tensor(gtab, S0, mevals[0], mevecs[0], snr=None)
    sm = csd.ConstrainedSphericalDeconvModel(gtab, response)
    smfit = sm.fit(S)
    np.random.seed(12345)
    response = ([0.0015, 0.0003, 0.0001], S0)
    kf_xval = xval.kfold_xval(sm, S, 2, response, sh_order=2)
    # Because of the regularization, COD is not going to be perfect here:
    cod = xval.coeff_of_determination(S, kf_xval)
    # We'll just test for regressions:
    csd_cod = 97  # pre-computed by hand for this random seed

    # We're going to be really lenient here:
    npt.assert_array_almost_equal(np.round(cod), csd_cod)
Example 17
def tracking_prob(dir_src, dir_out, verbose=False):

    wm_name = 'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz'
    wm_mask, affine = load_nifti(pjoin(dir_src, wm_name), verbose)

    sh_name = 'sh_' + par_b_tag + '_' + par_dim_tag + '.nii.gz'
    sh, _ = load_nifti(pjoin(dir_src, sh_name), verbose)

    sphere = get_sphere('symmetric724')

    classifier = BinaryTissueClassifier(wm_mask)
    max_dg = ProbabilisticDirectionGetter.from_shcoeff(
        sh, max_angle=par_trk_max_angle, sphere=sphere)
    seeds = utils.seeds_from_mask(wm_mask, density=2, affine=affine)
    streamlines = LocalTracking(max_dg, classifier, seeds, affine,
                                step_size=par_trk_step_size)
    streamlines = list(streamlines)

    trk_name = 'tractogram_' + par_b_tag + '_' + par_dim_tag + '_' + par_trk_prob_tag + '.trk'
    trk_out = os.path.join(dir_out, trk_name)

    save_trk(trk_out, streamlines, affine, wm_mask.shape)

    dpy_out = trk_out.replace('.trk', '.dpy')
    dpy = Dpy(dpy_out, 'w')
    dpy.write_tracks(streamlines)
    dpy.close()
Example 18
def tracking_eudx(dir_src, dir_out, verbose=False):

    # Loading FA and evecs data
    fa_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_FA.nii.gz'
    FA, affine = load_nifti(pjoin(dir_src, fa_name), verbose)

    evecs_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_EV.nii.gz'
    evecs, _ = load_nifti(pjoin(dir_src, evecs_name), verbose)

    # Computation of streamlines
    sphere = get_sphere('symmetric724')
    peak_indices = quantize_evecs(evecs, sphere.vertices)
    streamlines = EuDX(FA.astype('f8'),
                       ind=peak_indices,
                       seeds=par_eudx_seeds,
                       odf_vertices=sphere.vertices,
                       a_low=par_eudx_threshold)

    # Saving tractography
    voxel_size = (par_dim_vox,) * 3
    dims = FA.shape[:3]
    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = voxel_size
    hdr['voxel_order'] = 'LAS'
    hdr['dim'] = dims
    hdr['vox_to_ras'] = affine
    strm = ((sl, None, None) for sl in streamlines)
    trk_name = 'tractogram_' + par_b_tag + '_' + par_dim_tag + '_' + par_rec_tag + '_' + par_eudx_tag + '.trk'
    trk_out = os.path.join(dir_out, trk_name)
    nib.trackvis.write(trk_out, strm, hdr, points_space='voxel')

    dpy_out = trk_out.replace('.trk', '.dpy')
    dpy = Dpy(dpy_out, 'w')
    dpy.write_tracks(streamlines)
    dpy.close()
Example 19
def test_peak_finding():

    vertices, faces = get_sphere('symmetric724')
    odf = np.abs(vertices.sum(-1))

    odf[1] = 10.
    odf[505] = 505.
    odf[143] = 143.

    peaks, inds = peak_finding(odf.astype('f8'), faces.astype('uint16'))
    print(peaks, inds)
    edges = unique_edges(faces)
    peaks, inds = local_maxima(odf, edges)
    print(peaks, inds)
    vertices_half, edges_half, faces_half = reduce_antipodal(vertices, faces)
    n = len(vertices_half)
    peaks, inds = local_maxima(odf[:n], edges_half)
    print(peaks, inds)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    e0 = np.array([1, 0, 0.])
    e1 = np.array([0., 1, 0])
    mevecs = [all_tensor_evecs(e0), all_tensor_evecs(e1)]
    odf = multi_tensor_odf(vertices, [0.5, 0.5], mevals, mevecs)
    peaks, inds = peak_finding(odf, faces)
    print(peaks, inds)
    peaks2, inds2 = local_maxima(odf[:n], edges_half)
    print(peaks2, inds2)
    assert_equal(len(peaks), 2)
    assert_equal(len(peaks2), 2)
Example 20
def test_multi_tensor():
    sphere = get_sphere('symmetric724')
    vertices = sphere.vertices
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    e0 = np.array([np.sqrt(2) / 2., np.sqrt(2) / 2., 0])
    e1 = np.array([0, np.sqrt(2) / 2., np.sqrt(2) / 2.])
    mevecs = [all_tensor_evecs(e0), all_tensor_evecs(e1)]
    # odf = multi_tensor_odf(vertices, [0.5, 0.5], mevals, mevecs)
    # assert_(odf.shape == (len(vertices),))
    # assert_(np.all(odf <= 1) & np.all(odf >= 0))

    fimg, fbvals, fbvecs = get_data('small_101D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)

    s1 = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    s2 = single_tensor(gtab, 100, mevals[1], mevecs[1], snr=None)

    Ssingle = 0.5 * s1 + 0.5 * s2

    S, sticks = MultiTensor(gtab, mevals, S0=100, angles=[(90, 45), (45, 90)],
                            fractions=[50, 50], snr=None)

    assert_array_almost_equal(S, Ssingle)
Example 21
def test_predict():
    """
    Test model prediction API
    """
    psphere = get_sphere('symmetric362')
    bvecs = np.concatenate(([[1, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = grad.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))
    mevecs = [np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
              np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])]
    S = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)

    dm = dti.TensorModel(gtab, 'LS')
    dmfit = dm.fit(S)
    assert_array_almost_equal(dmfit.predict(gtab, S0=100), S)
    assert_array_almost_equal(dm.predict(dmfit.model_params, S0=100), S)

    fdata, fbvals, fbvecs = get_data()
    data = nib.load(fdata).get_data()
    # Make the data cube a bit larger:
    data = np.tile(data.T, 2).T
    gtab = grad.gradient_table(fbvals, fbvecs)
    dtim = dti.TensorModel(gtab)
    dtif = dtim.fit(data)
    S0 = np.mean(data[..., gtab.b0s_mask], -1)
    p = dtif.predict(gtab, S0)
    assert_equal(p.shape, data.shape)
Example 22
def test_local_maxima():
    sphere = get_sphere('symmetric724')
    vertices, faces = sphere.vertices, sphere.faces
    edges = unique_edges(faces)
    odf = abs(vertices.sum(-1))
    odf[1] = 10.
    odf[143] = 143.
    odf[505] = 505

    peak_values, peak_index = local_maxima(odf, edges)
    npt.assert_array_equal(peak_values, [505, 143, 10])
    npt.assert_array_equal(peak_index, [505, 143, 1])

    hemisphere = HemiSphere(xyz=vertices, faces=faces)
    vertices_half, edges_half = hemisphere.vertices, hemisphere.edges
    odf = abs(vertices_half.sum(-1))
    odf[1] = 10.
    odf[143] = 143.

    peak_value, peak_index = local_maxima(odf, edges_half)
    npt.assert_array_equal(peak_value, [143, 10])
    npt.assert_array_equal(peak_index, [143, 1])

    odf[20] = np.nan
    npt.assert_raises(ValueError, local_maxima, odf, edges_half)
Example 23
        def __init__(self,
                     gtab,
                     method='gqi2',
                     sampling_length=3.5,
                     normalize_peaks=True,
                     ratio=0.2,
                     sh_order=8,
                     lambda_=1.,
                     tau=0.1,
                     r2=True):

            super(GeneralizedQSamplingDeconvModel, self).__init__(gtab,
                                                                  method,
                                                                  sampling_length,
                                                                  normalize_peaks)
            sphere = get_sphere('symmetric724')
            self.invB = sf_to_sh_invB(sphere, sh_order, 'mrtrix')
            self.R, self.B_reg = mats_odfdeconv(sphere,
                                                basis='mrtrix',
                                                ratio=ratio,
                                                sh_order=sh_order,
                                                lambda_=lambda_, tau=tau,
                                                r2=r2)
            self.lambda_ = lambda_
            self.tau = tau
            self.sh_order = sh_order
Example 24
def test_minmax_normalize():

    bvalue = 3000
    S0 = 1
    SNR = 100

    sphere = get_sphere("symmetric362")
    bvecs = np.concatenate(([[0, 0, 0]], sphere.vertices))
    bvals = np.zeros(len(bvecs)) + bvalue
    bvals[0] = 0
    gtab = gradient_table(bvals, bvecs)

    evals = np.array(([0.0017, 0.0003, 0.0003], [0.0017, 0.0003, 0.0003]))

    S, sticks = multi_tensor(gtab, evals, S0, angles=[(0, 0), (90, 0)],
                             fractions=[50, 50], snr=SNR)
    odf = multi_tensor_odf(sphere.vertices, evals, angles=[(0, 0), (90, 0)],
                           fractions=[50, 50])

    odf2 = minmax_normalize(odf)
    assert_equal(odf2.max(), 1)
    assert_equal(odf2.min(), 0)

    odf3 = np.empty(odf.shape)
    odf3 = minmax_normalize(odf, odf3)
    assert_equal(odf3.max(), 1)
    assert_equal(odf3.min(), 0)
Example 25
def test_exponential_iso():
    fdata, fbvals, fbvecs = dpd.get_data()
    data_dti = nib.load(fdata).get_data()
    gtab_dti = grad.gradient_table(fbvals, fbvecs)
    data_multi, gtab_multi = dpd.dsi_deconv_voxels()

    for data, gtab in zip([data_dti, data_multi], [gtab_dti, gtab_multi]):
        sfmodel = sfm.SparseFascicleModel(
                  gtab, isotropic=sfm.ExponentialIsotropicModel)

        sffit1 = sfmodel.fit(data[0, 0, 0])
        sphere = dpd.get_sphere()
        odf1 = sffit1.odf(sphere)
        pred1 = sffit1.predict(gtab)

        SNR = 1000
        S0 = 100
        mevals = np.array(([0.0015, 0.0005, 0.0005],
                           [0.0015, 0.0005, 0.0005]))
        angles = [(0, 0), (60, 0)]
        S, sticks = sims.multi_tensor(gtab, mevals, S0, angles=angles,
                                      fractions=[50, 50], snr=SNR)
        sffit = sfmodel.fit(S)
        pred = sffit.predict()
        npt.assert_(xval.coeff_of_determination(pred, S) > 96)
Example 26
def test_sphere_scaling_csdmodel():
    """Check that mirroring regulization sphere does not change the result of
    csddeconv model"""
    _, fbvals, fbvecs = get_data('small_64D')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)

    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))

    angles = [(0, 0), (60, 0)]

    S, sticks = multi_tensor(gtab, mevals, 100., angles=angles,
                             fractions=[50, 50], snr=None)

    sphere = get_sphere('symmetric362')
    hemi = HemiSphere.from_sphere(sphere)

    response = (np.array([0.0015, 0.0003, 0.0003]), 100)
    model_full = ConstrainedSphericalDeconvModel(gtab, response,
                                                 reg_sphere=sphere)
    model_hemi = ConstrainedSphericalDeconvModel(gtab, response,
                                                 reg_sphere=hemi)
    csd_fit_full = model_full.fit(S)
    csd_fit_hemi = model_hemi.fit(S)

    assert_array_almost_equal(csd_fit_full.shm_coeff, csd_fit_hemi.shm_coeff)
Example 27
def test_csd_predict():
    """
    Test the CSD model prediction API.
    """
    SNR = 100
    S0 = 1
    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]
    S, sticks = multi_tensor(gtab, mevals, S0, angles=angles,
                             fractions=[50, 50], snr=SNR)
    sphere = get_sphere('symmetric362')
    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)

    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(S)
    prediction = csd_predict(csd_fit.shm_coeff, gtab, response=response, S0=S0)
    npt.assert_equal(prediction.shape[0], S.shape[0])
    model_prediction = csd.predict(csd_fit.shm_coeff)
    assert_array_almost_equal(prediction, model_prediction)
    # Roundtrip tests (quite inaccurate, because of regularization):
    assert_array_almost_equal(csd_fit.predict(gtab, S0=S0), S, decimal=1)
    assert_array_almost_equal(csd.predict(csd_fit.shm_coeff, S0=S0), S,
                              decimal=1)
Example 28
def test_odfdeconv():
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_data('small_64D')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)

    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))

    angles = [(0, 0), (90, 0)]
    S, sticks = multi_tensor(gtab, mevals, S0, angles=angles,
                             fractions=[50, 50], snr=SNR)

    sphere = get_sphere('symmetric362')

    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])

    e1 = 15.0
    e2 = 3.0
    ratio = e2 / e1

    csd = ConstrainedSDTModel(gtab, ratio, None)

    csd_fit = csd.fit(S)
    fodf = csd_fit.odf(sphere)

    directions, _, _ = peak_directions(odf_gt, sphere)
    directions2, _, _ = peak_directions(fodf, sphere)

    ang_sim = angular_similarity(directions, directions2)

    assert_equal(ang_sim > 1.9, True)

    assert_equal(directions.shape[0], 2)
    assert_equal(directions2.shape[0], 2)

    with warnings.catch_warnings(record=True) as w:

        ConstrainedSDTModel(gtab, ratio, sh_order=10)
        assert_equal(len(w) > 0, True)

    with warnings.catch_warnings(record=True) as w:

        ConstrainedSDTModel(gtab, ratio, sh_order=8)
        assert_equal(len(w) > 0, False)

    csd_fit = csd.fit(np.zeros_like(S))
    fodf = csd_fit.odf(sphere)
    assert_array_equal(fodf, np.zeros_like(fodf))

    odf_sh = np.zeros_like(fodf)
    odf_sh[1] = np.nan

    fodf, it = odf_deconv(odf_sh, csd.R, csd.B_reg)
    assert_array_equal(fodf, np.zeros_like(fodf))
Example 29
def test_design_matrix():
    data, gtab = dpd.dsi_voxels()
    sphere = dpd.get_sphere()
    # Make it with NNLS, so that it gets tested regardless of sklearn
    sparse_fascicle_model = sfm.SparseFascicleModel(gtab, sphere,
                                                    solver='NNLS')
    npt.assert_equal(sparse_fascicle_model.design_matrix.shape,
                     (np.sum(~gtab.b0s_mask), sphere.vertices.shape[0]))
Example 30
def test_shore_metrics():
    gtab = get_gtab_taiwan_dsi()
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angl = [(0, 0), (60, 0)]
    S, sticks = MultiTensor(gtab, mevals, S0=100.0, angles=angl,
                            fractions=[50, 50], snr=None)

    # Since we are testing without noise, we can use a higher order and lower
    # lambdas than the defaults.
    radial_order = 6
    lambd = 1e-8

    # test mapmri_indices
    indices = mapmri_index_matrix(radial_order)
    n_c = indices.shape[0]
    F = radial_order // 2
    n_gt = np.round(1 / 6.0 * (F + 1) * (F + 2) * (4 * F + 3))
    assert_equal(n_c, n_gt)

    # test MAPMRI fitting

    mapm = MapmriModel(gtab, radial_order=radial_order, lambd=lambd)
    mapfit = mapm.fit(S)
    c_map = mapfit.mapmri_coeff

    R = mapfit.mapmri_R
    mu = mapfit.mapmri_mu

    S_reconst = mapfit.predict(gtab, 1.0)

    # test the signal reconstruction
    S = S / S[0]
    nmse_signal = np.sqrt(np.sum((S - S_reconst) ** 2)) / (S.sum())
    assert_almost_equal(nmse_signal, 0.0, 3)

    # test if the analytical integral of the pdf is equal to one
    integral = 0
    for i in range(indices.shape[0]):
        n1, n2, n3 = indices[i]
        integral += c_map[i] * int_func(n1) * int_func(n2) * int_func(n3)

    assert_almost_equal(integral, 1.0, 3)

    # compare the shore pdf with the ground truth multi_tensor pdf

    sphere = get_sphere('symmetric724')
    v = sphere.vertices
    radius = 10e-3
    r_points = v * radius
    pdf_mt = multi_tensor_pdf(r_points, mevals=mevals,
                              angles=angl, fractions=[50, 50])
    pdf_map = mapmri_EAP(r_points, radial_order, c_map, mu, R)

    nmse_pdf = np.sqrt(np.sum((pdf_mt - pdf_map) ** 2)) / (pdf_mt.sum())
    assert_almost_equal(nmse_pdf, 0.0, 2)
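For reference, the coefficient-count formula asserted above works out by hand for radial_order = 6 (plain arithmetic, no dipy needed):

radial_order = 6
F = radial_order // 2                            # F = 3
n_coeffs = (F + 1) * (F + 2) * (4 * F + 3) // 6  # 4 * 5 * 15 / 6 = 50
assert n_coeffs == 50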
Example 31
def angle_aware_bilateral_filtering_cpu(in_sh, sh_order=8,
                                        sh_basis='descoteaux07',
                                        in_full_basis=False,
                                        sphere_str='repulsion724',
                                        sigma_spatial=1.0,
                                        sigma_angular=1.0,
                                        sigma_range=0.5,
                                        nbr_processes=1):
    """
    Angle-aware bilateral filtering on the CPU
    (optionally using multiple threads).

    Parameters
    ----------
    in_sh: ndarray (x, y, z, ncoeffs)
        Input SH volume.
    sh_order: int, optional
        Maximum SH order of input volume.
    sh_basis: str, optional
        Name of SH basis used.
    in_full_basis: bool, optional
        True if input is expressed in full SH basis.
    sphere_str: str, optional
        Name of the DIPY sphere to use for sh to sf projection.
    sigma_spatial: float, optional
        Standard deviation for spatial filter.
    sigma_angular: float, optional
        Standard deviation for angular filter.
    sigma_range: float, optional
        Standard deviation for range filter.
    nbr_processes: int, optional
        Number of processes to use.

    Returns
    -------
    out_sh: ndarray (x, y, z, ncoeffs)
        Output SH coefficient array in full SH basis.
    """
    # Load the sphere used for projection of SH
    sphere = get_sphere(sphere_str)

    # Normalized filter for each sf direction
    s_weights = _get_spatial_weights(sigma_spatial)
    a_weights = _get_angular_weights(s_weights.shape, sphere, sigma_angular)

    weights = s_weights[..., None] * a_weights
    weights /= np.sum(weights, axis=(0, 1, 2))

    nb_sf = len(sphere.vertices)
    B = sh_to_sf_matrix(sphere, sh_order=sh_order, basis_type=sh_basis,
                        return_inv=False, full_basis=in_full_basis)

    if nbr_processes > 1:
        # Apply filter to each sphere vertex in parallel
        pool = multiprocessing.Pool(nbr_processes)

        # divide the sphere directions among the processes
        base_chunk_size = int(nb_sf / nbr_processes + 0.5)
        first_ids = np.arange(0, nb_sf, base_chunk_size)
        residuals = nb_sf - first_ids
        chunk_sizes = np.where(residuals < base_chunk_size,
                               residuals, base_chunk_size)
        res = pool.map(_process_subset_directions,
                       zip(itertools.repeat(weights),
                           itertools.repeat(in_sh),
                           first_ids,
                           chunk_sizes,
                           itertools.repeat(B),
                           itertools.repeat(sigma_range)))
        pool.close()
        pool.join()

        # Patch chunks together.
        mean_sf = np.concatenate(res, axis=-1)
    else:
        args = [weights, in_sh, 0, nb_sf,
                B, sigma_range]
        mean_sf = _process_subset_directions(args)

    # Convert back to SH coefficients
    _, B_inv = sh_to_sf_matrix(sphere, sh_order=sh_order, basis_type=sh_basis,
                               full_basis=True)
    out_sh = np.array([np.dot(i, B_inv) for i in mean_sf], dtype=in_sh.dtype)
    # The result is returned in the full (asymmetric) SH basis
    return out_sh
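A hedged usage sketch for the filter above on a synthetic volume (assumes the function and its private helpers are importable from the same module; 45 is the coefficient count of a symmetric order-8 basis):

import numpy as np

# Synthetic order-8 symmetric SH volume: (8 + 1) * (8 + 2) / 2 = 45 coeffs
in_sh = np.random.rand(10, 10, 10, 45).astype(np.float32)

out_sh = angle_aware_bilateral_filtering_cpu(in_sh, sh_order=8,
                                             sh_basis='descoteaux07',
                                             sigma_spatial=1.0,
                                             nbr_processes=1)

# The output is in the full (asymmetric) basis: (8 + 1) ** 2 = 81 coeffs
print(out_sh.shape)  # expected: (10, 10, 10, 81)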
Example 32
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if args.verbose:
        logging.basicConfig(level=logging.DEBUG)

    assert_inputs_exist(
        parser,
        [args.in_sh, args.in_seed, args.in_map_include, args.map_exclude_file])
    assert_outputs_exist(parser, args, args.out_tractogram)

    if not nib.streamlines.is_supported(args.out_tractogram):
        parser.error('Invalid output streamline file format (must be trk or ' +
                     'tck): {0}'.format(args.out_tractogram))

    if not args.min_length > 0:
        parser.error('minL must be > 0, {}mm was provided.'.format(
            args.min_length))
    if args.max_length < args.min_length:
        parser.error(
            'maxL must be > than minL, (minL={}mm, maxL={}mm).'.format(
                args.min_length, args.max_length))

    if args.compress:
        if args.compress < 0.001 or args.compress > 1:
            logging.warning(
                'You are using an error rate of {}.\nWe recommend setting it '
                'between 0.001 and 1.\n0.001 will do almost nothing to the '
                'tracts while 1 will highly compress/linearize the tracts'.
                format(args.compress))

    if args.particles <= 0:
        parser.error('--particles must be >= 1.')

    if args.back_tracking <= 0:
        parser.error('PFT backtracking distance must be > 0.')

    if args.forward_tracking <= 0:
        parser.error('PFT forward tracking distance must be > 0.')

    if args.npv and args.npv <= 0:
        parser.error('Number of seeds per voxel must be > 0.')

    if args.nt and args.nt <= 0:
        parser.error('Total number of seeds must be > 0.')

    fodf_sh_img = nib.load(args.in_sh)
    if not np.allclose(np.mean(fodf_sh_img.header.get_zooms()[:3]),
                       fodf_sh_img.header.get_zooms()[0],
                       atol=1e-03):
        parser.error(
            'SH file is not isotropic. Tracking cannot be run robustly.')

    tracking_sphere = HemiSphere.from_sphere(get_sphere('repulsion724'))

    # Check that the sphere is unit-normed, since we couldn't find such a
    # check in Dipy.
    if not np.allclose(np.linalg.norm(tracking_sphere.vertices, axis=1), 1.):
        raise RuntimeError('Tracking sphere should be unit normed.')

    sh_basis = args.sh_basis

    if args.algo == 'det':
        dgklass = DeterministicMaximumDirectionGetter
    else:
        dgklass = ProbabilisticDirectionGetter

    theta = get_theta(args.theta, args.algo)

    # Reminder for the future:
    # pmf_threshold == clip pmf under this
    # relative_peak_threshold is for initial directions filtering
    # min_separation_angle is the initial separation angle for peak extraction
    dg = dgklass.from_shcoeff(fodf_sh_img.get_fdata(dtype=np.float32),
                              max_angle=theta,
                              sphere=tracking_sphere,
                              basis_type=sh_basis,
                              pmf_threshold=args.sf_threshold,
                              relative_peak_threshold=args.sf_threshold_init)

    map_include_img = nib.load(args.in_map_include)
    map_exclude_img = nib.load(args.map_exclude_file)
    voxel_size = np.average(map_include_img.header['pixdim'][1:4])

    if not args.act:
        tissue_classifier = CmcStoppingCriterion(
            map_include_img.get_fdata(dtype=np.float32),
            map_exclude_img.get_fdata(dtype=np.float32),
            step_size=args.step_size,
            average_voxel_size=voxel_size)
    else:
        tissue_classifier = ActStoppingCriterion(
            map_include_img.get_fdata(dtype=np.float32),
            map_exclude_img.get_fdata(dtype=np.float32))

    if args.npv:
        nb_seeds = args.npv
        seed_per_vox = True
    elif args.nt:
        nb_seeds = args.nt
        seed_per_vox = False
    else:
        nb_seeds = 1
        seed_per_vox = True

    voxel_size = fodf_sh_img.header.get_zooms()[0]
    vox_step_size = args.step_size / voxel_size
    seed_img = nib.load(args.in_seed)
    seeds = track_utils.random_seeds_from_mask(
        get_data_as_mask(seed_img, dtype=bool),
        np.eye(4),
        seeds_count=nb_seeds,
        seed_count_per_voxel=seed_per_vox,
        random_seed=args.seed)

    # Note that max steps is used once for the forward pass and once for the
    # backward pass; it does not, therefore, bound the true maximum length.
    max_steps = int(args.max_length / args.step_size) + 1
    pft_streamlines = ParticleFilteringTracking(
        dg,
        tissue_classifier,
        seeds,
        np.eye(4),
        max_cross=1,
        step_size=vox_step_size,
        maxlen=max_steps,
        pft_back_tracking_dist=args.back_tracking,
        pft_front_tracking_dist=args.forward_tracking,
        particle_count=args.particles,
        return_all=args.keep_all,
        random_seed=args.seed,
        save_seeds=args.save_seeds)

    scaled_min_length = args.min_length / voxel_size
    scaled_max_length = args.max_length / voxel_size

    if args.save_seeds:
        filtered_streamlines, seeds = \
            zip(*((s, p) for s, p in pft_streamlines
                  if scaled_min_length <= length(s) <= scaled_max_length))
        data_per_streamlines = {'seeds': lambda: seeds}
    else:
        filtered_streamlines = \
            (s for s in pft_streamlines
             if scaled_min_length <= length(s) <= scaled_max_length)
        data_per_streamlines = {}

    if args.compress:
        filtered_streamlines = (compress_streamlines(s, args.compress)
                                for s in filtered_streamlines)

    tractogram = LazyTractogram(lambda: filtered_streamlines,
                                data_per_streamlines,
                                affine_to_rasmm=seed_img.affine)

    filetype = nib.streamlines.detect_format(args.out_tractogram)
    reference = get_reference_info(seed_img)
    header = create_tractogram_header(filetype, *reference)

    # Use generator to save the streamlines on-the-fly
    nib.streamlines.save(tractogram, args.out_tractogram, header=header)
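The millimetre-to-voxel scaling used in the script above, in brief (illustrative values only; step size and lengths are given in mm while the tracker works in voxel space):

voxel_size = 2.0                     # mm, isotropic (illustrative)
step_size = 0.5                      # mm
min_length, max_length = 20., 200.   # mm

vox_step_size = step_size / voxel_size       # 0.25 voxels per step
scaled_min_length = min_length / voxel_size  # 10.0 voxels
scaled_max_length = max_length / voxel_size  # 100.0 voxels
max_steps = int(max_length / step_size) + 1  # 401 steps per direction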
Example 33
def white_matter_response_tournier13(acquisition_scheme,
                                     data,
                                     max_iter=5,
                                     sh_order=10,
                                     N_candidate_voxels=300,
                                     peak_ratio_setting='mrtrix'):
    """
    Iterative model-free white matter response function estimation according to
    [1]_. Quoting the paper, the steps are the following:

    - 1) The 300 brain voxels with the highest FA were identified within a
        brain mask (eroded by three voxels to remove any noisy voxels at the
        brain edges).
    - 2) The single-fibre 'response function' was estimated within these
        voxels, and used to compute the fibre orientation distribution (FOD)
        employing constrained spherical deconvolution (CSD) up to lmax = 10.
    - 3) Within each voxel, a peak-finding procedure was used to identify the
        two largest FOD peaks, and their amplitude ratio was computed.
    - 4) The 300 voxels with the lowest second to first peak amplitude ratios
        were identified, and used as the current estimate of the set of
        'single-fibre' voxels. It should be noted that these voxels were not
        required to be a subset of the original set of 'single-fibre' voxels.
    - 5) To ensure minimal bias from the initial estimate of the 'response
        function', steps (2) to (4) were re-iterated until convergence (no
        difference in the set of 'single-fibre' voxels). It should be noted
        that, in practice, convergence was achieved within a single iteration
        in all cases.

    Parameters
    ----------
    acquisition_scheme : DmipyAcquisitionScheme instance,
        An acquisition scheme that has been instantiated using dMipy.
    data : NDarray,
        Measured diffusion signal array.
    max_iter : Positive integer,
        Defines the maximum number of iterations to be done for the single-
        fibre response kernel.
    sh_order : Positive even integer,
        Maximum spherical harmonics order to be used in the FOD estimation for
        the single-fibre response kernel.
    N_candidate_voxels : integer,
        Number of voxels to be included in the final white matter response
        estimation. Default is 300 following [1]_.
    peak_ratio_setting : string,
        Can be either 'ratio' or 'mrtrix', meaning the 'ratio' parameter
        between two peaks is actually calculated as the ratio, or a more
        complicated version as 1 / sqrt(peak1 * (1 - peak2 / peak1)) ** 2, to
        avoid favouring small, yet low SNR FODs [2]_.

    Returns
    -------
    S0_wm : positive float,
        Estimated S0 tissue response value.
    TR2_wm_model : Dmipy Anisotropic ModelFree Model
        ModelFree representation of white matter response.
    selected_indices : array of size (N_candidate_voxels,),
        indices of selected voxels for white matter response.

    References
    ----------
    .. [1] Tournier, J-Donald, Fernando Calamante, and Alan Connelly.
        "Determination of the appropriate b value and number of gradient
        directions for high-angular-resolution diffusion-weighted imaging."
        NMR in Biomedicine 26.12 (2013): 1775-1786.
    .. [2] MRtrix 3.0 readthedocs
    """
    data_shape = np.atleast_2d(data).shape
    N_voxels = int(np.prod(data_shape[:-1]))
    if N_voxels < N_candidate_voxels:
        msg = "The parameter N_candidate voxels is set to {} but only ".format(
            N_candidate_voxels)
        msg += "{} voxels are given. N_candidate_voxels".format(N_voxels)
        msg += " reset to number of voxels given."
        print(msg)
        N_candidate_voxels = N_voxels

    ratio_settings = ['ratio', 'mrtrix']
    if peak_ratio_setting not in ratio_settings:
        msg = 'peak_ratio_setting must be in {}'.format(ratio_settings)
        raise ValueError(msg)

    if data.ndim == 4:
        # calculate brain mask on 4D data (x, y, z, DWI)
        b0_mask, mask = median_otsu(input_volume=data,
                                    vol_idx=np.where(
                                        acquisition_scheme.b0_mask)[0],
                                    median_radius=4,
                                    numpass=4)  # based on dipy default
        # needs to be eroded 3 times.
        mask_eroded = binary_erosion(mask, iterations=3)
        data_to_fit = data[mask_eroded]
    else:
        # can't calculate brain mask on other than 4D data.
        # assume the data was prepared.
        data_to_fit = data.reshape([-1, data_shape[-1]])

    gtab = gtab_dmipy2dipy(acquisition_scheme)
    tenmod = dti.TensorModel(gtab)
    tenfit = tenmod.fit(data_to_fit)
    fa = tenfit.fa

    # selected based on FA
    selected_indices = np.argsort(fa)[-N_candidate_voxels:]
    sphere = get_sphere('symmetric724')
    hemisphere = HemiSphere(theta=sphere.theta, phi=sphere.phi)
    # iterate until convergence
    it = 0
    while True:
        print('Tournier13 white matter response iteration {}'.format(it + 1))
        selected_data = data_to_fit[selected_indices]

        S0_wm, TR2_wm_model = estimate_TR2_anisotropic_tissue_response_model(
            acquisition_scheme, selected_data)
        sh_model = MultiCompartmentSphericalHarmonicsModel([TR2_wm_model],
                                                           sh_order=sh_order)
        sh_fit = sh_model.fit(acquisition_scheme,
                              data_to_fit,
                              solver='csd_tournier07',
                              use_parallel_processing=False,
                              lambda_lb=0.)
        peaks, values, indices = sh_fit.peaks_directions(
            hemisphere, max_peaks=2, relative_peak_threshold=0.)
        if peak_ratio_setting == 'ratio':
            ratio = values[..., 1] / values[..., 0]
        elif peak_ratio_setting == 'mrtrix':
            ratio = 1. / np.sqrt(values[..., 0] *
                                 (1 - values[..., 1] / values[..., 0]))**2
        selected_indices_old = selected_indices
        selected_indices = np.argsort(ratio)[:N_candidate_voxels]
        percentage_overlap = 100 * float(
            len(np.intersect1d(selected_indices,
                               selected_indices_old))) / N_candidate_voxels
        print('{:.1f} percent candidate voxel overlap.'.format(
            percentage_overlap))
        if percentage_overlap == 100.:
            print('White matter response converged')
            break
        it += 1
        if it == max_iter:
            print('Maximum iterations reached without convergence')
            break
    return S0_wm, TR2_wm_model, selected_indices
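To make the two peak_ratio_setting variants above concrete, a small standalone comparison on illustrative peak amplitudes (numbers made up for illustration):

import numpy as np

peak1, peak2 = 1.0, 0.1  # illustrative first and second FOD peak amplitudes

ratio_plain = peak2 / peak1
ratio_mrtrix = 1. / np.sqrt(peak1 * (1 - peak2 / peak1)) ** 2

# The 'mrtrix' variant also penalizes a small first peak (a low-SNR FOD),
# rather than scoring only the relative amplitude of the two peaks.
print(ratio_plain, ratio_mrtrix)  # 0.1 and ~1.11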
Example 34
    def __init__(self, gtab, ratio, reg_sphere=None, sh_order=8, lambda_=1.,
                 tau=0.1):
        r""" Spherical Deconvolution Transform (SDT) [1]_.

        The SDT computes a fiber orientation distribution (FOD) as opposed to a
        diffusion ODF as the QballModel or the CsaOdfModel. This results in a
        sharper angular profile with better angular resolution. The Constrained
        SDTModel is similar to the Constrained CSDModel but mathematically it
        deconvolves the q-ball ODF as opposed to the HARDI signal (see [1]_
        for a comparison and a thorough discussion).

        A sharp fODF is obtained because a single fiber *response* function is
        injected as *a priori* knowledge. In the SDTModel, this response is a
        single fiber q-ball ODF as opposed to a single fiber signal function
        for the CSDModel. The response function will be used as deconvolution
        kernel.

        Parameters
        ----------
        gtab : GradientTable
        ratio : float
            ratio of the smallest vs the largest eigenvalue of the single
            prolate tensor response function
        reg_sphere : Sphere
            sphere used to build the regularization B matrix
        sh_order : int
            maximal spherical harmonics order
        lambda_ : float
            weight given to the constrained-positivity regularization part of
            the deconvolution equation
        tau : float
            threshold (tau * mean(fODF)) controlling the amplitude below
            which the corresponding fODF is assumed to be zero.

        References
        ----------
        .. [1] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
               Probabilistic Tractography Based on Complex Fibre Orientation
               Distributions.

        """
        SphHarmModel.__init__(self, gtab)
        m, n = sph_harm_ind_list(sh_order)
        self.m, self.n = m, n
        self._where_b0s = lazy_index(gtab.b0s_mask)
        self._where_dwi = lazy_index(~gtab.b0s_mask)

        no_params = ((sh_order + 1) * (sh_order + 2)) // 2

        if no_params > np.sum(~gtab.b0s_mask):
            msg = "Number of parameters required for the fit are more "
            msg += "than the actual data points"
            warnings.warn(msg, UserWarning)

        x, y, z = gtab.gradients[self._where_dwi].T
        r, theta, phi = cart2sphere(x, y, z)
        # for the gradient sphere
        self.B_dwi = real_sph_harm(m, n, theta[:, None], phi[:, None])

        # for the odf sphere
        if reg_sphere is None:
            self.sphere = get_sphere('symmetric362')
        else:
            self.sphere = reg_sphere

        r, theta, phi = cart2sphere(
            self.sphere.x,
            self.sphere.y,
            self.sphere.z
        )
        self.B_reg = real_sph_harm(m, n, theta[:, None], phi[:, None])

        self.R, self.P = forward_sdt_deconv_mat(ratio, n)

        # scale lambda_ to account for differences in the number of
        # SH coefficients and number of mapped directions
        self.lambda_ = (lambda_ * self.R.shape[0] * self.R[0, 0] /
                        self.B_reg.shape[0])
        self.tau = tau
        self.sh_order = sh_order
Example 35
def test_odf_slicer(interactive=False):

    sphere = get_sphere('symmetric362')

    shape = (11, 11, 11, sphere.vertices.shape[0])

    fid, fname = mkstemp(suffix='_odf_slicer.mmap')
    print(fid)
    print(fname)

    odfs = np.memmap(fname, dtype=np.float64, mode='w+', shape=shape)

    odfs[:] = 1

    affine = np.eye(4)
    renderer = window.Renderer()

    mask = np.ones(odfs.shape[:3])
    mask[:4, :4, :4] = 0

    odfs[..., 0] = 1

    odf_actor = actor.odf_slicer(odfs,
                                 affine,
                                 mask=mask,
                                 sphere=sphere,
                                 scale=.25,
                                 colormap='jet')
    fa = np.zeros(odfs.shape[:3])
    fa[:, 0, :] = 1.
    fa[:, -1, :] = 1.
    fa[0, :, :] = 1.
    fa[-1, :, :] = 1.
    fa[5, 5, 5] = 1

    k = 5
    I, J, K = odfs.shape[:3]

    fa_actor = actor.slicer(fa, affine)
    fa_actor.display_extent(0, I, 0, J, k, k)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()

    odf_actor.display_extent(0, I, 0, J, k, k)
    odf_actor.GetProperty().SetOpacity(1.0)
    if interactive:
        window.show(renderer, reset_camera=False)

    arr = window.snapshot(renderer)
    report = window.analyze_snapshot(arr, find_objects=True)
    npt.assert_equal(report.objects, 11 * 11)

    renderer.clear()
    renderer.add(fa_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    mask[:] = 0
    mask[5, 5, 5] = 1
    fa[5, 5, 5] = 0
    fa_actor = actor.slicer(fa, None)
    fa_actor.display(None, None, 5)
    odf_actor = actor.odf_slicer(odfs,
                                 None,
                                 mask=mask,
                                 sphere=sphere,
                                 scale=.25,
                                 colormap='jet',
                                 norm=False,
                                 global_cm=True)
    renderer.clear()
    renderer.add(fa_actor)
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    odfs[:, :, :] = 1
    mask = np.ones(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs,
                                 None,
                                 mask=mask,
                                 sphere=sphere,
                                 scale=.25,
                                 colormap='jet',
                                 norm=False,
                                 global_cm=True)

    renderer.clear()
    renderer.add(odf_actor)
    renderer.add(fa_actor)
    renderer.add(actor.axes((11, 11, 11)))
    for i in range(11):
        odf_actor.display(i, None, None)
        fa_actor.display(i, None, None)
        if interactive:
            window.show(renderer)
    for j in range(11):
        odf_actor.display(None, j, None)
        fa_actor.display(None, j, None)
        if interactive:
            window.show(renderer)
    # with mask equal to zero everything should be black
    mask = np.zeros(odfs.shape[:3])
    odf_actor = actor.odf_slicer(odfs,
                                 None,
                                 mask=mask,
                                 sphere=sphere,
                                 scale=.25,
                                 colormap='plasma',
                                 norm=False,
                                 global_cm=True)
    renderer.clear()
    renderer.add(odf_actor)
    renderer.reset_camera()
    renderer.reset_clipping_range()
    if interactive:
        window.show(renderer)

    report = window.analyze_renderer(renderer)
    npt.assert_equal(report.actors, 1)
    npt.assert_equal(report.actors_classnames[0], 'vtkLODActor')

    del odf_actor
    odfs._mmap.close()
    del odfs
    os.close(fid)

    os.remove(fname)
Esempio n. 36
0
def diffusion_components(dki_params,
                         sphere='repulsion100',
                         awf=None,
                         mask=None):
    """ Extracts the restricted and hindered diffusion tensors of well aligned
    fibers from diffusion kurtosis imaging parameters [1]_.

    Parameters
    ----------
    dki_params : ndarray (x, y, z, 27) or (n, 27)
        All parameters estimated from the diffusion kurtosis model.
        Parameters are ordered as follows:
            1) Three diffusion tensor's eigenvalues
            2) Three lines of the eigenvector matrix each containing the first,
               second and third coordinates of the eigenvector
            3) Fifteen elements of the kurtosis tensor
    sphere : Sphere class instance, optional
        The sphere providing sample directions to sample the restricted and
        hindered cellular diffusion tensors. For more details see Fieremans
        et al., 2011.
    awf : ndarray (optional)
        Array containing values of the axonal water fraction that has the shape
        dki_params.shape[:-1]. If not given this will be automatically computed
        using :func:`axonal_water_fraction` with the function's default precision.
    mask : ndarray (optional)
        A boolean array used to mark the coordinates in the data that should be
        analyzed that has the shape dki_params.shape[:-1]

    Returns
    -------
    edt : ndarray (x, y, z, 6) or (n, 6)
        Parameters of the hindered diffusion tensor.
    idt : ndarray (x, y, z, 6) or (n, 6)
        Parameters of the restricted diffusion tensor.

    Note
    ----
    In the original article of the DKI microstructural model [1]_, the hindered
    and restricted tensors were defined as the intra-cellular and extra-cellular
    diffusion compartments, respectively.

    References
    ----------
    .. [1] Fieremans E, Jensen JH, Helpern JA, 2011. White matter
           characterization with diffusional kurtosis imaging.
           Neuroimage 58(1):177-88. doi: 10.1016/j.neuroimage.2011.06.006
    """
    shape = dki_params.shape[:-1]

    # load gradient directions
    if not isinstance(sphere, dps.Sphere):
        sphere = get_sphere(sphere)

    # select voxels where to apply the single fiber model
    if mask is None:
        mask = np.ones(shape, dtype='bool')
    else:
        if mask.shape != shape:
            raise ValueError("Mask is not the same shape as dki_params.")
        else:
            mask = np.array(mask, dtype=bool, copy=False)

    # check or compute awf values
    if awf is None:
        awf = axonal_water_fraction(dki_params, sphere=sphere, mask=mask)
    else:
        if awf.shape != shape:
            raise ValueError("awf array is not the same shape as dki_params.")

    # Initialize hindered and restricted diffusion tensors
    edt_all = np.zeros(shape + (6, ))
    idt_all = np.zeros(shape + (6, ))

    # Generate matrix that converts apparent diffusion coefficients to tensors
    B = np.zeros((sphere.x.size, 6))
    B[:, 0] = sphere.x * sphere.x  # Bxx
    B[:, 1] = sphere.x * sphere.y * 2.  # Bxy
    B[:, 2] = sphere.y * sphere.y  # Byy
    B[:, 3] = sphere.x * sphere.z * 2.  # Bxz
    B[:, 4] = sphere.y * sphere.z * 2.  # Byz
    B[:, 5] = sphere.z * sphere.z  # Bzz
    pinvB = np.linalg.pinv(B)

    # Compute hindered and restricted diffusion tensors for all voxels
    evals, evecs, kt = split_dki_param(dki_params)
    dt = lower_triangular(vec_val_vect(evecs, evals))
    md = mean_diffusivity(evals)

    index = ndindex(mask.shape)
    for idx in index:
        if not mask[idx]:
            continue
        # sample apparent diffusion and kurtosis values
        di = directional_diffusion(dt[idx], sphere.vertices)
        ki = directional_kurtosis(dt[idx],
                                  md[idx],
                                  kt[idx],
                                  sphere.vertices,
                                  adc=di,
                                  min_kurtosis=0)
        edi = di * (1 + np.sqrt(ki * awf[idx] / (3.0 - 3.0 * awf[idx])))
        edt = np.dot(pinvB, edi)
        edt_all[idx] = edt

        # We only move on if there is an axonal water fraction.
        # Otherwise, remaining params are already zero, so move on
        if awf[idx] == 0:
            continue
        # Convert apparent diffusion and kurtosis values to apparent diffusion
        # values of the hindered and restricted diffusion
        idi = di * (1 - np.sqrt(ki * (1.0 - awf[idx]) / (3.0 * awf[idx])))
        # generate hindered and restricted diffusion tensors
        idt = np.dot(pinvB, idi)
        idt_all[idx] = idt

    return edt_all, idt_all
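The pinv(B) step above is an ordinary least-squares inversion of the quadratic
form adc(v) = v^T D v. A self-contained check with a synthetic tensor
(illustrative eigenvalues) that recovers the six unique tensor elements from
directional ADCs:

import numpy as np
from dipy.data import get_sphere

sphere = get_sphere('repulsion100')
D = np.diag([1.7e-3, 0.3e-3, 0.3e-3])  # known diffusion tensor

# same ordering and factor-2 convention as the B matrix built above
B = np.zeros((sphere.x.size, 6))
B[:, 0] = sphere.x * sphere.x           # Dxx
B[:, 1] = sphere.x * sphere.y * 2.      # Dxy
B[:, 2] = sphere.y * sphere.y           # Dyy
B[:, 3] = sphere.x * sphere.z * 2.      # Dxz
B[:, 4] = sphere.y * sphere.z * 2.      # Dyz
B[:, 5] = sphere.z * sphere.z           # Dzz

V = sphere.vertices
adc = np.einsum('ij,jk,ik->i', V, D, V)  # v^T D v for every direction
d6 = np.linalg.pinv(B).dot(adc)          # [Dxx, Dxy, Dyy, Dxz, Dyz, Dzz]
assert np.allclose(d6, [1.7e-3, 0., 0.3e-3, 0., 0., 0.3e-3])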
Esempio n. 37
0
    def _run_interface(self, runtime):
        from dipy.reconst.peaks import peaks_from_model
        from dipy.tracking.eudx import EuDX
        from dipy.data import get_sphere
        import pickle
        import gzip

        if (not (isdefined(self.inputs.in_model)
                 or isdefined(self.inputs.in_peaks))):
            raise RuntimeError(('At least one of in_model or in_peaks should '
                                'be supplied'))

        img = nb.load(self.inputs.in_file)
        imref = nb.four_to_three(img)[0]
        affine = img.affine

        data = img.get_data().astype(np.float32)
        hdr = imref.header.copy()
        hdr.set_data_dtype(np.float32)
        hdr['data_type'] = 16

        sphere = get_sphere('symmetric724')

        self._save_peaks = False
        if isdefined(self.inputs.in_peaks):
            IFLOGGER.info('Peaks file found, skipping ODF peaks search...')
            f = gzip.open(self.inputs.in_peaks, 'rb')
            peaks = pickle.load(f)
            f.close()
        else:
            self._save_peaks = True
            IFLOGGER.info('Loading model and computing ODF peaks')
            f = gzip.open(self.inputs.in_model, 'rb')
            odf_model = pickle.load(f)
            f.close()

            peaks = peaks_from_model(
                model=odf_model,
                data=data,
                sphere=sphere,
                relative_peak_threshold=self.inputs.peak_threshold,
                min_separation_angle=self.inputs.min_angle,
                parallel=self.inputs.multiprocess)

            f = gzip.open(self._gen_filename('peaks', ext='.pklz'), 'wb')
            pickle.dump(peaks, f, -1)
            f.close()

        hdr.set_data_shape(peaks.gfa.shape)
        nb.Nifti1Image(peaks.gfa.astype(np.float32), affine,
                       hdr).to_filename(self._gen_filename('gfa'))

        IFLOGGER.info('Performing tractography')

        if isdefined(self.inputs.tracking_mask):
            msk = nb.load(self.inputs.tracking_mask).get_data()
            msk[msk > 0] = 1
            msk[msk < 0] = 0
        else:
            msk = np.ones(imref.shape)

        gfa = peaks.gfa * msk
        seeds = self.inputs.num_seeds

        if isdefined(self.inputs.seed_coord):
            seeds = np.loadtxt(self.inputs.seed_coord)

        elif isdefined(self.inputs.seed_mask):
            seedmsk = nb.load(self.inputs.seed_mask).get_data()
            assert (seedmsk.shape == data.shape[:3])
            seedmsk[seedmsk > 0] = 1
            seedmsk[seedmsk < 1] = 0
            seedps = np.array(np.where(seedmsk == 1), dtype=np.float32).T
            vseeds = seedps.shape[0]
            nsperv = (seeds // vseeds) + 1
            IFLOGGER.info(
                'Seed mask is provided (%d voxels inside '
                'mask), computing seeds (%d seeds/voxel).', vseeds, nsperv)
            if nsperv > 1:
                IFLOGGER.info('Needed %d seeds per selected voxel (total %d).',
                              nsperv, vseeds)
                seedps = np.vstack(np.array([seedps] * nsperv))
                voxcoord = seedps + np.random.uniform(-1, 1, size=seedps.shape)
                nseeds = voxcoord.shape[0]
                seeds = affine.dot(
                    np.vstack((voxcoord.T, np.ones((1, nseeds)))))[:3, :].T

                if self.inputs.save_seeds:
                    np.savetxt(self._gen_filename('seeds', ext='.txt'), seeds)

        if isdefined(self.inputs.tracking_mask):
            tmask = msk
            a_low = 0.1
        else:
            tmask = gfa
            a_low = self.inputs.gfa_thresh

        eu = EuDX(tmask,
                  peaks.peak_indices[..., 0],
                  seeds=seeds,
                  affine=affine,
                  odf_vertices=sphere.vertices,
                  a_low=a_low)

        ss_mm = [np.array(s) for s in eu]

        trkfilev = nb.trackvis.TrackvisFile([(s, None, None) for s in ss_mm],
                                            points_space='rasmm',
                                            affine=np.eye(4))
        trkfilev.to_file(self._gen_filename('tracked', ext='.trk'))
        return runtime
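The seed conversion above maps jittered voxel indices to world coordinates
through a homogeneous affine product; a small sketch (synthetic affine) that
checks it against nibabel's apply_affine:

import numpy as np
from nibabel.affines import apply_affine

affine = np.diag([2., 2., 2., 1.])
affine[:3, 3] = [-90., -126., -72.]  # illustrative translation

vox = np.array([[10., 20., 30.], [0., 0., 0.]])
hom = np.vstack((vox.T, np.ones((1, len(vox)))))
world = affine.dot(hom)[:3, :].T     # same pattern as in the code above

assert np.allclose(world, apply_affine(affine, vox))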
Esempio n. 38
0
    bvecs = np.c_[bvecs[:, 0], bvecs[:, 1], -bvecs[:, 2]]

    gtab = gradient_table(bvals, bvecs)

    data, affine, vox_size = load_nifti(fdwi, return_voxsize=True)
    scale = [0.5, 0.5, 0.5, 1.]
    data = zoom(data, zoom=scale, order=1, mode='constant')
    labels = zoom(labels, zoom=scale[:3], order=1, mode='constant')
    labels = labels.astype(int)

    # Build brain mask
    # bm = np.where(labels == 0, False, True)
    bm = (labels != 0) * 1
    mask = bm
    # sphere = get_sphere('repulsion724')
    sphere = get_sphere('repulsion200')

    from dipy.reconst.dti import TensorModel
    tensor_model = TensorModel(gtab)

    t1 = time()
    tensor_fit = tensor_model.fit(data, bm)
    # save_nifti('bmfa.nii.gz', tensor_fit.fa, affine)
    # wenlin's change: address the output name to each animal

    affine = affine @ np.diag(scale)

    save_nifti(outpath + 'bmfa' + runno + '.nii.gz', tensor_fit.fa, affine)
    fa = tensor_fit.fa
    duration1 = time() - t1
    # wenlin's change: address the output name to each animal
Esempio n. 39
0
def test_bootstap_peak_tracker():
    """This tests that the Bootstrat Peak Direction Getter plays nice
    LocalTracking and produces reasonable streamlines in a simple example.
    """
    sphere = get_sphere('repulsion100')

    # A simple image with three possible configurations, a vertical tract,
    # a horizontal tract and a crossing
    simple_image = np.array([
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
        [2, 3, 2, 2, 2, 0],
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],
    ])
    simple_image = simple_image[..., None]

    bvecs = sphere.vertices
    bvals = np.ones(len(bvecs)) * 1000
    bvecs = np.insert(bvecs, 0, np.array([0, 0, 0]), axis=0)
    bvals = np.insert(bvals, 0, 0)
    gtab = gradient_table(bvals, bvecs)
    angles = [(90, 90), (90, 0)]
    fracs = [50, 50]
    mevals = np.array([[1.5, 0.4, 0.4], [1.5, 0.4, 0.4]]) * 1e-3
    mevecs = [
        np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
        np.array([[0, 1, 0], [1, 0, 0], [0, 0, 1]])
    ]
    voxel1 = single_tensor(gtab, 1, mevals[0], mevecs[0], snr=None)
    voxel2 = single_tensor(gtab, 1, mevals[0], mevecs[1], snr=None)
    voxel3, _ = multi_tensor(gtab,
                             mevals,
                             fractions=fracs,
                             angles=angles,
                             snr=None)
    data = np.tile(voxel3, [5, 6, 1, 1])
    data[simple_image == 1] = voxel1
    data[simple_image == 2] = voxel2

    response = (np.array(mevals[1]), 1)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)

    seeds = [np.array([0., 1., 0.]), np.array([2., 4., 0.])]

    tc = BinaryTissueClassifier((simple_image > 0).astype(float))
    boot_dg = BootDirectionGetter.from_data(data, csd_model, 60)

    streamlines_generator = LocalTracking(boot_dg, tc, seeds, np.eye(4), 1.)
    streamlines = Streamlines(streamlines_generator)
    expected = [
        np.array([[0., 1., 0.], [1., 1., 0.], [2., 1., 0.], [3., 1., 0.],
                  [4., 1., 0.]]),
        np.array([
            [2., 5., 0.],
            [2., 4., 0.],
            [2., 3., 0.],
            [2., 2., 0.],
            [2., 1., 0.],
            [2., 0., 0.],
        ])
    ]

    def allclose(x, y):
        return x.shape == y.shape and np.allclose(x, y, atol=0.5)

    if not allclose(streamlines[0], expected[0]):
        raise AssertionError()
    if not allclose(streamlines[1], expected[1]):
        raise AssertionError()
Esempio n. 40
0
def run_track(B0_mask, gm_in_dwi, vent_csf_in_dwi, wm_in_dwi, tiss_class,
              labels_im_file_wm_gm_int, labels_im_file, target_samples,
              curv_thr_list, step_list, track_type, max_length, maxcrossing,
              directget, conn_model, gtab_file, dwi_file, network, node_size,
              dens_thresh, ID, roi, min_span_tree, disp_filt, parc, prune,
              atlas, uatlas, labels, coords, norm, binary, atlas_mni, life_run,
              min_length, fa_path):
    '''
    Run all ensemble tractography and filtering routines.

    Parameters
    ----------
    B0_mask : str
        File path to B0 brain mask.
    gm_in_dwi : str
        File path to grey-matter tissue segmentation Nifti1Image.
    vent_csf_in_dwi : str
        File path to ventricular CSF tissue segmentation Nifti1Image.
    wm_in_dwi : str
        File path to white-matter tissue segmentation Nifti1Image.
    tiss_class : str
        Tissue classification method.
    labels_im_file_wm_gm_int : str
        File path to atlas parcellation Nifti1Image in T1w-warped native diffusion space, restricted to wm-gm interface.
    labels_im_file : str
        File path to atlas parcellation Nifti1Image in T1w-warped native diffusion space.
    target_samples : int
        Total number of streamline samples specified to generate streams.
    curv_thr_list : list
        List of integer curvature thresholds used to perform ensemble tracking.
    step_list : list
        List of float step-sizes used to perform ensemble tracking.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    max_length : int
        Maximum fiber length threshold in mm to restrict tracking.
    maxcrossing : int
        Maximum number of diffusion directions that can be assumed per voxel while tracking.
    directget : str
        The statistical approach to tracking. Options are: det (deterministic), closest (clos), boot (bootstrapped),
        and prob (probabilistic).
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    gtab_file : str
        File path to pickled DiPy gradient table object.
    dwi_file : str
        File path to diffusion weighted image.
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based centroids
        are used as ROIs for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    life_run : bool
        Indicates whether to perform Linear Fascicle Evaluation (LiFE).
    min_length : int
        Minimum fiber length threshold in mm.
    fa_path : str
        File path to FA Nifti1Image.

    Returns
    -------
    streams : str
        File path to save streamline array sequence in .trk format.
    track_type : str
        Tracking algorithm used (e.g. 'local' or 'particle').
    target_samples : int
        Total number of streamline samples specified to generate streams.
    conn_model : str
        Connectivity reconstruction method (e.g. 'csa', 'tensor', 'csd').
    dir_path : str
        Path to directory containing subject derivative data for a given pynets run.
    network : str
        Resting-state network based on Yeo-7 and Yeo-17 naming (e.g. 'Default')
        used to filter nodes in the study of brain subgraphs.
    node_size : int
        Spherical centroid node size in the case that coordinate-based centroids
        are used as ROIs for tracking.
    dens_thresh : bool
        Indicates whether a target graph density is to be used as the basis for
        thresholding.
    ID : str
        A subject id or other unique identifier.
    roi : str
        File path to binarized/boolean region-of-interest Nifti1Image file.
    min_span_tree : bool
        Indicates whether local thresholding from the Minimum Spanning Tree
        should be used.
    disp_filt : bool
        Indicates whether local thresholding using a disparity filter and
        'backbone network' should be used.
    parc : bool
        Indicates whether to use parcels instead of coordinates as ROI nodes.
    prune : bool
        Indicates whether to prune final graph of disconnected nodes/isolates.
    atlas : str
        Name of atlas parcellation used.
    uatlas : str
        File path to atlas parcellation Nifti1Image in MNI template space.
    labels : list
        List of string labels corresponding to graph nodes.
    coords : list
        List of (x, y, z) tuples corresponding to a coordinate atlas used or
        which represent the center-of-mass of each parcellation node.
    norm : int
        Indicates method of normalizing resulting graph.
    binary : bool
        Indicates whether to binarize resulting graph edges to form an
        unweighted graph.
    atlas_mni : str
        File path to atlas parcellation Nifti1Image in T1w-warped MNI space.
    curv_thr_list : list
        List of integer curvature thresholds used to perform ensemble tracking.
    step_list : list
        List of float step-sizes used to perform ensemble tracking.
    fa_path : str
        File path to FA Nifti1Image.
    dm_path : str
        File path to fiber density map Nifti1Image.
    '''

    try:
        import cPickle as pickle
    except ImportError:
        import _pickle as pickle
    from dipy.io import load_pickle
    from colorama import Fore, Style
    from dipy.data import get_sphere
    from pynets import utils
    from pynets.dmri.track import prep_tissues, reconstruction, filter_streamlines, track_ensemble

    # Load gradient table
    gtab = load_pickle(gtab_file)

    # Fit diffusion model
    mod_fit = reconstruction(conn_model, gtab, dwi_file, wm_in_dwi)

    # Load atlas parcellation (and its wm-gm interface reduced version for seeding)
    atlas_img = nib.load(labels_im_file)
    atlas_data = atlas_img.get_fdata().astype('int')
    atlas_img_wm_gm_int = nib.load(labels_im_file_wm_gm_int)
    atlas_data_wm_gm_int = atlas_img_wm_gm_int.get_fdata().astype('int')

    # Build mask vector from atlas for later roi filtering
    parcels = []
    for roi_val in np.unique(atlas_data)[1:]:
        parcels.append(atlas_data == roi_val)

    # Get sphere
    sphere = get_sphere('repulsion724')

    # Instantiate tissue classifier
    tiss_classifier = prep_tissues(B0_mask, gm_in_dwi, vent_csf_in_dwi,
                                   wm_in_dwi, tiss_class)

    if np.sum(atlas_data) == 0:
        raise ValueError(
            'ERROR: No non-zero voxels found in atlas. Check any roi masks and/or wm-gm interface images '
            'to verify overlap with dwi-registered atlas.')

    # Iteratively build a list of streamlines for each ROI while tracking
    print(
        "%s%s%s%s" %
        (Fore.GREEN, 'Target number of samples: ', Fore.BLUE, target_samples))
    print(Style.RESET_ALL)
    print("%s%s%s%s" % (Fore.GREEN, 'Using curvature threshold(s): ',
                        Fore.BLUE, curv_thr_list))
    print(Style.RESET_ALL)
    print("%s%s%s%s" %
          (Fore.GREEN, 'Using step size(s): ', Fore.BLUE, step_list))
    print(Style.RESET_ALL)
    print("%s%s%s%s" % (Fore.GREEN, 'Tracking type: ', Fore.BLUE, track_type))
    print(Style.RESET_ALL)
    if directget == 'prob':
        print("%s%s%s" %
              ('Using ', Fore.MAGENTA, 'Probabilistic Direction...'))
    elif directget == 'boot':
        print("%s%s%s" % ('Using ', Fore.MAGENTA, 'Bootstrapped Direction...'))
    elif directget == 'closest':
        print("%s%s%s" % ('Using ', Fore.MAGENTA, 'Closest Peak Direction...'))
    elif directget == 'det':
        print("%s%s%s" %
              ('Using ', Fore.MAGENTA, 'Deterministic Maximum Direction...'))
    print(Style.RESET_ALL)

    # Commence Ensemble Tractography
    streamlines = track_ensemble(target_samples, atlas_data_wm_gm_int, parcels,
                                 mod_fit, tiss_classifier, sphere, directget,
                                 curv_thr_list, step_list, track_type,
                                 maxcrossing, max_length)
    print('Tracking Complete')

    # Perform streamline filtering routines
    dir_path = utils.do_dir_path(atlas, dwi_file)
    [streams, dir_path,
     dm_path] = filter_streamlines(dwi_file, dir_path, gtab, streamlines,
                                   life_run, min_length, conn_model,
                                   target_samples, node_size, curv_thr_list,
                                   step_list, network, roi)

    return streams, track_type, target_samples, conn_model, dir_path, network, node_size, dens_thresh, ID, roi, min_span_tree, disp_filt, parc, prune, atlas, uatlas, labels, coords, norm, binary, atlas_mni, curv_thr_list, step_list, fa_path, dm_path
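As a rough sketch, the directget options documented above could map onto dipy
direction-getter classes as follows (a hypothetical lookup table for
illustration only; pynets wires these up inside its reconstruction and
track_ensemble helpers):

from dipy.direction import (BootDirectionGetter,
                            ClosestPeakDirectionGetter,
                            DeterministicMaximumDirectionGetter,
                            ProbabilisticDirectionGetter)

# hypothetical mapping, not the pynets implementation
DIRECTION_GETTERS = {
    'prob': ProbabilisticDirectionGetter,
    'boot': BootDirectionGetter,
    'closest': ClosestPeakDirectionGetter,
    'det': DeterministicMaximumDirectionGetter,
}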
Esempio n. 41
0
def angle_aware_bilateral_filtering_gpu(in_sh, sh_order=8,
                                        sh_basis='descoteaux07',
                                        in_full_basis=False,
                                        sphere_str='repulsion724',
                                        sigma_spatial=1.0,
                                        sigma_angular=1.0,
                                        sigma_range=0.5):
    """
    Angle-aware bilateral filtering using OpenCL for GPU computing.

    Parameters
    ----------
    in_sh: ndarray (x, y, z, ncoeffs)
        Input SH volume.
    sh_order: int, optional
        Maximum SH order of input volume.
    sh_basis: str, optional
        Name of SH basis used.
    in_full_basis: bool, optional
        True if input is expressed in full SH basis.
    sphere_str: str, optional
        Name of the DIPY sphere to use for sh to sf projection.
    sigma_spatial: float, optional
        Standard deviation for spatial filter.
    sigma_angular: float, optional
        Standard deviation for angular filter.
    sigma_range: float, optional
        Standard deviation for range filter.

    Returns
    -------
    out_sh: ndarray (x, y, z, ncoeffs)
        Output SH coefficient array in full SH basis.
    """
    s_weights = _get_spatial_weights(sigma_spatial)
    h_half_width = len(s_weights) // 2

    sphere = get_sphere(sphere_str)
    a_weights = _get_angular_weights(s_weights.shape, sphere, sigma_angular)

    h_weights = s_weights[..., None] * a_weights
    h_weights /= np.sum(h_weights, axis=(0, 1, 2))

    sh_to_sf_mat = sh_to_sf_matrix(sphere, sh_order=sh_order,
                                   basis_type=sh_basis,
                                   full_basis=in_full_basis,
                                   return_inv=False)

    _, sf_to_sh_mat = sh_to_sf_matrix(sphere, sh_order=sh_order,
                                      basis_type=sh_basis,
                                      full_basis=True,
                                      return_inv=True)

    out_n_coeffs = sf_to_sh_mat.shape[1]
    n_dirs = len(sphere.vertices)
    volume_shape = in_sh.shape
    in_sh = np.pad(in_sh, ((h_half_width, h_half_width),
                           (h_half_width, h_half_width),
                           (h_half_width, h_half_width),
                           (0, 0)))

    cl_kernel = CLKernel('correlate', 'denoise', 'angle_aware_bilateral.cl')
    cl_kernel.set_define('IM_X_DIM', volume_shape[0])
    cl_kernel.set_define('IM_Y_DIM', volume_shape[1])
    cl_kernel.set_define('IM_Z_DIM', volume_shape[2])

    cl_kernel.set_define('H_X_DIM', h_weights.shape[0])
    cl_kernel.set_define('H_Y_DIM', h_weights.shape[1])
    cl_kernel.set_define('H_Z_DIM', h_weights.shape[2])

    cl_kernel.set_define('SIGMA_RANGE', float(sigma_range))

    cl_kernel.set_define('IN_N_COEFFS', volume_shape[-1])
    cl_kernel.set_define('OUT_N_COEFFS', out_n_coeffs)
    cl_kernel.set_define('N_DIRS', n_dirs)

    cl_manager = CLManager(cl_kernel, 4, 1)
    cl_manager.add_input_buffer(0, in_sh)
    cl_manager.add_input_buffer(1, h_weights)
    cl_manager.add_input_buffer(2, sh_to_sf_mat)
    cl_manager.add_input_buffer(3, sf_to_sh_mat)

    cl_manager.add_output_buffer(0, volume_shape[:3] + (out_n_coeffs,),
                                 np.float32)

    outputs = cl_manager.run(volume_shape[:3])
    return outputs[0]
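A small round-trip check of the SH/SF projection matrices used above, assuming
dipy's sh_to_sf_matrix API (the constant spherical function is synthetic):

import numpy as np
from dipy.data import get_sphere
from dipy.reconst.shm import sh_to_sf_matrix

sphere = get_sphere('repulsion724')
B, invB = sh_to_sf_matrix(sphere, sh_order=8, basis_type='descoteaux07',
                          return_inv=True)

sf = np.ones(len(sphere.vertices))  # constant function on the sphere
sh = sf @ invB                       # spherical function -> SH coefficients
sf_back = sh @ B                     # SH coefficients -> spherical function
assert np.allclose(sf, sf_back, atol=1e-3)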
Esempio n. 42
0
    def __init__(self,
                 gtab,
                 sh_order=8,
                 lambda_lb=1e-3,
                 dec_alg='CSD',
                 sphere=None,
                 lambda_csd=1.0):
        r""" Analytical and continuous modeling of the diffusion signal with
        respect to the FORECAST basis [1]_, [2]_, [3]_.
        This implementation is a modification of the original FORECAST
        model presented in [1]_, adapted for multi-shell data as in [2]_, [3]_.

        The main idea is to model the diffusion signal as the combination of a
        single fiber response function $F(\mathbf{b})$ times the fODF
        $\rho(\mathbf{v})$

        .. math::
            :nowrap:
                \begin{equation}
                    E(\mathbf{b}) = \int_{\mathbf{v} \in \mathcal{S}^2} \rho(\mathbf{v}) F({\mathbf{b}} | \mathbf{v}) d \mathbf{v}
                \end{equation}

        where $\mathbf{b}$ is the b-vector (b-value times gradient direction)
        and $\mathbf{v}$ is a unit vector representing a fiber direction.

        In FORECAST $\rho$ is modeled using real symmetric Spherical Harmonics
        (SH) and $F(\mathbf{b})$ is an axially symmetric tensor.


        Parameters
        ----------
        gtab : GradientTable,
            gradient directions and bvalues container class.
        sh_order : unsigned int,
            an even integer that represents the SH order of the basis (max 12)
        lambda_lb: float,
            Laplace-Beltrami regularization weight.
        dec_alg : str,
            Spherical deconvolution algorithm. The possible values are Weighted
            Least Squares ('WLS'), Positivity Constraints using CVXPY ('POS')
            and the Constrained Spherical Deconvolution algorithm ('CSD').
            Default is 'CSD'.
        sphere : array, shape (N,3),
            sphere points where to enforce positivity when 'POS' or 'CSD'
            dec_alg are selected.
        lambda_csd : float,
            CSD regularization weight.

        References
        ----------
        .. [1] Anderson A. W., "Measurement of Fiber Orientation Distributions
               Using High Angular Resolution Diffusion Imaging", Magnetic
               Resonance in Medicine, 2005.

        .. [2] Kaden E. et al., "Quantitative Mapping of the Per-Axon Diffusion
               Coefficients in Brain White Matter", Magnetic Resonance in
               Medicine, 2016.

        .. [3] Zucchelli M. et al., "A generalized SMT-based framework for
               Diffusion MRI microstructural model estimation", MICCAI Workshop
               on Computational DIFFUSION MRI (CDMRI), 2017.

        Examples
        --------
        In this example, where the data, gradient table and sphere tessellation
        used for reconstruction are provided, we model the diffusion signal
        with respect to the FORECAST and compute the fODF, parallel and
        perpendicular diffusivity.

        >>> from dipy.data import get_sphere, get_3shell_gtab
        >>> gtab = get_3shell_gtab()
        >>> from dipy.sims.voxel import MultiTensor
        >>> mevals = np.array(([0.0017, 0.0003, 0.0003], 
        ...                    [0.0017, 0.0003, 0.0003]))
        >>> angl = [(0, 0), (60, 0)]
        >>> data, sticks = MultiTensor(gtab,
        ...                            mevals,
        ...                            S0=100.0,
        ...                            angles=angl,
        ...                            fractions=[50, 50],
        ...                            snr=None)
        >>> from dipy.reconst.forecast import ForecastModel
        >>> fm = ForecastModel(gtab, sh_order=6)
        >>> f_fit = fm.fit(data)
        >>> d_par = f_fit.dpar
        >>> d_perp = f_fit.dperp
        >>> sphere = get_sphere('symmetric724')
        >>> fodf = f_fit.odf(sphere)
        """
        OdfModel.__init__(self, gtab)

        # round the bvals in order to avoid numerical errors
        self.bvals = np.round(gtab.bvals / 100) * 100
        self.bvecs = gtab.bvecs

        if 2 <= sh_order <= 12 and sh_order % 2 == 0:
            self.sh_order = sh_order
        else:
            msg = "sh_order must be an even integer "
            msg += "between 2 and 12"
            raise ValueError(msg)

        if sphere is None:
            sphere = get_sphere('repulsion724')
            self.vertices = sphere.vertices[0:int(sphere.vertices.shape[0] /
                                                  2), :]

        else:
            self.vertices = sphere

        self.b0s_mask = self.bvals == 0
        self.one_0_bvals = np.r_[0, self.bvals[~self.b0s_mask]]
        self.one_0_bvecs = np.r_[np.array([0, 0, 0]).reshape(1, 3),
                                 self.bvecs[~self.b0s_mask, :]]

        self.rho = rho_matrix(self.sh_order, self.one_0_bvecs)

        # signal regularization matrix
        self.srm = rho_matrix(4, self.one_0_bvecs)
        self.lb_matrix_signal = lb_forecast(4)

        self.b_unique = np.sort(np.unique(self.bvals[self.bvals > 0]))
        self.wls = True
        self.csd = False
        self.pos = False

        if dec_alg.upper() == 'POS':
            if have_cvxpy:
                self.wls = False
                self.pos = True
            else:
                msg = 'cvxpy is needed to enforce positivity constraints.'
                raise ValueError(msg)

        if dec_alg.upper() == 'CSD':
            self.csd = True

        self.lb_matrix = lb_forecast(self.sh_order)
        self.lambda_lb = lambda_lb
        self.lambda_csd = lambda_csd
        self.fod = rho_matrix(sh_order, self.vertices)
Esempio n. 43
0
def test_predict():
    """
    Test model prediction API
    """
    psphere = get_sphere('symmetric362')
    bvecs = np.concatenate(([[1, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = grad.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001], [0.0015, 0.0003, 0.0003]))
    mevecs = [
        np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
        np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])
    ]
    S = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)

    dm = dti.TensorModel(gtab, 'LS', return_S0_hat=True)
    dmfit = dm.fit(S)
    npt.assert_array_almost_equal(dmfit.predict(gtab, S0=100), S)
    npt.assert_array_almost_equal(dmfit.predict(gtab), S)
    npt.assert_array_almost_equal(dm.predict(dmfit.model_params, S0=100), S)

    fdata, fbvals, fbvecs = get_fnames()
    data = load_nifti_data(fdata)
    # Make the data cube a bit larger:
    data = np.tile(data.T, 2).T
    gtab = grad.gradient_table(fbvals, fbvecs)
    dtim = dti.TensorModel(gtab)
    dtif = dtim.fit(data)
    S0 = np.mean(data[..., gtab.b0s_mask], -1)
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)
    # Predict using S0_hat:
    dtim = dti.TensorModel(gtab, return_S0_hat=True)
    dtif = dtim.fit(data)
    p = dtif.predict(gtab)
    npt.assert_equal(p.shape, data.shape)
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)

    # Test iter_fit_tensor with S0_hat
    dtim = dti.TensorModel(gtab, step=2, return_S0_hat=True)
    dtif = dtim.fit(data)
    S0 = np.mean(data[..., gtab.b0s_mask], -1)
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)

    # Use a smaller step in predicting:

    dtim = dti.TensorModel(gtab, step=2)
    dtif = dtim.fit(data)
    S0 = np.mean(data[..., gtab.b0s_mask], -1)
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)
    # And with a scalar S0:
    S0 = 1
    p = dtif.predict(gtab, S0)
    npt.assert_equal(p.shape, data.shape)
    # Assign the step through kwarg:
    p = dtif.predict(gtab, S0, step=1)
    npt.assert_equal(p.shape, data.shape)
    # And without S0:
    p = dtif.predict(gtab, step=1)
    npt.assert_equal(p.shape, data.shape)
Esempio n. 44
0
def run_tractography(fdwi,
                     fbval,
                     fbvec,
                     fwmparc,
                     mod_func,
                     mod_type,
                     seed_density=20):
    """
    mod_func : 'str'
        'csd' or 'csa'
    mod_type : 'str'
        'det' or 'prob'
    seed_density : int, default=20
        Seeding density for tractography
    """
    # Getting default params
    sphere = get_sphere("repulsion724")
    stream_affine = np.eye(4)

    # Loading data
    print("Loading Data...")
    dwi, gtab, wm_mask = load_data(fdwi, fbval, fbvec, fwmparc)

    # Make tissue classifier
    tiss_classifier = BinaryStoppingCriterion(wm_mask)

    if mod_func == "csd":
        mod = csd_mod_est(gtab, dwi, wm_mask)
    elif mod_func == "csa":
        mod = odf_mod_est(gtab)

    # Build seed list
    seeds = utils.random_seeds_from_mask(
        wm_mask,
        affine=stream_affine,
        seeds_count=int(seed_density),
        seed_count_per_voxel=True,
    )

    # Make streamlines
    if mod_type == "det":
        print("Obtaining peaks from model...")
        direction_getter = peaks_from_model(
            mod,
            dwi,
            sphere,
            relative_peak_threshold=0.5,
            min_separation_angle=25,
            mask=wm_mask,
            npeaks=5,
            normalize_peaks=True,
        )
    elif mod_type == "prob":
        print("Preparing probabilistic tracking...")
        print("Fitting model to data...")
        mod_fit = mod.fit(dwi, wm_mask)
        print("Building direction-getter...")
        try:
            print(
                "Proceeding using spherical harmonic coefficient from model estimation..."
            )
            direction_getter = ProbabilisticDirectionGetter.from_shcoeff(
                mod_fit.shm_coeff, max_angle=60.0, sphere=sphere)
        except Exception:
            print("Proceeding using FOD PMF from model estimation...")
            fod = mod_fit.odf(sphere)
            pmf = fod.clip(min=0)
            direction_getter = ProbabilisticDirectionGetter.from_pmf(
                pmf, max_angle=60.0, sphere=sphere)

    print("Running Local Tracking")
    streamline_generator = LocalTracking(
        direction_getter,
        tiss_classifier,
        seeds,
        stream_affine,
        step_size=0.5,
        return_all=True,
    )

    print("Reconstructing tractogram streamlines...")
    streamlines = Streamlines(streamline_generator)
    tracks = Streamlines([track for track in streamlines if len(track) > 60])
    return tracks
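A hypothetical invocation of the helper above (the file names are placeholders,
not from the original source):

tracks = run_tractography('dwi.nii.gz', 'dwi.bval', 'dwi.bvec',
                          'wmparc.nii.gz', mod_func='csd', mod_type='prob',
                          seed_density=20)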
Esempio n. 45
0
def _get_direction_getter(args, mask_data):
    sh_data = nib.load(args.in_sh).get_fdata(dtype=np.float32)
    sphere = HemiSphere.from_sphere(get_sphere(args.sphere))
    theta = get_theta(args.theta, args.algo)

    non_zeros_count = np.count_nonzero(np.sum(sh_data, axis=-1))
    non_first_val_count = np.count_nonzero(np.argmax(sh_data, axis=-1))

    if args.algo in ['det', 'prob']:
        if non_first_val_count / non_zeros_count > 0.5:
            logging.warning('Input detected as peaks. Input should be '
                            'fodf for det/prob, verify input just in case.')
        if args.algo == 'det':
            dg_class = DeterministicMaximumDirectionGetter
        else:
            dg_class = ProbabilisticDirectionGetter
        return dg_class.from_shcoeff(shcoeff=sh_data,
                                     max_angle=theta,
                                     sphere=sphere,
                                     basis_type=args.sh_basis,
                                     relative_peak_threshold=args.sf_threshold)
    elif args.algo == 'eudx':
        # Code for type EUDX. We don't use peaks_from_model
        # because we want the peaks from the provided sh.
        sh_shape_3d = sh_data.shape[:-1]
        dg = PeaksAndMetrics()
        dg.sphere = sphere
        dg.ang_thr = theta
        dg.qa_thr = args.sf_threshold

        # Heuristic to find out if the input is peaks or fodf:
        # fodf values are always around 0.15 and peaks around 0.75
        if non_first_val_count / non_zeros_count > 0.5:
            logging.info('Input detected as peaks.')
            nb_peaks = sh_data.shape[-1] // 3
            slices = np.arange(0, 15 + 1, 3)
            peak_values = np.zeros(sh_shape_3d + (nb_peaks, ))
            peak_indices = np.zeros(sh_shape_3d + (nb_peaks, ))

            for idx in np.argwhere(np.sum(sh_data, axis=-1)):
                idx = tuple(idx)
                for i in range(nb_peaks):
                    peak_values[idx][i] = np.linalg.norm(
                        sh_data[idx][slices[i]:slices[i + 1]], axis=-1)
                    peak_indices[idx][i] = sphere.find_closest(
                        sh_data[idx][slices[i]:slices[i + 1]])

            dg.peak_dirs = sh_data
        else:
            logging.info('Input detected as fodf.')
            npeaks = 5
            peak_dirs = np.zeros((sh_shape_3d + (npeaks, 3)))
            peak_values = np.zeros((sh_shape_3d + (npeaks, )))
            peak_indices = np.full((sh_shape_3d + (npeaks, )), -1, dtype='int')
            b_matrix = get_b_matrix(find_order_from_nb_coeff(sh_data), sphere,
                                    args.sh_basis)

            for idx in np.argwhere(np.sum(sh_data, axis=-1)):
                idx = tuple(idx)
                directions, values, indices = get_maximas(
                    sh_data[idx], sphere, b_matrix, args.sf_threshold, 0)
                if values.shape[0] != 0:
                    n = min(npeaks, values.shape[0])
                    peak_dirs[idx][:n] = directions[:n]
                    peak_values[idx][:n] = values[:n]
                    peak_indices[idx][:n] = indices[:n]

            dg.peak_dirs = peak_dirs

        dg.peak_values = peak_values
        dg.peak_indices = peak_indices

        return dg
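A toy illustration of the peaks-versus-fodf heuristic used above: in SH
volumes the first (l = 0) coefficient usually dominates, so argmax along the
last axis is mostly 0, while in peak-direction volumes it is not (synthetic
arrays, illustrative magnitudes):

import numpy as np

rng = np.random.default_rng(0)

fodf = np.abs(rng.normal(0., 0.02, (10, 10, 10, 45)))
fodf[..., 0] = 0.15                                   # dominant DC coefficient
peaks = rng.uniform(-0.75, 0.75, (10, 10, 10, 15))    # 5 peaks * (x, y, z)

def peak_ratio(sh_data):
    non_zeros = np.count_nonzero(np.sum(sh_data, axis=-1))
    non_first = np.count_nonzero(np.argmax(sh_data, axis=-1))
    return non_first / non_zeros

print(peak_ratio(fodf) > 0.5)   # False -> treated as fodf
print(peak_ratio(peaks) > 0.5)  # True  -> treated as peaks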
Esempio n. 46
0
def dMRI2ODF_DTI(PATH):
    '''
    Load the dMRI data and return the ODF.
    '''
    dMRI_path = PATH + 'data.nii.gz'
    mask_path = PATH + 'nodif_brain_mask.nii.gz'
    dMRI_img = nib.load(dMRI_path)
    dMRI_data = dMRI_img.get_fdata()
    mask_img = nib.load(mask_path)
    mask = mask_img.get_fdata()

    ########## subsample ##########
    # dMRI_data = dMRI_data[45:-48,50:-65,51:-54,...]
    # mask = mask[45:-48,50:-65,51:-54]
    dMRI_data = dMRI_data[:, 87, ...]
    mask = mask[:, 87, ...]

    for cnt in range(10):
        fig = plt.imshow(dMRI_data[:, :, cnt].transpose(1, 0),
                         cmap='Greys',
                         interpolation='nearest')
        plt.axis('off')
        # plt.imshow(dMRI_data[:,15,:,cnt].transpose(1,0),cmap='Greys')
        plt.savefig(str(cnt) + '.png',
                    bbox_inches='tight',
                    dpi=300,
                    transparent=True,
                    pad_inches=0)

    bval = PATH + "bvals"
    bvec = PATH + "bvecs"

    radial_order = 6
    zeta = 700
    lambdaN = 1e-8
    lambdaL = 1e-8

    gtab = gradient_table(bvals=bval, bvecs=bvec)
    asm = ShoreModel(gtab,
                     radial_order=radial_order,
                     zeta=zeta,
                     lambdaN=lambdaN,
                     lambdaL=lambdaL)
    asmfit = asm.fit(dMRI_data, mask=mask)
    sphere = get_sphere('symmetric362')
    dMRI_odf = asmfit.odf(sphere)
    dMRI_odf[dMRI_odf <= 0] = 0

    tenmodel = dti.TensorModel(gtab)
    tenfit = tenmodel.fit(dMRI_data, mask)
    dMRI_dti = tenfit.quadratic_form

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    FA = np.clip(FA, 0, 1)
    RGB = color_fa(FA, tenfit.evecs)

    evals = tenfit.evals + 1e-20
    evecs = tenfit.evecs
    cfa = RGB + 1e-20
    cfa /= cfa.max()

    evals = np.expand_dims(evals, 2)
    evecs = np.expand_dims(evecs, 2)
    cfa = np.expand_dims(cfa, 2)

    ren = window.Scene()
    sphere = get_sphere('symmetric362')
    ren.add(
        actor.tensor_slicer(evals,
                            evecs,
                            scalar_colors=cfa,
                            sphere=sphere,
                            scale=0.5))
    window.record(ren,
                  n_frames=1,
                  out_path='../data/tensor.png',
                  size=(5000, 5000))

    odf_ = dMRI_odf

    ren = window.Scene()
    sfu = actor.odf_slicer(np.expand_dims(odf_, 2),
                           sphere=sphere,
                           colormap="plasma",
                           scale=0.5)

    ren.add(sfu)
    window.record(ren,
                  n_frames=1,
                  out_path='../data/odfs.png',
                  size=(5000, 5000))

    return None
Esempio n. 47
0
def nii2streamlines(imgfile, maskfile, bvals, bvecs):
    import numpy as np
    import nibabel as nib
    import os

    from dipy.reconst.dti import TensorModel

    img = nib.load(imgfile)
    bvals = np.genfromtxt(bvals)
    bvecs = np.genfromtxt(bvecs)
    if bvecs.shape[1] != 3:
        bvecs = bvecs.T

    from nipype.utils.filemanip import split_filename
    _, prefix, _ = split_filename(imgfile)

    from dipy.data import gradient_table

    gtab = gradient_table(bvals, bvecs)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]
    new_zooms = (2., 2., 2.)
    data2, affine2 = data, affine
    mask = nib.load(maskfile).get_data().astype(bool)
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data2, mask)

    from dipy.reconst.dti import fractional_anisotropy
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    nib.save(fa_img, '%s_tensor_fa.nii.gz' % prefix)

    evecs = tenfit.evecs

    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    nib.save(evec_img, '%s_tensor_evec.nii.gz' % prefix)

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    from dipy.reconst.dti import quantize_evecs

    peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)

    from dipy.tracking.eudx import EuDX

    eu = EuDX(FA,
              peak_indices,
              odf_vertices=sphere.vertices,
              a_low=0.2,
              seeds=10**6,
              ang_thr=35)
    tensor_streamlines = [streamline for streamline in eu]

    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = new_zooms
    hdr['voxel_order'] = 'LPS'
    hdr['dim'] = data2.shape[:3]

    import dipy.tracking.metrics as dmetrics
    tensor_streamlines = ((sl, None, None) for sl in tensor_streamlines
                          if dmetrics.length(sl) > 15)

    ten_sl_fname = '%s_streamline.trk' % prefix

    nib.trackvis.write(ten_sl_fname,
                       tensor_streamlines,
                       hdr,
                       points_space='voxel')
    return ten_sl_fname
Esempio n. 48
0
def test_particle_filtering_tractography():
    """This tests that the ParticleFilteringTracking produces
    more streamlines connecting the gray matter than LocalTracking.
    """
    sphere = get_sphere('repulsion100')
    step_size = 0.2

    # Simple tissue masks
    simple_wm = np.array([[0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0],
                          [0, 1, 1, 1, 0, 0], [0, 1, 1, 1, 0, 0],
                          [0, 0, 0, 0, 0, 0]])
    simple_wm = np.dstack([
        np.zeros(simple_wm.shape), simple_wm, simple_wm, simple_wm,
        np.zeros(simple_wm.shape)
    ])
    simple_gm = np.array([[1, 1, 0, 0, 0, 0], [1, 1, 0, 0, 0, 0],
                          [0, 1, 0, 0, 1, 0], [0, 0, 0, 0, 1, 0],
                          [0, 0, 0, 0, 0, 0]])
    simple_gm = np.dstack([
        np.zeros(simple_gm.shape), simple_gm, simple_gm, simple_gm,
        np.zeros(simple_gm.shape)
    ])
    simple_csf = np.ones(simple_wm.shape) - simple_wm - simple_gm
    tc = ActTissueClassifier.from_pve(simple_wm, simple_gm, simple_csf)
    seeds = seeds_from_mask(simple_wm, density=2)

    # Random pmf in every voxel
    shape_img = list(simple_wm.shape)
    shape_img.extend([sphere.vertices.shape[0]])
    np.random.seed(0)  # Random number generator initialization
    pmf = np.random.random(shape_img)

    # Test that PFT recover equal or more streamlines than localTracking
    dg = ProbabilisticDirectionGetter.from_pmf(pmf, 60, sphere)
    local_streamlines_generator = LocalTracking(dg,
                                                tc,
                                                seeds,
                                                np.eye(4),
                                                step_size,
                                                max_cross=1,
                                                return_all=False)
    local_streamlines = Streamlines(local_streamlines_generator)

    pft_streamlines_generator = ParticleFilteringTracking(
        dg,
        tc,
        seeds,
        np.eye(4),
        step_size,
        max_cross=1,
        return_all=False,
        pft_back_tracking_dist=1,
        pft_front_tracking_dist=0.5)
    pft_streamlines = Streamlines(pft_streamlines_generator)

    npt.assert_(np.array([len(pft_streamlines) > 0]))
    npt.assert_(np.array([len(pft_streamlines) >= len(local_streamlines)]))

    # Test that all points are equally spaced
    for max_len in [1, 2, 5, 10, 100]:
        pft_streamlines = ParticleFilteringTracking(dg,
                                                    tc,
                                                    seeds,
                                                    np.eye(4),
                                                    step_size,
                                                    max_cross=1,
                                                    return_all=True,
                                                    maxlen=max_len)
        for s in pft_streamlines:
            for i in range(len(s) - 1):
                npt.assert_almost_equal(np.linalg.norm(s[i] - s[i + 1]),
                                        step_size)
    # Test that all points are within the image volume
    seeds = seeds_from_mask(np.ones(simple_wm.shape), density=1)
    pft_streamlines_generator = ParticleFilteringTracking(dg,
                                                          tc,
                                                          seeds,
                                                          np.eye(4),
                                                          step_size,
                                                          max_cross=1,
                                                          return_all=True)
    pft_streamlines = Streamlines(pft_streamlines_generator)

    for s in pft_streamlines:
        npt.assert_(np.all((s + 0.5).astype(int) >= 0))
        npt.assert_(np.all((s + 0.5).astype(int) < simple_wm.shape))

    # Test that the number of streamlines returned with return_all=True equals
    # the number of seeds placed
    npt.assert_(np.array([len(pft_streamlines) == len(seeds)]))

    # Test non WM seed position
    seeds = [[0, 5, 4], [0, 0, 1], [50, 50, 50]]
    pft_streamlines_generator = ParticleFilteringTracking(dg,
                                                          tc,
                                                          seeds,
                                                          np.eye(4),
                                                          step_size,
                                                          max_cross=1,
                                                          return_all=True)
    pft_streamlines = Streamlines(pft_streamlines_generator)

    npt.assert_equal(len(pft_streamlines[0]), 3)  # INVALIDPOINT
    npt.assert_equal(len(pft_streamlines[1]), 3)  # ENDPOINT
    npt.assert_equal(len(pft_streamlines[2]), 1)  # OUTSIDEIMAGE

    # Test with wrong tissueclassifier type
    tc_bin = BinaryTissueClassifier(simple_wm)
    npt.assert_raises(
        ValueError, lambda: ParticleFilteringTracking(dg, tc_bin, seeds,
                                                      np.eye(4), step_size))
    # Test with invalid back/front tracking distances
    npt.assert_raises(
        ValueError,
        lambda: ParticleFilteringTracking(dg,
                                          tc,
                                          seeds,
                                          np.eye(4),
                                          step_size,
                                          pft_back_tracking_dist=0,
                                          pft_front_tracking_dist=0))
    npt.assert_raises(
        ValueError, lambda: ParticleFilteringTracking(
            dg, tc, seeds, np.eye(4), step_size, pft_back_tracking_dist=-1))
    npt.assert_raises(
        ValueError,
        lambda: ParticleFilteringTracking(dg,
                                          tc,
                                          seeds,
                                          np.eye(4),
                                          step_size,
                                          pft_back_tracking_dist=0,
                                          pft_front_tracking_dist=-2))

    # Test with invalid affine shape
    npt.assert_raises(
        ValueError,
        lambda: ParticleFilteringTracking(dg, tc, seeds, np.eye(3), step_size))

    # Test with invalid maxlen
    npt.assert_raises(
        ValueError, lambda: ParticleFilteringTracking(
            dg, tc, seeds, np.eye(4), step_size, maxlen=0))
    npt.assert_raises(
        ValueError, lambda: ParticleFilteringTracking(
            dg, tc, seeds, np.eye(4), step_size, maxlen=-1))

    # Test with invalid particle count
    npt.assert_raises(
        ValueError, lambda: ParticleFilteringTracking(
            dg, tc, seeds, np.eye(4), step_size, particle_count=0))
    npt.assert_raises(
        ValueError, lambda: ParticleFilteringTracking(
            dg, tc, seeds, np.eye(4), step_size, particle_count=-1))

    # Test reproducibility
    tracking_1 = Streamlines(
        ParticleFilteringTracking(dg,
                                  tc,
                                  seeds,
                                  np.eye(4),
                                  step_size,
                                  random_seed=0)).data
    tracking_2 = Streamlines(
        ParticleFilteringTracking(dg,
                                  tc,
                                  seeds,
                                  np.eye(4),
                                  step_size,
                                  random_seed=0)).data
    npt.assert_equal(tracking_1, tracking_2)
Esempio n. 49
0
this response.

It is good practice to always validate the result of auto_response. For
this purpose, we can print it and have a look at its values.
"""

print(response)
"""
(array([ 0.0014,  0.00029,  0.00029]), 416.206)

We initialize an SFM model object using these values. We will use the default
sphere (362 vertices, symmetrically distributed on the surface of the sphere)
as a set of putative fascicle directions that are considered in the model.
"""

sphere = dpd.get_sphere()
sf_model = sfm.SparseFascicleModel(gtab,
                                   sphere=sphere,
                                   l1_ratio=0.5,
                                   alpha=0.001,
                                   response=response[0])
"""
For the purpose of the example, we will consider a small volume of data
containing parts of the corpus callosum and of the centrum semiovale
"""

data_small = data[20:50, 55:85, 38:39]
"""
Fitting the model to this small volume of data, we calculate the ODF of this
model on the sphere, and plot it.
"""
Esempio n. 50
0
def run_track(nodif_B0_mask, gm_in_dwi, vent_csf_in_dwi, wm_in_dwi, tiss_class, labels_im_file_wm_gm_int,
              labels_im_file, target_samples, curv_thr_list, step_list, track_type, max_length, maxcrossing, directget,
              conn_model, gtab_file, dwi_file, network, node_size, dens_thresh, ID, roi, min_span_tree, disp_filt, parc,
              prune, atlas_select, uatlas_select, label_names, coords, norm, binary, atlas_mni, life_run, min_length,
              fa_path):
    try:
        import cPickle as pickle
    except ImportError:
        import _pickle as pickle
    from dipy.io import load_pickle
    from colorama import Fore, Style
    from dipy.data import get_sphere
    from pynets import utils
    from pynets.dmri.track import prep_tissues, reconstruction, filter_streamlines, track_ensemble

    # Load gradient table
    gtab = load_pickle(gtab_file)

    # Fit diffusion model
    mod_fit = reconstruction(conn_model, gtab, dwi_file, wm_in_dwi)

    # Load atlas parcellation (and its wm-gm interface reduced version for seeding)
    atlas_img = nib.load(labels_im_file)
    atlas_data = atlas_img.get_fdata().astype('int')
    atlas_img_wm_gm_int = nib.load(labels_im_file_wm_gm_int)
    atlas_data_wm_gm_int = atlas_img_wm_gm_int.get_fdata().astype('int')

    # Build mask vector from atlas for later roi filtering
    parcels = []
    for roi_val in np.unique(atlas_data)[1:]:
        parcels.append(atlas_data == roi_val)
    parcel_vec = np.ones(len(parcels))

    # Get sphere
    sphere = get_sphere('repulsion724')

    # Instantiate tissue classifier
    tiss_classifier = prep_tissues(nodif_B0_mask, gm_in_dwi, vent_csf_in_dwi, wm_in_dwi, tiss_class)

    if np.sum(atlas_data) == 0:
        raise ValueError('ERROR: No non-zero voxels found in atlas. Check any roi masks and/or wm-gm interface images '
                         'to verify overlap with dwi-registered atlas.')

    # Iteratively build a list of streamlines for each ROI while tracking
    print("%s%s%s%s" % (Fore.GREEN, 'Target number of samples: ', Fore.BLUE, target_samples))
    print(Style.RESET_ALL)
    print("%s%s%s%s" % (Fore.GREEN, 'Using curvature threshold(s): ', Fore.BLUE, curv_thr_list))
    print(Style.RESET_ALL)
    print("%s%s%s%s" % (Fore.GREEN, 'Using step size(s): ', Fore.BLUE, step_list))
    print(Style.RESET_ALL)
    print("%s%s%s%s" % (Fore.GREEN, 'Tracking type: ', Fore.BLUE, track_type))
    print(Style.RESET_ALL)
    if directget == 'prob':
        print("%s%s%s" % ('Using ', Fore.MAGENTA, 'Probabilistic Direction...'))
    elif directget == 'boot':
        print("%s%s%s" % ('Using ', Fore.MAGENTA, 'Bootstrapped Direction...'))
    elif directget == 'closest':
        print("%s%s%s" % ('Using ', Fore.MAGENTA, 'Closest Peak Direction...'))
    elif directget == 'det':
        print("%s%s%s" % ('Using ', Fore.MAGENTA, 'Deterministic Maximum Direction...'))
    print(Style.RESET_ALL)

    # Commence Ensemble Tractography
    streamlines = track_ensemble(target_samples, atlas_data_wm_gm_int, parcels, parcel_vec,
                                 mod_fit, tiss_classifier, sphere, directget, curv_thr_list, step_list, track_type,
                                 maxcrossing, max_length)
    print('Tracking Complete')

    # Perform streamline filtering routines
    dir_path = utils.do_dir_path(atlas_select, dwi_file)
    [streams, dir_path] = filter_streamlines(dwi_file, dir_path, gtab, streamlines, life_run, min_length, conn_model,
                                             target_samples, node_size, curv_thr_list, step_list)

    return streams, track_type, target_samples, conn_model, dir_path, network, node_size, dens_thresh, ID, roi, min_span_tree, disp_filt, parc, prune, atlas_select, uatlas_select, label_names, coords, norm, binary, atlas_mni, curv_thr_list, step_list, fa_path
Example no. 51
def execution(self, context):

    sh_coeff_vol = aims.read(self.sh_coefficients.fullPath())
    header = sh_coeff_vol.header()

    # Transformation from Aims LPI mm space to RAS mm (reference space)

    aims_mm_to_ras_mm = np.array(header['transformations'][0]).reshape((4, 4))
    voxel_size = np.array(header['voxel_size'])
    if len(voxel_size) == 4:
        voxel_size = voxel_size[:-1]
    scaling = np.concatenate((voxel_size, np.ones(1)))
    context.write(voxel_size.shape)
    scaling_mat = np.diag(scaling)
    context.write(scaling_mat.shape, aims_mm_to_ras_mm.shape)
    aims_voxel_to_ras_mm = np.dot(scaling_mat, aims_mm_to_ras_mm)

    affine_tracking = np.eye(4)

    sh = np.array(sh_coeff_vol, copy=True)
    sh = sh.astype(np.float64)
    vol_shape = sh.shape[:-1]
    if self.sphere is not None:
        sphere = read_sphere(self.sphere.fullPath())
    else:
        context.write(
            'No projection sphere provided; the default dipy sphere '
            '(symmetric 362) is used.'
        )
        sphere = get_sphere()

    dg = DirectionGetter[self.type].from_shcoeff(
        sh,
        self.max_angle,
        sphere,
        basis_type=None,
        relative_peak_threshold=self.relative_peak_threshold,
        min_separation_angle=self.min_separation_angle)

    # Handle seeds in both the deterministic and probabilistic frameworks:
    # each seed point is replicated nb_samples times before tracking
    s = np.loadtxt(self.seeds.fullPath())
    s = s.astype(np.float32)
    i = np.arange(self.nb_samples)
    if self.nb_samples <= 1:
        seeds = s
    else:
        seeds = np.zeros((self.nb_samples, ) + s.shape)
        seeds[i] = s
        seeds = seeds.reshape((-1, 3))
    seeds = nib.affines.apply_affine(np.linalg.inv(scaling_mat), seeds)
    # Build the tissue classifier

    csf_vol = aims.read(self.csf_pve.fullPath())
    grey_vol = aims.read(self.gm_pve.fullPath())
    white_vol = aims.read(self.wm_pve.fullPath())

    csf = np.array(csf_vol)
    csf = csf[..., 0]
    gm = np.array(grey_vol)
    gm = gm[..., 0]
    wm = np.array(white_vol)
    wm = wm[..., 0]

    # Re-threshold the PVE volumes to correct interpolation artifacts (e.g. values > 1)
    total = (csf + gm + wm).copy()
    csf[total <= 0] = 0
    gm[total <= 0] = 0
    wm[total <= 0] = 0
    csf[total != 0] = (csf[total != 0]) / (total[total != 0])
    wm[total != 0] = (wm[total != 0]) / (total[total != 0])
    gm[total != 0] = gm[total != 0] / (total[total != 0])

    classif = Classifiers[self.constraint]
    classifier = classif.from_pve(wm_map=wm, gm_map=gm, csf_map=csf)

    # Tracking is performed in LPI voxel space so that no affine is imposed on
    # the data; the seeds are assumed to be in LPI voxel space as well.
    streamlines_generator = ParticleFilteringTracking(
        dg,
        classifier,
        seeds,
        affine_tracking,
        step_size=self.step_size,
        max_cross=self.crossing_max,
        maxlen=self.nb_iter_max,
        pft_back_tracking_dist=self.back_tracking_dist,
        pft_front_tracking_dist=self.front_tracking_dist,
        pft_max_trial=self.max_trial,
        particle_count=self.nb_particles,
        return_all=self.return_all)
    # Store fibers directly in LPI orientation with the appropriate transformation
    save_trk(self.streamlines.fullPath(),
             streamlines_generator,
             affine=aims_voxel_to_ras_mm,
             vox_size=voxel_size,
             shape=vol_shape)

    transformManager = getTransformationManager()
    transformManager.copyReferential(self.sh_coefficients, self.streamlines)
Example no. 52
def dwi_dipy_run(dwi_dir,
                 node_size,
                 dir_path,
                 conn_model,
                 parc,
                 atlas_select,
                 network,
                 wm_mask=None):
    import os
    import glob
    import re
    import numpy as np
    import nibabel as nib
    import nipype.interfaces.fsl as fsl
    from dipy.reconst.dti import TensorModel, quantize_evecs
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, recursive_response
    from dipy.tracking.local import LocalTracking, ThresholdTissueClassifier
    from dipy.tracking import utils
    from dipy.direction import peaks_from_model
    from dipy.tracking.eudx import EuDX
    from dipy.data import get_sphere
    from dipy.core.gradients import gradient_table
    from dipy.io import read_bvals_bvecs

    def atoi(text):
        return int(text) if text.isdigit() else text

    def natural_keys(text):
        return [atoi(c) for c in re.split(r'(\d+)', text)]
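    # e.g. natural_keys('roi_10') -> ['roi_', 10, ''], so 'roi_2' sorts before 'roi_10'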

    dwi_img = "%s%s" % (dwi_dir, '/dwi.nii.gz')
    nodif_brain_mask_path = "%s%s" % (dwi_dir, '/nodif_brain_mask.nii.gz')
    bvals = "%s%s" % (dwi_dir, '/bval')
    bvecs = "%s%s" % (dwi_dir, '/bvec')

    img = nib.load(dwi_img)
    data = img.get_data()

    # Loads mask and ensures it's a true binary mask
    img = nib.load(nodif_brain_mask_path)
    mask = img.get_data()
    mask = mask > 0

    [bvals, bvecs] = read_bvals_bvecs(bvals, bvecs)
    gtab = gradient_table(bvals, bvecs)

    # Estimates some tensors
    model = TensorModel(gtab)
    ten = model.fit(data, mask)
    sphere = get_sphere('symmetric724')
    ind = quantize_evecs(ten.evecs, sphere.vertices)

    # Tractography
    if conn_model == 'csd':
        trac_mod = 'csd'
    else:
        conn_model = 'tensor'
        trac_mod = ten.fa

    affine = img.affine
    if wm_mask is None:
        # mask was already loaded above as a boolean array; just zero its borders
        mask[0, :, :] = False
        mask[:, 0, :] = False
        mask[:, :, 0] = False
        seeds = utils.seeds_from_mask(mask, density=2)
    else:
        wm_mask_data = nib.load(wm_mask).get_data()
        wm_mask_data[0, :, :] = False
        wm_mask_data[:, 0, :] = False
        wm_mask_data[:, :, 0] = False
        seeds = utils.seeds_from_mask(wm_mask_data, density=2)
    #seeds = random_seeds_from_mask(ten.fa > 0.3, seeds_count=num_total_samples)

    if conn_model == 'tensor':
        print('Tracking with tensor model...')
        eu = EuDX(a=trac_mod,
                  ind=ind,
                  seeds=seeds,
                  odf_vertices=sphere.vertices,
                  a_low=0.05,
                  step_sz=.5)
        tracks = [e for e in eu]
    elif conn_model == 'csd':
        print('Tracking with CSD model...')
        if wm_mask is None:
            response = recursive_response(gtab,
                                          data,
                                          mask=mask.astype('bool'),
                                          sh_order=8,
                                          peak_thr=0.01,
                                          init_fa=0.08,
                                          init_trace=0.0021,
                                          iter=8,
                                          convergence=0.001,
                                          parallel=True)
        else:
            response = recursive_response(gtab,
                                          data,
                                          mask=wm_mask_data.astype('bool'),
                                          sh_order=8,
                                          peak_thr=0.01,
                                          init_fa=0.08,
                                          init_trace=0.0021,
                                          iter=8,
                                          convergence=0.001,
                                          parallel=True)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response)
        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     parallel=True)
        tissue_classifier = ThresholdTissueClassifier(ten.fa, 0.1)
        streamline_generator = LocalTracking(csd_peaks,
                                             tissue_classifier,
                                             seeds,
                                             affine=affine,
                                             step_size=0.5)
        tracks = [e for e in streamline_generator]

    if parc is True:
        node_size = 'parc'

    if network:
        seeds_dir = "%s%s%s%s%s%s%s" % (dir_path, '/seeds_', network, '_',
                                        atlas_select, '_', str(node_size))
    else:
        seeds_dir = "%s%s%s%s%s" % (dir_path, '/seeds_', atlas_select, '_',
                                    str(node_size))

    seed_files = glob.glob("%s%s" % (seeds_dir, '/*diff.nii.gz'))

    seed_files.sort(key=natural_keys)

    # Binarize ROIs
    print('\nBinarizing seed masks...')
    j = 1
    for i in seed_files:
        args = ' -bin '
        out_file = "%s%s" % (i.split('.nii.gz')[0], '_bin.nii.gz')
        maths = fsl.ImageMaths(in_file=i, op_string=args, out_file=out_file)
        os.system(maths.cmdline)
        args = ' -mul ' + str(j)
        maths = fsl.ImageMaths(in_file=out_file,
                               op_string=args,
                               out_file=out_file)
        os.system(maths.cmdline)
        j = j + 1

    # Create atlas from ROIs
    seed_files = glob.glob("%s%s" % (seeds_dir, '/*diff_bin.nii.gz'))

    seed_files.sort(key=natural_keys)

    print('\nMerging seed masks into single labels image...')
    label_sum = "%s%s" % (seeds_dir, '/all_rois.nii.gz')
    # Initialize the labels image from the first mask, then accumulate the rest
    maths = fsl.ImageMaths(in_file=seed_files[0],
                           op_string=' -add 0',
                           out_file=label_sum)
    os.system(maths.cmdline)

    for i in seed_files[1:]:
        args = ' -add ' + i
        maths = fsl.ImageMaths(in_file=label_sum,
                               op_string=args,
                               out_file=label_sum)
        os.system(maths.cmdline)

    labels_im = nib.load(label_sum)
    labels_data = labels_im.get_data().astype('int')
    conn_matrix, grouping = utils.connectivity_matrix(
        tracks,
        labels_data,
        affine=affine,
        return_mapping=True,
        mapping_as_streamlines=True)
    conn_matrix[:3, :] = 0
    conn_matrix[:, :3] = 0

    return conn_matrix
Example no. 53
def test_csdeconv():
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_fnames('small_64D')

    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs, b0_threshold=0)
    mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003]))

    angles = [(0, 0), (60, 0)]

    S, sticks = multi_tensor(gtab,
                             mevals,
                             S0,
                             angles=angles,
                             fractions=[50, 50],
                             snr=SNR)

    sphere = get_sphere('symmetric362')
    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(S)
    assert_equal(csd_fit.shm_coeff[0] > 0, True)
    fodf = csd_fit.odf(sphere)

    directions, _, _ = peak_directions(odf_gt, sphere)
    directions2, _, _ = peak_directions(fodf, sphere)

    ang_sim = angular_similarity(directions, directions2)

    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions.shape[0], 2)
    assert_equal(directions2.shape[0], 2)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        _ = ConstrainedSphericalDeconvModel(gtab, response, sh_order=10)
        assert_greater(
            len([lw for lw in w if issubclass(lw.category, UserWarning)]), 0)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        assert_equal(
            len([lw for lw in w if issubclass(lw.category, UserWarning)]), 0)

    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    S2 = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    big_S = np.zeros((10, 10, 10, len(S2)))
    big_S[:] = S2

    aresponse, aratio = auto_response_ssst(gtab,
                                           big_S,
                                           roi_center=(5, 5, 4),
                                           roi_radii=3,
                                           fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])
    assert_almost_equal(aresponse[1], 100)
    assert_almost_equal(aratio, response[0][1] / response[0][0])

    aresponse, aratio = auto_response_ssst(gtab, big_S, roi_radii=3, fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])
Example no. 54
def test_odfdeconv():
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003]))

    angles = [(0, 0), (90, 0)]
    S, _ = multi_tensor(gtab,
                        mevals,
                        S0,
                        angles=angles,
                        fractions=[50, 50],
                        snr=SNR)

    sphere = get_sphere('symmetric362')

    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])

    e1 = 15.0
    e2 = 3.0
    ratio = e2 / e1

    csd = ConstrainedSDTModel(gtab, ratio, None)

    csd_fit = csd.fit(S)
    fodf = csd_fit.odf(sphere)

    directions, _, _ = peak_directions(odf_gt, sphere)
    directions2, _, _ = peak_directions(fodf, sphere)

    ang_sim = angular_similarity(directions, directions2)

    assert_equal(ang_sim > 1.9, True)

    assert_equal(directions.shape[0], 2)
    assert_equal(directions2.shape[0], 2)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        ConstrainedSDTModel(gtab, ratio, sh_order=10)
        w_count = len(w)
        # A warning is expected from the ConstrainedSDTModel constructor,
        # and additional warnings should be raised where legacy SH bases
        # are used
        assert_equal(w_count > 1, True)

    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        ConstrainedSDTModel(gtab, ratio, sh_order=8)
        # Test that the warning from the ConstrainedSDTModel
        # constructor is no longer raised
        assert_equal(len(w) == w_count - 1, True)

    csd_fit = csd.fit(np.zeros_like(S))
    fodf = csd_fit.odf(sphere)
    assert_array_equal(fodf, np.zeros_like(fodf))

    odf_sh = np.zeros_like(fodf)
    odf_sh[1] = np.nan

    fodf, _ = odf_deconv(odf_sh, csd.R, csd.B_reg)
    assert_array_equal(fodf, np.zeros_like(fodf))
Example no. 55
def test_csdeconv():
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_data('small_64D')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003]))

    angles = [(0, 0), (60, 0)]

    S, sticks = multi_tensor(gtab,
                             mevals,
                             S0,
                             angles=angles,
                             fractions=[50, 50],
                             snr=SNR)

    sphere = get_sphere('symmetric362')
    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(S)
    assert_equal(csd_fit.shm_coeff[0] > 0, True)
    fodf = csd_fit.odf(sphere)

    directions, _, _ = peak_directions(odf_gt, sphere)
    directions2, _, _ = peak_directions(fodf, sphere)

    ang_sim = angular_similarity(directions, directions2)

    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions.shape[0], 2)
    assert_equal(directions2.shape[0], 2)

    assert_warns(UserWarning,
                 ConstrainedSphericalDeconvModel,
                 gtab,
                 response,
                 sh_order=10)

    with warnings.catch_warnings(record=True) as w:
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        assert_equal(
            len([
                local_warn for local_warn in w
                if issubclass(local_warn.category, UserWarning)
            ]) > 0, False)

    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    S2 = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    big_S = np.zeros((10, 10, 10, len(S2)))
    big_S[:] = S2

    aresponse, aratio = auto_response(gtab,
                                      big_S,
                                      roi_center=(5, 5, 4),
                                      roi_radius=3,
                                      fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])
    assert_almost_equal(aresponse[1], 100)
    assert_almost_equal(aratio, response[0][1] / response[0][0])

    aresponse, aratio = auto_response(gtab, big_S, roi_radius=3, fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])

    _, _, nvoxels = auto_response(gtab,
                                  big_S,
                                  roi_center=(5, 5, 4),
                                  roi_radius=30,
                                  fa_thr=0.5,
                                  return_number_of_voxels=True)
    assert_equal(nvoxels, 1000)
    _, _, nvoxels = auto_response(gtab,
                                  big_S,
                                  roi_center=(5, 5, 4),
                                  roi_radius=30,
                                  fa_thr=1,
                                  return_number_of_voxels=True)
    assert_equal(nvoxels, 0)
Example no. 56
if bvecs.shape[1] != 3:
    bvecs = bvecs.T

bvals = np.genfromtxt(dpath + 'bvals_b10.txt')

# remove b0
bvecs = bvecs[bvals > 10]
bvals = bvals[bvals > 10]

# symmetrize: duplicate each direction with its antipode (the dMRI signal is antipodally symmetric)
bvecs = np.concatenate((bvecs, -bvecs), axis=0)
bvals = np.concatenate((bvals, bvals), axis=0)

# sample spherical distribution
ODFS_low = []
sphere_low = get_sphere('repulsion724')
for mu1 in [np.array([0, 0, 1])]:
    for k1 in [1, 2, 4, 16]:
        # comp1 (symmetric)
        # d1 = sphPDF(k1, mu1, sphere_low.vertices)
        # d2 = sphPDF(k1, mu1, -sphere_low.vertices)
        # dd1 = (d1+d2)/2.
        # dd1 = dd1/dd1.sum()
        dd1 = sphPDF_sym(k1, mu1, sphere_low.vertices, True)
        for mu2 in [
                np.array([1, 0, 0]),
                np.array([0, 1, 0]),
                np.array([0, 0, 1])
        ]:
            for k2 in [1, 2, 4, 16]:
                # comp2 (symmetric)
Example no. 57
# memory for storing NNLS weights
if sparse:
    sparsity = 0.01  # expected proportion of nnz atom weights per fascicle
    nnz_pred = int(np.ceil(sparsity * num_atoms * num_samples * num_fasc))
    # Store row and column indices of the dense weight matrix
    w_idx = np.zeros((nnz_pred, 2), dtype=np.int64)  # (row, col) index pairs
    # Store weights themselves
    w_data = np.zeros(nnz_pred, dtype=np.float64)
else:
    w_store = np.zeros((num_samples, num_fasc*num_atoms), dtype=np.float64)

nnz_hist = np.zeros(num_samples)  # always useful, even in non-sparse mode

# Quantities used repeatedly for CSD peak estimation
# use largest sphere available in Dipy
odf_sphere = get_sphere('repulsion724')

gam = util.get_gyromagnetic_ratio('H')
G = sch_mat_b0[:, 3]
Deltas = sch_mat_b0[:, 4]
deltas = sch_mat_b0[:, 5]
bvals = (gam*G*deltas)**2*(Deltas-deltas/3)  # in SI units s/m^2
bvecs = sch_mat_b0[:, :3]
gtab = gradient_table(bvals/1e6, bvecs)  # bvals in s/mm^2
num_dwi = np.sum(bvals > 0)
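
# Quick sanity check of the b-value formula above, with hypothetical PGSE
# parameters (not from this script): G = 40 mT/m, delta = 10 ms, Delta = 30 ms
# and gamma(1H) ~ 2.675e8 rad s^-1 T^-1 should give roughly 305 s/mm^2.
_b_check = (2.675e8 * 0.04 * 0.01)**2 * (0.03 - 0.01/3) / 1e6  # ~305 s/mm^2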

MAX_SH_ORDER = 12
sh_max_vals = np.arange(2, MAX_SH_ORDER+1, 2)
# base_sizes is the number of free coefficients to estimate, i.e. the
# degrees of freedom of the model
base_sizes = (sh_max_vals+1)*(sh_max_vals+2)//2
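
# Sanity check: an even, symmetric SH basis of order L has (L + 1)(L + 2) / 2
# free coefficients, so orders 2, 4, ..., 12 give 6, 15, 28, 45, 66 and 91.
assert list(base_sizes) == [6, 15, 28, 45, 66, 91]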
Example no. 58
plt.savefig('simulated_signal.png')
plt.show()
"""
.. figure:: simulated_signal.png
   :align: center

   **Simulated MultiTensor signal**
"""
"""
For the ODF simulation we will need a sphere. Because we are interested in a
simulation of only a single voxel, we can use a sphere with very high
resolution. We generate that by subdividing the triangles of one of Dipy's
cached spheres, which we can read in the following way.
"""

sphere = get_sphere('symmetric724')
sphere = sphere.subdivide(2)

odf = multi_tensor_odf(sphere.vertices, mevals, angles, fractions)

from dipy.viz import fvtk

ren = fvtk.ren()

odf_actor = fvtk.sphere_funcs(odf, sphere)
odf_actor.RotateX(90)

fvtk.add(ren, odf_actor)

print('Saving illustration as multi_tensor_simulation')
fvtk.record(ren, out_path='multi_tensor_simulation.png', size=(300, 300))
Example no. 59
    def __init__(self,
                 a,
                 ind,
                 seeds=10000,
                 odf_vertices=None,
                 a_low=0.0239,
                 step_sz=0.5,
                 ang_thr=60.,
                 length_thr=0.,
                 total_weight=.5):
        ''' Euler integration with multiple stopping criteria and support for multiple peaks

        Parameters
        ------------
        a : array, shape(x,y,z,Np)
            magnitude of the peak of a scalar anisotropic function, e.g. QA
            (quantitative anisotropy), or a different function of shape (x,y,z),
            e.g. FA or GFA.
        ind : array, shape(x,y,z,Np)
            indices of orientations of the scalar anisotropic peaks found on the
            resampling sphere
        seeds : int or sequence, optional
            number of random seeds or list of seeds
        odf_vertices : None or ndarray, optional
            sphere points which define a discrete representation of orientations
            for the peaks, the same for all voxels. None results in the default
            symmetric sphere of 362 vertices being used.
        a_low : float, optional
            low threshold for QA (typically 0.023) or FA (typically 0.2) or any
            other anisotropic function
        step_sz : float, optional
            euler propagation step size
        ang_thr : float, optional
            if turning angle is bigger than this threshold then tracking stops.
        length_thr: float, optional
        total_weight : float, optional
            total weighting threshold

        Examples
        --------
        >>> import nibabel as nib
        >>> from dipy.reconst.dti import Tensor
        >>> from dipy.data import get_data
        >>> fimg,fbvals,fbvecs=get_data('small_101D')
        >>> img=nib.load(fimg)
        >>> affine=img.get_affine()
        >>> bvals=np.loadtxt(fbvals)
        >>> gradients=np.loadtxt(fbvecs).T
        >>> data=img.get_data()
        >>> ten=Tensor(data,bvals,gradients,thresh=50)
        >>> eu=EuDX(a=ten.fa(),ind=ten.ind(),seeds=100,a_low=.2)
        >>> tracks=[e for e in eu]

        Notes
        -------
        This class works as an iterator because materializing all tracks at once
        could fill your entire memory, which is a very real risk: you can easily
        generate millions of tracks if you have many seeds.

        '''
        self.a = a.copy()
        self.ind = ind.copy()
        self.a_low = a_low
        self.ang_thr = ang_thr
        self.step_sz = step_sz
        self.length_thr = length_thr
        self.total_weight = total_weight
        if len(self.a.shape) == 3:
            #tmpa=np.zeros(self.a.shape+(2,))
            #tmpi=np.zeros(self.ind.shape+(2,))
            #self.a=tmpa.copy()
            #self.ind=tmpi.copy()
            #self.a[...,0]=a.copy()
            #self.ind[...,0]=ind.copy()
            self.a.shape = self.a.shape + (1, )
            self.ind.shape = self.ind.shape + (1, )
        # store the maximum number of peaks
        x, y, z, g = self.a.shape
        self.Np = g
        if odf_vertices is None:
            vertices, faces = get_sphere('symmetric362')
            self.odf_vertices = vertices
        else:
            self.odf_vertices = odf_vertices
        try:
            if len(seeds) > 0:
                self.seed_list = seeds
                self.seed_no = len(seeds)
        except TypeError:
            self.seed_no = seeds
            self.seed_list = None
        self.ind = self.ind.astype(np.double)
Example no. 60
def test_recursive_response_calibration():
    """
    Test the recursive response calibration method.
    """
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_data('small_64D')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    sphere = get_sphere('symmetric724')

    gtab = gradient_table(bvals, bvecs)
    evals = np.array([0.0015, 0.0003, 0.0003])
    evecs = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).T
    mevals = np.array(([0.0015, 0.0003, 0.0003], [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (90, 0)]

    where_dwi = lazy_index(~gtab.b0s_mask)

    S_cross, sticks_cross = multi_tensor(gtab,
                                         mevals,
                                         S0,
                                         angles=angles,
                                         fractions=[50, 50],
                                         snr=SNR)

    S_single = single_tensor(gtab, S0, evals, evecs, snr=SNR)

    data = np.concatenate((np.tile(S_cross, (8, 1)), np.tile(S_single,
                                                             (2, 1))),
                          axis=0)

    odf_gt_cross = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])

    odf_gt_single = single_tensor_odf(sphere.vertices, evals, evecs)

    response = recursive_response(gtab,
                                  data,
                                  mask=None,
                                  sh_order=8,
                                  peak_thr=0.01,
                                  init_fa=0.05,
                                  init_trace=0.0021,
                                  iter=8,
                                  convergence=0.001,
                                  parallel=False)

    csd = ConstrainedSphericalDeconvModel(gtab, response)

    csd_fit = csd.fit(data)

    assert_equal(np.all(csd_fit.shm_coeff[:, 0] >= 0), True)

    fodf = csd_fit.odf(sphere)

    directions_gt_single, _, _ = peak_directions(odf_gt_single, sphere)
    directions_gt_cross, _, _ = peak_directions(odf_gt_cross, sphere)
    directions_single, _, _ = peak_directions(fodf[8, :], sphere)
    directions_cross, _, _ = peak_directions(fodf[0, :], sphere)

    ang_sim = angular_similarity(directions_cross, directions_gt_cross)
    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions_cross.shape[0], 2)
    assert_equal(directions_gt_cross.shape[0], 2)

    ang_sim = angular_similarity(directions_single, directions_gt_single)
    assert_equal(ang_sim > 0.9, True)
    assert_equal(directions_single.shape[0], 1)
    assert_equal(directions_gt_single.shape[0], 1)

    sphere = Sphere(xyz=gtab.gradients[where_dwi])
    sf = response.on_sphere(sphere)
    S = np.concatenate(([response.S0], sf))

    tenmodel = dti.TensorModel(gtab, min_signal=0.001)

    tenfit = tenmodel.fit(S)
    FA = fractional_anisotropy(tenfit.evals)
    FA_gt = fractional_anisotropy(evals)
    assert_almost_equal(FA, FA_gt, 1)