Example #1
def test_eudx_bad_seed():
    """Test passing a bad seed to eudx"""
    fimg, fbvals, fbvecs = get_data('small_101D')

    img = ni.load(fimg)
    affine = img.get_affine()
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    ind = quantize_evecs(ten.evecs)

    seed = [1000000., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed], a_low=.2)
    try:
        track = list(eu)
    except ValueError as ve:        
        if ve.args[0] == 'Seed outside boundaries':
            print(ve)
   
    print(data.shape)
    seed = [1., 5., 8.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed], a_low=.2)    
    track = list(eu)
    
    seed = [-1., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed], a_low=.2)
    try:
        track = list(eu)
    except ValueError as ve:
        if ve.args[0] == 'Seed outside boundaries':
            print(ve)
Example #2
def test_eudx_bad_seed():
    """Test passing a bad seed to eudx"""
    fimg, fbvals, fbvecs = get_data('small_101D')

    img = ni.load(fimg)
    affine = img.affine
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    ind = quantize_evecs(ten.evecs)

    sphere = get_sphere('symmetric724')
    seed = [1000000., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    assert_raises(ValueError, list, eu)

    print(data.shape)
    seed = [1., 5., 8.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    track = list(eu)

    seed = [-1., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    assert_raises(ValueError, list, eu)
Example #3
def compute_tensor_model(dir_src, dir_out, verbose=False):

    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi =  pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    ten_model = TensorModel(gtab)
    ten_fit = ten_model.fit(data, mask)

    FA = ten_fit.fa
    MD = ten_fit.md
    EV = ten_fit.evecs.astype(np.float32)

    fa_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_FA.nii.gz'
    save_nifti(pjoin(dir_out, fa_name), FA, affine)
    md_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_MD.nii.gz'
    save_nifti(pjoin(dir_out, md_name), MD, affine)
    ev_name = 'data_' + par_b_tag + '_' + par_dim_tag + '_EV.nii.gz'
    save_nifti(pjoin(dir_out, ev_name), EV, affine)
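A minimal usage sketch for compute_tensor_model, assuming the module-level settings (par_b_tag, par_dim_tag, par_b0_threshold) referenced inside the function; the tag values and directory paths below are illustrative only.

par_b_tag = '1k'            # illustrative b-value tag
par_dim_tag = '2iso'        # illustrative resolution tag
par_b0_threshold = 50       # illustrative b0 threshold

compute_tensor_model('/data/subject01/preproc',  # hypothetical dir_src
                     '/data/subject01/tensor',   # hypothetical dir_out
                     verbose=True)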
Example #4
def test_eudx_further():
    """ Cause we love testin.. ;-)
    """

    fimg,fbvals,fbvecs=get_data('small_101D')

    img=ni.load(fimg)
    affine=img.get_affine()
    data=img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    x,y,z=data.shape[:3]
    seeds=np.zeros((10**4,3))
    for i in range(10**4):
        rx=(x-1)*np.random.rand()
        ry=(y-1)*np.random.rand()
        rz=(z-1)*np.random.rand()            
        seeds[i]=np.ascontiguousarray(np.array([rx,ry,rz]),dtype=np.float64)
    
    ind = quantize_evecs(ten.evecs)
    eu=EuDX(a=ten.fa, ind=ind, seeds=seeds, a_low=.2)
    T=[e for e in eu]
    
    #check that there are no negative elements
    for t in T:
        assert_equal(np.sum(t.ravel()<0),0)
Example #5
def test_response_from_mask():
    fdata, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    data = nib.load(fdata).get_data()

    gtab = gradient_table(bvals, bvecs)
    ten = TensorModel(gtab)
    tenfit = ten.fit(data)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    radius = 3

    for fa_thr in np.arange(0, 1, 0.1):
        response_auto, ratio_auto, nvoxels = auto_response(gtab,
                                                           data,
                                                           roi_center=None,
                                                           roi_radius=radius,
                                                           fa_thr=fa_thr,
                                                           return_number_of_voxels=True)

        ci, cj, ck = np.array(data.shape[:3]) // 2
        mask = np.zeros(data.shape[:3])
        mask[ci - radius: ci + radius,
             cj - radius: cj + radius,
             ck - radius: ck + radius] = 1

        mask[FA <= fa_thr] = 0
        response_mask, ratio_mask = response_from_mask(gtab, data, mask)

        assert_equal(int(np.sum(mask)), nvoxels)
        assert_array_almost_equal(response_mask[0], response_auto[0])
        assert_almost_equal(response_mask[1], response_auto[1])
        assert_almost_equal(ratio_mask, ratio_auto)
Example #6
def test_phantom():
    N = 50

    vol = orbital_phantom(gtab,
                          func=f,
                          t=np.linspace(0, 2 * np.pi, N),
                          datashape=(10, 10, 10, len(bvals)),
                          origin=(5, 5, 5),
                          scale=(3, 3, 3),
                          angles=np.linspace(0, 2 * np.pi, 16),
                          radii=np.linspace(0.2, 2, 6),
                          S0=100)

    m = TensorModel(gtab)
    t = m.fit(vol)
    FA = t.fa
    # print vol
    FA[np.isnan(FA)] = 0
    # 686 -> expected FA given diffusivities of [1500, 400, 400]
    l1, l2, l3 = 1500e-6, 400e-6, 400e-6
    expected_fa = (np.sqrt(0.5) *
                   np.sqrt((l1 - l2)**2 + (l2-l3)**2 + (l3-l1)**2) /
                   np.sqrt(l1**2 + l2**2 + l3**2))

    assert_array_almost_equal(FA.max(), expected_fa, decimal=2)
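As a sanity check on the "686" comment above, the closed-form FA for the assumed diffusivities [1500, 400, 400] (x 1e-6) can be evaluated directly:

import numpy as np

l1, l2, l3 = 1500e-6, 400e-6, 400e-6
fa = (np.sqrt(0.5) *
      np.sqrt((l1 - l2)**2 + (l2 - l3)**2 + (l3 - l1)**2) /
      np.sqrt(l1**2 + l2**2 + l3**2))
print(round(fa, 3))  # 0.686, hence the "686" in the comment above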
Example #7
def test_masked_array_with_tensor():
    data = np.ones((2, 4, 56))
    mask = np.array([[True, False, False, True],
                     [True, False, True, False]])

    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)

    tensor_model = TensorModel(gtab)
    tensor = tensor_model.fit(data, mask=mask)
    assert_equal(tensor.shape, (2, 4))
    assert_equal(tensor.fa.shape, (2, 4))
    assert_equal(tensor.evals.shape, (2, 4, 3))
    assert_equal(tensor.evecs.shape, (2, 4, 3, 3))

    tensor = tensor[0]
    assert_equal(tensor.shape, (4,))
    assert_equal(tensor.fa.shape, (4,))
    assert_equal(tensor.evals.shape, (4, 3))
    assert_equal(tensor.evecs.shape, (4, 3, 3))

    tensor = tensor[0]
    assert_equal(tensor.shape, tuple())
    assert_equal(tensor.fa.shape, tuple())
    assert_equal(tensor.evals.shape, (3,))
    assert_equal(tensor.evecs.shape, (3, 3))
    assert_equal(type(tensor.model_params), np.ndarray)
Example #8
def test_WLS_and_LS_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    ### Defining Test Voxel (avoid nibabel dependency) ###

    #Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    #Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    #Design Matrix
    X = dti.design_matrix(bvec, bval)
    #Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    gtab = grad.gradient_table(bval, bvec)

    ### Testing WLS Fit on Single Voxel ###
    #Estimate tensor from test signals
    model = TensorModel(gtab, min_signal=1e-8, fit_method='WLS')
    tensor_est = model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                              err_msg="Calculation of tensor from Y does not "
                                       "compare to analytical solution")
    assert_almost_equal(tensor_est.md[0], md)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, min_signal=1e-8, fit_method='LS')
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
Example #9
def DIPY_nii2streamlines(imgfile, maskfile, bvals, bvecs, output_prefix):
    import numpy as np
    import nibabel as nib
    import os

    from dipy.reconst.dti import TensorModel

    print "nii2streamlines"

    img = nib.load(imgfile)
    bvals = np.genfromtxt(bvals)
    bvecs = np.genfromtxt(bvecs)
    if bvecs.shape[1] != 3:
        bvecs = bvecs.T
    print(bvecs.shape)

    from nipype.utils.filemanip import split_filename
    _, prefix, _  = split_filename(imgfile)
    from dipy.data import gradient_table
    gtab = gradient_table(bvals, bvecs)
    data = img.get_data()
    affine = img.get_affine()
    zooms = img.get_header().get_zooms()[:3]
    new_zooms = (2., 2., 2.)
    data2, affine2 = data, affine
    mask = nib.load(maskfile).get_data().astype(bool)
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data2, mask)

    from dipy.reconst.dti import fractional_anisotropy
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    # write next to the requested output prefix (the original referenced an
    # undefined `experiment_dir`)
    nib.save(fa_img, output_prefix + '/' + ('%s_tensor_fa.nii.gz' % prefix))
    evecs = tenfit.evecs
    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    nib.save(evec_img, output_prefix + '/' + ('%s_tensor_evec.nii.gz' % prefix))

    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    from dipy.reconst.dti import quantize_evecs

    peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)

    from dipy.tracking.eudx import EuDX
    eu = EuDX(FA, peak_indices, odf_vertices = sphere.vertices, a_low=0.2, seeds=10**6, ang_thr=35)
    tensor_streamlines = [streamline for streamline in eu]
    hdr = nib.trackvis.empty_header()
    hdr['voxel_size'] = new_zooms
    hdr['voxel_order'] = 'LPS'
    hdr['dim'] = data2.shape[:3]

    import dipy.tracking.metrics as dmetrics
    tensor_streamlines = ((sl, None, None) for sl in tensor_streamlines if dmetrics.length(sl) > 15)
    ten_sl_fname = output_prefix + '/' + ('%s_streamline.trk' % prefix)
    nib.trackvis.write(ten_sl_fname, tensor_streamlines, hdr, points_space='voxel')
    return ten_sl_fname
Example #10
def test_eudx_further():
    """ Cause we love testin.. ;-)
    """

    fimg, fbvals, fbvecs = get_data('small_101D')

    img = ni.load(fimg)
    affine = img.affine
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    x, y, z = data.shape[:3]
    seeds = np.zeros((10**4, 3))
    for i in range(10**4):
        rx = (x-1)*np.random.rand()
        ry = (y-1)*np.random.rand()
        rz = (z-1)*np.random.rand()
        seeds[i] = np.ascontiguousarray(np.array([rx, ry, rz]),
                                        dtype=np.float64)

    sphere = get_sphere('symmetric724')

    ind = quantize_evecs(ten.evecs)
    eu = EuDX(a=ten.fa, ind=ind, seeds=seeds,
              odf_vertices=sphere.vertices, a_low=.2)
    T = [e for e in eu]

    # check that there are no negative elements
    for t in T:
        assert_equal(np.sum(t.ravel() < 0), 0)

    # Test eudx with affine
    def random_affine(seeds):
        affine = np.eye(4)
        affine[:3, :] = np.random.random((3, 4))
        seeds = np.dot(seeds, affine[:3, :3].T)
        seeds += affine[:3, 3]
        return affine, seeds

    # Make two random affines and move seeds
    affine1, seeds1 = random_affine(seeds)
    affine2, seeds2 = random_affine(seeds)

    # Make tracks using different affines
    eu1 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices,
               seeds=seeds1, a_low=.2, affine=affine1)
    eu2 = EuDX(a=ten.fa, ind=ind, odf_vertices=sphere.vertices,
               seeds=seeds2, a_low=.2, affine=affine2)

    # Move from eu2 affine2 to affine1
    eu2_to_eu1 = utils.move_streamlines(eu2, output_space=affine1,
                                        input_space=affine2)
    # Check that the tracks are the same
    for sl1, sl2 in zip(eu1, eu2_to_eu1):
        assert_array_almost_equal(sl1, sl2)
Example #11
def test_single_tensor():
    evals = np.array([1.4, .35, .35]) * 10**(-3)
    evecs = np.eye(3)
    S = SingleTensor(gtab, 100, evals, evecs, snr=None)
    assert_array_almost_equal(S[gtab.b0s_mask], 100)
    assert_(np.mean(S[~gtab.b0s_mask]) < 100)

    from dipy.reconst.dti import TensorModel
    m = TensorModel(gtab)
    t = m.fit(S)

    assert_array_almost_equal(t.fa, 0.707, decimal=3)
Example #12
def FA_RGB(data, gtab):
    """
    Input : data, gtab taken from the load_data.py script.
    Return : FA and RGB as two nd numpy array
    """

    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    FA = np.clip(FA, 0, 1)
    RGB = color_fa(FA, tenfit.evecs)
    return FA, RGB
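A minimal usage sketch for FA_RGB; `data`, `gtab` and `affine` are assumed to come from the load_data.py loader the docstring mentions, and the output filenames are hypothetical.

import nibabel as nib
import numpy as np

FA, RGB = FA_RGB(data, gtab)
nib.save(nib.Nifti1Image(FA.astype(np.float32), affine), 'fa.nii.gz')
nib.save(nib.Nifti1Image(np.uint8(RGB * 255), affine), 'rgb.nii.gz')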
Example #13
def estimate_response(gtab, data, affine, mask, fa_thr=0.7):
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    mask[FA <= 0.1] = 0
    mask[FA > 1.] = 0
    indices = np.where(FA > fa_thr)
    lambdas = tenfit.evals[indices][:, :2]
    S0s = data[indices][:, 0]
    S0 = np.mean(S0s)
    l01 = np.mean(lambdas, axis=0)
    evals = np.array([l01[0], l01[1], l01[1]])
    ratio = evals[1] / evals[0]
    print('Response evals', evals, 'ratio:', ratio, '\tMean S0', S0)
    return (evals, S0), ratio
Example #14
def prepare(training, category, snr, denoised, odeconv, tv, method):

    data, affine, gtab = get_specific_data(training,
                                           category,
                                           snr,
                                           denoised)

    prefix = create_file_prefix(training,
                                category,
                                snr,
                                denoised,
                                odeconv,
                                tv,
                                method)

    if training:
        mask = nib.load('wm_mask_hardi_01.nii.gz').get_data()
    else:
        #mask = np.ones(data.shape[:-1])
        mask = nib.load('test_hardi_30_den=1_fa_0025_dilate2_mask.nii.gz').get_data()

    tenmodel = TensorModel(gtab)

    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0

    mask[FA <= 0.1] = 0
    mask[FA > 1.] = 0

    indices = np.where(FA > 0.7)
    lambdas = tenfit.evals[indices][:, :2]
    S0s = data[indices][:, 0]
    S0 = np.mean(S0s)

    if S0 == 0:
        print('S0 equals 0, switching to 1')
        S0 = 1

    l01 = np.mean(lambdas, axis=0)

    evals = np.array([l01[0], l01[1], l01[1]])

    print(evals, S0)

    return data, affine, gtab, mask, evals, S0, prefix
Example #15
def reconstruction(dwi, bval_file, bvec_file, mask=None, type='dti', b0=0.,
                   order=4):
    """ Uses Dipy to reconstruct an fODF for each voxel.

    Parameters
    ----------
    dwi : numpy array (mandatory)
        Holds the diffusion weighted image in a 4D-array (see nibabel).
    bval_file : string (mandatory)
        Path to the b-value file (FSL format).
    bvec_file : string (mandatory)
        Path to the b-vectors file (FSL format).
    mask : numpy array
        Holds the mask in a 3D array (see nibabel).
    type : string, one of {'dti', 'csd', 'csa'} (default = 'dti')
        The type of the ODF reconstruction.
    b0 : float (default = 0)
        Threshold to use for defining b0 images.
    order : int (default = 4)
        Order to use for constrained spherical deconvolution (csd) or
        constant solid angle (csa).

    Returns
    -------
    model_fit : Dipy Object (depends on the type)
        Represents the fitted model for each voxel.
    """

    # b-values and b-vectors
    bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file)
    gtab = gradient_table(bvals, bvecs, b0_threshold=b0)

    # reconstruction
    if type == 'dti':
        model = TensorModel(gtab, fit_method='WLS')
    elif type == 'csd':
        response, ratio = auto_response(gtab, dwi, roi_radius=10, fa_thr=0.7)
        model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=order)
    elif type == 'csa':
        model = CsaOdfModel(gtab, order)

    if mask is not None:
        model_fit = model.fit(dwi, mask=mask)
    else:
        model_fit = model.fit(dwi)

    return model_fit
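A minimal sketch of calling reconstruction for the three supported model types; the 4D array `dwi` and the FSL-format file paths are hypothetical.

dti_fit = reconstruction(dwi, 'dwi.bval', 'dwi.bvec', type='dti')
csd_fit = reconstruction(dwi, 'dwi.bval', 'dwi.bvec', type='csd', order=8)
csa_fit = reconstruction(dwi, 'dwi.bval', 'dwi.bvec', type='csa', order=4)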
Example #16
def response_from_mask(gtab, data, mask):
    """ Estimate the response function from a given mask.

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        Diffusion data
    mask : ndarray
        Mask to use for the estimation of the response function. For example a
        mask of the white matter voxels with FA values higher than 0.7
        (see [1]_).

    Returns
    -------
    response : tuple, (2,)
        (`evals`, `S0`)
    ratio : float
        The ratio between smallest versus largest eigenvalue of the response.

    Notes
    -----
    See csdeconv.auto_response() or csdeconv.recursive_response() if you don't
    have a computed mask for the response function estimation.

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2004. Direct estimation of the
    fiber orientation density function from diffusion-weighted MRI
    data using spherical deconvolution
    """

    ten = TensorModel(gtab)
    indices = np.where(mask > 0)

    if indices[0].size == 0:
        msg = "No voxel in mask with value > 0 were found."
        warnings.warn(msg, UserWarning)
        return (np.nan, np.nan), np.nan

    tenfit = ten.fit(data[indices])
    lambdas = tenfit.evals[:, :2]
    S0s = data[indices][:, np.nonzero(gtab.b0s_mask)[0]]

    return _get_response(S0s, lambdas)
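A minimal sketch of building the white-matter mask that response_from_mask expects, mirroring the FA > 0.7 criterion from the docstring; `data` and `gtab` are assumed to be loaded already.

import numpy as np
from dipy.reconst.dti import TensorModel, fractional_anisotropy

tenfit = TensorModel(gtab).fit(data)
FA = fractional_anisotropy(tenfit.evals)
FA[np.isnan(FA)] = 0
mask = FA > 0.7  # single-fiber white-matter voxels
response, ratio = response_from_mask(gtab, data, mask)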
Example #17
def test_boot_pmf():
    """This tests the local model used for the bootstrapping.
    """
    hsph_updated = HemiSphere.from_sphere(unit_octahedron)
    vertices = hsph_updated.vertices
    bvecs = vertices
    bvals = np.ones(len(vertices)) * 1000
    bvecs = np.insert(bvecs, 0, np.array([0, 0, 0]), axis=0)
    bvals = np.insert(bvals, 0, 0)
    gtab = gradient_table(bvals, bvecs)
    voxel = single_tensor(gtab)
    data = np.tile(voxel, (3, 3, 3, 1))
    point = np.array([1., 1., 1.])
    tensor_model = TensorModel(gtab)

    boot_pmf_gen = BootPmfGen(data, model=tensor_model, sphere=hsph_updated)
    no_boot_pmf = boot_pmf_gen.get_pmf_no_boot(point)

    model_pmf = tensor_model.fit(voxel).odf(hsph_updated)

    npt.assert_equal(len(hsph_updated.vertices), no_boot_pmf.shape[0])
    npt.assert_array_almost_equal(no_boot_pmf, model_pmf)

    # test model spherical harmonic order different from bootstrap order
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        csd_model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=6)
        assert_greater(len([lw for lw in w if issubclass(lw.category,
                                                         UserWarning)]), 0)

    boot_pmf_gen_sh4 = BootPmfGen(data, model=csd_model, sphere=hsph_updated,
                                  sh_order=4)
    pmf_sh4 = boot_pmf_gen_sh4.get_pmf(point)
    npt.assert_equal(len(hsph_updated.vertices), pmf_sh4.shape[0])
    npt.assert_(np.sum(pmf_sh4.shape) > 0)

    boot_pmf_gen_sh8 = BootPmfGen(data, model=csd_model, sphere=hsph_updated,
                                  sh_order=8)
    pmf_sh8 = boot_pmf_gen_sh8.get_pmf(point)
    npt.assert_equal(len(hsph_updated.vertices), pmf_sh8.shape[0])
    npt.assert_(np.sum(pmf_sh8.shape) > 0)
Example #18
def single_fiber_response(diffusionData, mask, gtable, fa_thr = 0.7):
    from dipy.reconst.dti import TensorModel, fractional_anisotropy
    
    ten    = TensorModel(gtable)
    tenfit = ten.fit(diffusionData, mask=mask)
    
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    
    indices = np.where(FA > fa_thr)
    lambdas = tenfit.evals[indices][:, :2]
    
    S0s     = diffusionData[indices][:, np.nonzero(gtable.b0s_mask)[0]]
    S0      = np.mean(S0s)
    l01     = np.mean(lambdas, axis=0)
    evals   = np.array([l01[0], l01[1], l01[1]])
    
    response = (evals, S0)
    ratio    = evals[1]/evals[0]
    
    return response, ratio
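A hedged follow-up check on the result: the eigenvalue ratio of a good single-fiber response is commonly quoted as roughly 0.2, so an unusually high ratio is worth flagging; `diffusionData`, `mask` and `gtable` are assumed to be in scope.

response, ratio = single_fiber_response(diffusionData, mask, gtable)
evals, S0 = response
print('response evals:', evals, 'S0:', S0)
if ratio > 0.25:  # rule-of-thumb cut-off; dataset-dependent
    print('warning: ratio {:.3f} is high; the response may be poorly '
          'estimated'.format(ratio))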
Example #19
def tensor_model(
    input_filename_data, input_filename_bvecs, input_filename_bvals, output_filename_fa=None, output_filename_evecs=None
):

    # print 'Tensor model ...'

    # print 'Loading data ...'
    img = nib.load(input_filename_data)
    data = img.get_data()
    affine = img.get_affine()

    bvals, bvecs = read_bvals_bvecs(input_filename_bvals, input_filename_bvecs)
    gtab = gradient_table(bvals, bvecs)

    mask = data[..., 0] > 50
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0

    if output_filename_fa is None:
        filename_save_fa = input_filename_data.split(".")[0] + "_tensor_fa.nii.gz"
    else:
        filename_save_fa = os.path.abspath(output_filename_fa)

    fa_img = nib.Nifti1Image(FA, img.get_affine())
    nib.save(fa_img, filename_save_fa)
    print "Saving fa to:", filename_save_fa

    if output_filename_evecs is None:
        filename_save_evecs = input_filename_data.split(".")[0] + "_tensor_evecs.nii.gz"
    else:
        filename_save_evecs = os.path.abspath(output_filename_evecs)

    evecs_img = nib.Nifti1Image(tenfit.evecs, img.get_affine())
    nib.save(evecs_img, filename_save_evecs)
    print "Saving evecs to:", filename_save_evecs

    return filename_save_fa, filename_save_evecs
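A minimal usage sketch for tensor_model with hypothetical paths; note that this helper takes bvecs before bvals, unlike read_bvals_bvecs.

fa_path, evecs_path = tensor_model('dwi.nii.gz',  # hypothetical paths
                                   'dwi.bvec',
                                   'dwi.bval',
                                   output_filename_fa='subject01_fa.nii.gz')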
Example #20
    def eudx_basic(self, dti_file, mask_file, gtab, stop_val=0.1):
        """
        Tracking with basic tensors and basic eudx - experimental
        We now force seeding at every voxel in the provided mask for
        simplicity.  Future functionality will extend these options.
        **Positional Arguments:**

                dti_file:
                    - File (registered) to use for tensor/fiber tracking
                mask_file:
                    - Brain mask to keep tensors inside the brain
                gtab:
                    - dipy formatted bval/bvec Structure

        **Optional Arguments:**
                stop_val:
                    - Value to cutoff fiber track
        """

        img = nb.load(dti_file)
        data = img.get_data()

        img = nb.load(mask_file)

        mask = img.get_data()

        # use all points in mask
        seedIdx = np.where(mask > 0)  # seed everywhere not equal to zero
        seedIdx = np.transpose(seedIdx)

        model = TensorModel(gtab)
        ten = model.fit(data, mask)
        sphere = get_sphere('symmetric724')
        ind = quantize_evecs(ten.evecs, sphere.vertices)
        eu = EuDX(a=ten.fa, ind=ind, seeds=seedIdx,
                  odf_vertices=sphere.vertices, a_low=stop_val)
        tracks = [e for e in eu]
        return (ten, tracks)
Example #21
def auto_response(gtab, data, roi_center=None, roi_radius=10, fa_thr=0.7,
                  fa_callable=fa_superior, return_number_of_voxels=False):
    """ Automatic estimation of response function using FA.

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data
    roi_center : tuple, (3,)
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]`.
    roi_radius : int
        radius of cubic ROI
    fa_thr : float
        FA threshold
    fa_callable : callable
        A callable that defines an operation comparing FA with `fa_thr`. It
        should take two positional arguments (e.g., `fa_callable(FA, fa_thr)`)
        and return a bool array.
    return_number_of_voxels : bool
        If True, returns the number of voxels used for estimating the response
        function.

    Returns
    -------
    response : tuple, (2,)
        (`evals`, `S0`)
    ratio : float
        The ratio between smallest versus largest eigenvalue of the response.
    number of voxels : int (optional)
        The number of voxels used for estimating the response function.

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. In order to do this we look for voxels with very
    anisotropic configurations. For example we can use an ROI (20x20x20) at
    the center of the volume and store the signal values for the voxels with
    FA values higher than 0.7. Of course, if we haven't precalculated FA we
    need to fit a Tensor model to the datasets, which is what we do in this
    function.

    For the response we also need to find the average S0 in the ROI. This is
    possible because, using `gtab.b0s_mask`, we can find all the S0 volumes (which
    correspond to b-values equal 0) in the dataset.

    The `response` consists always of a prolate tensor created by averaging
    the highest and second highest eigenvalues in the ROI with FA higher than
    threshold. We also include the average S0s.

    We also return the `ratio` which is used for the SDT models. If requested,
    the number of voxels used for estimating the response function is also
    returned, which can be used to judge the fidelity of the response function.
    As a rule of thumb, at least 300 voxels should be used to estimate a good
    response function (see [1]_).

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2004. Direct estimation of the
    fiber orientation density function from diffusion-weighted MRI
    data using spherical deconvolution
    """

    ten = TensorModel(gtab)
    if roi_center is None:
        ci, cj, ck = np.array(data.shape[:3]) // 2
    else:
        ci, cj, ck = roi_center
    w = roi_radius
    roi = data[int(ci - w): int(ci + w),
               int(cj - w): int(cj + w),
               int(ck - w): int(ck + w)]
    tenfit = ten.fit(roi)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    indices = np.where(fa_callable(FA, fa_thr))

    if indices[0].size == 0:
        msg = "No voxel with a FA higher than " + str(fa_thr) + " were found."
        msg += " Try a larger roi or a lower threshold."
        warnings.warn(msg, UserWarning)

    lambdas = tenfit.evals[indices][:, :2]
    S0s = roi[indices][:, np.nonzero(gtab.b0s_mask)[0]]

    response, ratio = _get_response(S0s, lambdas)

    if return_number_of_voxels:
        return response, ratio, indices[0].size

    return response, ratio
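A minimal sketch of the fa_callable hook; by default fa_superior presumably keeps voxels with FA above the threshold, and a custom callable can substitute any boolean criterion (the band bounds below are illustrative). `gtab` and `data` are assumed to be loaded.

import numpy as np

def fa_band(FA, fa_thr):
    # Illustrative criterion: an FA band instead of a one-sided threshold
    return np.logical_and(FA > fa_thr, FA < 0.95)

response, ratio, nvox = auto_response(gtab, data, roi_radius=10, fa_thr=0.7,
                                      fa_callable=fa_band,
                                      return_number_of_voxels=True)
print('voxels used:', nvox)  # docstring rule of thumb: at least 300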
Example #22
def test_boot_pmf():
    # This tests the local model used for the bootstrapping.
    hsph_updated = HemiSphere.from_sphere(unit_octahedron)
    vertices = hsph_updated.vertices
    bvecs = vertices
    bvals = np.ones(len(vertices)) * 1000
    bvecs = np.insert(bvecs, 0, np.array([0, 0, 0]), axis=0)
    bvals = np.insert(bvals, 0, 0)
    gtab = gradient_table(bvals, bvecs)
    voxel = single_tensor(gtab)
    data = np.tile(voxel, (3, 3, 3, 1))
    point = np.array([1., 1., 1.])
    tensor_model = TensorModel(gtab)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",
                                message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)
        boot_pmf_gen = BootPmfGen(data,
                                  model=tensor_model,
                                  sphere=hsph_updated)
    no_boot_pmf = boot_pmf_gen.get_pmf_no_boot(point)

    model_pmf = tensor_model.fit(voxel).odf(hsph_updated)

    npt.assert_equal(len(hsph_updated.vertices), no_boot_pmf.shape[0])
    npt.assert_array_almost_equal(no_boot_pmf, model_pmf)

    # test model spherical harmonic order different than bootstrap order
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter("always", category=UserWarning)
        warnings.simplefilter("always", category=PendingDeprecationWarning)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
        # Tests that the first caught warning comes from
        # the CSD model constructor
        npt.assert_(issubclass(w[0].category, UserWarning))
        npt.assert_("Number of parameters required " in str(w[0].message))
        # Tests that additional warnings are raised for outdated SH basis
        npt.assert_(len(w) > 1)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",
                                message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)
        boot_pmf_gen_sh4 = BootPmfGen(data,
                                      sphere=hsph_updated,
                                      model=csd_model,
                                      sh_order=4)
        pmf_sh4 = boot_pmf_gen_sh4.get_pmf(point)
    npt.assert_equal(len(hsph_updated.vertices), pmf_sh4.shape[0])
    npt.assert_(np.sum(pmf_sh4.shape) > 0)

    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",
                                message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)
        boot_pmf_gen_sh8 = BootPmfGen(data,
                                      model=csd_model,
                                      sphere=hsph_updated,
                                      sh_order=8)
    pmf_sh8 = boot_pmf_gen_sh8.get_pmf(point)
    npt.assert_equal(len(hsph_updated.vertices), pmf_sh8.shape[0])
    npt.assert_(np.sum(pmf_sh8.shape) > 0)
Example #23
def auto_response(gtab, data, roi_center=None, roi_radius=10, fa_thr=0.7):
    """ Automatic estimation of response function using FA

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data
    roi_center : tuple, (3,)
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]`.
    roi_radius : int
        radius of cubic ROI
    fa_thr : float
        FA threshold

    Returns
    -------
    response : tuple, (2,)
        (`evals`, `S0`)
    ratio : float
        the ratio between smallest versus largest eigenvalue of the response

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. In order to do this we look for voxels with very
    anisotropic configurations. For example we can use an ROI (20x20x20) at
    the center of the volume and store the signal values for the voxels with
    FA values higher than 0.7. Of course, if we haven't precalculated FA we
    need to fit a Tensor model to the datasets, which is what we do in this
    function.

    For the response we also need to find the average S0 in the ROI. This is
    possible because, using `gtab.b0s_mask`, we can find all the S0 volumes (which
    correspond to b-values equal 0) in the dataset.

    The `response` consists always of a prolate tensor created by averaging
    the highest and second highest eigenvalues in the ROI with FA higher than
    threshold. We also include the average S0s.

    Finally, we also return the `ratio` which is used for the SDT models.
    """

    ten = TensorModel(gtab)
    if roi_center is None:
        ci, cj, ck = np.array(data.shape[:3]) // 2
    else:
        ci, cj, ck = roi_center
    w = roi_radius
    roi = data[ci - w: ci + w, cj - w: cj + w, ck - w: ck + w]
    tenfit = ten.fit(roi)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    indices = np.where(FA > fa_thr)
    lambdas = tenfit.evals[indices][:, :2]
    S0s = roi[indices][:, np.nonzero(gtab.b0s_mask)[0]]
    S0 = np.mean(S0s)
    l01 = np.mean(lambdas, axis=0)
    evals = np.array([l01[0], l01[1], l01[1]])
    response = (evals, S0)
    ratio = evals[1]/evals[0]
    return response, ratio
Example #24
def test_wls_and_ls_fit():
    """
    Tests the WLS and LS fitting functions to see if they return the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    # Defining Test Voxel (avoid nibabel dependency)

    # Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    gtab = grad.gradient_table(bval, bvec)
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    npt.assert_almost_equal(Y[0], b0)
    Y.shape = (-1,) + Y.shape

    # Testing WLS Fit on Single Voxel
    # If you do something wonky (passing min_signal<0), you should get an
    # error:
    npt.assert_raises(ValueError, TensorModel, gtab, fit_method='WLS',
                      min_signal=-1)

    # Estimate tensor from test signals
    model = TensorModel(gtab, fit_method='WLS', return_S0_hat=True)
    tensor_est = model.fit(Y)
    npt.assert_equal(tensor_est.shape, Y.shape[:-1])
    npt.assert_array_almost_equal(tensor_est.evals[0], evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                  err_msg="Calculation of tensor from Y does "
                                          "not compare to analytical solution")
    npt.assert_almost_equal(tensor_est.md[0], md)
    npt.assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, fit_method='LS')
    tensor_est = model.fit(y)
    npt.assert_equal(tensor_est.shape, tuple())
    npt.assert_array_almost_equal(tensor_est.evals, evals)
    npt.assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    npt.assert_almost_equal(tensor_est.md, md)
    npt.assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    npt.assert_array_almost_equal(tensor_est.linearity, linearity(evals))
    npt.assert_array_almost_equal(tensor_est.planarity, planarity(evals))
    npt.assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
Example #25
def main():
    parser = _build_args_parser()
    args = parser.parse_args()

    img = nib.load(args.input)
    data = img.get_data()

    print('\ndata shape ({}, {}, {}, {})'.format(data.shape[0], data.shape[1],
                                                 data.shape[2], data.shape[3]))
    print('total voxels {}'.format(np.prod(data.shape[:3])))

    # remove negatives
    print('\nclipping negatives ({} voxels, {:.2f} % of total)'.format(
        (data < 0).sum(),
        100 * (data < 0).sum() / float(np.prod(data.shape[:3]))))
    data = np.clip(data, 0, np.inf)

    affine = img.affine
    if args.mask is None:
        mask = None
        masksum = np.prod(data.shape[:3])
    else:
        mask = nib.load(args.mask).get_data().astype(bool)
        masksum = mask.sum()

    print('\nMask has {} voxels, {:.2f} % of total'.format(
        masksum, 100 * masksum / float(np.prod(data.shape[:3]))))

    # Validate bvals and bvecs
    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)

    if not is_normalized_bvecs(bvecs):
        print('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    # detect unique b-shell and assign shell id to each volume
    # sort bvals to get monotone increasing bvalue
    bvals_argsort = np.argsort(bvals)
    bvals_sorted = bvals[bvals_argsort]

    b_shell_threshold = 25.
    unique_bvalues = []
    shell_idx = []

    unique_bvalues.append(bvals_sorted[0])
    shell_idx.append(0)
    for newb in bvals_sorted[1:]:
        # check if volume is in existing shell
        done = False
        for i, b in enumerate(unique_bvalues):
            if (newb - b_shell_threshold < b) and (newb + b_shell_threshold >
                                                   b):
                shell_idx.append(i)
                done = True
        if not done:
            unique_bvalues.append(newb)
            shell_idx.append(i + 1)

    unique_bvalues = np.array(unique_bvalues)
    # un-sort shells
    shells = np.zeros_like(bvals)
    shells[bvals_argsort] = shell_idx
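    # Note: volumes whose b-value falls within +/- b_shell_threshold
    # (25 s/mm^2) of an existing shell are merged into it; `shells[k]`
    # now holds the shell id of volume k in the original (unsorted) order.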

    print('\nWe have {} shells'.format(len(unique_bvalues)))
    print('with b-values {}\n'.format(unique_bvalues))

    for i in range(len(unique_bvalues)):
        shell_b = bvals[shells == i]
        print('shell {}: n = {}, min/max {} {}'.format(i, len(shell_b),
                                                       shell_b.min(),
                                                       shell_b.max()))

    # Get tensors
    method = 'WLS'
    min_signal = 1e-16
    print('\nUsing fitting method {}'.format(method))
    # print('Using minimum signal = {}'.format(min_signal))

    b0_thr = bvals.min() + 10
    print('\nassuming existence of b0 (thr = {})\n'.format(b0_thr))

    mds = []
    for i in range(len(unique_bvalues) - 1):
        # max_shell = i+1
        print('fitting using first {} shells (bmax = {})'.format(
            i + 2, bvals[shells == i + 1].max()))

        # restricted gtab
        gtab = gradient_table(bvals[shells <= i + 1],
                              bvecs[shells <= i + 1],
                              b0_threshold=b0_thr)

        tenmodel = TensorModel(gtab, fit_method=method, min_signal=min_signal)

        tenfit = tenmodel.fit(data[..., shells <= i + 1], mask)

        mds.append(tenfit.md[mask])

    peaks = []
    fifty = []
    th = 0.01
    print('\nonly using values inside quantile [{}, {}] for plotting'.format(
        th, 1 - th))
    for i in range(len(unique_bvalues) - 1):
        plt.figure()
        tit = 'MD^-1, first {} shells (bmax = {})'.format(
            i + 2, bvals[shells == i + 1].max())
        print('\nbmax = {}'.format(bvals[shells == i + 1].max()))
        # truncate lower and upper MD to remove crazy outliers
        minval = np.quantile(mds[i], th)
        maxval = np.quantile(mds[i], 1 - th)
        tmp = mds[i]
        n_before = tmp.shape[0]
        tmp = tmp[np.logical_and(tmp >= minval, tmp <= maxval)]**-1
        print('quantile threshold removed {} voxels'.format(
            n_before - tmp.shape[0]))

        # remove high diffusivity non physical outlier
        # free water diffusivity at in-vivo brain temperature
        idx1 = (tmp <= 1 / 3.0e-3)
        print(
            '{} voxels above free water diffusivity ({:.2f} % of mask)'.format(
                idx1.sum(), 100 * idx1.sum() / float(masksum)))
        # remove low diffusivity probable outlier
        th_diff = 0.05
        # 5% of mean diffusivity of in-vivo WM at in-vivo brain temperature
        idx2 = (tmp >= 1 / (th_diff * 1.0e-3))
        print(
            '{} voxels below {} of in-vivo WM diffusivity ({:.2f} % of mask)'.
            format(idx2.sum(), th_diff, 100 * idx2.sum() / float(masksum)))
        tmp = tmp[np.logical_not(np.logical_or(idx1, idx2))]
        # fit smoothed curve for peak extraction
        gkde = gaussian_kde(tmp)
        plt.hist(tmp, bins=100, density=True, color='grey')
        bs = np.linspace(tmp.min(), tmp.max(), 1000)
        smoothed = gkde.pdf(bs)
        plt.plot(bs, smoothed, color='blue', linewidth=2)
        # peak extraction
        smoothed_peak = bs[smoothed.argmax()]
        plt.axvline(smoothed_peak,
                    color='red',
                    label='peak ({:.0f})'.format(smoothed_peak))
        peaks.append(smoothed_peak)
        # useless extra lines
        onequart = np.quantile(tmp, 0.25)
        threequart = np.quantile(tmp, 0.75)
        twoquart = np.quantile(tmp, 0.5)
        fifty.append(twoquart)
        plt.axvline(onequart,
                    color='pink',
                    label='25% ({:.0f})'.format(onequart))
        plt.axvline(twoquart,
                    color='yellow',
                    label='50% ({:.0f})'.format(twoquart))
        plt.axvline(threequart,
                    color='green',
                    label='75% ({:.0f})'.format(threequart))
        plt.title(tit)
        plt.legend(loc=1)

        plt.savefig('./bvalEst_bmax_{}.png'.format(unique_bvalues[i + 1]))

    print(
        '\nHigher-than-required bmax will artifactually decrease MD, increasing 1/MD'
    )
    print(
        'The error on the estimation of 1/MD should be small when the peak is close to bmax'
    )
    print(
        'This is under the assumption that we have a valid WM mask so that the tissues are somewhat uniform'
    )

    bmaxs = np.array(
        [bvals[shells == i + 1].max() for i in range(len(unique_bvalues) - 1)])

    plt.figure()
    plt.plot(bmaxs, peaks, '-x', label='fit')
    plt.plot(bmaxs, bmaxs, label='identity')
    plt.xlabel('bmax')
    plt.ylabel('MD^-1')
    plt.legend()
    plt.title('PEAK')

    plt.savefig('./bvalEst_peak.png')

    plt.figure()
    plt.plot(bmaxs, fifty, '-x', label='fit')
    plt.plot(bmaxs, bmaxs, label='identity')
    plt.xlabel('bmax')
    plt.ylabel('MD^-1')
    plt.legend()
    plt.title('50% quartile')

    plt.savefig('./bvalEst_50Q.png')
Example #26
def segment_from_dwi(image,
                     bvals_file,
                     bvecs_file,
                     ROI,
                     threshold,
                     mask=None,
                     filename=None,
                     overwrite=True):
    """
    Takes a dwi, bvals and bvecs files and computes FA, RGB and a binary mask
    estimation of the supplied ROI according to a threshold on the RGB.
    """

    # Load raw dwi image
    data = image.get_data()
    affine = image.get_affine()

    # Load bval and bvec files, fit the tensor model
    print("Now fitting tensor model")
    b_vals, b_vecs = read_bvals_bvecs(bvals_file, bvecs_file)
    gtab = gradient_table_from_bvals_bvecs(b_vals, b_vecs)
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    # We clamp the FA between 0 and 1 to remove degenerate tensors
    FA = np.clip(FA, 0, 1)

    if mask is not None:
        FA = apply_mask(FA, mask)

    FA_vol = nib.Nifti1Image(FA.astype('float32'), affine)

    if filename is None:
        FA_path = 'FA.nii.gz'
    else:
        FA_path = filename + '_FA.nii.gz'

    # Check if FA already exists
    if os.path.exists(FA_path):
        print("FA", FA_path, "already exists!")

        if overwrite is True:
            nib.save(FA_vol, FA_path)
            print("FA", FA_path, "was overwritten")
        else:
            print("New FA was not saved")
    else:
        nib.save(FA_vol, FA_path)
        print("FA was saved as ", FA_path)

    RGB = color_fa(FA, tenfit.evecs)

    if filename is None:
        RGB_path = 'RGB.nii.gz'
    else:
        RGB_path = filename + '_RGB.nii.gz'

    RGB_vol = nib.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)

    # Check if RGB already exists
    if os.path.exists(RGB_path):
        print("RGB", RGB_path, "already exists!")

        if overwrite is True:
            nib.save(RGB_vol, RGB_path)
            print("RGB", RGB_path, "was overwritten")
        else:
            print("New RGB was not saved")
    else:
        nib.save(RGB_vol, RGB_path)
        print("RGB was saved as ", RGB_path)

    return segment_from_RGB(RGB, ROI, threshold)
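A hedged usage sketch for segment_from_dwi; the ROI array and RGB threshold are placeholders, since their exact format is defined by segment_from_RGB, which is not shown here.

import nibabel as nib

img = nib.load('dwi.nii.gz')              # hypothetical input volume
roi = nib.load('roi.nii.gz').get_data()   # hypothetical ROI array
mask = segment_from_dwi(img, 'dwi.bval', 'dwi.bvec', ROI=roi,
                        threshold=0.5,    # placeholder RGB threshold
                        filename='subject01')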
Example #27
def prepare_data_for_actors(dwi_filename, bvals_filename, bvecs_filename,
                            target_template_filename, slices_choice,
                            shells=None):
    # Load and prepare the data
    dwi_img = nib.load(dwi_filename)
    dwi_data = dwi_img.get_data()
    dwi_affine = dwi_img.get_affine()

    bvals, bvecs = read_bvals_bvecs(bvals_filename, bvecs_filename)

    target_template_img = nib.load(target_template_filename)
    target_template_data = target_template_img.get_data()
    target_template_affine = target_template_img.affine
    mask_data = np.zeros(target_template_data.shape)
    mask_data[target_template_data > 0] = 1

    # Prepare mask for tensors fit
    x_slice, y_slice, z_slice = slices_choice
    mask_data = prepare_slices_mask(mask_data,
                                    x_slice, y_slice, z_slice)

    # Extract B0
    gtab = gradient_table(bvals, normalize_bvecs(bvecs), b0_threshold=10)
    b0_idx = np.where(gtab.b0s_mask)[0]
    mean_b0 = np.mean(dwi_data[..., b0_idx], axis=3, dtype=dwi_data.dtype)

    if shells:
        indices = [get_shell_indices(bvals, shell) for shell in shells]
        indices = np.sort(np.hstack(indices))

        if len(indices) < 1:
            raise ValueError(
                'There are no volumes that have the supplied b-values.')
        shell_data = np.zeros((dwi_data.shape[:-1] + (len(indices),)),
                              dtype=dwi_data.dtype)
        shell_bvecs = np.zeros((len(indices), 3))
        shell_bvals = np.zeros((len(indices),))
        for i, indice in enumerate(indices):
            shell_data[..., i] = dwi_data[..., indice]
            shell_bvals[i] = bvals[indice]
            shell_bvecs[i, :] = bvecs[indice, :]
    else:
        shell_data = dwi_data
        shell_bvals = bvals
        shell_bvecs = bvecs

    # Register the DWI data to the template
    transformed_dwi, transformation = register_image(target_template_data,
                                                     target_template_affine,
                                                     mean_b0,
                                                     dwi_affine,
                                                     transformation_type='rigid',
                                                     dwi=shell_data)

    # Rotate gradients
    rotated_bvecs = np.dot(shell_bvecs, transformation[0:3, 0:3])

    rotated_bvecs = normalize_bvecs(rotated_bvecs)
    rotated_gtab = gradient_table(shell_bvals, rotated_bvecs, b0_threshold=10)

    # Get tensors
    tensor_model = TensorModel(rotated_gtab, fit_method='LS')
    tensor_fit = tensor_model.fit(transformed_dwi, mask_data)
    # Get FA
    fa_map = np.clip(fractional_anisotropy(tensor_fit.evals), 0, 1)

    # Get eigen vals/vecs
    evals = np.zeros(target_template_data.shape + (1,))
    evals[..., 0] = tensor_fit.evals[..., 0] / np.max(tensor_fit.evals[..., 0])
    evecs = np.zeros(target_template_data.shape + (1, 3))
    evecs[:, :, :, 0, :] = tensor_fit.evecs[..., 0]

    return fa_map, evals, evecs
Example #28
    def run(self,
            data_files,
            bvals_files,
            bvecs_files,
            mask_file,
            bbox_threshold=[0.6, 1, 0, 0.1, 0, 0.1],
            out_dir='',
            out_file='product.json',
            out_mask_cc='cc.nii.gz',
            out_mask_noise='mask_noise.nii.gz'):
        """Compute the signal-to-noise ratio in the corpus callosum.

        Parameters
        ----------
        data_files : string
            Path to the dwi.nii.gz file. This path may contain wildcards to
            process multiple inputs at once.
        bvals_files : string
            Path of bvals.
        bvecs_files : string
            Path of bvecs.
        mask_file : string
            Path of a brain mask file.
        bbox_threshold : variable float, optional
            Threshold for bounding box, values separated with commas for ex.
            [0.6,1,0,0.1,0,0.1]. (default (0.6, 1, 0, 0.1, 0, 0.1))
        out_dir : string, optional
            Where the resulting file will be saved. (default '')
        out_file : string, optional
            Name of the result file to be saved. (default 'product.json')
        out_mask_cc : string, optional
            Name of the CC mask volume to be saved (default 'cc.nii.gz')
        out_mask_noise : string, optional
            Name of the mask noise volume to be saved
            (default 'mask_noise.nii.gz')

        """
        io_it = self.get_io_iterator()

        for dwi_path, bvals_path, bvecs_path, mask_path, out_path, \
                cc_mask_path, mask_noise_path in io_it:
            data, affine = load_nifti(dwi_path)
            bvals, bvecs = read_bvals_bvecs(bvals_path, bvecs_path)
            gtab = gradient_table(bvals=bvals, bvecs=bvecs)

            mask, affine = load_nifti(mask_path)

            logging.info('Computing tensors...')
            tenmodel = TensorModel(gtab)
            tensorfit = tenmodel.fit(data, mask=mask)

            logging.info('Computing worst-case/best-case SNR using the CC...')

            if np.ndim(data) == 4:
                CC_box = np.zeros_like(data[..., 0])
            elif np.ndim(data) == 3:
                CC_box = np.zeros_like(data)
            else:
                raise IOError('DWI data has invalid dimensions')

            mins, maxs = bounding_box(mask)
            mins = np.array(mins)
            maxs = np.array(maxs)
            diff = (maxs - mins) // 4
            bounds_min = mins + diff
            bounds_max = maxs - diff

            CC_box[bounds_min[0]:bounds_max[0], bounds_min[1]:bounds_max[1],
                   bounds_min[2]:bounds_max[2]] = 1

            if len(bbox_threshold) != 6:
                raise IOError('bbox_threshold should have 6 float values')

            mask_cc_part, cfa = segment_from_cfa(tensorfit,
                                                 CC_box,
                                                 bbox_threshold,
                                                 return_cfa=True)

            if not np.count_nonzero(mask_cc_part.astype(np.uint8)):
                logging.warning("Empty mask: corpus callosum not found."
                                " Update your data or your threshold")

            save_nifti(cc_mask_path, mask_cc_part.astype(np.uint8), affine)
            logging.info('CC mask saved as {0}'.format(cc_mask_path))

            masked_data = data[mask_cc_part]
            mean_signal = 0
            if masked_data.size:
                mean_signal = np.mean(masked_data, axis=0)
            mask_noise = binary_dilation(mask, iterations=10)
            mask_noise[..., :mask_noise.shape[-1] // 2] = 1
            mask_noise = ~mask_noise

            save_nifti(mask_noise_path, mask_noise.astype(np.uint8), affine)
            logging.info('Mask noise saved as {0}'.format(mask_noise_path))

            noise_std = 0
            if np.count_nonzero(mask_noise.astype(np.uint8)):
                noise_std = np.std(data[mask_noise, :])

            logging.info('Noise standard deviation sigma= ' + str(noise_std))

            idx = np.sum(gtab.bvecs, axis=-1) == 0
            gtab.bvecs[idx] = np.inf
            axis_X = np.argmin(
                np.sum((gtab.bvecs - np.array([1, 0, 0]))**2, axis=-1))
            axis_Y = np.argmin(
                np.sum((gtab.bvecs - np.array([0, 1, 0]))**2, axis=-1))
            axis_Z = np.argmin(
                np.sum((gtab.bvecs - np.array([0, 0, 1]))**2, axis=-1))

            SNR_output = []
            SNR_directions = []
            for direction in ['b0', axis_X, axis_Y, axis_Z]:
                if direction == 'b0':
                    SNR = mean_signal[0] / noise_std if noise_std else 0
                    logging.info("SNR for the b=0 image is: " + str(SNR))
                else:
                    # compute the SNR before logging it; the original logged
                    # the previous direction's value
                    SNR = mean_signal[direction] / noise_std if noise_std else 0
                    logging.info("SNR for direction " + str(direction) + " " +
                                 str(gtab.bvecs[direction]) + " is: " +
                                 str(SNR))
                    SNR_directions.append(direction)
                SNR_output.append(SNR)

            data = []
            data.append({
                'data':
                str(SNR_output[0]) + ' ' + str(SNR_output[1]) + ' ' +
                str(SNR_output[2]) + ' ' + str(SNR_output[3]),
                'directions':
                'b0' + ' ' + str(SNR_directions[0]) + ' ' +
                str(SNR_directions[1]) + ' ' + str(SNR_directions[2])
            })

            with open(os.path.join(out_dir, out_path), 'w') as myfile:
                json.dump(data, myfile)
Example #29
ocmask_data = oc_mask.get_fdata()
ocmask_affine = oc_mask.affine

ocmask_good = resample_img(oc_mask,
                           affine,
                           img.shape[0:3],
                           interpolation='continuous')
ocmask_data = ocmask_good.get_fdata()
ocmask_affine = ocmask_good.affine

print('Computing brain mask...')
b0_mask, mask = median_otsu(data, vol_idx=np.arange(1, 138))

print('Computing tensors...')
tenmodel = TensorModel(gtab)
tensorfit = tenmodel.fit(data, mask=mask)
"""Next, we set our red-green-blue thresholds to (0.6, 1) in the x axis
and (0, 0.1) in the y and z axes respectively.
These values work well in practice to isolate the very RED voxels of the cfa map.

Then, as assurance, we want just RED voxels in the CC (there could be
noisy red voxels around the brain mask and we don't want those). Unless the brain
acquisition was badly aligned, the CC is always close to the mid-sagittal slice.

The following lines perform these two operations and then save the computed mask.
"""

print('Computing worst-case/best-case SNR using the corpus callosum...')

#region = 80
Example #30
    def _run_interface(self, runtime):
        from scipy.special import gamma
        from dipy.reconst.dti import TensorModel
        import gc

        img = nb.load(self.inputs.in_file)
        hdr = img.header.copy()
        affine = img.affine
        data = img.get_fdata()
        gtab = self._get_gradient_table()

        if isdefined(self.inputs.in_mask):
            msk = np.asanyarray(nb.load(self.inputs.in_mask).dataobj).astype(
                np.uint8)
        else:
            msk = np.ones(data.shape[:3], dtype=np.uint8)

        try_b0 = True
        if isdefined(self.inputs.noise_mask):
            noise_msk = (nb.load(self.inputs.noise_mask).get_fdata(
                dtype=np.float32).reshape(-1))
            noise_msk[noise_msk > 0.5] = 1
            noise_msk[noise_msk < 1.0] = 0
            noise_msk = noise_msk.astype(np.uint8)
            try_b0 = False
        elif np.all(data[msk == 0, 0] == 0):
            IFLOGGER.info("Input data are masked.")
            noise_msk = msk.reshape(-1).astype(np.uint8)
        else:
            noise_msk = (1 - msk).reshape(-1).astype(np.uint8)

        nb0 = np.sum(gtab.b0s_mask)
        dsample = data.reshape(-1, data.shape[-1])

        if try_b0 and (nb0 > 1):
            noise_data = dsample.take(np.where(gtab.b0s_mask),
                                      axis=-1)[noise_msk == 0, ...]
            n = nb0
        else:
            nodiff = np.where(~gtab.b0s_mask)
            nodiffidx = nodiff[0].tolist()
            n = 20 if len(nodiffidx) >= 20 else len(nodiffidx)
            idxs = np.random.choice(nodiffidx, size=n, replace=False)
            noise_data = dsample.take(idxs, axis=-1)[noise_msk == 1, ...]

        # Estimate sigma required by RESTORE
        mean_std = np.median(noise_data.std(-1))
        try:
            bias = 1.0 - np.sqrt(2.0 / (n - 1)) * (gamma(n / 2.0) / gamma(
                (n - 1) / 2.0))
        except Exception:
            bias = 0.0

        sigma = mean_std * (1 + bias)
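        # The gamma-function expression above is the classic c4(n) correction
        # for the bias of a sample standard deviation of n normal samples:
        # E[s] = c4(n) * sigma, with c4(n) = sqrt(2 / (n - 1)) *
        # gamma(n / 2) / gamma((n - 1) / 2). Multiplying by
        # (1 + bias) = (2 - c4(n)) approximately inverts that bias, since
        # 1 / c4(n) ~= 2 - c4(n) when c4(n) is close to 1.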

        if sigma == 0:
            IFLOGGER.warning(
                "Noise std is 0.0, looks like data was masked and "
                "noise cannot be estimated correctly. Using default "
                "tensor model instead of RESTORE.")
            dti = TensorModel(gtab)
        else:
            IFLOGGER.info("Performing RESTORE with noise std=%.4f.", sigma)
            dti = TensorModel(gtab, fit_method="RESTORE", sigma=sigma)

        try:
            fit_restore = dti.fit(data, msk)
        except TypeError:
            dti = TensorModel(gtab)
            fit_restore = dti.fit(data, msk)

        hdr.set_data_dtype(np.float32)
        hdr["data_type"] = 16

        for k in self._outputs().get():
            scalar = getattr(fit_restore, k)
            hdr.set_data_shape(np.shape(scalar))
            nb.Nifti1Image(scalar.astype(np.float32), affine,
                           hdr).to_filename(self._gen_filename(k))

        return runtime
Example #31
def main():
    parser = _build_arg_parser()
    args = parser.parse_args()

    if not args.not_all:
        args.fa = args.fa or 'fa.nii.gz'
        args.ga = args.ga or 'ga.nii.gz'
        args.rgb = args.rgb or 'rgb.nii.gz'
        args.md = args.md or 'md.nii.gz'
        args.ad = args.ad or 'ad.nii.gz'
        args.rd = args.rd or 'rd.nii.gz'
        args.mode = args.mode or 'mode.nii.gz'
        args.norm = args.norm or 'tensor_norm.nii.gz'
        args.tensor = args.tensor or 'tensor.nii.gz'
        args.evecs = args.evecs or 'tensor_evecs.nii.gz'
        args.evals = args.evals or 'tensor_evals.nii.gz'
        args.residual = args.residual or 'dti_residual.nii.gz'
        args.p_i_signal =\
            args.p_i_signal or 'physically_implausible_signals_mask.nii.gz'
        args.pulsation = args.pulsation or 'pulsation_and_misalignment.nii.gz'

    outputs = [
        args.fa, args.ga, args.rgb, args.md, args.ad, args.rd, args.mode,
        args.norm, args.tensor, args.evecs, args.evals, args.residual,
        args.p_i_signal, args.pulsation
    ]
    if args.not_all and not any(outputs):
        parser.error('When using --not_all, you need to specify at least ' +
                     'one metric to output.')

    assert_inputs_exist(parser, [args.input, args.bvals, args.bvecs],
                        args.mask)
    assert_outputs_exist(parser, args, outputs)

    img = nib.load(args.input)
    data = img.get_fdata(dtype=np.float32)
    affine = img.affine
    if args.mask is None:
        mask = None
    else:
        mask = get_data_as_mask(nib.load(args.mask), dtype=bool)

    # Validate bvals and bvecs
    bvals, bvecs = read_bvals_bvecs(args.bvals, args.bvecs)
    logging.info('Tensor estimation with the {} method...'.format(args.method))

    if not is_normalized_bvecs(bvecs):
        logging.warning('Your b-vectors do not seem normalized...')
        bvecs = normalize_bvecs(bvecs)

    check_b0_threshold(args.force_b0_threshold, bvals.min())
    gtab = gradient_table(bvals, bvecs, b0_threshold=bvals.min())

    # Get tensors
    if args.method == 'restore':
        sigma = ne.estimate_sigma(data)
        tenmodel = TensorModel(gtab,
                               fit_method=args.method,
                               sigma=sigma,
                               min_signal=_get_min_nonzero_signal(data))
    else:
        tenmodel = TensorModel(gtab,
                               fit_method=args.method,
                               min_signal=_get_min_nonzero_signal(data))

    tenfit = tenmodel.fit(data, mask)

    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    FA = np.clip(FA, 0, 1)

    if args.tensor:
        # Get the Tensor values and format them for visualisation
        # in the Fibernavigator.
        tensor_vals = lower_triangular(tenfit.quadratic_form)
        correct_order = [0, 1, 3, 2, 4, 5]
        tensor_vals_reordered = tensor_vals[..., correct_order]
        fiber_tensors = nib.Nifti1Image(
            tensor_vals_reordered.astype(np.float32), affine)
        nib.save(fiber_tensors, args.tensor)

        del tensor_vals, fiber_tensors, tensor_vals_reordered

    if args.fa:
        fa_img = nib.Nifti1Image(FA.astype(np.float32), affine)
        nib.save(fa_img, args.fa)

        del fa_img

    if args.ga:
        GA = geodesic_anisotropy(tenfit.evals)
        GA[np.isnan(GA)] = 0

        ga_img = nib.Nifti1Image(GA.astype(np.float32), affine)
        nib.save(ga_img, args.ga)

        del GA, ga_img

    if args.rgb:
        RGB = color_fa(FA, tenfit.evecs)
        rgb_img = nib.Nifti1Image(np.array(255 * RGB, 'uint8'), affine)
        nib.save(rgb_img, args.rgb)

        del RGB, rgb_img

    if args.md:
        MD = mean_diffusivity(tenfit.evals)
        md_img = nib.Nifti1Image(MD.astype(np.float32), affine)
        nib.save(md_img, args.md)

        del MD, md_img

    if args.ad:
        AD = axial_diffusivity(tenfit.evals)
        ad_img = nib.Nifti1Image(AD.astype(np.float32), affine)
        nib.save(ad_img, args.ad)

        del AD, ad_img

    if args.rd:
        RD = radial_diffusivity(tenfit.evals)
        rd_img = nib.Nifti1Image(RD.astype(np.float32), affine)
        nib.save(rd_img, args.rd)

        del RD, rd_img

    if args.mode:
        # Compute tensor mode
        inter_mode = dipy_mode(tenfit.quadratic_form)

        # Since the mode computation can generate NANs when not masked,
        # we need to remove them.
        non_nan_indices = np.isfinite(inter_mode)
        mode = np.zeros(inter_mode.shape)
        mode[non_nan_indices] = inter_mode[non_nan_indices]

        mode_img = nib.Nifti1Image(mode.astype(np.float32), affine)
        nib.save(mode_img, args.mode)

        del inter_mode, mode_img, mode

    if args.norm:
        NORM = norm(tenfit.quadratic_form)
        norm_img = nib.Nifti1Image(NORM.astype(np.float32), affine)
        nib.save(norm_img, args.norm)

        del NORM, norm_img

    if args.evecs:
        evecs = tenfit.evecs.astype(np.float32)
        evecs_img = nib.Nifti1Image(evecs, affine)
        nib.save(evecs_img, args.evecs)

        # save individual e-vectors also
        for i in range(3):
            e_img = nib.Nifti1Image(evecs[..., i], affine)
            nib.save(e_img, add_filename_suffix(args.evecs, '_v' + str(i + 1)))
            del e_img

        del evecs, evecs_img

    if args.evals:
        evals = tenfit.evals.astype(np.float32)
        evals_img = nib.Nifti1Image(evals, affine)
        nib.save(evals_img, args.evals)

        # save individual e-values also
        for i in range(3):
            e_img = nib.Nifti1Image(evals[..., i], affine)
            nib.save(e_img, add_filename_suffix(args.evals, '_e' + str(i + 1)))
            del e_img

        del evals, evals_img

    if args.p_i_signal:
        S0 = np.mean(data[..., gtab.b0s_mask], axis=-1, keepdims=True)
        DWI = data[..., ~gtab.b0s_mask]
        pis_mask = np.max(S0 < DWI, axis=-1)

        if args.mask is not None:
            pis_mask *= mask

        pis_img = nib.Nifti1Image(pis_mask.astype(np.int16), affine)
        nib.save(pis_img, args.p_i_signal)

        del pis_img, S0, DWI

    if args.pulsation:
        STD = np.std(data[..., ~gtab.b0s_mask], axis=-1)

        if args.mask is not None:
            STD *= mask

        std_img = nib.Nifti1Image(STD.astype(np.float32), affine)
        nib.save(std_img, add_filename_suffix(args.pulsation, '_std_dwi'))

        if np.sum(gtab.b0s_mask) <= 1:
            logger.info('Not enough b=0 images to output standard '
                        'deviation map')
        else:
            if np.sum(gtab.b0s_mask) == 2:
                logger.info('Only two b=0 images. Be careful with the '
                            'interpretation of this std map')

            STD = np.std(data[..., gtab.b0s_mask], axis=-1)

            if args.mask is not None:
                STD *= mask

            std_img = nib.Nifti1Image(STD.astype(np.float32), affine)
            nib.save(std_img, add_filename_suffix(args.pulsation, '_std_b0'))

        del STD, std_img

    if args.residual:
        # Mean residual image
        S0 = np.mean(data[..., gtab.b0s_mask], axis=-1)
        data_diff = np.zeros(data.shape, dtype=np.float32)

        for i in range(data.shape[0]):
            if args.mask is not None:
                tenfit2 = tenmodel.fit(data[i, :, :, :], mask[i, :, :])
            else:
                tenfit2 = tenmodel.fit(data[i, :, :, :])

            data_diff[i, :, :, :] = np.abs(
                tenfit2.predict(gtab, S0[i, :, :]).astype(np.float32) -
                data[i, :, :])

        R = np.mean(data_diff[..., ~gtab.b0s_mask], axis=-1, dtype=np.float32)

        if args.mask is not None:
            R *= mask

        R_img = nib.Nifti1Image(R.astype(np.float32), affine)
        nib.save(R_img, args.residual)

        del R, R_img, S0

        # Each volume's residual statistics
        if args.mask is None:
            logger.info("Outlier detection will not be performed, since no "
                        "mask was provided.")
        stats = [
            dict.fromkeys([
                'label', 'mean', 'iqr', 'cilo', 'cihi', 'whishi', 'whislo',
                'fliers', 'q1', 'med', 'q3'
            ], []) for i in range(data.shape[-1])
        ]  # stats with format for boxplots
        # Note that stats will be computed manually and plotted using bxp
        # but could be computed using stats = cbook.boxplot_stats
        # or pyplot.boxplot(x)
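        # A hedged sketch of that alternative (matplotlib's boxplot_stats
        # treats each column of a 2D array as one dataset):
        #   from matplotlib import cbook
        #   stats_alt = cbook.boxplot_stats(
        #       data_diff.reshape(-1, data.shape[-1]),
        #       labels=range(data.shape[-1]))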
        R_k = np.zeros(data.shape[-1],
                       dtype=np.float32)  # mean residual per DWI
        std = np.zeros(data.shape[-1],
                       dtype=np.float32)  # std residual per DWI
        q1 = np.zeros(data.shape[-1],
                      dtype=np.float32)  # first quartile per DWI
        q3 = np.zeros(data.shape[-1],
                      dtype=np.float32)  # third quartile per DWI
        iqr = np.zeros(data.shape[-1],
                       dtype=np.float32)  # interquartile per DWI
        percent_outliers = np.zeros(data.shape[-1], dtype=np.float32)
        nb_voxels = np.count_nonzero(mask)
        for k in range(data.shape[-1]):
            x = data_diff[..., k]
            R_k[k] = np.mean(x)
            std[k] = np.std(x)
            q3[k], q1[k] = np.percentile(x, [75, 25])
            iqr[k] = q3[k] - q1[k]
            stats[k]['med'] = (q1[k] + q3[k]) / 2
            stats[k]['mean'] = R_k[k]
            stats[k]['q1'] = q1[k]
            stats[k]['q3'] = q3[k]
            stats[k]['whislo'] = q1[k] - 1.5 * iqr[k]
            stats[k]['whishi'] = q3[k] + 1.5 * iqr[k]
            stats[k]['label'] = k

            # Outliers are observations that fall below Q1 - 1.5(IQR) or
            # above Q3 + 1.5(IQR) We check if a voxel is an outlier only if
            # we have a mask, else we are biased.
            if args.mask is not None:
                outliers = (x < stats[k]['whislo']) | (x > stats[k]['whishi'])
                percent_outliers[k] = np.sum(outliers) / nb_voxels * 100
                # What would be our definition of too many outliers?
                # Maybe mean(all_means)+-3SD?
                # Or we let people choose based on the figure.
                # if percent_outliers[k] > ???? :
                #    logger.warning('   Careful! Diffusion-Weighted Image'
                #                   ' i=%s has %s %% outlier voxels',
                #                   k, percent_outliers[k])

        # Saving all statistics as npy values
        residual_basename, _ = split_name_with_nii(args.residual)
        res_stats_basename = residual_basename + ".npy"
        np.save(add_filename_suffix(res_stats_basename, "_mean_residuals"),
                R_k)
        np.save(add_filename_suffix(res_stats_basename, "_q1_residuals"), q1)
        np.save(add_filename_suffix(res_stats_basename, "_q3_residuals"), q3)
        np.save(add_filename_suffix(res_stats_basename, "_iqr_residuals"), iqr)
        np.save(add_filename_suffix(res_stats_basename, "_std_residuals"), std)

        # Showing results in graph
        if args.mask is None:
            fig, axe = plt.subplots(nrows=1, ncols=1, squeeze=False)
        else:
            fig, axe = plt.subplots(nrows=1,
                                    ncols=2,
                                    squeeze=False,
                                    figsize=[10, 4.8])
            # Default is [6.4, 4.8]. Increasing width to see better.

        medianprops = dict(linestyle='-', linewidth=2.5, color='firebrick')
        meanprops = dict(linestyle='-', linewidth=2.5, color='green')
        axe[0, 0].bxp(stats,
                      showmeans=True,
                      meanline=True,
                      showfliers=False,
                      medianprops=medianprops,
                      meanprops=meanprops)
        axe[0, 0].set_xlabel('DW image')
        axe[0, 0].set_ylabel('Residuals per DWI volume. Red is median,\n'
                             'green is mean. Whiskers are 1.5*interquartile')
        axe[0, 0].set_title('Residuals')
        axe[0, 0].set_xticks(range(0, q1.shape[0], 5))
        axe[0, 0].set_xticklabels(range(0, q1.shape[0], 5))

        if args.mask is not None:
            axe[0, 1].plot(range(data.shape[-1]), percent_outliers)
            axe[0, 1].set_xticks(range(0, q1.shape[0], 5))
            axe[0, 1].set_xticklabels(range(0, q1.shape[0], 5))
            axe[0, 1].set_xlabel('DW image')
            axe[0, 1].set_ylabel('Percentage of outlier voxels')
            axe[0, 1].set_title('Outliers')
        plt.savefig(residual_basename + '_residuals_stats.png')
def dmri_recon(sid,
               data_dir,
               out_dir,
               resolution,
               recon='csd',
               dirs='',
               num_threads=2):
    import tempfile
    #tempfile.tempdir = '/om/scratch/Fri/ksitek/'

    import os
    oldval = None
    if 'MKL_NUM_THREADS' in os.environ:
        oldval = os.environ['MKL_NUM_THREADS']
    os.environ['MKL_NUM_THREADS'] = '%d' % num_threads
    ompoldval = None
    if 'OMP_NUM_THREADS' in os.environ:
        ompoldval = os.environ['OMP_NUM_THREADS']
    os.environ['OMP_NUM_THREADS'] = '%d' % num_threads
    import nibabel as nib
    import numpy as np
    from glob import glob

    if resolution == '0.2mm':
        filename = 'Reg_S64550_nii4d.nii'
        #filename = 'angular_resample/dwi_%s.nii.gz'%dirs
        fimg = os.path.abspath(glob(os.path.join(data_dir, filename))[0])
    else:
        filename = 'Reg_S64550_nii4d_resamp-%s.nii.gz' % (resolution)
        fimg = os.path.abspath(
            glob(os.path.join(data_dir, 'resample', filename))[0])
    print("dwi file = %s" % fimg)
    fbval = os.path.abspath(
        glob(os.path.join(data_dir, 'bvecs', 'camino_120_RAS.bvals'))[0])
    print("bval file = %s" % fbval)
    fbvec = os.path.abspath(
        glob(os.path.join(data_dir, 'bvecs',
                          'camino_120_RAS_flipped-xy.bvecs'))[0])
    #                                          'angular_resample',
    #                                          'dwi_%s.bvecs'%dirs))[0])
    print("bvec file = %s" % fbvec)
    img = nib.load(fimg)
    data = img.get_fdata()

    affine = img.get_affine()

    prefix = sid

    from dipy.io import read_bvals_bvecs
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    '''
    from dipy.core.gradients import vector_norm
    b0idx = []
    for idx, val in enumerate(bvals):
        if val < 1:
            pass
            #bvecs[idx] = [1, 0, 0]
        else:
            b0idx.append(idx)
            #print "b0idx=%d"%idx
    #print "input bvecs:"
    #print bvecs
    bvecs[b0idx, :] = bvecs[b0idx, :]/vector_norm(bvecs[b0idx])[:, None]
    #print "bvecs after normalization:"
    #print bvecs
    '''

    from dipy.core.gradients import gradient_table
    gtab = gradient_table(bvals, bvecs)
    # sanity checks: the gradient table should mirror the raw bvals/bvecs
    assert gtab.bvecs.shape == bvecs.shape
    assert gtab.bvals.shape == bvals.shape

    #from dipy.segment.mask import median_otsu
    #b0_mask, mask = median_otsu(data[:, :, :, b0idx].mean(axis=3).squeeze(), 4, 4)

    if resolution == '0.2mm':
        mask_name = 'Reg_S64550_nii_b0-slice_mask.nii.gz'
        fmask1 = os.path.join(data_dir, mask_name)
    else:
        mask_name = 'Reg_S64550_nii_b0-slice_mask_resamp-%s.nii.gz' % (
            resolution)
        fmask1 = os.path.join(data_dir, 'resample', mask_name)
    print("fmask file = %s" % fmask1)
    mask = nib.load(fmask1).get_fdata()
    ''' DTI model & save metrics '''
    from dipy.reconst.dti import TensorModel
    print("running tensor model")
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)

    from dipy.reconst.dti import fractional_anisotropy
    print("running FA")
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    fa_img = nib.Nifti1Image(FA, img.get_affine())
    tensor_fa_file = os.path.abspath('%s_tensor_fa.nii.gz' % (prefix))
    nib.save(fa_img, tensor_fa_file)

    from dipy.reconst.dti import axial_diffusivity
    print("running AD")
    AD = axial_diffusivity(tenfit.evals)
    AD[np.isnan(AD)] = 0
    ad_img = nib.Nifti1Image(AD, img.get_affine())
    tensor_ad_file = os.path.abspath('%s_tensor_ad.nii.gz' % (prefix))
    nib.save(ad_img, tensor_ad_file)

    from dipy.reconst.dti import radial_diffusivity
    print("running RD")
    RD = radial_diffusivity(tenfit.evals)
    RD[np.isnan(RD)] = 0
    rd_img = nib.Nifti1Image(RD, img.get_affine())
    tensor_rd_file = os.path.abspath('%s_tensor_rd.nii.gz' % (prefix))
    nib.save(rd_img, tensor_rd_file)

    from dipy.reconst.dti import mean_diffusivity
    print("running MD")
    MD = mean_diffusivity(tenfit.evals)
    MD[np.isnan(MD)] = 0
    md_img = nib.Nifti1Image(MD, img.get_affine())
    tensor_md_file = os.path.abspath('%s_tensor_md.nii.gz' % (prefix))
    nib.save(md_img, tensor_md_file)

    evecs = tenfit.evecs
    evec_img = nib.Nifti1Image(evecs, img.get_affine())
    tensor_evec_file = os.path.abspath('%s_tensor_evec.nii.gz' % (prefix))
    nib.save(evec_img, tensor_evec_file)
    ''' ODF model '''
    useFA = True
    print("creating %s model" % recon)
    if recon == 'csd':
        from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
        from dipy.reconst.csdeconv import auto_response
        response, ratio = auto_response(gtab, data, roi_radius=10,
                                        fa_thr=0.5)  # 0.7

        model = ConstrainedSphericalDeconvModel(gtab, response)
        useFA = True
        return_sh = True
    elif recon == 'csa':
        from dipy.reconst.shm import CsaOdfModel, normalize_data
        model = CsaOdfModel(gtab, sh_order=8)
        useFA = True
        return_sh = True
    elif recon == 'gqi':
        from dipy.reconst.gqi import GeneralizedQSamplingModel
        model = GeneralizedQSamplingModel(gtab)
        return_sh = False
    else:
        raise ValueError('only csd, csa, gqi supported currently')
        # unreachable, kept for reference:
        # from dipy.reconst.dsi import (DiffusionSpectrumDeconvModel,
        #                               DiffusionSpectrumModel)
        # model = DiffusionSpectrumDeconvModel(gtab)
    '''reconstruct ODFs'''
    from dipy.data import get_sphere
    sphere = get_sphere('symmetric724')
    #odfs = fit.odf(sphere)

    # with CSD/GQI, uses > 50GB per core; don't get greedy with cores!
    from dipy.reconst.peaks import peaks_from_model
    print("running peaks_from_model")
    peaks = peaks_from_model(
        model=model,
        data=data,
        sphere=sphere,
        mask=mask,
        return_sh=return_sh,
        return_odf=False,
        normalize_peaks=True,
        npeaks=5,
        relative_peak_threshold=.5,
        min_separation_angle=10,  #25,
        parallel=num_threads > 1,
        nbr_processes=num_threads)

    # save the peaks
    from dipy.io.peaks import save_peaks
    peaks_file = os.path.abspath('%s_peaks.pam5' % (prefix))
    save_peaks(peaks_file, peaks)

    # save the spherical harmonics
    shm_coeff_file = os.path.abspath('%s_shm_coeff.nii.gz' % (prefix))
    if return_sh:
        shm_coeff = peaks.shm_coeff
        nib.save(nib.Nifti1Image(shm_coeff, img.get_affine()), shm_coeff_file)
    else:
        # if it's not a spherical model, output it as an essentially null file
        np.savetxt(shm_coeff_file, [0])

    # save the generalized fractional anisotropy image
    gfa_img = nib.Nifti1Image(peaks.gfa, img.get_affine())
    model_gfa_file = os.path.abspath('%s_%s_gfa.nii.gz' % (prefix, recon))
    nib.save(gfa_img, model_gfa_file)

    #from dipy.reconst.dti import quantize_evecs
    #peak_indices = quantize_evecs(tenfit.evecs, sphere.vertices)
    #eu = EuDX(FA, peak_indices, odf_vertices = sphere.vertices,
    #a_low=0.2, seeds=10**6, ang_thr=35)
    ''' probabilistic tracking '''
    '''
    from dipy.direction import ProbabilisticDirectionGetter
    from dipy.tracking.local import LocalTracking
    from dipy.tracking.streamline import Streamlines
    from dipy.io.streamline import save_trk

    prob_dg = ProbabilisticDirectionGetter.from_shcoeff(shm_coeff,
                                                        max_angle=45.,
                                                        sphere=sphere)
    streamlines_generator = LocalTracking(prob_dg,
                                          affine,
                                          step_size=.5,
                                          max_cross=1)

    # Generate streamlines object
    streamlines = Streamlines(streamlines_generator)

    affine = img.get_affine()
    vox_size=fa_img.get_header().get_zooms()[:3]

    fname = os.path.abspath('%s_%s_prob_streamline.trk' % (prefix, recon))
    save_trk(fname, streamlines, affine, vox_size=vox_size)
    '''
    ''' deterministic tracking with EuDX method'''
    from dipy.tracking.eudx import EuDX
    print("reconstructing with EuDX")
    if useFA:
        eu = EuDX(
            FA,
            peaks.peak_indices[..., 0],
            odf_vertices=sphere.vertices,
            a_low=0.001,  # default is 0.0239
            seeds=10**6,
            ang_thr=75)
    else:
        eu = EuDX(
            peaks.gfa,
            peaks.peak_indices[..., 0],
            odf_vertices=sphere.vertices,
            #a_low=0.1,
            seeds=10**6,
            ang_thr=45)

    sl_fname = os.path.abspath('%s_%s_det_streamline.trk' % (prefix, recon))

    # trying new dipy.io.streamline module, per email to neuroimaging list
    # 2018.04.05
    from nibabel.streamlines import Field
    from nibabel.orientations import aff2axcodes
    affine = img.get_affine()
    vox_size = fa_img.get_header().get_zooms()[:3]
    fov_shape = FA.shape[:3]

    if vox_size is not None and fov_shape is not None:
        hdr = {}
        hdr[Field.VOXEL_TO_RASMM] = affine.copy()
        hdr[Field.VOXEL_SIZES] = vox_size
        hdr[Field.DIMENSIONS] = fov_shape
        hdr[Field.VOXEL_ORDER] = "".join(aff2axcodes(affine))

    tractogram = nib.streamlines.Tractogram(eu)
    tractogram.affine_to_rasmm = affine
    trk_file = nib.streamlines.TrkFile(tractogram, header=hdr)
    nib.streamlines.save(trk_file, sl_fname)

    if oldval:
        os.environ['MKL_NUM_THREADS'] = oldval
    else:
        del os.environ['MKL_NUM_THREADS']
    if ompoldval:
        os.environ['OMP_NUM_THREADS'] = ompoldval
    else:
        del os.environ['OMP_NUM_THREADS']

    print('all output files created')

    return (tensor_fa_file, tensor_evec_file, model_gfa_file, sl_fname, affine,
            tensor_ad_file, tensor_rd_file, tensor_md_file, shm_coeff_file,
            peaks_file)
Example #33
def tracking(folder):
    print('Tracking in ' + folder)
    output_folder = folder + 'dipy_out/'

    # make a folder to save new data into
    try:
        Path(output_folder).mkdir(parents=True, exist_ok=True)
    except OSError:
        print('Could not create output dir. Aborting...')
        return

    # load data
    print('Loading data...')
    img = nib.load(folder + 'data.nii.gz')
    dmri = np.asarray(img.dataobj)
    affine = img.affine
    mask, _ = load_nifti(folder + 'nodif_brain_mask.nii.gz')
    bvals, bvecs = read_bvals_bvecs(folder + 'bvals', folder + 'bvecs')
    gtab = gradient_table(bvals, bvecs)

    # extract peaksoutput_folder + 'peak_vals.nii.gz'
    if Path(output_folder + 'peaks.pam5').exists():
        peaks = load_peaks(output_folder + 'peaks.pam5')
    else:
        print('Extracting peaks...')
        response, ratio = auto_response(gtab, dmri, roi_radius=10, fa_thr=.7)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response)

        peaks = peaks_from_model(model=csd_model,
                                 data=dmri,
                                 sphere=default_sphere,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=True)

        save_peaks(output_folder + 'peaks.pam5', peaks, affine)
        scaled = peaks.peak_dirs * np.repeat(
            np.expand_dims(peaks.peak_values, -1), 3, -1)

        cropped = scaled[:, :, :, :3, :].reshape(dmri.shape[:3] + (9, ))
        save_nifti(output_folder + 'peaks.nii.gz', cropped, affine)
        #save_nifti(output_folder + 'peak_dirs.nii.gz', peaks.peak_dirs, affine)
        #save_nifti(output_folder + 'peak_vals.nii.gz', peaks.peak_values, affine)

    # tracking
    print('Tracking...')
    maskdata, mask = median_otsu(dmri,
                                 vol_idx=range(0, dmri.shape[3]),
                                 median_radius=3,
                                 numpass=1,
                                 autocrop=True,
                                 dilate=2)
    tensor_model = TensorModel(gtab, fit_method='WLS')
    tensor_fit = tensor_model.fit(maskdata)
    fa = fractional_anisotropy(tensor_fit.evals)
    fa[np.isnan(fa)] = 0
    mean_fa = np.average(fa)  # overall mean FA, useful as a sanity check
    tissue_classifier = ThresholdStoppingCriterion(fa, .1)
    seeds = random_seeds_from_mask(fa > 1e-5, affine, seeds_count=1)

    streamline_generator = LocalTracking(direction_getter=peaks,
                                         stopping_criterion=tissue_classifier,
                                         seeds=seeds,
                                         affine=affine,
                                         step_size=.5)
    streamlines = Streamlines(streamline_generator)
    save_trk(StatefulTractogram(streamlines, img, Space.RASMM),
             output_folder + 'whole_brain.trk')
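# Hypothetical usage sketch: the folder is expected to contain data.nii.gz,
# nodif_brain_mask.nii.gz, bvals and bvecs, and to end with a trailing slash
# (the code above concatenates file names directly onto it):
#
#   tracking('/path/to/subject/')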
Example #34
def tens_mod_fa_est(gtab_file, dwi_file, B0_mask):
    """
    Estimate a tensor FA image to use for registrations.

    Parameters
    ----------
    gtab_file : str
        File path to pickled DiPy gradient table object.
    dwi_file : str
        File path to diffusion weighted image.
    B0_mask : str
        File path to B0 brain mask.

    Returns
    -------
    fa_path : str
        File path to FA Nifti1Image.
    B0_mask : str
        File path to B0 brain mask Nifti1Image.
    gtab_file : str
        File path to pickled DiPy gradient table object.
    dwi_file : str
        File path to diffusion weighted Nifti1Image.
    """
    import os
    from dipy.io import load_pickle
    from dipy.reconst.dti import TensorModel
    from dipy.reconst.dti import fractional_anisotropy, mean_diffusivity

    gtab = load_pickle(gtab_file)

    data = nib.load(dwi_file, mmap=False).get_fdata()

    print("Generating tensor FA image to use for registrations...")
    nodif_B0_img = nib.load(B0_mask, mmap=False)
    nodif_B0_mask_data = nodif_B0_img.get_fdata().astype("bool")
    model = TensorModel(gtab)
    mod = model.fit(data, nodif_B0_mask_data)
    FA = fractional_anisotropy(mod.evals)
    # MD = mean_diffusivity(mod.evals)
    # FA_MD = np.logical_or(
    #     FA >= 0.2, (np.logical_and(
    #         FA >= 0.08, MD >= 0.0011)))
    # FA_MD[np.isnan(FA_MD)] = 0
    FA = np.nan_to_num(np.asarray(FA.astype('float32')))

    fa_path = f"{os.path.dirname(B0_mask)}{'/tensor_fa.nii.gz'}"
    nib.save(
        nib.Nifti1Image(
            FA,
            nodif_B0_img.affine),
        fa_path)

    # md_path = f"{os.path.dirname(B0_mask)}{'/tensor_md.nii.gz'}"
    # nib.save(
    #     nib.Nifti1Image(
    #         MD.astype(
    #             np.float32),
    #         nodif_B0_img.affine),
    #     md_path)

    nodif_B0_img.uncache()
    del FA

    return fa_path, B0_mask, gtab_file, dwi_file
    del data, affine, zooms

    print(data2.shape)
    print(affine2)
    print(nib.aff2axcodes(affine2))

    print('>>> Save resampled data, masks and S0...')

    # Save as nii (not nii.gz) to reduce saving and loading time
    fname2 = join(dname, 'dwi_1x1x1.nii')
    nib.save(nib.Nifti1Image(data2, affine2), fname2)

    fname2_mask = join(dname, 'dwi_mask_1x1x1.nii.gz')
    nib.save(nib.Nifti1Image(mask2.astype(np.uint8), affine2), fname2_mask)

    fname2_S0 = join(dname, 'dwi_S0_1x1x1.nii.gz')

    S0s = data2[..., b0_index]
    S0 = np.mean(S0s, axis=-1)

    nib.save(nib.Nifti1Image(S0, affine2), fname2_S0)

    print('>>> Calculate FA...')

    ten = TensorModel(gtab)
    tenfit = ten.fit(data2, mask2)
    fname2_fa = join(dname, 'dwi_fa_1x1x1.nii.gz')
    nib.save(nib.Nifti1Image(tenfit.fa, affine2), fname2_fa)

    del data2, mask2
Example #36
class IvimTensorModel(ReconstModel):
    def __init__(self, gtab, split_b_D=200.0, n_threads=1):
        """
        Model to reconstruct an IVIM tensor

        Parameters
        ----------
        gtab : GradientTable class instance

        split_b_D : float
            The value of b that separates perfusion from diffusion

        n_threads : int
            Number of threads to use for the voxel-wise fit
        """
        ReconstModel.__init__(self, gtab)
        self.split_b_D = split_b_D
        # Use two separate tensors for initial estimation:
        self.diffusion_idx = np.hstack(
            [np.where(gtab.bvals > self.split_b_D),
             np.where(gtab.b0s_mask)]).squeeze()

        # The first tensor represents diffusion
        self.diffusion_gtab = gradient_table(
            self.gtab.bvals[self.diffusion_idx],
            self.gtab.bvecs[self.diffusion_idx])

        self.diffusion_model = TensorModel(self.diffusion_gtab)

        # The second tensor represents perfusion:
        self.perfusion_idx = np.array(
            np.where(gtab.bvals <= self.split_b_D)).squeeze()
        self.perfusion_gtab = gradient_table(
            self.gtab.bvals[self.perfusion_idx],
            self.gtab.bvecs[self.perfusion_idx])

        self.perfusion_model = TensorModel(self.perfusion_gtab)

        # We'll need a "vanilla" IVIM model:
        self.ivim_model = IvimModel(self.gtab)
        # How many threads in parallel execution:
        self.n_threads = n_threads

    def model_eq1(self, b, *params):
        """
        The model with a fixed perfusion fraction
        """
        bvecs = self.gtab.bvecs
        beta = self._ivim_pf
        Q, Q_star = _reconstruct_tensors(params)
        return _ivim_tensor_equation(beta, b, bvecs, Q_star, Q)

    def model_eq2(self, b, *params):
        """
        The full model, including perfusion fraction as free parameter
        """
        beta = params[0]
        bvecs = self.gtab.bvecs
        Q, Q_star = _reconstruct_tensors(params[1:])
        return _ivim_tensor_equation(beta, b, bvecs, Q_star, Q)

    def _inner_loop(self, vox_chunk):
        model_params = np.zeros((vox_chunk.shape[0], 13))
        for ii, vox in enumerate(vox_chunk):
            # Extract initial guess of Euler angles for the diffusion fit:
            dt_evecs = self.diffusion_fit.evecs[vox]
            angles_dti = calc_euler(dt_evecs)
            # Extract initial guess of Euler angles for the perfusion fit:
            perfusion_evecs = self.perfusion_fit.evecs[vox]
            angles_perfusion = calc_euler(perfusion_evecs)
            # Initial guess of perfusion fraction based on "vanilla" IVIM:
            self._ivim_pf = np.clip(
                np.min([
                    self.ivim_fit.perfusion_fraction[vox],
                    1 - self.ivim_fit.perfusion_fraction[vox]
                ]), 0, 1)
            # If diffusivity is lower than this, it's not perfusion!
            min_D_star = 0.003
            # Put together initial guess for 13 parameters of full model:
            initial = [
                self._ivim_pf,
                np.min([self.diffusion_fit.evals[vox, 0], min_D_star]),
                np.min([self.diffusion_fit.evals[vox, 1], min_D_star]),
                np.min([self.diffusion_fit.evals[vox, 2], min_D_star]),
                angles_dti[0], angles_dti[1], angles_dti[2],
                np.max([self.perfusion_fit.evals[vox, 0], min_D_star]),
                np.max([self.perfusion_fit.evals[vox, 1], min_D_star]),
                np.max([self.perfusion_fit.evals[vox, 2], min_D_star]),
                angles_perfusion[0], angles_perfusion[1], angles_perfusion[2]
            ]

            # Bounds on the parameters:
            lb = (0, 0, 0, 0, -np.pi, -np.pi, -np.pi, 0.003, 0.003, 0.003,
                  -np.pi, -np.pi, -np.pi)
            ub = (0.5, 0.003, 0.003, 0.003, np.pi, np.pi, np.pi, np.inf,
                  np.inf, np.inf, np.pi, np.pi, np.pi)

            # Fit the full model to the data with initial guess and bounds
            try:
                popt, pcov = curve_fit(
                    self.model_eq2,
                    self.gtab.bvals,
                    self.mask_data[vox] /
                    np.mean(self.mask_data[vox, self.gtab.b0s_mask]),
                    p0=initial,
                    bounds=(lb, ub),
                    xtol=0.05,
                    ftol=0.05,
                    maxfev=10000)
            # Sometimes it can't fit the data:
            except RuntimeError:
                popt = np.ones(len(initial)) * np.nan
            model_params[ii] = popt

        return model_params

    def fit(self, data, mask=None):
        """
        Fit the IVIM tensor model
        """
        if mask is None:
            mask = np.ones(data.shape[:-1], dtype=bool)
        self.mask_data = data[mask]

        # Fit diffusion tensor to diffusion-weighted data:
        diffusion_data = self.mask_data[:, self.diffusion_idx]
        self.diffusion_fit = self.diffusion_model.fit(diffusion_data)
        # Fit "vanilla" IVIM to all of the data:
        self.ivim_fit = self.ivim_model.fit(self.mask_data)
        # Fit perfusion tensor to perfusion-weighted data:
        perfusion_data = self.mask_data[:, self.perfusion_idx]
        self.perfusion_fit = self.perfusion_model.fit(perfusion_data)
        # Pre-allocate parameters
        #model_params = np.zeros((self.mask_data.shape[0], 13))

        voxel_indices = np.arange(self.mask_data.shape[0])

        if self.n_threads > 1:
            # Loop over voxels:
            vox_chunks = np.array_split(voxel_indices, self.n_threads)

            with ThreadPoolExecutor(max_workers=self.n_threads) as executor:
                loop = asyncio.new_event_loop()

                tasks = [
                    loop.run_in_executor(
                        executor,
                        self._inner_loop,
                        vox_chunk,
                    ) for vox_chunk in vox_chunks
                ]

                try:
                    model_params = np.concatenate(
                        list(
                            tqdm(
                                loop.run_until_complete(
                                    asyncio.gather(*tasks)))))
                finally:
                    loop.close()
        else:
            model_params = self._inner_loop(voxel_indices)

        return IvimTensorFit(self, model_params)
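# A hypothetical usage sketch (assumes `data`, `bvals` and `bvecs` from a
# multi-b-value acquisition are already loaded, and that `IvimTensorFit`,
# `calc_euler`, `_reconstruct_tensors` and `_ivim_tensor_equation` are
# defined elsewhere in this module):
#
#   gtab = gradient_table(bvals, bvecs)
#   ivim_tensor_model = IvimTensorModel(gtab, split_b_D=200.0, n_threads=4)
#   ivim_tensor_fit = ivim_tensor_model.fit(data, mask=brain_mask)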
Example #37
csd_peaks = peaks_from_model(model=csd_model,
                             data=data,
                             sphere=sphere,
                             mask=mask,
                             relative_peak_threshold=.5,
                             min_separation_angle=25,
                             parallel=True)

"""
For the tracking part, we will use the fiber directions from the ``csd_model``
but stop tracking in areas where fractional anisotropy (FA) is low (< 0.1).
To derive the FA, used here as a stopping criterion, we would need to fit a
tensor model first. Here, we fit the Tensor using weighted least squares (WLS).
"""

tensor_model = TensorModel(gtab, fit_method='WLS')
tensor_fit = tensor_model.fit(data, mask)

FA = fractional_anisotropy(tensor_fit.evals)

"""
In order for the stopping values to be used with our tracking algorithm we need
to have the same dimensions as the ``csd_peaks.peak_values``. For this reason,
we can assign the same FA value to every peak direction in the same voxel in
the following way.
"""

stopping_values = np.zeros(csd_peaks.peak_values.shape)
stopping_values[:] = FA[..., None]
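# FA has shape (X, Y, Z) while ``csd_peaks.peak_values`` has shape
# (X, Y, Z, npeaks); ``FA[..., None]`` appends a trailing axis so the
# assignment broadcasts each voxel's FA value across all of its peaks.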

"""
Beispiel #38
0
def auto_response(gtab,
                  data,
                  roi_center=None,
                  roi_radius=10,
                  fa_thr=0.7,
                  fa_callable=fa_superior,
                  return_number_of_voxels=False):
    """ Automatic estimation of response function using FA.

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data
    roi_center : tuple, (3,)
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]`.
    roi_radius : int
        radius of cubic ROI
    fa_thr : float
        FA threshold
    fa_callable : callable
        A callable that defines an operation that compares FA with the fa_thr.
        The operator should have two positional arguments
        (e.g., `fa_operator(FA, fa_thr)`) and it should return a bool array.
    return_number_of_voxels : bool
        If True, returns the number of voxels used for estimating the response
        function.

    Returns
    -------
    response : tuple, (2,)
        (`evals`, `S0`)
    ratio : float
        The ratio between smallest versus largest eigenvalue of the response.
    number of voxels : int (optional)
        The number of voxels used for estimating the response function.

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. In order to do this, we look for voxels with very
    anisotropic configurations. For example, we can use an ROI (20x20x20) at
    the center of the volume and store the signal values of the voxels with
    FA values higher than 0.7. Of course, if we haven't precalculated FA, we
    need to fit a tensor model to the dataset, which is what we do in this
    function.

    For the response we also need to find the average S0 in the ROI. This is
    possible using `gtab.b0s_mask`, with which we can find all the S0 volumes
    (those corresponding to b-values equal to 0) in the dataset.

    The `response` always consists of a prolate tensor created by averaging
    the highest and second highest eigenvalues in the ROI with FA higher than
    the threshold. We also include the average S0s.

    We also return the `ratio`, which is used for the SDT models. If requested,
    the number of voxels used for estimating the response function is also
    returned, which can be used to judge the fidelity of the response function.
    As a rule of thumb, at least 300 voxels should be used to estimate a good
    response function (see [1]_).

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2004. Direct estimation of the
    fiber orientation density function from diffusion-weighted MRI
    data using spherical deconvolution
    """

    ten = TensorModel(gtab)
    if roi_center is None:
        ci, cj, ck = np.array(data.shape[:3]) // 2
    else:
        ci, cj, ck = roi_center
    w = roi_radius
    roi = data[int(ci - w):int(ci + w),
               int(cj - w):int(cj + w),
               int(ck - w):int(ck + w)]
    tenfit = ten.fit(roi)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    indices = np.where(fa_callable(FA, fa_thr))

    if indices[0].size == 0:
        msg = "No voxel with a FA higher than " + str(fa_thr) + " were found."
        msg += " Try a larger roi or a lower threshold."
        warnings.warn(msg, UserWarning)

    lambdas = tenfit.evals[indices][:, :2]
    S0s = roi[indices][:, np.nonzero(gtab.b0s_mask)[0]]

    response, ratio = _get_response(S0s, lambdas)

    if return_number_of_voxels:
        return response, ratio, indices[0].size

    return response, ratio
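# A minimal usage sketch for the function above (assuming `gtab` and a 4D
# `data` array are already in scope; the keyword values mirror the defaults):
#
#   response, ratio, n_voxels = auto_response(gtab, data, roi_radius=10,
#                                             fa_thr=0.7,
#                                             return_number_of_voxels=True)
#   print('evals/S0: %s, ratio: %.3f, voxels used: %d' %
#         (response, ratio, n_voxels))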
Example #39
    # Brent look at Aman's PR and call here
    pass

if rec_model == 'CSD':
    # Elef add CSD version and add MTMSCSD when is ready.
    pass

pam = peaks_from_model(model,
                       data,
                       sphere,
                       relative_peak_threshold=.8,
                       min_separation_angle=45,
                       mask=mask,
                       parallel=parallel)

ten_model = TensorModel(gtab)
fa = ten_model.fit(data, mask).fa
save_nifti(ffa, fa, affine)

save_peaks(fpam5, pam, affine)

show_odfs_and_fa(fa, pam, mask, None, sphere, ftmp='odf.mmap', basis_type=None)

pve_csf, pve_gm, pve_wm = pve[..., 0], pve[..., 1], pve[..., 2]

cmc_classifier = CmcTissueClassifier.from_pve(
    pve_wm,
    pve_gm,
    pve_csf,
    step_size=step_size,
    average_voxel_size=np.average(vox_size))
Example #40
    def _run_interface(self, runtime):
        from scipy.special import gamma
        from dipy.reconst.dti import TensorModel
        import gc

        img = nb.load(self.inputs.in_file)
        hdr = img.get_header().copy()
        affine = img.get_affine()
        data = img.get_data()
        gtab = self._get_gradient_table()

        if isdefined(self.inputs.in_mask):
            msk = nb.load(self.inputs.in_mask).get_data().astype(np.uint8)
        else:
            msk = np.ones(data.shape[:3], dtype=np.uint8)

        try_b0 = True
        if isdefined(self.inputs.noise_mask):
            noise_msk = nb.load(self.inputs.noise_mask).get_data().reshape(-1)
            noise_msk[noise_msk > 0.5] = 1
            noise_msk[noise_msk < 1.0] = 0
            noise_msk = noise_msk.astype(np.uint8)
            try_b0 = False
        elif np.all(data[msk == 0, 0] == 0):
            IFLOGGER.info('Input data are masked.')
            noise_msk = msk.reshape(-1).astype(np.uint8)
        else:
            noise_msk = (1 - msk).reshape(-1).astype(np.uint8)

        nb0 = np.sum(gtab.b0s_mask)
        dsample = data.reshape(-1, data.shape[-1])

        if try_b0 and (nb0 > 1):
            noise_data = dsample.take(np.where(gtab.b0s_mask),
                                      axis=-1)[noise_msk == 0, ...]
            n = nb0
        else:
            nodiff = np.where(~gtab.b0s_mask)
            nodiffidx = nodiff[0].tolist()
            n = 20 if len(nodiffidx) >= 20 else len(nodiffidx)
            idxs = np.random.choice(nodiffidx, size=n, replace=False)
            noise_data = dsample.take(idxs, axis=-1)[noise_msk == 1, ...]

        # Estimate sigma required by RESTORE
        mean_std = np.median(noise_data.std(-1))
        try:
            bias = (1. - np.sqrt(2. / (n - 1)) *
                    (gamma(n / 2.) / gamma((n - 1) / 2.)))
        except Exception:
            bias = .0

        sigma = mean_std * (1 + bias)

        if sigma == 0:
            IFLOGGER.warn(
                ('Noise std is 0.0, looks like data was masked and noise'
                 ' cannot be estimated correctly. Using default tensor '
                 'model instead of RESTORE.'))
            dti = TensorModel(gtab)
        else:
            IFLOGGER.info(('Performing RESTORE with noise std=%.4f.') % sigma)
            dti = TensorModel(gtab, fit_method='RESTORE', sigma=sigma)

        try:
            fit_restore = dti.fit(data, msk)
        except TypeError:
            dti = TensorModel(gtab)
            fit_restore = dti.fit(data, msk)

        hdr.set_data_dtype(np.float32)
        hdr['data_type'] = 16

        for k in self._outputs().get():
            scalar = getattr(fit_restore, k)
            hdr.set_data_shape(np.shape(scalar))
            nb.Nifti1Image(scalar.astype(np.float32),
                           affine, hdr).to_filename(self._gen_filename(k))

        return runtime
Example #41
    def compute_tensors(self, dti_vol, atlas_file, gtab):
        # WGR:TODO figure out how to organize tensor options and formats
        # WGR:TODO figure out how to deal with files on disk vs. in workspace
        """
        Takes registered DTI image and produces tensors

        **Positional Arguments:**

                dti_vol:
                    - Registered DTI volume, from workspace.
                atlas_file:
                    - File containing an atlas (or brain mask).
                gtab:
                    - Structure containing dipy formatted bval/bvec information
        """

        labeldata = nib.load(atlas_file)

        label = labeldata.get_data()

        """
        Create a brain mask. Here we just threshold labels.
        """

        mask = (label > 0)

        data = dti_vol  # alias: the registered DTI volume passed in above
        gtab.info
        print(data.shape)
        """
        For the constrained spherical deconvolution we need to estimate the
        response function (see :ref:`example_reconst_csd`) and create a model.
        """

        response, ratio = auto_response(gtab, dti_vol, roi_radius=10,
                                        fa_thr=0.7)

        csd_model = ConstrainedSphericalDeconvModel(gtab, response)

        """
        Next, we use ``peaks_from_model`` to fit the data and calculated
        the fiber directions in all voxels.
        """

        sphere = get_sphere('symmetric724')

        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     mask=mask,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     parallel=True)

        """
        For the tracking part, we will use ``csd_model`` fiber directions
        but stop tracking where fractional anisotropy (FA) is low (< 0.1).
        To derive the FA, used as a stopping criterion, we need to fit a
        tensor model first. Here, we use weighted least squares (WLS).
        """
        print('tensors...')

        tensor_model = TensorModel(gtab, fit_method='WLS')
        tensor_fit = tensor_model.fit(data, mask)

        FA = fractional_anisotropy(tensor_fit.evals)

        """
        In order for the stopping values to be used with our tracking
        algorithm we need to have the same dimensions as the
        ``csd_peaks.peak_values``. For this reason, we can assign the
        same FA value to every peak direction in the same voxel in
        the following way.
        """

        stopping_values = np.zeros(csd_peaks.peak_values.shape)
        stopping_values[:] = FA[..., None]
        print(datetime.now() - startTime)
Example #42
    def compute_tensors(self, dti_vol, atlas_file, gtab):
        # WGR:TODO figure out how to organize tensor options and formats
        # WGR:TODO figure out how to deal with files on disk vs. in workspace
        """
        Takes registered DTI image and produces tensors

        **Positional Arguments:**

                dti_vol:
                    - Registered DTI volume, from workspace.
                atlas_file:
                    - File containing an atlas (or brain mask).
                gtab:
                    - Structure containing dipy formatted bval/bvec information
        """

        labeldata = nib.load(atlas_file)

        label = labeldata.get_data()
        """
        Create a brain mask. Here we just threshold labels.
        """

        mask = (label > 0)

        data = dti_vol  # alias: the registered DTI volume passed in above
        gtab.info
        print(data.shape)
        """
        For the constrained spherical deconvolution we need to estimate the
        response function (see :ref:`example_reconst_csd`) and create a model.
        """

        response, ratio = auto_response(gtab,
                                        dti_vol,
                                        roi_radius=10,
                                        fa_thr=0.7)

        csd_model = ConstrainedSphericalDeconvModel(gtab, response)
        """
        Next, we use ``peaks_from_model`` to fit the data and calculated
        the fiber directions in all voxels.
        """

        sphere = get_sphere('symmetric724')

        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     mask=mask,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     parallel=True)
        """
        For the tracking part, we will use ``csd_model`` fiber directions
        but stop tracking where fractional anisotropy (FA) is low (< 0.1).
        To derive the FA, used as a stopping criterion, we need to fit a
        tensor model first. Here, we use weighted least squares (WLS).
        """
        print('tensors...')

        tensor_model = TensorModel(gtab, fit_method='WLS')
        tensor_fit = tensor_model.fit(data, mask)

        FA = fractional_anisotropy(tensor_fit.evals)
        """
        In order for the stopping values to be used with our tracking
        algorithm we need to have the same dimensions as the
        ``csd_peaks.peak_values``. For this reason, we can assign the
        same FA value to every peak direction in the same voxel in
        the following way.
        """

        stopping_values = np.zeros(csd_peaks.peak_values.shape)
        stopping_values[:] = FA[..., None]
        print(datetime.now() - startTime)
Example #43
    def eudx_advanced(self, dti_file, mask_file, gtab,
                      seed_num=100000, stop_val=0.1):
        """
        Tracking with more complex tensors - experimental

        Initializes the graph with nodes corresponding to the number of ROIs

        **Positional Arguments:**

                dti_file:
                    - File (registered) to use for tensor/fiber tracking
                mask_file:
                    - Brain mask to keep tensors inside the brain
                gtab:
                    - dipy formatted bval/bvec Structure

        **Optional Arguments:**
                seed_num:
                    - Number of seeds to use for fiber tracking
                stop_val:
                    - Value to cutoff fiber track
        """

        img = nb.load(dti_file)
        data = img.get_data()

        img = nb.load(mask_file)

        mask = img.get_data()
        mask = mask > 0  # to ensure binary mask

        """
        For the constrained spherical deconvolution we need to estimate the
        response function (see :ref:`example_reconst_csd`) and create a model.
        """

        response, ratio = auto_response(gtab, data, roi_radius=10,
                                        fa_thr=0.7)

        csd_model = ConstrainedSphericalDeconvModel(gtab, response)

        """
        Next, we use ``peaks_from_model`` to fit the data and calculated
        the fiber directions in all voxels.
        """

        sphere = get_sphere('symmetric724')

        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     mask=mask,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     parallel=True)

        """
        For the tracking part, we will use ``csd_model`` fiber directions
        but stop tracking where fractional anisotropy (FA) is low (< 0.1).
        To derive the FA, used as a stopping criterion, we need to fit a
        tensor model first. Here, we use weighted least squares (WLS).
        """
        print('tensors...')

        tensor_model = TensorModel(gtab, fit_method='WLS')
        tensor_fit = tensor_model.fit(data, mask)

        FA = fractional_anisotropy(tensor_fit.evals)

        """
        In order for the stopping values to be used with our tracking
        algorithm we need to have the same dimensions as the
        ``csd_peaks.peak_values``. For this reason, we can assign the
        same FA value to every peak direction in the same voxel in
        the following way.
        """

        stopping_values = np.zeros(csd_peaks.peak_values.shape)
        stopping_values[:] = FA[..., None]

        streamline_generator = EuDX(stopping_values,
                                    csd_peaks.peak_indices,
                                    seeds=seed_num,
                                    odf_vertices=sphere.vertices,
                                    a_low=stop_val)

        streamlines = [streamline for streamline in streamline_generator]

        return streamlines
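    # Hypothetical usage sketch (a method of the surrounding class; `gtab`
    # is built earlier from the subject's bval/bvec files):
    #
    #   streamlines = obj.eudx_advanced(dti_file, mask_file, gtab,
    #                                   seed_num=100000, stop_val=0.1)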
    # Correct flipping issue
    bvecs = np.c_[bvecs[:, 0], bvecs[:, 1], -bvecs[:, 2]]

    gtab = gradient_table(bvals, bvecs)

    data, affine, vox_size = load_nifti(fdwi, return_voxsize=True)

    # Build Brain Mask
    bm = np.where(labels == 0, False, True)
    mask = bm

    sphere = get_sphere('repulsion724')

    from dipy.reconst.dti import TensorModel

    tensor_model = TensorModel(gtab)

    t1 = time()
    tensor_fit = tensor_model.fit(data, mask)
    #    save_nifti('bmfa.nii.gz', tensor_fit.fa, affine)
    #   wenlin made this change - address output names to each animal
    save_nifti(outpath + 'bmfa' + runno + '.nii.gz', tensor_fit.fa, affine)
    fa = tensor_fit.fa
    duration1 = time() - t1
    # wenlin made this change - address output names to each animal
    #    print('DTI duration %.3f' % (duration1,))
    print(runno + ' DTI duration %.3f' % (duration1,))

    # Compute odfs in Brain Mask
    t2 = time()
import numpy as np
import nibabel as nib
img = nib.load('t0.nii.gz')
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)

mask = data[..., 0] > 50

from dipy.io import read_bvals_bvecs
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

from dipy.core.gradients import gradient_table
gtab = gradient_table(bvals, bvecs)

from dipy.reconst.dti import TensorModel
ten = TensorModel(gtab)
tenfit = ten.fit(data, mask)

from dipy.reconst.dti import fractional_anisotropy
fa = fractional_anisotropy(tenfit.evals)
fa[np.isnan(fa)] = 0
fa = np.clip(fa, 0, 1)

from dipy.reconst.dti import color_fa
Rgbv = color_fa(fa, tenfit.evecs)

# save the RGB-encoded FA map to an image
nib.save(nib.Nifti1Image(np.array(255 * Rgbv, 'uint8'), img.get_affine()),
         'tensor_rgb.nii.gz')
Example #46
def mask_for_response_ssst(gtab,
                           data,
                           roi_center=None,
                           roi_radii=10,
                           fa_thr=0.7):
    """ Computation of mask for single-shell single-tissue (ssst) response
        function using FA.

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data (4D)
    roi_center : array-like, (3,)
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]`.
    roi_radii : int or array-like, (3,)
        radii of cuboid ROI
    fa_thr : float
        FA threshold

    Returns
    -------
    mask : ndarray
        Mask of voxels within the ROI and with FA above the FA threshold.

    Notes
    -----
    In CSD there is an important pre-processing step: the estimation of the
    fiber response function. In order to do this, we look for voxels with very
    anisotropic configurations. This function aims to accomplish that by
    returning a mask of voxels within a ROI, that have a FA value above a
    given threshold. For example we can use a ROI (20x20x20) at
    the center of the volume and store the signal values for the voxels with
    FA values higher than 0.7 (see [1]_).

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2004. Direct estimation of the
    fiber orientation density function from diffusion-weighted MRI
    data using spherical deconvolution
    """

    if len(data.shape) < 4:
        msg = """Data must be 4D (3D image + directions). To use a 2D image,
        please reshape it into a (N, N, 1, ndirs) array."""
        raise ValueError(msg)

    if isinstance(roi_radii, numbers.Number):
        roi_radii = (roi_radii, roi_radii, roi_radii)

    if roi_center is None:
        roi_center = np.array(data.shape[:3]) // 2

    roi_radii = _roi_in_volume(data.shape, np.asarray(roi_center),
                               np.asarray(roi_radii))

    roi_mask = _mask_from_roi(data.shape[:3], roi_center, roi_radii)

    ten = TensorModel(gtab)
    tenfit = ten.fit(data, mask=roi_mask)
    fa = fractional_anisotropy(tenfit.evals)
    fa[np.isnan(fa)] = 0

    mask = np.zeros(fa.shape, dtype=np.int64)
    mask[fa > fa_thr] = 1

    if np.sum(mask) == 0:
        msg = """No voxel with a FA higher than {} were found.
        Try a larger roi or a lower threshold.""".format(str(fa_thr))
        warnings.warn(msg, UserWarning)

    return mask
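
# Hedged usage sketch for the function above. Assumes `response_from_mask_ssst`
# from dipy.reconst.csdeconv (present in recent dipy releases) and placeholder
# file names; the mask is typically passed straight to the response estimator.
from dipy.core.gradients import gradient_table
from dipy.io import read_bvals_bvecs
from dipy.io.image import load_nifti
from dipy.reconst.csdeconv import (mask_for_response_ssst,
                                   response_from_mask_ssst)

data, affine = load_nifti('dwidata.nii.gz')      # placeholder paths
bvals, bvecs = read_bvals_bvecs('bvals', 'bvecs')
gtab = gradient_table(bvals, bvecs)

mask = mask_for_response_ssst(gtab, data, roi_radii=10, fa_thr=0.7)
response, ratio = response_from_mask_ssst(gtab, data, mask)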
Beispiel #47
0
def processDiffusion(file, ds=False, ec=False, bvs=None):
    '''
    Process a diffusion-weighted dataset.

    file = filename of an Analyze or NIfTI file (without extension)
    ds = whether the image should be downsampled to an isotropic voxel size
    ec = whether eddy current correction should be applied (takes a while,
         and only works from the command line, not inside the Spyder IDE)
    bvs = list of b-values (optional)

    2D images should be formatted as (x, y, t, z) and 3D images as
    (x, y, z, t), which is standard for Bruker files. The protocol
    automatically determines the number of diffusion directions and b-values
    from the accompanying text file. You can provide a list of exact
    b-values; if you do not, the mean b-value of each cluster of diffusion
    directions is computed from the text file.
    '''

    dims = list_values(read_line('VisuCoreSize=', file))
    ext = checkFileType(file)
    img = nib.load(file + ext)
    data = img.get_data()
    affine = img.get_affine()
    bvals, avbvals, dwgrad, dwdir, nA0, nbvals, ndirs = getDiffusionPars(file)
    if len(dims) == 2:
        # 2D arrays are arranged differently: np.swapaxes is not enough for
        # Paravision's Fortran-style column-major ordering, because the
        # t-axis is ordered differently.
        newshape = (data.shape[0], data.shape[1], data.shape[3], data.shape[2])
        print('2D array with shape %r. Reshaping to %r in Fortran-style '
              'column-major order.' % (data.shape, newshape))
        data = np.reshape(data, newshape, order='F')
    rescaleImage(file, data, nbvals, dims)
    img = nib.Nifti1Image(data, affine)

    if ds:
        print "Voxel size nonisotropic. Downsampling..."
        data, affine = downsampleImage(img)
        img = nib.Nifti1Image(data, affine)
    else:
        affine = img.get_affine()
        data = img.get_data()

    thresh = np.mean(data[:5, :5, :, 0])
    mask = data[..., 0] > 2.5 * thresh
    for i in range(data.shape[3]):
        data[:, :, :, i] *= mask
    if ec:
        starttime = datetime.now()
        print("Applying eddy current correction.")
        img = eddyCorrection(img, file + '_Eddy.nii')
        data = img.get_data()
        affine = img.get_affine()
        elapsed = datetime.now() - starttime
        print("Eddy current correction completed in %r seconds." % elapsed.seconds)

    if bvs is None:
        bvalmat = np.array(avbvals)
        bvalmat[bvalmat < 10] = 0  # treat very small values as b0
    else:
        # The ideal b-values (as opposed to the effective b-values) are not
        # in the text file, so they have to be entered manually and converted
        # into the appropriate matrix.
        bvalmat = np.zeros([nA0 + (ndirs * len(bvs))])
        for i, b in enumerate(bvs):
            bvalmat[nA0 + ndirs * i:] = b

    # Fill the b-vector matrix with the diffusion directions for each shell.
    bvecmat = np.zeros([nA0 + ndirs * nbvals, 3])
    for i in range(nbvals):
        bvecmat[nA0 + ndirs * i:nA0 + ndirs * (i + 1), :] = dwdir

    if len(bvecmat) != len(bvals):
        print("Error. Cannot process this image.")

    print(bvalmat.shape)
    print(dwgrad.shape)
    # Create a gradient table with the b-values and diffusion directions.
    gtab = gradient_table(bvalmat, bvecmat)

    from dipy.reconst.dti import TensorModel

    starttime = datetime.now()
    print("Fitting tensor model.")
    ten = TensorModel(gtab)
    tenfit = ten.fit(data, mask)
    elapsed = datetime.now() - starttime
    print("Tensor fit completed in %r seconds." % elapsed.seconds)

    from dipy.reconst.dti import fractional_anisotropy
    evecs = tenfit.evecs  # eigenvectors
    fa = fractional_anisotropy(tenfit.evals)
    # Clamp voxels where the fit failed by thresholding FA to [0, 1].
    fa = np.clip(fa, 0, 1)
    md = tenfit.md
    md[np.isnan(md)] = 0  # zero out voxels where the fit failed
    print("Calculated eigenvectors, MD and FA.")

    from dipy.reconst.dti import color_fa
    cfa = color_fa(fa, tenfit.evecs)

    return tenfit, cfa, bvalmat, dwgrad, bvecmat
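
# Toy illustration (hypothetical numbers) of the matrix layout built above:
# nA0 b0 volumes come first, then ndirs directions repeated once per shell.
import numpy as np

nA0, ndirs, nbvals = 2, 3, 2
bvs = [1000., 2000.]                  # nominal b-values per shell
dwdir = np.eye(3)                     # three toy unit diffusion directions

bvalmat = np.zeros(nA0 + ndirs * len(bvs))
for i, b in enumerate(bvs):
    bvalmat[nA0 + ndirs * i:] = b     # each shell overwrites the tail

bvecmat = np.zeros((nA0 + ndirs * nbvals, 3))
for i in range(nbvals):
    bvecmat[nA0 + ndirs * i:nA0 + ndirs * (i + 1), :] = dwdir

print(bvalmat)  # [0. 0. 1000. 1000. 1000. 2000. 2000. 2000.]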
Beispiel #48
0
def test_eudx_further():
    """ Cause we love testin.. ;-)
    """

    fimg, fbvals, fbvecs = get_data('small_101D')

    img = ni.load(fimg)
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    x, y, z = data.shape[:3]
    seeds = np.zeros((10**4, 3))
    for i in range(10**4):
        rx = (x - 1) * np.random.rand()
        ry = (y - 1) * np.random.rand()
        rz = (z - 1) * np.random.rand()
        seeds[i] = np.ascontiguousarray(np.array([rx, ry, rz]),
                                        dtype=np.float64)

    sphere = get_sphere('symmetric724')

    ind = quantize_evecs(ten.evecs)
    eu = EuDX(a=ten.fa,
              ind=ind,
              seeds=seeds,
              odf_vertices=sphere.vertices,
              a_low=.2)
    T = [e for e in eu]

    # check that there are no negative elements
    for t in T:
        assert_equal(np.sum(t.ravel() < 0), 0)

    # Test eudx with affine
    def random_affine(seeds):
        affine = np.eye(4)
        affine[:3, :] = np.random.random((3, 4))
        seeds = np.dot(seeds, affine[:3, :3].T)
        seeds += affine[:3, 3]
        return affine, seeds

    # Make two random affines and move seeds
    affine1, seeds1 = random_affine(seeds)
    affine2, seeds2 = random_affine(seeds)

    # Make tracks using different affines
    eu1 = EuDX(a=ten.fa,
               ind=ind,
               odf_vertices=sphere.vertices,
               seeds=seeds1,
               a_low=.2,
               affine=affine1)
    eu2 = EuDX(a=ten.fa,
               ind=ind,
               odf_vertices=sphere.vertices,
               seeds=seeds2,
               a_low=.2,
               affine=affine2)

    # Move from eu2 affine2 to affine1
    eu2_to_eu1 = utils.move_streamlines(eu2,
                                        output_space=affine1,
                                        input_space=affine2)
    # Check that the tracks are the same
    for sl1, sl2 in zip(eu1, eu2_to_eu1):
        assert_array_almost_equal(sl1, sl2)
Beispiel #49
0
def test_wls_and_ls_fit():
    """
    Tests the WLS and LS fitting functions to see if they returns the correct
    eigenvalues and eigenvectors.

    Uses data/55dir_grad.bvec as the gradient table and 3by3by56.nii
    as the data.

    """

    # Define a test voxel (avoids a nibabel dependency)

    # Recall: D = [Dxx,Dyy,Dzz,Dxy,Dxz,Dyz,log(S_0)] and D ~ 10^-4 mm^2 /s
    b0 = 1000.
    bvec, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)
    # Design Matrix
    gtab = grad.gradient_table(bval, bvec)
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))
    assert_almost_equal(Y[0], b0)
    Y.shape = (-1, ) + Y.shape

    # Testing WLS Fit on Single Voxel
    # If you do something wonky (passing min_signal<0), you should get an
    # error:
    npt.assert_raises(ValueError,
                      TensorModel,
                      gtab,
                      fit_method='WLS',
                      min_signal=-1)

    # Estimate tensor from test signals
    model = TensorModel(gtab, fit_method='WLS', return_S0_hat=True)
    tensor_est = model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0],
                              tensor,
                              err_msg="Calculation of tensor from Y does not "
                              "compare to analytical solution")
    assert_almost_equal(tensor_est.md[0], md)
    assert_array_almost_equal(tensor_est.S0_hat[0], b0, decimal=3)

    # Test that we can fit a single voxel's worth of data (a 1d array)
    y = Y[0]
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)

    # Test using fit_method='LS'
    model = TensorModel(gtab, fit_method='LS')
    tensor_est = model.fit(y)
    assert_equal(tensor_est.shape, tuple())
    assert_array_almost_equal(tensor_est.evals, evals)
    assert_array_almost_equal(tensor_est.quadratic_form, tensor)
    assert_almost_equal(tensor_est.md, md)
    assert_array_almost_equal(tensor_est.lower_triangular(b0), D)
    assert_array_almost_equal(tensor_est.linearity, linearity(evals))
    assert_array_almost_equal(tensor_est.planarity, planarity(evals))
    assert_array_almost_equal(tensor_est.sphericity, sphericity(evals))
if fmask is None:
    from dipy.segment.mask import median_otsu
    b0_mask, mask = median_otsu(
        data)  # TODO: check parameters to improve the mask
else:
    mask, mask_affine = load_nifti(fmask)
    mask = np.squeeze(mask)  # fix mask dimensions

# ### DTI model computation and fitting (also used further on for CSD -- not tested)

# compute DTI model
from dipy.reconst.dti import TensorModel
tenmodel = TensorModel(gtab)  #, fit_method='OLS') #, min_signal=5000)


# fit the dti model
tenfit = tenmodel.fit(data, mask=mask)

# ### DWI indicator computation and saving to NIfTI files (FA, first eigenvector, RGB tensor)

# save fa
ffa = dname + 'tensor_fa.nii.gz'

fa_img = nib.Nifti1Image(tenfit.fa.astype(np.float32), affine)
nib.save(fa_img, ffa)
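
# Hedged continuation of the section header above (output names are
# placeholders): also save the first eigenvector and the RGB tensor, reusing
# `tenfit`, `dname`, `affine` and `nib` from the lines above.
import numpy as np
from dipy.reconst.dti import color_fa

# principal eigenvector: evecs[..., 0] is the column paired with the largest
# eigenvalue in dipy's ordering
fevec = dname + 'tensor_evec.nii.gz'
nib.save(nib.Nifti1Image(tenfit.evecs[..., 0].astype(np.float32), affine),
         fevec)

# RGB-coded FA map
frgb = dname + 'tensor_rgb.nii.gz'
fa = np.clip(tenfit.fa, 0, 1)
rgb = color_fa(fa, tenfit.evecs)
nib.save(nib.Nifti1Image(np.array(255 * rgb, 'uint8'), affine), frgb)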
Beispiel #51
0
	if datat == 0:
		print('fitting with dti')
		data, affine, gtab = get_train_dti(30)
	elif datat == 1:
		print('fitting with hardi')
		data, affine, gtab = get_train_hardi(30)
	elif datat == 2:
		print('fitting with dsi')
		data, affine, gtab = get_train_dsi(30)

	mask, affine = get_train_mask()

	print(data.shape)
	print(mask.shape)

	model = TensorModel(gtab)
	fit = model.fit(data, mask)
	print('done!')
	fa = fit.fa

	slice_z = 25

	Th = [0.05, 0.075, 0.1, 0.15]

	figure(2 * datat + 1)
	imshow(fa[:, :, slice_z], interpolation='nearest')
	colorbar()
	title(mask.sum())

	figure(2 * datat + 2)
Beispiel #52
0
import numpy as np
from scipy.ndimage import gaussian_filter

from dipy.io.image import load_nifti, save_nifti
from dipy.io import read_bvals_bvecs
from dipy.core.gradients import gradient_table
from dipy.reconst.dti import TensorModel
import dipy.reconst.dki as dki

fdwi = 'dwidata.nii.gz'
fbval = 'bvals'
fbvec = 'bvecs'
mask, mask_affine = load_nifti('mask.nii.gz')

data, affine = load_nifti(fdwi)
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
gtab = gradient_table(bvals, bvecs)

tenmodel = TensorModel(gtab)
tenfit = tenmodel.fit(data)

fwhm = 1.25
gauss_std = fwhm / np.sqrt(8 * np.log(2))  # converting fwhm to Gaussian std
data_smooth = np.zeros(data.shape)
for v in range(data.shape[-1]):
    data_smooth[..., v] = gaussian_filter(data[..., v], sigma=gauss_std)

dkimodel = dki.DiffusionKurtosisModel(gtab)

dkifit = dkimodel.fit(data_smooth, mask=mask)

FA = dkifit.fa
MD = dkifit.md
AD = dkifit.ad
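
# Minimal hedged continuation (file names are placeholders): persist the
# kurtosis-model scalar maps with the already-imported save_nifti.
save_nifti('dki_fa.nii.gz', FA.astype(np.float32), affine)
save_nifti('dki_md.nii.gz', MD.astype(np.float32), affine)
save_nifti('dki_ad.nii.gz', AD.astype(np.float32), affine)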
Beispiel #53
0
import numpy as np
import nibabel as nib

img = nib.load('t0.nii.gz')
data = img.get_data()
print('data.shape (%d, %d, %d, %d)' % data.shape)

mask = data[..., 0] > 50

from dipy.io import read_bvals_bvecs
# fbval and fbvec (paths to the b-value/b-vector files) are assumed to be
# defined elsewhere in the original snippet.
bvals, bvecs = read_bvals_bvecs(fbval, fbvec)

from dipy.core.gradients import gradient_table
gtab = gradient_table(bvals, bvecs)

from dipy.reconst.dti import TensorModel
ten = TensorModel(gtab)
tenfit = ten.fit(data, mask)

from dipy.reconst.dti import fractional_anisotropy
fa = fractional_anisotropy(tenfit.evals)
fa[np.isnan(fa)] = 0

from dipy.reconst.dti import color_fa
Rgbv = color_fa(fa, tenfit.evecs)

fa = np.clip(fa, 0, 1)
#save FA to image

nib.save(nib.Nifti1Image(np.array(255 * Rgbv, 'uint8'), img.get_affine()),
         'tensor_rgb.nii.gz')
Beispiel #54
0
    def get_tensor_model(self, gtab):
        return TensorModel(gtab, fit_method="WLS")
Beispiel #55
0
def dwi_dipy_run(dwi_dir,
                 node_size,
                 dir_path,
                 conn_model,
                 parc,
                 atlas_select,
                 network,
                 wm_mask=None):
    import numpy as np
    import nibabel as nib
    from dipy.reconst.dti import TensorModel, quantize_evecs
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, recursive_response
    from dipy.tracking.local import LocalTracking, ActTissueClassifier
    from dipy.tracking import utils
    from dipy.direction import peaks_from_model
    from dipy.tracking.eudx import EuDX
    from dipy.data import get_sphere, default_sphere
    from dipy.core.gradients import gradient_table
    from dipy.io import read_bvals_bvecs
    from dipy.tracking.streamline import Streamlines
    from dipy.direction import ProbabilisticDirectionGetter, ClosestPeakDirectionGetter, BootDirectionGetter
    from nibabel.streamlines import save as save_trk
    from nibabel.streamlines import Tractogram

    ##
    # Hard-coded debug overrides (these shadow the function arguments):
    dwi_dir = '/Users/PSYC-dap3463/Downloads/bedpostx_s002'
    img_pve_csf = nib.load(
        '/Users/PSYC-dap3463/Downloads/002_all/tmp/reg_a/t1w_vent_csf_diff_dwi.nii.gz'
    )
    img_pve_wm = nib.load(
        '/Users/PSYC-dap3463/Downloads/002_all/tmp/reg_a/t1w_wm_in_dwi_bin.nii.gz'
    )
    img_pve_gm = nib.load(
        '/Users/PSYC-dap3463/Downloads/002_all/tmp/reg_a/t1w_gm_mask_dwi.nii.gz'
    )
    labels_img = nib.load(
        '/Users/PSYC-dap3463/Downloads/002_all/tmp/reg_a/dwi_aligned_atlas.nii.gz'
    )
    num_total_samples = 10000
    tracking_method = 'boot'  # Options are 'boot', 'prob', 'peaks', 'closest'
    procmem = [2, 4]
    ##

    if parc is True:
        node_size = 'parc'

    dwi_img = "%s%s" % (dwi_dir, '/dwi.nii.gz')
    nodif_brain_mask_path = "%s%s" % (dwi_dir, '/nodif_brain_mask.nii.gz')
    bvals = "%s%s" % (dwi_dir, '/bval')
    bvecs = "%s%s" % (dwi_dir, '/bvec')

    dwi_img = nib.load(dwi_img)
    data = dwi_img.get_data()
    [bvals, bvecs] = read_bvals_bvecs(bvals, bvecs)
    gtab = gradient_table(bvals, bvecs)
    gtab.b0_threshold = min(bvals)
    sphere = get_sphere('symmetric724')

    # Loads mask and ensures it's a true binary mask
    mask_img = nib.load(nodif_brain_mask_path)
    mask = mask_img.get_data()
    mask = mask > 0

    # Fit a basic tensor model first
    model = TensorModel(gtab)
    ten = model.fit(data, mask)
    fa = ten.fa

    # Tractography
    if conn_model == 'csd':
        print('Tracking with csd model...')
    elif conn_model == 'tensor':
        print('Tracking with tensor model...')
    else:
        raise RuntimeError("%s%s" % (conn_model, ' is not a valid model.'))

    # Combine seed counts from voxel with seed counts total
    wm_mask_data = img_pve_wm.get_data()
    wm_mask_data[0, :, :] = False
    wm_mask_data[:, 0, :] = False
    wm_mask_data[:, :, 0] = False
    seeds = utils.seeds_from_mask(wm_mask_data,
                                  density=1,
                                  affine=dwi_img.get_affine())
    seeds_rnd = utils.random_seeds_from_mask(ten.fa > 0.02,
                                             seeds_count=num_total_samples,
                                             seed_count_per_voxel=True)
    seeds_all = np.vstack([seeds, seeds_rnd])

    # Load tissue maps and prepare tissue classifier (Anatomically-Constrained Tractography (ACT))
    background = np.ones(img_pve_gm.shape)
    background[(img_pve_gm.get_data() + img_pve_wm.get_data() +
                img_pve_csf.get_data()) > 0] = 0
    include_map = img_pve_gm.get_data()
    include_map[background > 0] = 1
    exclude_map = img_pve_csf.get_data()
    act_classifier = ActTissueClassifier(include_map, exclude_map)

    if conn_model == 'tensor':
        ind = quantize_evecs(ten.evecs, sphere.vertices)
        streamline_generator = EuDX(a=fa,
                                    ind=ind,
                                    seeds=seeds_all,
                                    odf_vertices=sphere.vertices,
                                    a_low=0.05,
                                    step_sz=.5)
        # Materialize here too, so the save below works for the tensor branch
        streamlines = Streamlines(streamline_generator)
    elif conn_model == 'csd':
        print('Tracking with CSD model...')
        response = recursive_response(
            gtab,
            data,
            mask=img_pve_wm.get_data().astype('bool'),
            sh_order=8,
            peak_thr=0.01,
            init_fa=0.05,
            init_trace=0.0021,
            iter=8,
            convergence=0.001,
            parallel=True)
        csd_model = ConstrainedSphericalDeconvModel(gtab, response)
        if tracking_method == 'boot':
            dg = BootDirectionGetter.from_data(data,
                                               csd_model,
                                               max_angle=30.,
                                               sphere=default_sphere)
        elif tracking_method == 'prob':
            try:
                print(
                    'First attempting to build the direction getter directly from the spherical harmonic representation of the FOD...'
                )
                csd_fit = csd_model.fit(
                    data, mask=img_pve_wm.get_data().astype('bool'))
                dg = ProbabilisticDirectionGetter.from_shcoeff(
                    csd_fit.shm_coeff, max_angle=30., sphere=default_sphere)
            except Exception:
                print('Spherical harmonic representation not available for '
                      'this model. Using peaks_from_model to represent the '
                      'ODF on a spherical harmonic basis instead...')
                peaks = peaks_from_model(
                    csd_model,
                    data,
                    default_sphere,
                    .5,
                    25,
                    mask=img_pve_wm.get_data().astype('bool'),
                    return_sh=True,
                    parallel=True,
                    nbr_processes=procmem[0])
                dg = ProbabilisticDirectionGetter.from_shcoeff(
                    peaks.shm_coeff, max_angle=30., sphere=default_sphere)
        elif tracking_method == 'peaks':
            dg = peaks_from_model(model=csd_model,
                                  data=data,
                                  sphere=default_sphere,
                                  relative_peak_threshold=.5,
                                  min_separation_angle=25,
                                  mask=img_pve_wm.get_data().astype('bool'),
                                  parallel=True,
                                  nbr_processes=procmem[0])
        elif tracking_method == 'closest':
            csd_fit = csd_model.fit(data,
                                    mask=img_pve_wm.get_data().astype('bool'))
            pmf = csd_fit.odf(default_sphere).clip(min=0)
            dg = ClosestPeakDirectionGetter.from_pmf(pmf,
                                                     max_angle=30.,
                                                     sphere=default_sphere)
        streamline_generator = LocalTracking(dg,
                                             act_classifier,
                                             seeds_all,
                                             affine=dwi_img.affine,
                                             step_size=0.5)
        del dg
        try:
            del csd_fit
        except NameError:
            pass
        try:
            del response
        except NameError:
            pass
        try:
            del csd_model
        except NameError:
            pass
        streamlines = Streamlines(streamline_generator, buffer_size=512)

    save_trk(Tractogram(streamlines, affine_to_rasmm=dwi_img.affine),
             'prob_streamlines.trk')
    tracks = [sl for sl in streamlines if len(sl) > 1]
    labels_data = labels_img.get_data().astype('int')
    labels_affine = labels_img.affine
    conn_matrix, grouping = utils.connectivity_matrix(
        tracks,
        labels_data,
        affine=labels_affine,
        return_mapping=True,
        mapping_as_streamlines=True,
        symmetric=True)
    conn_matrix[:3, :] = 0
    conn_matrix[:, :3] = 0

    return conn_matrix
Beispiel #56
0
def mask_for_response_msmt(gtab, data, roi_center=None, roi_radii=10,
                           wm_fa_thr=0.7, gm_fa_thr=0.2, csf_fa_thr=0.1,
                           gm_md_thr=0.0007, csf_md_thr=0.002):
    """ Computation of masks for multi-shell multi-tissue (msmt) response
        function using FA and MD.

    Parameters
    ----------
    gtab : GradientTable
    data : ndarray
        diffusion data (4D)
    roi_center : array-like, (3,)
        Center of ROI in data. If center is None, it is assumed that it is
        the center of the volume with shape `data.shape[:3]`.
    roi_radii : int or array-like, (3,)
        radii of cuboid ROI
    wm_fa_thr : float
        FA threshold for WM.
    gm_fa_thr : float
        FA threshold for GM.
    csf_fa_thr : float
        FA threshold for CSF.
    gm_md_thr : float
        MD threshold for GM.
    csf_md_thr : float
        MD threshold for CSF.

    Returns
    -------
    mask_wm : ndarray
        Mask of voxels within the ROI and with FA above the FA threshold
        for WM.
    mask_gm : ndarray
        Mask of voxels within the ROI and with FA below the FA threshold
        for GM and with MD below the MD threshold for GM.
    mask_csf : ndarray
        Mask of voxels within the ROI and with FA below the FA threshold
        for CSF and with MD below the MD threshold for CSF.

    Notes
    -----
    In msmt-CSD there is an important pre-processing step: the estimation of
    every tissue's response function. In order to do this, we look for voxels
    corresponding to WM, GM and CSF. This function aims to accomplish that by
    returning a mask of voxels within a ROI and who respect some threshold
    constraints, for each tissue. More precisely, the WM mask must have a FA
    value above a given threshold. The GM mask and CSF mask must have a FA
    below given thresholds and a MD below other thresholds. To get the FA and
    MD, we need to fit a Tensor model to the datasets.
    """

    if len(data.shape) < 4:
        msg = """Data must be 4D (3D image + directions). To use a 2D image,
        please reshape it into a (N, N, 1, ndirs) array."""
        raise ValueError(msg)

    if isinstance(roi_radii, numbers.Number):
        roi_radii = (roi_radii, roi_radii, roi_radii)

    if roi_center is None:
        roi_center = np.array(data.shape[:3]) // 2

    roi_radii = _roi_in_volume(data.shape, np.asarray(roi_center),
                               np.asarray(roi_radii))

    roi_mask = _mask_from_roi(data.shape[:3], roi_center, roi_radii)

    list_bvals = unique_bvals_tolerance(gtab.bvals)
    if not np.all(list_bvals <= 1200):
        msg_bvals = """Some b-values are higher than 1200.
        The DTI fit might be affected."""
        warnings.warn(msg_bvals, UserWarning)

    ten = TensorModel(gtab)
    tenfit = ten.fit(data, mask=roi_mask)
    fa = fractional_anisotropy(tenfit.evals)
    fa[np.isnan(fa)] = 0
    md = mean_diffusivity(tenfit.evals)
    md[np.isnan(md)] = 0

    mask_wm = np.zeros(fa.shape, dtype=np.int64)
    mask_wm[fa > wm_fa_thr] = 1
    mask_wm *= roi_mask

    md_mask_gm = np.zeros(md.shape, dtype=np.int64)
    md_mask_gm[(md < gm_md_thr)] = 1

    fa_mask_gm = np.zeros(fa.shape, dtype=np.int64)
    fa_mask_gm[(fa < gm_fa_thr) & (fa > 0)] = 1

    mask_gm = md_mask_gm * fa_mask_gm
    mask_gm *= roi_mask

    md_mask_csf = np.zeros(md.shape, dtype=np.int64)
    md_mask_csf[(md < csf_md_thr) & (md > 0)] = 1

    fa_mask_csf = np.zeros(fa.shape, dtype=np.int64)
    fa_mask_csf[(fa < csf_fa_thr) & (fa > 0)] = 1

    mask_csf = md_mask_csf * fa_mask_csf
    mask_csf *= roi_mask

    msg = """No voxel with a {0} than {1} were found.
    Try a larger roi or a {2} threshold for {3}."""

    if np.sum(mask_wm) == 0:
        msg_fa = msg.format('FA higher', str(wm_fa_thr), 'lower FA', 'WM')
        warnings.warn(msg_fa, UserWarning)

    if np.sum(mask_gm) == 0:
        msg_fa = msg.format('FA lower', str(gm_fa_thr), 'higher FA', 'GM')
        msg_md = msg.format('MD lower', str(gm_md_thr), 'higher MD', 'GM')
        warnings.warn(msg_fa, UserWarning)
        warnings.warn(msg_md, UserWarning)

    if np.sum(mask_csf) == 0:
        msg_fa = msg.format('FA lower', str(csf_fa_thr), 'higher FA', 'CSF')
        msg_md = msg.format('MD lower', str(csf_md_thr), 'higher MD', 'CSF')
        warnings.warn(msg_fa, UserWarning)
        warnings.warn(msg_md, UserWarning)

    return mask_wm, mask_gm, mask_csf
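
# Hedged usage sketch for the function above. Assumes `response_from_mask_msmt`
# from dipy.reconst.mcsd (present in recent dipy releases); the three masks
# feed the per-tissue response estimation (gtab and data assumed in scope).
from dipy.reconst.mcsd import response_from_mask_msmt

mask_wm, mask_gm, mask_csf = mask_for_response_msmt(gtab, data, roi_radii=10)
response_wm, response_gm, response_csf = response_from_mask_msmt(
    gtab, data, mask_wm, mask_gm, mask_csf)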
Beispiel #57
0
def test_recursive_response_calibration():
    """
    Test the recursive response calibration method.
    """
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_fnames('small_64D')

    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    sphere = default_sphere

    gtab = gradient_table(bvals, bvecs)
    evals = np.array([0.0015, 0.0003, 0.0003])
    evecs = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).T
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (90, 0)]

    where_dwi = lazy_index(~gtab.b0s_mask)

    S_cross, _ = multi_tensor(gtab, mevals, S0, angles=angles,
                              fractions=[50, 50], snr=SNR)

    S_single = single_tensor(gtab, S0, evals, evecs, snr=SNR)

    data = np.concatenate((np.tile(S_cross, (8, 1)),
                           np.tile(S_single, (2, 1))),
                          axis=0)

    odf_gt_cross = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])

    odf_gt_single = single_tensor_odf(sphere.vertices, evals, evecs)

    response = recursive_response(gtab, data, mask=None, sh_order=8,
                                  peak_thr=0.01, init_fa=0.05,
                                  init_trace=0.0021, iter=8, convergence=0.001,
                                  parallel=False)

    csd = ConstrainedSphericalDeconvModel(gtab, response)

    csd_fit = csd.fit(data)

    assert_equal(np.all(csd_fit.shm_coeff[:, 0] >= 0), True)

    fodf = csd_fit.odf(sphere)

    directions_gt_single, _, _ = peak_directions(odf_gt_single, sphere)
    directions_gt_cross, _, _ = peak_directions(odf_gt_cross, sphere)
    directions_single, _, _ = peak_directions(fodf[8, :], sphere)
    directions_cross, _, _ = peak_directions(fodf[0, :], sphere)

    ang_sim = angular_similarity(directions_cross, directions_gt_cross)
    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions_cross.shape[0], 2)
    assert_equal(directions_gt_cross.shape[0], 2)

    ang_sim = angular_similarity(directions_single, directions_gt_single)
    assert_equal(ang_sim > 0.9, True)
    assert_equal(directions_single.shape[0], 1)
    assert_equal(directions_gt_single.shape[0], 1)

    with warnings.catch_warnings(record=True) as w:
        sphere = Sphere(xyz=gtab.gradients[where_dwi])
        npt.assert_equal(len(w), 1)
        npt.assert_(issubclass(w[0].category, UserWarning))
        npt.assert_("Vertices are not on the unit sphere" in str(w[0].message))
    sf = response.on_sphere(sphere)
    S = np.concatenate(([response.S0], sf))

    tenmodel = TensorModel(gtab, min_signal=0.001)

    tenfit = tenmodel.fit(S)
    FA = fractional_anisotropy(tenfit.evals)
    FA_gt = fractional_anisotropy(evals)
    assert_almost_equal(FA, FA_gt, 1)
Beispiel #58
0
    def eudx_advanced(self,
                      dti_file,
                      mask_file,
                      gtab,
                      seed_num=100000,
                      stop_val=0.1):
        """
        Tracking with more complex tensors - experimental

        Initializes the graph with nodes corresponding to the number of ROIs

        **Positional Arguments:**

                dti_file:
                    - File (registered) to use for tensor/fiber tracking
                mask_file:
                    - Brain mask to keep tensors inside the brain
                gtab:
                    - dipy formatted bval/bvec Structure

        **Optional Arguments:**
                seed_num:
                    - Number of seeds to use for fiber tracking
                stop_val:
                    - Value to cutoff fiber track
        """

        img = nb.load(dti_file)
        data = img.get_data()

        img = nb.load(mask_file)

        mask = img.get_data()
        mask = mask > 0  # to ensure binary mask
        """
        For the constrained spherical deconvolution we need to estimate the
        response function (see :ref:`example_reconst_csd`) and create a model.
        """

        response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)

        csd_model = ConstrainedSphericalDeconvModel(gtab, response)
        """
        Next, we use ``peaks_from_model`` to fit the data and calculated
        the fiber directions in all voxels.
        """

        sphere = get_sphere('symmetric724')

        csd_peaks = peaks_from_model(model=csd_model,
                                     data=data,
                                     sphere=sphere,
                                     mask=mask,
                                     relative_peak_threshold=.5,
                                     min_separation_angle=25,
                                     parallel=True)
        """
        For the tracking part, we will use ``csd_model`` fiber directions
        but stop tracking where fractional anisotropy (FA) is low (< 0.1).
        To derive the FA, used as a stopping criterion, we need to fit a
        tensor model first. Here, we use weighted least squares (WLS).
        """
        print('tensors...')

        tensor_model = TensorModel(gtab, fit_method='WLS')
        tensor_fit = tensor_model.fit(data, mask)

        FA = fractional_anisotropy(tensor_fit.evals)
        """
        In order for the stopping values to be used with our tracking
        algorithm we need to have the same dimensions as the
        ``csd_peaks.peak_values``. For this reason, we can assign the
        same FA value to every peak direction in the same voxel in
        the following way.
        """

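        # e.g. FA.shape == (X, Y, Z) while peak_values.shape == (X, Y, Z, npeaks):
        # the [..., None] broadcast copies each voxel's FA to all of its peaks.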
        stopping_values = np.zeros(csd_peaks.peak_values.shape)
        stopping_values[:] = FA[..., None]

        streamline_generator = EuDX(stopping_values,
                                    csd_peaks.peak_indices,
                                    seeds=seed_num,
                                    odf_vertices=sphere.vertices,
                                    a_low=stop_val)

        streamlines = [streamline for streamline in streamline_generator]

        return streamlines
import numpy as np
import nibabel as nib

from dipy.reconst.dti import TensorModel, fractional_anisotropy
from show_streamlines import show_streamlines
from conn_mat import connectivity_matrix

from dipy.io.pickles import save_pickle, load_pickle

from time import time

threshold = 0.75
from dipy.data import get_sphere
sphere = get_sphere('symmetric724')
dname = 'SNR20/'

if __name__ == '__main__':
    data, affine, gtab = get_test_hardi(snr=20, denoised=0)    
    mask = get_test_mask()
    tenmodel = TensorModel(gtab)
    tenfit = tenmodel.fit(data, mask)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    nib.save(nib.Nifti1Image(FA.astype('float32'), affine), 
             'FA.nii.gz')
    
    for i in range(27):
        print('White matter bundle:', i)
        wm_mask = get_test_wm_mask(i)
        print(FA[wm_mask].max())
        indicesAniso = np.where(np.logical_and(FA > threshold, wm_mask))

        print('    Response function')
        S0s = data[indicesAniso][:, np.nonzero(gtab.b0s_mask)[0]]
        S0 = np.mean(S0s)
- threshold: float
    Threshold applied to ``metric_map``; tracking stops where the map falls
    below it.

**Stopping States**

- 'ENDPOINT': stops at a position where metric_map < threshold; the
  streamline reached the target stopping area.
- 'OUTSIDEIMAGE': stops at a position outside of metric_map; the streamline
  reached an area outside the image where no direction data is available.
- 'TRACKPOINT': stops at a position because no direction is available; the
  streamline is stopping where metric_map >= threshold, but there is no valid
  direction to follow.
- 'INVALIDPOINT': N/A.
"""


import matplotlib.pyplot as plt

from dipy.reconst.dti import TensorModel, fractional_anisotropy
from dipy.tracking.stopping_criterion import ThresholdStoppingCriterion

tensor_model = TensorModel(gtab)
tenfit = tensor_model.fit(data, mask=labels > 0)
FA = fractional_anisotropy(tenfit.evals)

threshold_criterion = ThresholdStoppingCriterion(FA, .2)

fig = plt.figure()
mask_fa = FA.copy()
mask_fa[mask_fa < 0.2] = 0
plt.xticks([])
plt.yticks([])
plt.imshow(mask_fa[:, :, data.shape[2] // 2].T, cmap='gray', origin='lower',
           interpolation='nearest')
fig.tight_layout()
fig.savefig('threshold_fa.png')
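
# Hedged sketch of the next step this criterion usually feeds (module paths
# per recent dipy releases). `peaks` stands in for a direction getter obtained
# earlier, e.g. via peaks_from_model; `affine` is the image affine.
from dipy.tracking.local_tracking import LocalTracking
from dipy.tracking.streamline import Streamlines
from dipy.tracking import utils

seeds = utils.seeds_from_mask(mask_fa > 0, affine, density=1)
streamline_generator = LocalTracking(peaks, threshold_criterion, seeds,
                                     affine, step_size=0.5)
streamlines = Streamlines(streamline_generator)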