def test_predict():
    """Test model prediction API."""
    psphere = get_sphere('symmetric362')
    bvecs = np.concatenate(([[1, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = grad.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001],
                       [0.0015, 0.0003, 0.0003]))
    mevecs = [np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
              np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])]
    S = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)

    dm = dti.TensorModel(gtab, 'LS')
    dmfit = dm.fit(S)
    # Prediction from a fit object and from explicit model params must both
    # reproduce the noise-free simulated signal.
    assert_array_almost_equal(dmfit.predict(gtab, S0=100), S)
    assert_array_almost_equal(dm.predict(dmfit.model_params, S0=100), S)

    fdata, fbvals, fbvecs = get_data()
    data = nib.load(fdata).get_data()
    # Make the data cube a bit larger:
    data = np.tile(data.T, 2).T
    gtab = grad.gradient_table(fbvals, fbvecs)
    dtim = dti.TensorModel(gtab)
    dtif = dtim.fit(data)
    S0 = np.mean(data[..., gtab.b0s_mask], -1)
    # Prediction on real 4D data preserves the data shape:
    p = dtif.predict(gtab, S0)
    assert_equal(p.shape, data.shape)
def test_pca_noise_estimate():
    """PCA noise estimation recovers sigma from Rician-corrupted data."""
    np.random.seed(1984)
    # MUBE: many b0 volumes, a few DWI directions.
    bvals1 = np.concatenate([np.zeros(17), np.ones(3) * 1000])
    bvecs1 = np.concatenate([np.zeros((17, 3)), np.eye(3)])
    gtab1 = dpg.gradient_table(bvals1, bvecs1)
    # SIBE: a single b0 volume.
    bvals2 = np.concatenate([np.zeros(1), np.ones(3) * 1000])
    bvecs2 = np.concatenate([np.zeros((1, 3)), np.eye(3)])
    gtab2 = dpg.gradient_table(bvals2, bvecs2)

    for patch_radius in [1, 2]:
        for gtab in [gtab1, gtab2]:
            for dtype in [np.int16, np.float64]:
                signal = np.ones((20, 20, 20, gtab.bvals.shape[0]))
                for correct_bias in [True, False]:
                    if not correct_bias:
                        # High signal for no bias correction
                        signal = signal * 100
                    sigma = 1
                    noise1 = np.random.normal(0, sigma, size=signal.shape)
                    noise2 = np.random.normal(0, sigma, size=signal.shape)
                    # Rician noise:
                    data = np.sqrt((signal + noise1) ** 2 + noise2 ** 2)
                    sigma_est = pca_noise_estimate(
                        data.astype(dtype), gtab,
                        correct_bias=correct_bias,
                        patch_radius=patch_radius)
                    assert_array_almost_equal(np.mean(sigma_est), sigma,
                                              decimal=1)
    # Bias correction should enlarge the estimate relative to no correction:
    assert_(np.mean(pca_noise_estimate(data, gtab, correct_bias=True)) >
            np.mean(pca_noise_estimate(data, gtab, correct_bias=False)))
def test_csd_xval():
    """k-fold cross-validation of the CSD model on real and simulated data."""
    # First, let's see that it works with some data:
    data = nib.load(fdata).get_data()[1:3, 1:3, 1:3]  # Make it *small*
    gtab = gt.gradient_table(fbval, fbvec)
    S0 = np.mean(data[..., gtab.b0s_mask])
    response = ([0.0015, 0.0003, 0.0001], S0)
    csdm = csd.ConstrainedSphericalDeconvModel(gtab, response)
    kf_xval = xval.kfold_xval(csdm, data, 2, response, sh_order=2)

    # In simulation, it should work rather well (high COD):
    psphere = dpd.get_sphere('symmetric362')
    bvecs = np.concatenate(([[0, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = gt.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001],
                       [0.0015, 0.0003, 0.0003]))
    mevecs = [np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
              np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])]
    S0 = 100
    S = sims.single_tensor(gtab, S0, mevals[0], mevecs[0], snr=None)
    sm = csd.ConstrainedSphericalDeconvModel(gtab, response)
    smfit = sm.fit(S)
    np.random.seed(12345)
    response = ([0.0015, 0.0003, 0.0001], S0)
    kf_xval = xval.kfold_xval(sm, S, 2, response, sh_order=2)
    # Because of the regularization, COD is not going to be perfect here:
    cod = xval.coeff_of_determination(S, kf_xval)
    # We'll just test for regressions:
    csd_cod = 97  # pre-computed by hand for this random seed
    # We're going to be really lenient here:
    npt.assert_array_almost_equal(np.round(cod), csd_cod)
def test_btable_prepare():
    """gradient_table accepts bvals/bvecs in several equivalent layouts."""
    sq2 = np.sqrt(2) / 2.
    bvals = 1500 * np.ones(7)
    bvals[0] = 0
    bvecs = np.array([[0, 0, 0],
                      [1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1],
                      [sq2, sq2, 0],
                      [sq2, 0, sq2],
                      [0, sq2, sq2]])
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    bt.info

    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    # NaN entries stand for zero gradients:
    bvecs = np.where(np.isnan(bvecs), 0, bvecs)
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    # Transposed b-vectors are normalized to (N, 3):
    bt2 = gradient_table(bvals, bvecs.T)
    npt.assert_array_equal(bt2.bvecs, bvecs)
    # A single (N, 4) table, or its transpose, works too:
    btab = np.concatenate((bvals[:, None], bvecs), axis=1)
    bt3 = gradient_table(btab)
    npt.assert_array_equal(bt3.bvecs, bvecs)
    npt.assert_array_equal(bt3.bvals, bvals)
    bt4 = gradient_table(btab.T)
    npt.assert_array_equal(bt4.bvecs, bvecs)
    npt.assert_array_equal(bt4.bvals, bvals)
    # Test for proper inputs (expects either bvals/bvecs or 4 by n):
    assert_raises(ValueError, gradient_table, bvecs)
def test_btable_prepare():
    """gradient_table builds the same table from several input layouts."""
    sq2 = np.sqrt(2) / 2.
    bvals = 1500 * np.ones(7)
    bvals[0] = 0
    bvecs = np.array([[0, 0, 0],
                      [1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1],
                      [sq2, sq2, 0],
                      [sq2, 0, sq2],
                      [0, sq2, sq2]])
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    bt.info

    fimg, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    # NaN entries stand for zero gradients:
    bvecs = np.where(np.isnan(bvecs), 0, bvecs)
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    # Transposed b-vectors are normalized to (N, 3):
    bt2 = gradient_table(bvals, bvecs.T)
    npt.assert_array_equal(bt2.bvecs, bvecs)
    # A single (N, 4) table, or its transpose, works too:
    btab = np.concatenate((bvals[:, None], bvecs), axis=1)
    bt3 = gradient_table(btab)
    npt.assert_array_equal(bt3.bvecs, bvecs)
    npt.assert_array_equal(bt3.bvals, bvals)
    bt4 = gradient_table(btab.T)
    npt.assert_array_equal(bt4.bvecs, bvecs)
    npt.assert_array_equal(bt4.bvals, bvals)
def test_dti_xval():
    """ Test k-fold cross-validation """
    data = nib.load(fdata).get_data()
    gtab = gt.gradient_table(fbval, fbvec)
    dm = dti.TensorModel(gtab, "LS")
    # The data has 102 directions, so will not divide neatly into 10 bits
    npt.assert_raises(ValueError, xval.kfold_xval, dm, data, 10)
    # But we can do this with 2 folds:
    kf_xval = xval.kfold_xval(dm, data, 2)

    # In simulation with no noise, COD should be perfect:
    psphere = dpd.get_sphere("symmetric362")
    bvecs = np.concatenate(([[0, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = gt.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001],
                       [0.0015, 0.0003, 0.0003]))
    mevecs = [np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
              np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])]
    S = sims.single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    dm = dti.TensorModel(gtab, "LS")
    kf_xval = xval.kfold_xval(dm, S, 2)
    cod = xval.coeff_of_determination(S, kf_xval)
    npt.assert_array_almost_equal(cod, np.ones(kf_xval.shape[:-1]) * 100)

    # Test with 2D data for use of a mask
    S = np.array([[S, S], [S, S]])
    mask = np.ones(S.shape[:-1], dtype=bool)
    mask[1, 1] = 0
    kf_xval = xval.kfold_xval(dm, S, 2, mask=mask)
    cod2d = xval.coeff_of_determination(S, kf_xval)
    npt.assert_array_almost_equal(np.round(cod2d[0, 0]), cod)
def test_multib0_dsi():
    """DSI fitting tolerates data sets with one and with two b0 volumes."""
    data, gtab = dsi_voxels()
    # Create a new data-set with a b0 measurement:
    new_data = np.concatenate([data, data[..., 0, None]], -1)
    new_bvecs = np.concatenate([gtab.bvecs, np.zeros((1, 3))])
    new_bvals = np.concatenate([gtab.bvals, [0]])
    new_gtab = gradient_table(new_bvals, new_bvecs)
    ds = DiffusionSpectrumModel(new_gtab)
    sphere = get_sphere('repulsion724')
    dsfit = ds.fit(new_data)
    pdf = dsfit.pdf()
    dsfit.odf(sphere)
    assert_equal(new_data.shape[:-1] + (17, 17, 17), pdf.shape)
    assert_equal(np.alltrue(np.isreal(pdf)), True)

    # And again, with one more b0 measurement (two in total):
    # BUG FIX: extend the already-augmented data/gradients, instead of
    # repeating the single-b0 case verbatim (which never tested two b0s).
    new_data = np.concatenate([new_data, data[..., 0, None]], -1)
    new_bvecs = np.concatenate([new_gtab.bvecs, np.zeros((1, 3))])
    new_bvals = np.concatenate([new_gtab.bvals, [0]])
    new_gtab = gradient_table(new_bvals, new_bvecs)
    ds = DiffusionSpectrumModel(new_gtab)
    dsfit = ds.fit(new_data)
    pdf = dsfit.pdf()
    dsfit.odf(sphere)
    assert_equal(new_data.shape[:-1] + (17, 17, 17), pdf.shape)
    assert_equal(np.alltrue(np.isreal(pdf)), True)
def setup_module():
    """Module-level setup"""
    global gtab, gtab_2s
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    # Two shells, for techniques that require multi-shell data:
    bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
    bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
    gtab_2s = gradient_table(bvals_2s, bvecs_2s)
def test_nan_bvecs():
    """
    Test that the presence of nan's in b-vectors doesn't raise warnings.

    In previous versions, the presence of NaN in b-vectors was taken to
    indicate a 0 b-value, but also raised a warning when testing for the
    length of these vectors. This checks that it doesn't happen.
    """
    fdata, fbvals, fbvecs = get_fnames()
    with warnings.catch_warnings(record=True) as w:
        gradient_table(fbvals, fbvecs)
        npt.assert_(len(w) == 0)
def setup_module():
    """Module-level setup"""
    global gtab, gtab_2s, mevals, model_params_mv
    global DWI, FAref, GTF, MDref, FAdti, MDdti
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    # The FW model requires multi-shell data:
    bvals_2s = np.concatenate((bvals, bvals * 1.5), axis=0)
    bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
    gtab_2s = gradient_table(bvals_2s, bvecs_2s)

    # Simulate a typical DT and DW signal with no water contamination:
    S0 = np.array(100)
    dt = np.array([0.0017, 0, 0.0003, 0, 0, 0.0003])
    evals, evecs = decompose_tensor(from_lower_triangular(dt))
    S_tissue = single_tensor(gtab_2s, S0=100, evals=evals, evecs=evecs,
                             snr=None)
    dm = dti.TensorModel(gtab_2s, 'WLS')
    dtifit = dm.fit(S_tissue)
    FAdti = dtifit.fa
    MDdti = dtifit.md
    dtiparams = dtifit.model_params

    # Simulation of 8 voxels tested:
    DWI = np.zeros((2, 2, 2, len(gtab_2s.bvals)))
    FAref = np.zeros((2, 2, 2))
    MDref = np.zeros((2, 2, 2))
    # Diffusion of tissue and water compartments are constant for all voxels:
    mevals = np.array([[0.0017, 0.0003, 0.0003],
                       [0.003, 0.003, 0.003]])
    # Volume fractions (only the first z-slab is non-zero):
    GTF = np.array([[[0.06, 0.71], [0.33, 0.91]],
                    [[0., 0.], [0., 0.]]])
    # S0 multivoxel:
    S0m = 100 * np.ones((2, 2, 2))
    # Ground-truth model parameters, filled in below:
    model_params_mv = np.zeros((2, 2, 2, 13))
    for i in range(2):
        for j in range(2):
            gtf = GTF[0, i, j]
            S, p = multi_tensor(gtab_2s, mevals, S0=100,
                                angles=[(90, 0), (90, 0)],
                                fractions=[(1 - gtf) * 100, gtf * 100],
                                snr=None)
            DWI[0, i, j] = S
            FAref[0, i, j] = FAdti
            MDref[0, i, j] = MDdti
            R = all_tensor_evecs(p[0]).reshape((9))
            model_params_mv[0, i, j] = np.concatenate(
                ([0.0017, 0.0003, 0.0003], R, [gtf]), axis=0)
def test_nlls_fit_tensor():
    """ Test the implementation of NLLS and RESTORE """
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match:
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)

    # Design Matrix
    X = dti.design_matrix(bvecs, bval)

    # Signals
    Y = np.exp(np.dot(X, D))
    Y.shape = (-1,) + Y.shape

    # Estimate tensor from test signals and compare against expected result
    # using non-linear least squares:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS')
    tensor_est = tensor_model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    assert_almost_equal(tensor_est.md[0], md)

    # Using the gmm weighting scheme:
    tensor_model = dti.TensorModel(gtab, fit_method='NLLS', weighting='gmm')
    # BUG FIX: the gmm-weighted model was constructed but never fit, so the
    # asserts below silently re-checked the previous (unweighted) fit.
    tensor_est = tensor_model.fit(Y)
    assert_equal(tensor_est.shape, Y.shape[:-1])
    assert_array_almost_equal(tensor_est.evals[0], evals)
    assert_array_almost_equal(tensor_est.quadratic_form[0], tensor)
    assert_almost_equal(tensor_est.md[0], md)

    # Use NLLS with some actual 4D data:
    data, bvals, bvecs = get_data('small_25')
    gtab = grad.gradient_table(bvals, bvecs)
    tm1 = dti.TensorModel(gtab, fit_method='NLLS')
    dd = nib.load(data).get_data()
    tf1 = tm1.fit(dd)
    tm2 = dti.TensorModel(gtab)
    tf2 = tm2.fit(dd)
    assert_array_almost_equal(tf1.fa, tf2.fa, decimal=1)
def test_eudx_bad_seed():
    """Test passing a bad seed to eudx"""
    fimg, fbvals, fbvecs = get_data('small_101D')
    img = ni.load(fimg)
    affine = img.affine
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    ind = quantize_evecs(ten.evecs)
    sphere = get_sphere('symmetric724')

    # A seed far outside the volume raises once the generator is consumed:
    seed = [1000000., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    assert_raises(ValueError, list, eu)
    print(data.shape)

    # A seed inside the volume tracks without error:
    seed = [1., 5., 8.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    track = list(eu)

    # A negative coordinate is also out of bounds:
    seed = [-1., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed],
              odf_vertices=sphere.vertices, a_low=.2)
    assert_raises(ValueError, list, eu)
def read_taiwan_ntu_dsi():
    """ Load Taiwan NTU dataset

    Returns
    -------
    img : obj, Nifti1Image
    gtab : obj, GradientTable
    """
    folder = pjoin(dipy_home, 'taiwan_ntu_dsi')
    fraw = pjoin(folder, 'DSI203.nii.gz')
    fbval = pjoin(folder, 'DSI203.bval')
    fbvec = pjoin(folder, 'DSI203.bvec')
    md5_dict = {'data': '950408c0980a7154cb188666a885a91f',
                'bval': '602e5cb5fad2e7163e8025011d8a6755',
                'bvec': 'a95eb1be44748c20214dc7aa654f9e6b',
                'license': '7fa1d5e272533e832cc7453eeba23f44'}
    # Verify the integrity of every downloaded file before use:
    check_md5(fraw, md5_dict['data'])
    check_md5(fbval, md5_dict['bval'])
    check_md5(fbvec, md5_dict['bvec'])
    check_md5(pjoin(folder, 'DSI203_license.txt'), md5_dict['license'])
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    # Normalize the non-b0 vectors to unit length:
    bvecs[1:] = (bvecs[1:] /
                 np.sqrt(np.sum(bvecs[1:] * bvecs[1:], axis=1))[:, None])
    gtab = gradient_table(bvals, bvecs)
    img = nib.load(fraw)
    return img, gtab
def read_stanford_hardi():
    """ Load Stanford HARDI dataset

    Returns
    -------
    img : obj, Nifti1Image
    gtab : obj, GradientTable
    """
    folder = pjoin(dipy_home, 'stanford_hardi')
    fraw = pjoin(folder, 'HARDI150.nii.gz')
    fbval = pjoin(folder, 'HARDI150.bval')
    fbvec = pjoin(folder, 'HARDI150.bvec')
    md5_dict = {'data': '0b18513b46132b4d1051ed3364f2acbc',
                'bval': '4e08ee9e2b1d2ec3fddb68c70ae23c36',
                'bvec': '4c63a586f29afc6a48a5809524a76cb4'}
    # Verify the integrity of every downloaded file before use:
    check_md5(fraw, md5_dict['data'])
    check_md5(fbval, md5_dict['bval'])
    check_md5(fbvec, md5_dict['bvec'])
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs)
    img = nib.load(fraw)
    return img, gtab
def read_sherbrooke_3shell():
    """ Load Sherbrooke 3-shell HARDI dataset

    Returns
    -------
    img : obj, Nifti1Image
    gtab : obj, GradientTable
    """
    folder = pjoin(dipy_home, 'sherbrooke_3shell')
    fraw = pjoin(folder, 'HARDI193.nii.gz')
    fbval = pjoin(folder, 'HARDI193.bval')
    fbvec = pjoin(folder, 'HARDI193.bvec')
    md5_dict = {'data': '0b735e8f16695a37bfbd66aab136eb66',
                'bval': 'e9b9bb56252503ea49d31fb30a0ac637',
                'bvec': '0c83f7e8b917cd677ad58a078658ebb7'}
    # Verify the integrity of every downloaded file before use:
    check_md5(fraw, md5_dict['data'])
    check_md5(fbval, md5_dict['bval'])
    check_md5(fbvec, md5_dict['bvec'])
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs)
    img = nib.load(fraw)
    return img, gtab
def read_isbi2013_2shell():
    """ Load ISBI 2013 2-shell synthetic dataset

    Returns
    -------
    img : obj, Nifti1Image
    gtab : obj, GradientTable
    """
    folder = pjoin(dipy_home, 'isbi2013')
    fraw = pjoin(folder, 'phantom64.nii.gz')
    fbval = pjoin(folder, 'phantom64.bval')
    fbvec = pjoin(folder, 'phantom64.bvec')
    md5_dict = {'data': '42911a70f232321cf246315192d69c42',
                'bval': '90e8cf66e0f4d9737a3b3c0da24df5ea',
                'bvec': '4b7aa2757a1ccab140667b76e8075cb1'}
    # Verify the integrity of every downloaded file before use:
    check_md5(fraw, md5_dict['data'])
    check_md5(fbval, md5_dict['bval'])
    check_md5(fbvec, md5_dict['bvec'])
    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs)
    img = nib.load(fraw)
    return img, gtab
def test_GradientTable():
    """GradientTable derives bvals/bvecs from raw gradients and validates input."""
    gradients = np.array([[0, 0, 0],
                          [1, 0, 0],
                          [0, 0, 1],
                          [3, 4, 0],
                          [5, 0, 12]], 'float')
    expected_bvals = np.array([0, 1, 1, 5, 13])
    expected_b0s_mask = expected_bvals == 0
    # Unit vectors; the mask term avoids division by zero on b0 rows:
    expected_bvecs = gradients / (expected_bvals + expected_b0s_mask)[:, None]

    gt = GradientTable(gradients, b0_threshold=0)
    npt.assert_array_almost_equal(gt.bvals, expected_bvals)
    npt.assert_array_equal(gt.b0s_mask, expected_b0s_mask)
    npt.assert_array_almost_equal(gt.bvecs, expected_bvecs)
    npt.assert_array_almost_equal(gt.gradients, gradients)

    gt = GradientTable(gradients, b0_threshold=1)
    npt.assert_array_equal(gt.b0s_mask, [1, 1, 1, 0, 0])
    npt.assert_array_equal(gt.bvals, expected_bvals)
    npt.assert_array_equal(gt.bvecs, expected_bvecs)

    # checks negative values in gtab
    npt.assert_raises(ValueError, GradientTable, -1)
    npt.assert_raises(ValueError, GradientTable, np.ones((6, 2)))
    npt.assert_raises(ValueError, GradientTable, np.ones((6,)))

    # A mismatched b0_threshold should emit exactly one warning:
    with warnings.catch_warnings(record=True) as w:
        bad_gt = gradient_table(expected_bvals, expected_bvecs,
                                b0_threshold=200)
        assert len(w) == 1
def test_sfm():
    """Sparse fascicle model: single-voxel, masked, and full-volume fits agree."""
    fdata, fbvals, fbvecs = dpd.get_data()
    data = nib.load(fdata).get_data()
    gtab = grad.gradient_table(fbvals, fbvecs)
    sfmodel = sfm.SparseFascicleModel(gtab)

    # Single-voxel fit:
    sffit1 = sfmodel.fit(data[0, 0, 0])
    sphere = dpd.get_sphere("symmetric642")
    odf1 = sffit1.odf(sphere)
    pred1 = sffit1.predict(gtab)

    # Whole-volume fit with an all-ones mask, then without a mask:
    mask = np.ones(data.shape[:-1])
    sffit2 = sfmodel.fit(data, mask)
    pred2 = sffit2.predict(gtab)
    odf2 = sffit2.odf(sphere)
    sffit3 = sfmodel.fit(data)
    pred3 = sffit3.predict(gtab)
    odf3 = sffit3.odf(sphere)

    npt.assert_almost_equal(pred3, pred2, decimal=2)
    npt.assert_almost_equal(pred3[0, 0, 0], pred1, decimal=2)
    npt.assert_almost_equal(odf3[0, 0, 0], odf1, decimal=2)
    npt.assert_almost_equal(odf3[0, 0, 0], odf2[0, 0, 0], decimal=2)

    # Fit zeros and you will get back zeros
    npt.assert_almost_equal(
        sfmodel.fit(np.zeros(data[0, 0, 0].shape)).beta,
        np.zeros(sfmodel.design_matrix[0].shape[-1]))
def test_nnls_jacobian_fucn():
    """The analytical NLLS Jacobian matches a finite-difference approximation."""
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]
    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    # Design Matrix
    X = dti.design_matrix(gtab)
    # Signals
    Y = np.exp(np.dot(X, D))

    # Test Jacobian at D
    analytical = dti._nlls_jacobian_func(D, X, Y)
    for i in range(len(X)):
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, X[i], Y[i])
        assert_true(np.allclose(approx, analytical[i]))

    # Test Jacobian at zero
    D = np.zeros_like(D)
    analytical = dti._nlls_jacobian_func(D, X, Y)
    for i in range(len(X)):
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, X[i], Y[i])
        assert_true(np.allclose(approx, analytical[i]))
def generate(self, out_path, aux, idx_in, idx_out):
    """Precompute and save the rotated response-function atoms for this model.

    One kernel file is written per compartment: a stick, one zeppelin per
    ICVF value, and one isotropic ball per d_ISO value.
    """
    scheme_high = amico.lut.create_high_resolution_scheme(self.scheme,
                                                          b_scale=1)
    gtab = gradient_table(scheme_high.b, scheme_high.raw[:, 0:3])
    nATOMS = 1 + len(self.ICVFs) + len(self.d_ISOs)
    progress = ProgressBar(n=nATOMS, prefix=" ", erase=True)

    # Stick
    signal = single_tensor(gtab, evals=[0, 0, self.d_par])
    lm = amico.lut.rotate_kernel(signal, aux, idx_in, idx_out, False)
    np.save(pjoin(out_path, 'A_001.npy'), lm)
    progress.update()

    # Zeppelin(s): perpendicular diffusivity scaled by (1 - ICVF)
    for d in [self.d_par * (1.0 - ICVF) for ICVF in self.ICVFs]:
        signal = single_tensor(gtab, evals=[d, d, self.d_par])
        lm = amico.lut.rotate_kernel(signal, aux, idx_in, idx_out, False)
        np.save(pjoin(out_path, 'A_%03d.npy' % progress.i), lm)
        progress.update()

    # Ball(s): isotropic compartments
    for d in self.d_ISOs:
        signal = single_tensor(gtab, evals=[d, d, d])
        lm = amico.lut.rotate_kernel(signal, aux, idx_in, idx_out, True)
        np.save(pjoin(out_path, 'A_%03d.npy' % progress.i), lm)
        progress.update()
def readDataset(niifilename, niiBrainMaskFilename, btablefilename,
                parcellationfilename=None):
    """Read a diffusion dataset, its brain mask, b-table and optional
    parcellation.

    Parameters
    ----------
    niifilename : str
        Path to the (masked) diffusion-weighted NIfTI image.
    niiBrainMaskFilename : str
        Path to the brain-mask NIfTI image.
    btablefilename : str
        Path to the b-table file read by ``readbtable``.
    parcellationfilename : str or None, optional
        Path to a NRRD parcellation; if None, no parcellation is returned.

    Returns
    -------
    diffusionData, mask, affine, gtable, parcellation
    """
    # load the masked diffusion dataset
    diffusionData = nib.load(niifilename).get_data()
    affine = nib.load(niifilename).get_affine()
    # load the brain mask
    mask = nib.load(niiBrainMaskFilename).get_data()
    rows, cols, nSlices, nDirections = diffusionData.shape
    bvals, bvecs = readbtable(btablefilename)
    gtable = gradient_table(bvals, bvecs)
    # BUG FIX: compare with None using ``is not None`` (identity), not ``!=``.
    if parcellationfilename is not None:
        # parcellation = nib.load(parcellationfilename).get_data()
        parcellation, _ = nrrd.read(parcellationfilename)
        if parcellation.shape[2] != nSlices:
            # for the second phantom (unc_res): keep the trailing slices
            parcellation = parcellation[:, :,
                                        parcellation.shape[2] - nSlices:]
        parcellation = np.squeeze(parcellation)
    else:
        parcellation = None
    return diffusionData, mask, affine, gtable, parcellation
def test_diffusivities():
    """Scalar tensor measures match hand-computed values for a known tensor."""
    psphere = get_sphere('symmetric362')
    bvecs = np.concatenate(([[0, 0, 0]], psphere.vertices))
    bvals = np.zeros(len(bvecs)) + 1000
    bvals[0] = 0
    gtab = grad.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0001],
                       [0.0015, 0.0003, 0.0003]))
    mevecs = [np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]),
              np.array([[0, 0, 1], [0, 1, 0], [1, 0, 0]])]
    S = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)

    dm = dti.TensorModel(gtab, 'LS')
    dmfit = dm.fit(S)
    md = mean_diffusivity(dmfit.evals)
    Trace = trace(dmfit.evals)
    rd = radial_diffusivity(dmfit.evals)
    ad = axial_diffusivity(dmfit.evals)
    lin = linearity(dmfit.evals)
    plan = planarity(dmfit.evals)
    spher = sphericity(dmfit.evals)

    # Hand-computed values from the simulated eigenvalues:
    assert_almost_equal(md, (0.0015 + 0.0003 + 0.0001) / 3)
    assert_almost_equal(Trace, (0.0015 + 0.0003 + 0.0001))
    assert_almost_equal(ad, 0.0015)
    assert_almost_equal(rd, (0.0003 + 0.0001) / 2)
    assert_almost_equal(lin, (0.0015 - 0.0003) / Trace)
    assert_almost_equal(plan, 2 * (0.0003 - 0.0001) / Trace)
    assert_almost_equal(spher, (3 * 0.0001) / Trace)
def test_csd_superres():
    """ Check the quality of csdfit with high SH order. """
    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)

    # img, gtab = read_stanford_hardi()
    evals = np.array([[1.5, .3, .3]]) * [[1.], [1.]] / 1000.
    S, sticks = multi_tensor(gtab, evals, snr=None, fractions=[55., 45.])

    model16 = ConstrainedSphericalDeconvModel(gtab, (evals[0], 3.),
                                              sh_order=16)
    fit16 = model16.fit(S)

    # print local_maxima(fit16.odf(default_sphere), default_sphere.edges)
    d, v, ind = peak_directions(fit16.odf(default_sphere), default_sphere,
                                relative_peak_threshold=.2,
                                min_separation_angle=0)
    # Check that there are two peaks
    assert_equal(len(d), 2)
    # Check that peaks line up with sticks
    cos_sim = abs((d * sticks).sum(1)) ** .5
    assert_(all(cos_sim > .99))
def _run_interface(self, runtime):
    """Fit a RESTORE tensor model to the input DWI and save an FA map.

    Reads ``self.inputs.in_file``/``bval``/``bvec``, estimates the noise
    sigma, fits the tensor with the RESTORE method, and writes
    ``<base>_FA.nii`` next to the working directory.
    """
    import dipy.reconst.dti as dti
    import dipy.denoise.noise_estimate as ne
    from dipy.core.gradients import gradient_table
    from nipype.utils.filemanip import split_filename
    import nibabel as nib
    fname = self.inputs.in_file
    img = nib.load(fname)
    data = img.get_data()
    affine = img.get_affine()
    bvals = self.inputs.bval
    bvecs = self.inputs.bvec
    gtab = gradient_table(bvals, bvecs)
    sigma = ne.estimate_sigma(data)
    # BUG FIX: the original rebound the name ``dti`` to the model instance,
    # shadowing the module alias imported above; use a distinct local name.
    tensor_model = dti.TensorModel(gtab, fit_method='RESTORE', sigma=sigma)
    dtifit = tensor_model.fit(data)
    fa = dtifit.fa
    _, base, _ = split_filename(fname)
    nib.save(nib.Nifti1Image(fa, affine), base + '_FA.nii')
    return runtime
def test_response_from_mask():
    """response_from_mask agrees with auto_response over a range of FA
    thresholds."""
    fdata, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    data = nib.load(fdata).get_data()
    gtab = gradient_table(bvals, bvecs)
    ten = TensorModel(gtab)
    tenfit = ten.fit(data)
    FA = fractional_anisotropy(tenfit.evals)
    FA[np.isnan(FA)] = 0
    radius = 3
    for fa_thr in np.arange(0, 1, 0.1):
        response_auto, ratio_auto, nvoxels = auto_response(
            gtab, data, roi_center=None, roi_radius=radius,
            fa_thr=fa_thr, return_number_of_voxels=True)
        # BUG FIX: use floor division so the ROI-center indices are ints;
        # float slice bounds raise an error on Python 3.
        ci, cj, ck = np.array(data.shape[:3]) // 2
        # Reconstruct the same ROI mask auto_response uses internally:
        mask = np.zeros(data.shape[:3])
        mask[ci - radius: ci + radius,
             cj - radius: cj + radius,
             ck - radius: ck + radius] = 1
        mask[FA <= fa_thr] = 0
        response_mask, ratio_mask = response_from_mask(gtab, data, mask)
        assert_equal(int(np.sum(mask)), nvoxels)
        assert_array_almost_equal(response_mask[0], response_auto[0])
        assert_almost_equal(response_mask[1], response_auto[1])
        assert_almost_equal(ratio_mask, ratio_auto)
def test_sphere_scaling_csdmodel():
    """Check that mirroring regularization sphere does not change the result
    of the model"""
    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]
    S, sticks = multi_tensor(gtab, mevals, 100., angles=angles,
                             fractions=[50, 50], snr=None)

    # Same model, regularized on a hemisphere vs. its mirrored full sphere:
    hemi = small_sphere
    sphere = hemi.mirror()
    response = (np.array([0.0015, 0.0003, 0.0003]), 100)
    model_full = ConstrainedSphericalDeconvModel(gtab, response,
                                                 reg_sphere=sphere)
    model_hemi = ConstrainedSphericalDeconvModel(gtab, response,
                                                 reg_sphere=hemi)
    csd_fit_full = model_full.fit(S)
    csd_fit_hemi = model_hemi.fit(S)
    assert_array_almost_equal(csd_fit_full.shm_coeff, csd_fit_hemi.shm_coeff)
def _generate_gradients(ndirs=64, values=None, nb0s=1):
    """
    Automatically generate a `gradient table
    <http://nipy.org/dipy/examples_built/gradients_spheres.html#example-gradients-spheres>`_

    Parameters
    ----------
    ndirs : int
        Number of gradient directions per shell.
    values : sequence of int or None
        b-value of each shell; defaults to ``[1000, 3000]``.
    nb0s : int
        Number of b0 acquisitions to prepend.
    """
    import numpy as np
    from dipy.core.sphere import (disperse_charges, Sphere, HemiSphere)
    from dipy.core.gradients import gradient_table

    # BUG FIX: avoid a mutable default argument; None stands in for the
    # historical default of [1000, 3000].
    if values is None:
        values = [1000, 3000]

    # Evenly disperse random points on a hemisphere:
    theta = np.pi * np.random.rand(ndirs)
    phi = 2 * np.pi * np.random.rand(ndirs)
    hsph_initial = HemiSphere(theta=theta, phi=phi)
    hsph_updated, potential = disperse_charges(hsph_initial, 5000)

    values = np.atleast_1d(values).tolist()
    vertices = hsph_updated.vertices
    bvecs = vertices.copy()
    bvals = np.ones(vertices.shape[0]) * values[0]
    # Repeat the same directions for each additional shell:
    for v in values[1:]:
        bvecs = np.vstack((bvecs, vertices))
        bvals = np.hstack((bvals, v * np.ones(vertices.shape[0])))

    # Prepend the requested number of b0 acquisitions:
    for i in range(0, nb0s):
        bvals = bvals.tolist()
        bvals.insert(0, 0)
        bvecs = bvecs.tolist()
        bvecs.insert(0, np.zeros(3))

    return gradient_table(bvals, bvecs)
def test_eudx_bad_seed():
    """Test passing a bad seed to eudx"""
    fimg, fbvals, fbvecs = get_data('small_101D')
    img = ni.load(fimg)
    affine = img.get_affine()
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    ind = quantize_evecs(ten.evecs)

    # A seed far outside the volume:
    seed = [1000000., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed], a_low=.2)
    try:
        track = list(eu)
    except ValueError as ve:
        if ve.args[0] == 'Seed outside boundaries':
            print(ve)
    print(data.shape)

    # A seed inside the volume tracks without error:
    seed = [1., 5., 8.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed], a_low=.2)
    track = list(eu)

    # A negative coordinate is also out of bounds:
    seed = [-1., 1000000., 1000000.]
    eu = EuDX(a=ten.fa, ind=ind, seeds=[seed], a_low=.2)
    try:
        track = list(eu)
    except ValueError as ve:
        if ve.args[0] == 'Seed outside boundaries':
            print(ve)
def test_eudx_further():
    """ Cause we love testin.. ;-) """
    fimg, fbvals, fbvecs = get_data('small_101D')
    img = ni.load(fimg)
    affine = img.get_affine()
    data = img.get_data()
    gtab = gradient_table(fbvals, fbvecs)
    tensor_model = TensorModel(gtab)
    ten = tensor_model.fit(data)
    x, y, z = data.shape[:3]

    # 10**4 random seed points inside the volume:
    seeds = np.zeros((10 ** 4, 3))
    for i in range(10 ** 4):
        rx = (x - 1) * np.random.rand()
        ry = (y - 1) * np.random.rand()
        rz = (z - 1) * np.random.rand()
        seeds[i] = np.ascontiguousarray(np.array([rx, ry, rz]),
                                        dtype=np.float64)

    ind = quantize_evecs(ten.evecs)
    eu = EuDX(a=ten.fa, ind=ind, seeds=seeds, a_low=.2)
    T = [e for e in eu]
    # check that there are no negative elements
    for t in T:
        assert_equal(np.sum(t.ravel() < 0), 0)
def test_restore():
    """ Test the implementation of the RESTORE algorithm """
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_data('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B
    evals = np.array([2., 1., 0.]) / B
    md = evals.mean()
    tensor = from_lower_triangular(D)

    # Design matrix and noise-free signals:
    X = dti.design_matrix(gtab)
    Y = np.exp(np.dot(X, D))
    Y.shape = (-1,) + Y.shape

    # RESTORE estimates should be robust to dropping any single direction:
    for drop_this in range(1, Y.shape[-1]):
        this_y = Y.copy()
        this_y[:, drop_this] = 1.0
        tensor_model = dti.TensorModel(gtab, fit_method='restore',
                                       sigma=67.0)
        tensor_est = tensor_model.fit(this_y)
        assert_array_almost_equal(tensor_est.evals[0], evals, decimal=3)
        assert_array_almost_equal(tensor_est.quadratic_form[0], tensor,
                                  decimal=3)
import numpy as np import random from numpy.testing import (assert_array_almost_equal, assert_raises, assert_almost_equal, assert_) from dipy.sims.voxel import (single_tensor, multi_tensor_dki) from dipy.io.gradients import read_bvals_bvecs from dipy.core.gradients import (gradient_table, unique_bvals_magnitude, round_bvals) from dipy.data import get_fnames import dipy.reconst.msdki as msdki from dipy.reconst.msdki import (msk_from_awf, awf_from_msk) fimg, fbvals, fbvecs = get_fnames('small_64D') bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs) bvals = round_bvals(bvals) gtab = gradient_table(bvals, bvecs) # 2 shells for techniques that requires multishell data bvals_3s = np.concatenate((bvals, bvals * 1.5, bvals * 2), axis=0) bvecs_3s = np.concatenate((bvecs, bvecs, bvecs), axis=0) gtab_3s = gradient_table(bvals_3s, bvecs_3s) # Simulation 1. Spherical kurtosis tensor - MSK and MSD from the MSDKI model # should be equal to the MK and MD of the DKI tensor for cases of # spherical kurtosis tensors Di = 0.00099 De = 0.00226 mevals_sph = np.array([[Di, Di, Di], [De, De, De]]) f = 0.5 frac_sph = [f * 100, (1.0 - f) * 100] signal_sph, dt_sph, kt_sph = multi_tensor_dki(gtab_3s,
# Build the bundle definitions: each bundle is delimited by two waypoint ROIs.
bundles = {}
for name in bundle_names:
    for hemi in ['_R', '_L']:
        bundles[name + hemi] = {
            'ROIs': [templates[name + '_roi1' + hemi],
                     # BUG FIX: the second waypoint must be roi2 -- the
                     # original indexed roi1 twice, making both ROIs identical.
                     templates[name + '_roi2' + hemi]],
            'rules': [True, True]}

print("Registering to template...")
MNI_T2_img = dpd.read_mni_template()
# Reuse a cached SyN mapping when available; otherwise compute and save one.
if not op.exists('mapping.nii.gz'):
    import dipy.core.gradients as dpg
    gtab = dpg.gradient_table(hardi_fbval, hardi_fbvec)
    mapping = reg.syn_register_dwi(hardi_fdata, gtab)
    reg.write_mapping(mapping, './mapping.nii.gz')
else:
    mapping = reg.read_mapping('./mapping.nii.gz', img, MNI_T2_img)

print("Segmenting fiber groups...")
fiber_groups = seg.segment(hardi_fdata, hardi_fbval, hardi_fbvec,
                           streamlines, bundles,
                           reg_template=MNI_T2_img, mapping=mapping,
                           as_generator=False, affine=img.affine)