def test_ascm_accuracy():
    test_ascm_data_ref = nib.load(dpd.get_fnames("ascm_test")).get_data()
    test_data = nib.load(dpd.get_fnames("aniso_vox")).get_data()

    # the test data was constructed in this manner
    mask = test_data > 50
    sigma = estimate_sigma(test_data, N=4)

    den_small = non_local_means(
        test_data, sigma=sigma, mask=mask,
        patch_radius=1, block_radius=1, rician=True)

    den_large = non_local_means(
        test_data, sigma=sigma, mask=mask,
        patch_radius=2, block_radius=1, rician=True)

    S0n = np.array(adaptive_soft_matching(test_data, den_small,
                                          den_large, sigma[0]))

    assert_array_almost_equal(S0n, test_ascm_data_ref)
def test_all_zeros():
    bvecs, bvals = read_bvec_file(get_fnames('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    fit_methods = ['LS', 'OLS', 'NNLS', 'RESTORE']
    for _ in fit_methods:
        dm = dti.TensorModel(gtab)
        npt.assert_array_almost_equal(dm.fit(np.zeros(bvals.shape[0])).evals,
                                      0)
def test_exponential_iso():
    fdata, fbvals, fbvecs = dpd.get_fnames()
    data_dti = nib.load(fdata).get_data()
    gtab_dti = grad.gradient_table(fbvals, fbvecs)
    data_multi, gtab_multi = dpd.dsi_deconv_voxels()

    for data, gtab in zip([data_dti, data_multi], [gtab_dti, gtab_multi]):
        sfmodel = sfm.SparseFascicleModel(
            gtab, isotropic=sfm.ExponentialIsotropicModel)

        sffit1 = sfmodel.fit(data[0, 0, 0])
        sphere = dpd.get_sphere()
        sffit1.odf(sphere)
        sffit1.predict(gtab)

        SNR = 1000
        S0 = 100
        mevals = np.array(([0.0015, 0.0005, 0.0005],
                           [0.0015, 0.0005, 0.0005]))
        angles = [(0, 0), (60, 0)]
        S, sticks = sims.multi_tensor(gtab, mevals, S0, angles=angles,
                                      fractions=[50, 50], snr=SNR)
        sffit = sfmodel.fit(S)
        pred = sffit.predict()
        npt.assert_(xval.coeff_of_determination(pred, S) > 96)
def test_nnls_jacobian_func():
    b0 = 1000.
    bvecs, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    gtab = grad.gradient_table(bval, bvecs)
    B = bval[1]

    # Scale the eigenvalues and tensor by the B value so the units match
    D = np.array([1., 1., 1., 0., 0., 1., -np.log(b0) * B]) / B

    # Design Matrix
    X = dti.design_matrix(gtab)

    # Signals
    Y = np.exp(np.dot(X, D))

    # Test Jacobian at D
    args = [X, Y]
    analytical = dti._nlls_jacobian_func(D, *args)
    for i in range(len(X)):
        args = [X[i], Y[i]]
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)
        assert np.allclose(approx, analytical[i])

    # Test Jacobian at zero
    D = np.zeros_like(D)
    args = [X, Y]
    analytical = dti._nlls_jacobian_func(D, *args)
    for i in range(len(X)):
        args = [X[i], Y[i]]
        approx = opt.approx_fprime(D, dti._nlls_err_func, 1e-8, *args)
        assert np.allclose(approx, analytical[i])
def test_masked_array_with_tensor():
    data = np.ones((2, 4, 56))
    mask = np.array([[True, False, False, True],
                     [True, False, True, False]])

    bvec, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)

    tensor_model = TensorModel(gtab)
    tensor = tensor_model.fit(data, mask=mask)
    npt.assert_equal(tensor.shape, (2, 4))
    npt.assert_equal(tensor.fa.shape, (2, 4))
    npt.assert_equal(tensor.evals.shape, (2, 4, 3))
    npt.assert_equal(tensor.evecs.shape, (2, 4, 3, 3))

    tensor = tensor[0]
    npt.assert_equal(tensor.shape, (4,))
    npt.assert_equal(tensor.fa.shape, (4,))
    npt.assert_equal(tensor.evals.shape, (4, 3))
    npt.assert_equal(tensor.evecs.shape, (4, 3, 3))

    tensor = tensor[0]
    npt.assert_equal(tensor.shape, tuple())
    npt.assert_equal(tensor.fa.shape, tuple())
    npt.assert_equal(tensor.evals.shape, (3,))
    npt.assert_equal(tensor.evecs.shape, (3, 3))
    npt.assert_equal(type(tensor.model_params), np.ndarray)
def test_median_otsu():
    fname = get_fnames('S0_10')
    img = nib.load(fname)
    data = img.get_data()
    data = np.squeeze(data.astype('f8'))
    dummy_mask = data > data.mean()
    data_masked, mask = median_otsu(data, median_radius=3, numpass=2,
                                    autocrop=False, vol_idx=None,
                                    dilate=None)
    assert_equal(mask.sum() < dummy_mask.sum(), True)
    data2 = np.zeros(data.shape + (2,))
    data2[..., 0] = data
    data2[..., 1] = data
    data2_masked, mask2 = median_otsu(data2, median_radius=3, numpass=2,
                                      autocrop=False, vol_idx=[0, 1],
                                      dilate=None)
    assert_almost_equal(mask.sum(), mask2.sum())
    _, mask3 = median_otsu(data2, median_radius=3, numpass=2,
                           autocrop=False, vol_idx=[0, 1], dilate=1)
    assert_equal(mask2.sum() < mask3.sum(), True)
    _, mask4 = median_otsu(data2, median_radius=3, numpass=2,
                           autocrop=False, vol_idx=[0, 1], dilate=2)
    assert_equal(mask3.sum() < mask4.sum(), True)
def test_slr_flow():
    with TemporaryDirectory() as out_dir:
        data_path = get_fnames('fornix')
        streams, hdr = nib.trackvis.read(data_path)
        fornix = [s[0] for s in streams]

        f = Streamlines(fornix)
        f1 = f.copy()

        f1_path = pjoin(out_dir, "f1.trk")
        save_trk(f1_path, Streamlines(f1), affine=np.eye(4))

        f2 = f1.copy()
        f2._data += np.array([50, 0, 0])

        f2_path = pjoin(out_dir, "f2.trk")
        save_trk(f2_path, Streamlines(f2), affine=np.eye(4))

        slr_flow = SlrWithQbxFlow(force=True)
        slr_flow.run(f1_path, f2_path)

        out_path = slr_flow.last_generated_outputs['out_moved']
        npt.assert_equal(os.path.isfile(out_path), True)
def test_btable_prepare():
    sq2 = np.sqrt(2) / 2.
    bvals = 1500 * np.ones(7)
    bvals[0] = 0
    bvecs = np.array([[0, 0, 0],
                      [1, 0, 0],
                      [0, 1, 0],
                      [0, 0, 1],
                      [sq2, sq2, 0],
                      [sq2, 0, sq2],
                      [0, sq2, sq2]])
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    # bt.info
    fimg, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    bvecs = np.where(np.isnan(bvecs), 0, bvecs)
    bt = gradient_table(bvals, bvecs)
    npt.assert_array_equal(bt.bvecs, bvecs)
    bt2 = gradient_table(bvals, bvecs.T)
    npt.assert_array_equal(bt2.bvecs, bvecs)
    btab = np.concatenate((bvals[:, None], bvecs), axis=1)
    bt3 = gradient_table(btab)
    npt.assert_array_equal(bt3.bvecs, bvecs)
    npt.assert_array_equal(bt3.bvals, bvals)
    bt4 = gradient_table(btab.T)
    npt.assert_array_equal(bt4.bvecs, bvecs)
    npt.assert_array_equal(bt4.bvals, bvals)
    # Test for proper inputs (expects either bvals/bvecs or 4 by n):
    npt.assert_raises(ValueError, gradient_table, bvecs)
def test_force_overwrite():
    with TemporaryDirectory() as out_dir:
        data_path, _, _ = get_fnames('small_25')
        mo_flow = MedianOtsuFlow(output_strategy='absolute')

        # Generate the first results
        mo_flow.run(data_path, out_dir=out_dir)
        mask_file = mo_flow.last_generated_outputs['out_mask']
        first_time = os.path.getmtime(mask_file)

        # re-run with no force overwrite, modified time should not change
        mo_flow.run(data_path, out_dir=out_dir)
        mask_file = mo_flow.last_generated_outputs['out_mask']
        second_time = os.path.getmtime(mask_file)
        assert first_time == second_time

        # re-run with force overwrite, modified time should change
        mo_flow = MedianOtsuFlow(output_strategy='absolute', force=True)
        # Make sure that at least one second elapsed, so that time-stamp is
        # different (sometimes measured in whole seconds)
        time.sleep(1)
        mo_flow.run(data_path, out_dir=out_dir)
        mask_file = mo_flow.last_generated_outputs['out_mask']
        third_time = os.path.getmtime(mask_file)
        assert third_time != second_time
def test_multi_tensor():
    sphere = get_sphere('symmetric724')
    # vertices = sphere.vertices
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    e0 = np.array([np.sqrt(2) / 2., np.sqrt(2) / 2., 0])
    e1 = np.array([0, np.sqrt(2) / 2., np.sqrt(2) / 2.])
    mevecs = [all_tensor_evecs(e0), all_tensor_evecs(e1)]
    # odf = multi_tensor_odf(vertices, [0.5, 0.5], mevals, mevecs)
    # assert_(odf.shape == (len(vertices),))
    # assert_(np.all(odf <= 1) & np.all(odf >= 0))

    fimg, fbvals, fbvecs = get_fnames('small_101D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)

    s1 = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    s2 = single_tensor(gtab, 100, mevals[1], mevecs[1], snr=None)

    Ssingle = 0.5 * s1 + 0.5 * s2

    S, sticks = MultiTensor(gtab, mevals, S0=100,
                            angles=[(90, 45), (45, 90)],
                            fractions=[50, 50], snr=None)

    assert_array_almost_equal(S, Ssingle)
def test_sfm():
    fdata, fbvals, fbvecs = dpd.get_fnames()
    data = nib.load(fdata).get_data()
    gtab = grad.gradient_table(fbvals, fbvecs)
    for iso in [sfm.ExponentialIsotropicModel, None]:
        sfmodel = sfm.SparseFascicleModel(gtab, isotropic=iso)
        sffit1 = sfmodel.fit(data[0, 0, 0])
        sphere = dpd.get_sphere()
        odf1 = sffit1.odf(sphere)
        pred1 = sffit1.predict(gtab)
        mask = np.ones(data.shape[:-1])
        sffit2 = sfmodel.fit(data, mask)
        pred2 = sffit2.predict(gtab)
        odf2 = sffit2.odf(sphere)
        sffit3 = sfmodel.fit(data)
        pred3 = sffit3.predict(gtab)
        odf3 = sffit3.odf(sphere)
        npt.assert_almost_equal(pred3, pred2, decimal=2)
        npt.assert_almost_equal(pred3[0, 0, 0], pred1, decimal=2)
        npt.assert_almost_equal(odf3[0, 0, 0], odf1, decimal=2)
        npt.assert_almost_equal(odf3[0, 0, 0], odf2[0, 0, 0], decimal=2)

        # Fit zeros and you will get back zeros
        npt.assert_almost_equal(
            sfmodel.fit(np.zeros(data[0, 0, 0].shape)).beta,
            np.zeros(sfmodel.design_matrix[0].shape[-1]))
def test_median_otsu_flow():
    with TemporaryDirectory() as out_dir:
        data_path, _, _ = get_fnames('small_25')
        volume = nib.load(data_path).get_data()
        save_masked = True
        median_radius = 3
        numpass = 3
        autocrop = False
        vol_idx = [0]
        dilate = 0

        mo_flow = MedianOtsuFlow()
        mo_flow.run(data_path, out_dir=out_dir, save_masked=save_masked,
                    median_radius=median_radius, numpass=numpass,
                    autocrop=autocrop, vol_idx=vol_idx, dilate=dilate)

        mask_name = mo_flow.last_generated_outputs['out_mask']
        masked_name = mo_flow.last_generated_outputs['out_masked']

        masked, mask = median_otsu(volume, vol_idx=vol_idx,
                                   median_radius=median_radius,
                                   numpass=numpass, autocrop=autocrop,
                                   dilate=dilate)

        result_mask_data = nib.load(join(out_dir, mask_name)).get_data()
        npt.assert_array_equal(result_mask_data, mask)

        result_masked_data = nib.load(join(out_dir, masked_name)).get_data()
        npt.assert_array_equal(result_masked_data, masked)
def reconst_flow_core(flow, extra_args=None):
    # extra_args defaults to None instead of a mutable [] default
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_fnames('small_25')
        vol_img = nib.load(data_path)
        volume = vol_img.get_data()
        mask = np.ones_like(volume[:, :, :, 0])
        mask_img = nib.Nifti1Image(mask.astype(np.uint8), vol_img.affine)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        nib.save(mask_img, mask_path)

        dti_flow = flow()

        args = [data_path, bval_path, bvec_path, mask_path]
        args.extend(extra_args or [])

        dti_flow.run(*args, out_dir=out_dir)

        fa_path = dti_flow.last_generated_outputs['out_fa']
        fa_data = nib.load(fa_path).get_data()
        assert_equal(fa_data.shape, volume.shape[:-1])

        tensor_path = dti_flow.last_generated_outputs['out_tensor']
        tensor_data = nib.load(tensor_path)
        assert_equal(tensor_data.shape[-1], 6)
        assert_equal(tensor_data.shape[:-1], volume.shape[:-1])

        ga_path = dti_flow.last_generated_outputs['out_ga']
        ga_data = nib.load(ga_path).get_data()
        assert_equal(ga_data.shape, volume.shape[:-1])

        rgb_path = dti_flow.last_generated_outputs['out_rgb']
        rgb_data = nib.load(rgb_path)
        assert_equal(rgb_data.shape[-1], 3)
        assert_equal(rgb_data.shape[:-1], volume.shape[:-1])

        md_path = dti_flow.last_generated_outputs['out_md']
        md_data = nib.load(md_path).get_data()
        assert_equal(md_data.shape, volume.shape[:-1])

        ad_path = dti_flow.last_generated_outputs['out_ad']
        ad_data = nib.load(ad_path).get_data()
        assert_equal(ad_data.shape, volume.shape[:-1])

        rd_path = dti_flow.last_generated_outputs['out_rd']
        rd_data = nib.load(rd_path).get_data()
        assert_equal(rd_data.shape, volume.shape[:-1])

        mode_path = dti_flow.last_generated_outputs['out_mode']
        mode_data = nib.load(mode_path).get_data()
        assert_equal(mode_data.shape, volume.shape[:-1])

        evecs_path = dti_flow.last_generated_outputs['out_evec']
        evecs_data = nib.load(evecs_path).get_data()
        assert_equal(evecs_data.shape[-2:], tuple((3, 3)))
        assert_equal(evecs_data.shape[:-2], volume.shape[:-1])

        evals_path = dti_flow.last_generated_outputs['out_eval']
        evals_data = nib.load(evals_path).get_data()
        assert_equal(evals_data.shape[-1], 3)
        assert_equal(evals_data.shape[:-1], volume.shape[:-1])
def test_qbundles():
    streams, hdr = nib.trackvis.read(get_fnames('fornix'))
    T = [s[0] for s in streams]
    qb = QuickBundles(T, 10., 12)
    qb.virtuals()
    qb.exemplars()
    assert_equal(4, qb.total_clusters)
def test_bundle_analysis_population_flow():
    with TemporaryDirectory() as dirpath:
        streams, hdr = nib.trackvis.read(get_fnames('fornix'))
        fornix = [s[0] for s in streams]
        f = Streamlines(fornix)

        mb = os.path.join(dirpath, "model_bundles")
        sub = os.path.join(dirpath, "subjects")

        os.mkdir(mb)
        save_trk(os.path.join(mb, "temp.trk"), f, affine=np.eye(4))

        os.mkdir(sub)
        os.mkdir(os.path.join(sub, "patient"))
        os.mkdir(os.path.join(sub, "control"))

        p = os.path.join(sub, "patient", "10001")
        os.mkdir(p)
        c = os.path.join(sub, "control", "20002")
        os.mkdir(c)

        for pre in [p, c]:
            os.mkdir(os.path.join(pre, "rec_bundles"))
            save_trk(os.path.join(pre, "rec_bundles", "temp.trk"), f,
                     affine=np.eye(4))
            os.mkdir(os.path.join(pre, "org_bundles"))
            save_trk(os.path.join(pre, "org_bundles", "temp.trk"), f,
                     affine=np.eye(4))
            os.mkdir(os.path.join(pre, "measures"))
            fa = np.random.rand(255, 255, 255)
            save_nifti(os.path.join(pre, "measures", "fa.nii.gz"),
                       fa, affine=np.eye(4))

        out_dir = os.path.join(dirpath, "output")
        os.mkdir(out_dir)

        ba_flow = BundleAnalysisPopulationFlow()
        ba_flow.run(mb, sub, out_dir=out_dir)

        assert_true(os.path.exists(os.path.join(out_dir, 'fa.h5')))

        dft = pd.read_hdf(os.path.join(out_dir, 'fa.h5'))

        assert_true(dft.bundle.unique() == "temp")
        assert_true(set(dft.subject.unique()) == set(['10001', '20002']))
def get_streamlines():
    from nibabel import trackvis as tv
    from dipy.data import get_fnames
    fname = get_fnames('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [i[0] for i in streams]
    return streamlines
def test_sfm_background():
    fdata, fbvals, fbvecs = dpd.get_fnames()
    data = nib.load(fdata).get_data()
    gtab = grad.gradient_table(fbvals, fbvecs)
    to_fit = data[0, 0, 0]
    to_fit[gtab.b0s_mask] = 0
    sfmodel = sfm.SparseFascicleModel(gtab, solver='NNLS')
    sffit = sfmodel.fit(to_fit)
    npt.assert_equal(sffit.beta, np.zeros_like(sffit.beta))
def test_all_constant():
    bvecs, bvals = read_bvec_file(get_fnames('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bvals, bvecs.T)
    fit_methods = ['LS', 'OLS', 'NNLS', 'RESTORE']
    for _ in fit_methods:
        dm = dti.TensorModel(gtab)
        npt.assert_almost_equal(dm.fit(100 * np.ones(bvals.shape[0])).fa, 0)
        # Doesn't matter if the signal is smaller than 1:
        npt.assert_almost_equal(dm.fit(0.4 * np.ones(bvals.shape[0])).fa, 0)
def test_odf_with_zeros():
    fdata, fbval, fbvec = get_fnames('small_25')
    gtab = grad.gradient_table(fbval, fbvec)
    data = nib.load(fdata).get_data()
    dm = dti.TensorModel(gtab)
    df = dm.fit(data)
    df.evals[0, 0, 0] = np.array([0, 0, 0])
    sphere = create_unit_sphere(4)
    odf = df.odf(sphere)
    npt.assert_equal(odf[0, 0, 0], np.zeros(sphere.vertices.shape[0]))
def test_fit_method_error():
    bvec, bval = read_bvec_file(get_fnames('55dir_grad.bvec'))
    gtab = grad.gradient_table_from_bvals_bvecs(bval, bvec.T)

    # This should work (smoke-testing!):
    TensorModel(gtab, fit_method='WLS')

    # This should raise an error because there is no such fit_method
    npt.assert_raises(ValueError, TensorModel, gtab, min_signal=1e-9,
                      fit_method='s')
def bench_quickbundles():
    dtype = "float32"
    repeat = 10
    nb_points = 12

    streams, hdr = nib.trackvis.read(get_fnames('fornix'))
    fornix = [s[0].astype(dtype) for s in streams]
    fornix = streamline_utils.set_number_of_points(fornix, nb_points)

    # Create eight copies of the fornix to be clustered (one in each octant).
    streamlines = []
    streamlines += [s + np.array([100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([100, -100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, 100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, 100, -100], dtype) for s in fornix]
    streamlines += [s + np.array([-100, -100, -100], dtype) for s in fornix]

    # The expected number of clusters of the fornix using threshold=10 is 4.
    threshold = 10.
    expected_nb_clusters = 4 * 8

    print("Timing QuickBundles 1.0 vs. 2.0")

    qb = QB_Old(streamlines, threshold, pts=None)
    qb1_time = measure("QB_Old(streamlines, threshold, nb_points)", repeat)
    print("QuickBundles time: {0:.4}sec".format(qb1_time))
    assert_equal(qb.total_clusters, expected_nb_clusters)
    sizes1 = [qb.partitions()[i]['N'] for i in range(qb.total_clusters)]
    indices1 = [qb.partitions()[i]['indices']
                for i in range(qb.total_clusters)]

    qb2 = QB_New(threshold)
    qb2_time = measure("clusters = qb2.cluster(streamlines)", repeat)
    print("QuickBundles2 time: {0:.4}sec".format(qb2_time))
    print("Speed up of {0}x".format(qb1_time / qb2_time))
    clusters = qb2.cluster(streamlines)
    sizes2 = map(len, clusters)
    indices2 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes2), sizes1)
    assert_arrays_equal(indices2, indices1)

    qb = QB_New(threshold, metric=MDFpy())
    qb3_time = measure("clusters = qb.cluster(streamlines)", repeat)
    print("QuickBundles2_python time: {0:.4}sec".format(qb3_time))
    print("Speed up of {0}x".format(qb1_time / qb3_time))
    clusters = qb.cluster(streamlines)
    sizes3 = map(len, clusters)
    indices3 = map(lambda c: c.indices, clusters)
    assert_equal(len(clusters), expected_nb_clusters)
    assert_array_equal(list(sizes3), sizes1)
    assert_arrays_equal(indices3, indices1)
def bench_compress_streamlines():
    repeat = 10
    fname = get_fnames('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [i[0] for i in streams]

    print("Timing compress_streamlines() in Cython"
          " ({0} streamlines)".format(len(streamlines)))
    cython_time = measure("compress_streamlines(streamlines)", repeat)
    print("Cython time: {0:.3}sec".format(cython_time))
    del streamlines

    fname = get_fnames('fornix')
    streams, hdr = tv.read(fname)
    streamlines = [i[0] for i in streams]
    # Wrap the map in list() so the lazy map object is actually consumed
    # and the Python implementation really runs during the benchmark.
    python_time = measure("list(map(compress_streamlines_python, "
                          "streamlines))", repeat)
    print("Python time: {0:.2}sec".format(python_time))
    print("Speed up of {0}x".format(python_time / cython_time))
    del streamlines
def test_whole_brain_slr():
    streams, hdr = nib.trackvis.read(get_fnames('fornix'))
    fornix = [s[0] for s in streams]

    f = Streamlines(fornix)
    f1 = f.copy()
    f2 = f.copy()

    # check translation
    f2._data += np.array([50, 0, 0])

    moved, transform, qb_centroids1, qb_centroids2 = whole_brain_slr(
        f1, f2, x0='affine', verbose=True, rm_small_clusters=2,
        greater_than=0, less_than=np.inf, qbx_thr=[5, 2, 1],
        progressive=False)

    # we can check the quality of registration by comparing the matrices
    # MAM streamline distances before and after SLR
    D12 = bundles_distances_mam(f1, f2)
    D1M = bundles_distances_mam(f1, moved)

    d12_minsum = np.sum(np.min(D12, axis=0))
    d1m_minsum = np.sum(np.min(D1M, axis=0))

    print("distances= ", d12_minsum, " ", d1m_minsum)

    assert_equal(d1m_minsum < d12_minsum, True)

    assert_array_almost_equal(transform[:3, 3], [-50, -0, -0], 2)

    # check rotation
    mat = compose_matrix44([0, 0, 0, 15, 0, 0])

    f3 = f.copy()
    f3 = transform_streamlines(f3, mat)

    moved, transform, qb_centroids1, qb_centroids2 = slr_with_qbx(
        f1, f3, verbose=False, rm_small_clusters=1, greater_than=20,
        less_than=np.inf, qbx_thr=[2], progressive=True)

    # we can also check the quality by looking at the decomposed transform
    assert_array_almost_equal(decompose_matrix44(transform)[3], -15, 2)

    moved, transform, qb_centroids1, qb_centroids2 = slr_with_qbx(
        f1, f3, verbose=False, rm_small_clusters=1, select_random=400,
        greater_than=20, less_than=np.inf, qbx_thr=[2], progressive=True)

    # we can also check the quality by looking at the decomposed transform
    assert_array_almost_equal(decompose_matrix44(transform)[3], -15, 2)
def test_min_signal_alone():
    fdata, fbvals, fbvecs = get_fnames()
    data = nib.load(fdata).get_data()
    gtab = grad.gradient_table(fbvals, fbvecs)

    idx = tuple(np.array(np.where(data == np.min(data)))[:-1, 0])
    ten_model = dti.TensorModel(gtab)
    fit_alone = ten_model.fit(data[idx])
    fit_together = ten_model.fit(data)
    npt.assert_array_almost_equal(fit_together.model_params[idx],
                                  fit_alone.model_params, decimal=12)
def test_resample():
    fimg, _, _ = get_fnames("small_25")
    img = nib.load(fimg)
    data = img.get_data()
    affine = img.affine
    zooms = img.header.get_zooms()[:3]

    # test that the new zooms are correctly stored in the affine
    # (check with a 3D volume)
    new_zooms = (1, 1.2, 2.1)
    data2, affine2 = reslice(data[..., 0], affine, zooms, new_zooms,
                             order=1, mode='constant')
    img2 = nib.Nifti1Image(data2, affine2)
    new_zooms_confirmed = img2.header.get_zooms()[:3]
    assert_almost_equal(new_zooms, new_zooms_confirmed)

    # test that the shape changes correctly for the first 3 dimensions
    # (check with 4D data)
    new_zooms = (1, 1, 1.)
    data2, affine2 = reslice(data, affine, zooms, new_zooms, order=0,
                             mode='reflect')
    assert_equal(2 * np.array(data.shape[:3]), data2.shape[:3])
    assert_equal(data2.shape[-1], data.shape[-1])

    # same with different interpolation order
    new_zooms = (1, 1, 1.)
    data3, affine2 = reslice(data, affine, zooms, new_zooms, order=5,
                             mode='reflect')
    assert_equal(2 * np.array(data.shape[:3]), data3.shape[:3])
    assert_equal(data3.shape[-1], data.shape[-1])

    # test that the sigma will be reduced with interpolation
    sigmas = estimate_sigma(data)
    sigmas2 = estimate_sigma(data2)
    sigmas3 = estimate_sigma(data3)

    assert_(np.all(sigmas > sigmas2))
    assert_(np.all(sigmas2 > sigmas3))

    # check that 4D resampling matches 3D resampling
    data2, affine2 = reslice(data, affine, zooms, new_zooms)
    for i in range(data.shape[-1]):
        _data, _affine = reslice(data[..., i], affine, zooms, new_zooms)
        assert_almost_equal(data2[..., i], _data)
        assert_almost_equal(affine2, _affine)

    # check use of multiprocessing pool of specified size
    data3, affine3 = reslice(data, affine, zooms, new_zooms,
                             num_processes=4)
    assert_almost_equal(data2, data3)
    assert_almost_equal(affine2, affine3)

    # check use of multiprocessing pool of autoconfigured size
    data3, affine3 = reslice(data, affine, zooms, new_zooms,
                             num_processes=0)
    assert_almost_equal(data2, data3)
    assert_almost_equal(affine2, affine3)
def setup_module():
    """Module-level setup"""
    global gtab, gtab_2s
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)

    # 2 shells for techniques that require multi-shell data
    bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
    bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
    gtab_2s = gradient_table(bvals_2s, bvecs_2s)
def test_nan_bvecs():
    """
    Test that the presence of NaNs in b-vectors doesn't raise warnings.

    In previous versions, the presence of NaN in b-vectors was taken to
    indicate a 0 b-value, but also raised a warning when testing for the
    length of these vectors. This checks that it doesn't happen.
    """
    fdata, fbvals, fbvecs = get_fnames()
    with warnings.catch_warnings(record=True) as w:
        gradient_table(fbvals, fbvecs)
        npt.assert_(len(w) == 0)
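
# Illustrative only (not one of the original tests): the NaN convention
# described above pairs with the zeroing pattern used in test_btable_prepare,
# where NaN rows in a bvecs file mark b0 volumes and are replaced with zeros
# before the gradient table is built.
def _example_zero_nan_bvecs():
    fdata, fbvals, fbvecs = get_fnames()
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    bvecs = np.where(np.isnan(bvecs), 0, bvecs)  # NaN rows -> b0 directions
    return gradient_table(bvals, bvecs)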
def setup_module():
    """Module-level setup"""
    global gtab, gtab_2s, mevals, model_params_mv
    global DWI, FAref, GTF, MDref, FAdti, MDdti
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)

    # FW model requires multi-shell data
    bvals_2s = np.concatenate((bvals, bvals * 1.5), axis=0)
    bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
    gtab_2s = gradient_table(bvals_2s, bvecs_2s)

    # Simulate a typical DT and DW signal with no water contamination
    S0 = np.array(100)
    dt = np.array([0.0017, 0, 0.0003, 0, 0, 0.0003])
    evals, evecs = decompose_tensor(from_lower_triangular(dt))
    S_tissue = single_tensor(gtab_2s, S0=100, evals=evals, evecs=evecs,
                             snr=None)
    dm = dti.TensorModel(gtab_2s, 'WLS')
    dtifit = dm.fit(S_tissue)
    FAdti = dtifit.fa
    MDdti = dtifit.md
    dtiparams = dtifit.model_params

    # Simulation of the 8 tested voxels
    DWI = np.zeros((2, 2, 2, len(gtab_2s.bvals)))
    FAref = np.zeros((2, 2, 2))
    MDref = np.zeros((2, 2, 2))

    # Diffusivities of the tissue and water compartments are constant for
    # all voxels
    mevals = np.array([[0.0017, 0.0003, 0.0003],
                       [0.003, 0.003, 0.003]])

    # volume fractions
    GTF = np.array([[[0.06, 0.71], [0.33, 0.91]],
                    [[0., 0.], [0., 0.]]])

    # S0 multivoxel
    S0m = 100 * np.ones((2, 2, 2))

    # model_params ground truth (to be filled)
    model_params_mv = np.zeros((2, 2, 2, 13))
    for i in range(2):
        for j in range(2):
            gtf = GTF[0, i, j]
            S, p = multi_tensor(gtab_2s, mevals, S0=100,
                                angles=[(90, 0), (90, 0)],
                                fractions=[(1 - gtf) * 100, gtf * 100],
                                snr=None)
            DWI[0, i, j] = S
            FAref[0, i, j] = FAdti
            MDref[0, i, j] = MDdti
            R = all_tensor_evecs(p[0])
            R = R.reshape((9))
            model_params_mv[0, i, j] = \
                np.concatenate(([0.0017, 0.0003, 0.0003], R, [gtf]),
                               axis=0)
def test_denoise():
    fdata, fbval, fbvec = dpd.get_fnames()

    # Test on 4D image:
    data = nib.load(fdata).get_data()
    sigma1 = estimate_sigma(data)
    nlmeans(data, sigma=sigma1)

    # Test on 3D image:
    data = data[..., 0]
    sigma2 = estimate_sigma(data)
    nlmeans(data, sigma=sigma2)
def test_streamline_signal():
    data_file, bval_file, bvec_file = dpd.get_fnames('small_64D')
    gtab = dpg.gradient_table(bval_file, bvec_file)
    evals = [0.0015, 0.0005, 0.0005]
    streamline1 = [[[1, 2, 3], [4, 5, 3], [5, 6, 3], [6, 7, 3]],
                   [[1, 2, 3], [4, 5, 3], [5, 6, 3]]]

    [life.streamline_signal(s, gtab, evals) for s in streamline1]

    streamline2 = [[[1, 2, 3], [4, 5, 3], [5, 6, 3], [6, 7, 3]]]

    [life.streamline_signal(s, gtab, evals) for s in streamline2]

    npt.assert_array_equal(streamline2[0], streamline1[0])
def test_auto_response():
    fdata, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    data = nib.load(fdata).get_data()
    gtab = gradient_table(bvals, bvecs)
    radius = 3

    def test_fa_superior(FA, fa_thr):
        return FA > fa_thr

    def test_fa_inferior(FA, fa_thr):
        return FA < fa_thr

    predefined_functions = [fa_superior, fa_inferior]
    defined_functions = [test_fa_superior, test_fa_inferior]

    for fa_thr in np.arange(0.1, 1, 0.1):
        for predefined, defined in zip(predefined_functions,
                                       defined_functions):
            response_predefined, ratio_predefined, nvoxels_predefined = \
                auto_response(gtab, data, roi_center=None,
                              roi_radius=radius, fa_callable=predefined,
                              fa_thr=fa_thr, return_number_of_voxels=True)

            response_defined, ratio_defined, nvoxels_defined = \
                auto_response(gtab, data, roi_center=None,
                              roi_radius=radius, fa_callable=defined,
                              fa_thr=fa_thr, return_number_of_voxels=True)

            assert_equal(nvoxels_predefined, nvoxels_defined)
            assert_array_almost_equal(response_predefined[0],
                                      response_defined[0])
            assert_almost_equal(response_predefined[1], response_defined[1])
            assert_almost_equal(ratio_predefined, ratio_defined)
def test_dsi():
    # load symmetric 724 sphere
    sphere = get_sphere('symmetric724')

    # load icosahedron sphere
    sphere2 = create_unit_sphere(5)
    btable = np.loadtxt(get_fnames('dsi515btable'))
    gtab = gradient_table(btable[:, 0], btable[:, 1:])
    data, golden_directions = SticksAndBall(gtab, d=0.0015, S0=100,
                                            angles=[(0, 0), (90, 0)],
                                            fractions=[50, 50], snr=None)

    ds = DiffusionSpectrumDeconvModel(gtab)

    # symmetric724
    dsfit = ds.fit(data)
    odf = dsfit.odf(sphere)
    directions, _, _ = peak_directions(odf, sphere, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    # 5 subdivisions
    dsfit = ds.fit(data)
    odf2 = dsfit.odf(sphere2)
    directions, _, _ = peak_directions(odf2, sphere2, .35, 25)
    assert_equal(len(directions), 2)
    assert_almost_equal(angular_similarity(directions, golden_directions),
                        2, 1)

    assert_equal(dsfit.pdf().shape, 3 * (ds.qgrid_size, ))

    sb_dummies = sticks_and_ball_dummies(gtab)
    for sbd in sb_dummies:
        data, golden_directions = sb_dummies[sbd]
        odf = ds.fit(data).odf(sphere2)
        directions, _, _ = peak_directions(odf, sphere2, .35, 25)
        if len(directions) <= 3:
            assert_equal(len(directions), len(golden_directions))
        if len(directions) > 3:
            assert_equal(gfa(odf) < 0.1, True)

    assert_raises(ValueError, DiffusionSpectrumDeconvModel, gtab,
                  qgrid_size=16)
def _create_mt_sim(mevals, angles, fractions, S0, SNR, half_sphere=False):
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    S, sticks = multi_tensor(gtab, mevals, S0, angles=angles,
                             fractions=fractions, snr=SNR)
    sphere = get_sphere('symmetric724').subdivide(2)

    if half_sphere:
        sphere = HemiSphere.from_sphere(sphere)

    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles=angles,
                              fractions=fractions)
    return odf_gt, sticks, sphere
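
# Hypothetical usage of the helper above (not one of the original tests);
# the eigenvalues and angles mirror the simulations used elsewhere in this
# file: a noise-free, 50/50, 60-degree crossing.
def _example_mt_sim():
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    return _create_mt_sim(mevals, angles=[(0, 0), (60, 0)],
                          fractions=[50, 50], S0=100, SNR=None)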
def test_predict():
    SNR = 1000
    S0 = 100
    _, fbvals, fbvecs = dpd.get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = grad.gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]
    S, sticks = sims.multi_tensor(gtab, mevals, S0, angles=angles,
                                  fractions=[10, 90], snr=SNR)

    sfmodel = sfm.SparseFascicleModel(gtab,
                                      response=[0.0015, 0.0003, 0.0003])
    sffit = sfmodel.fit(S)
    pred = sffit.predict()
    npt.assert_(xval.coeff_of_determination(pred, S) > 97)

    # Should be possible to predict using a different gtab:
    new_gtab = grad.gradient_table(bvals[::2], bvecs[::2])
    new_pred = sffit.predict(new_gtab)
    npt.assert_(xval.coeff_of_determination(new_pred, S[::2]) > 97)
def test_register_dwi_series_and_motion_correction():
    fdata, fbval, fbvec = dpd.get_fnames('small_64D')
    with nbtmp.InTemporaryDirectory() as tmpdir:
        # Use an abbreviated data-set:
        img = nib.load(fdata)
        data = img.get_fdata()[..., :10]
        nib.save(nib.Nifti1Image(data, img.affine),
                 op.join(tmpdir, 'data.nii.gz'))
        # Save a subset:
        bvals = np.loadtxt(fbval)
        bvecs = np.loadtxt(fbvec)
        np.savetxt(op.join(tmpdir, 'bvals.txt'), bvals[:10])
        np.savetxt(op.join(tmpdir, 'bvecs.txt'), bvecs[:10])
        gtab = dpg.gradient_table(op.join(tmpdir, 'bvals.txt'),
                                  op.join(tmpdir, 'bvecs.txt'))
        reg_img, reg_affines = register_dwi_series(data, gtab, img.affine)
        reg_img_2, reg_affines_2 = motion_correction(data, gtab, img.affine)
        npt.assert_(isinstance(reg_img, nib.Nifti1Image))
        npt.assert_array_equal(reg_img.get_fdata(), reg_img_2.get_fdata())
        npt.assert_array_equal(reg_affines, reg_affines_2)
def get_test_data():
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    evals_list = [np.array([1.7E-3, 0.4E-3, 0.4E-3]),
                  np.array([4.0E-4, 4.0E-4, 4.0E-4]),
                  np.array([3.0E-3, 3.0E-3, 3.0E-3])]
    s0 = [0.8, 1, 4]
    signals = [single_tensor(gtab, x[0], x[1])
               for x in zip(s0, evals_list)]
    tissues = [0, 0, 2, 0, 1, 0, 0, 1, 2]
    data = [signals[tissue] for tissue in tissues]
    data = np.asarray(data).reshape((3, 3, 1, len(signals[0])))
    evals = [evals_list[tissue] for tissue in tissues]
    evals = np.asarray(evals).reshape((3, 3, 1, 3))
    tissues = np.asarray(tissues).reshape((3, 3, 1))
    mask = np.where(tissues == 0, 1, 0)
    response = (evals_list[0], s0[0])
    fa = fractional_anisotropy(evals)
    return (gtab, data, mask, response, fa)
def test_FiberModel_init():
    # Get some small amount of data:
    data_file, bval_file, bvec_file = dpd.get_fnames('small_64D')
    data_ni = nib.load(data_file)
    bvals, bvecs = read_bvals_bvecs(bval_file, bvec_file)
    gtab = dpg.gradient_table(bvals, bvecs)
    FM = life.FiberModel(gtab)

    streamline = [[[1, 2, 3], [4, 5, 3], [5, 6, 3], [6, 7, 3]],
                  [[1, 2, 3], [4, 5, 3], [5, 6, 3]]]

    affine = np.eye(4)

    for sphere in [None, False, dpd.get_sphere('symmetric362')]:
        fiber_matrix, vox_coords = FM.setup(streamline, affine,
                                            sphere=sphere)
        npt.assert_array_equal(
            np.array(vox_coords),
            np.array([[1, 2, 3], [4, 5, 3], [5, 6, 3], [6, 7, 3]]))

        npt.assert_equal(fiber_matrix.shape,
                         (len(vox_coords) * 64, len(streamline)))
def test_default_lambda_csdmodel():
    """We check that the default value of lambda is the expected value with
    the symmetric362 sphere. This value has empirically been found to work
    well and changes to this default value should be discussed with the
    dipy team.
    """
    sphere = default_sphere

    # Create gradient table
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)

    # Some response function
    response = (np.array([0.0015, 0.0003, 0.0003]), 100)

    for sh_order, expected in expected_lambda.items():
        model_full = ConstrainedSphericalDeconvModel(gtab, response,
                                                     sh_order=sh_order,
                                                     reg_sphere=sphere)
        B_reg, _, _ = real_sym_sh_basis(sh_order, sphere.theta, sphere.phi)
        npt.assert_array_almost_equal(model_full.B_reg, expected * B_reg)
def test_det_track():
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_fnames('small_64D')
        vol_img = nib.load(data_path)
        volume = vol_img.get_data()
        mask = np.ones_like(volume[:, :, :, 0])
        mask_img = nib.Nifti1Image(mask.astype(np.uint8), vol_img.affine)
        mask_path = join(out_dir, 'tmp_mask.nii.gz')
        nib.save(mask_img, mask_path)

        reconst_csd_flow = ReconstCSDFlow()
        reconst_csd_flow.run(data_path, bval_path, bvec_path, mask_path,
                             out_dir=out_dir, extract_pam_values=True)

        pam_path = reconst_csd_flow.last_generated_outputs['out_pam']
        gfa_path = reconst_csd_flow.last_generated_outputs['out_gfa']

        # Create seeding mask by thresholding the gfa
        mask_flow = MaskFlow()
        mask_flow.run(gfa_path, 0.8, out_dir=out_dir)
        seeds_path = mask_flow.last_generated_outputs['out_mask']

        # Replace the gfa affine with the identity, because local tracking
        # cannot be used with an affine that contains shearing.
        gfa_img = nib.load(gfa_path)
        save_nifti(gfa_path, gfa_img.get_data(), np.eye(4), gfa_img.header)

        # Test tracking with pam no sh
        det_track_pam = DetTrackPAMFlow()
        assert_equal(det_track_pam.get_short_name(), 'det_track')
        det_track_pam.run(pam_path, gfa_path, seeds_path)
        tractogram_path = \
            det_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))

        # Test tracking with pam with sh
        det_track_pam.run(pam_path, gfa_path, seeds_path, use_sh=True)
        tractogram_path = \
            det_track_pam.last_generated_outputs['out_tractogram']
        assert_false(is_tractogram_empty(tractogram_path))
def test_peaks_shm_coeff():
    SNR = 100
    S0 = 100

    _, fbvals, fbvecs = get_fnames('small_64D')

    sphere = default_sphere

    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)

    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))

    data, _ = multi_tensor(gtab, mevals, S0, angles=[(0, 0), (60, 0)],
                           fractions=[50, 50], snr=SNR)

    from dipy.reconst.shm import CsaOdfModel
    model = CsaOdfModel(gtab, 4)

    pam = peaks_from_model(model, data[None, :], sphere, .5, 45,
                           return_odf=True, return_sh=True)
    # Test that the spherical harmonic coefficients reproduce the ODF
    odf2 = np.dot(pam.shm_coeff, pam.B)
    assert_array_almost_equal(pam.odf, odf2)
    assert_equal(pam.shm_coeff.shape[-1], 45)

    pam = peaks_from_model(model, data[None, :], sphere, .5, 45,
                           return_odf=True, return_sh=False)
    assert_equal(pam.shm_coeff, None)

    pam = peaks_from_model(model, data[None, :], sphere, .5, 45,
                           return_odf=True, return_sh=True,
                           sh_basis_type='tournier07')
    odf2 = np.dot(pam.shm_coeff, pam.B)
    assert_array_almost_equal(pam.odf, odf2)
def test_fit_csd():
    fdata, fbval, fbvec = dpd.get_fnames('small_64D')
    with nbtmp.InTemporaryDirectory() as tmpdir:
        # Write the bvals and bvecs out as text files:
        bvals = np.loadtxt(fbval)
        bvecs = np.loadtxt(fbvec)
        np.savetxt(op.join(tmpdir, 'bvals.txt'), bvals)
        np.savetxt(op.join(tmpdir, 'bvecs.txt'), bvecs)
        for msmt in [True, False]:
            for sh_order in [4, 6]:
                fname = csd.fit_csd(
                    fdata, op.join(tmpdir, 'bvals.txt'),
                    op.join(tmpdir, 'bvecs.txt'),
                    out_dir=tmpdir, sh_order=sh_order, msmt=msmt)
                npt.assert_(op.exists(fname))
                sh_coeffs_img = nib.load(fname)
                npt.assert_equal(
                    sh_order,
                    calculate_max_order(sh_coeffs_img.shape[-1]))
def test_em_2d_gauss_newton():
    r"""
    Test 2D SyN with EM metric, Gauss-Newton optimizer

    Register a coronal slice from a T1w brain MRI before and after warping
    it under a synthetic invertible map. We verify that the final
    registration is of good quality.
    """
    fname = get_fnames('t1_coronal_slice')
    nslices = 1
    b = 0.1
    m = 4

    image = np.load(fname)
    moving, static = get_warped_stacked_image(image, nslices, b, m)

    # Configure the metric
    smooth = 5.0
    inner_iter = 20
    q_levels = 256
    double_gradient = False
    iter_type = 'gauss_newton'
    metric = metrics.EMMetric(2, smooth, inner_iter, q_levels,
                              double_gradient, iter_type)

    # Configure and run the Optimizer
    level_iters = [40, 20, 10]
    optimizer = imwarp.SymmetricDiffeomorphicRegistration(metric,
                                                          level_iters)
    optimizer.verbosity = VerbosityLevels.DEBUG
    mapping = optimizer.optimize(static, moving, None)
    m = optimizer.get_map()
    assert_equal(mapping, m)

    warped = mapping.transform(moving)
    starting_energy = np.sum((static - moving) ** 2)
    final_energy = np.sum((static - warped) ** 2)
    reduced = 1.0 - final_energy / starting_energy

    assert (reduced > 0.9)
def make_dki_data(out_fbval, out_fbvec, out_fdata, out_shape=(5, 6, 7)):
    """
    Create a synthetic data-set with a 2-shell acquisition.

    out_fbval, out_fbvec, out_fdata : str
        Full paths to the generated data and bval/bvec files
    out_shape : tuple
        The 3D shape of the output volume
    """
    # This is one-shell (b=1000) data:
    fimg, fbvals, fbvecs = dpd.get_fnames('small_64D')
    img = nib.load(fimg)
    bvals, bvecs = dio.read_bvals_bvecs(fbvals, fbvecs)

    # So we create two shells out of it
    bvals_2s = np.concatenate((bvals, bvals * 2), axis=0)
    bvecs_2s = np.concatenate((bvecs, bvecs), axis=0)
    gtab_2s = dpg.gradient_table(bvals_2s, bvecs_2s)

    # Simulate a signal based on the DKI model:
    mevals_cross = np.array([[0.00099, 0, 0],
                             [0.00226, 0.00087, 0.00087],
                             [0.00099, 0, 0],
                             [0.00226, 0.00087, 0.00087]])
    angles_cross = [(80, 10), (80, 10), (20, 30), (20, 30)]
    fie = 0.49
    frac_cross = [fie * 50, (1 - fie) * 50, fie * 50, (1 - fie) * 50]

    # Noise-free simulated signal:
    signal_cross, dt_cross, kt_cross = multi_tensor_dki(
        gtab_2s, mevals_cross, S0=100, angles=angles_cross,
        fractions=frac_cross, snr=None)
    DWI = np.zeros(out_shape + (len(gtab_2s.bvals), ))
    DWI[:] = signal_cross
    nib.save(nib.Nifti1Image(DWI, img.affine), out_fdata)
    np.savetxt(out_fbval, bvals_2s)
    np.savetxt(out_fbvec, bvecs_2s)
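
# Hypothetical usage of the generator above (the file names are illustrative,
# not fixtures from this file): write the synthetic two-shell data set to
# disk so it can be fed to a DKI fitting workflow.
#
#     make_dki_data('sim.bval', 'sim.bvec', 'sim_dki.nii.gz',
#                   out_shape=(5, 6, 7))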
def test_median_otsu_flow():
    with TemporaryDirectory() as out_dir:
        data_path, _, _ = get_fnames('small_25')
        volume = load_nifti_data(data_path)
        save_masked = True
        median_radius = 3
        numpass = 3
        autocrop = False
        vol_idx = [0]
        dilate = 0

        mo_flow = MedianOtsuFlow()
        mo_flow.run(data_path, out_dir=out_dir, save_masked=save_masked,
                    median_radius=median_radius, numpass=numpass,
                    autocrop=autocrop, vol_idx=vol_idx, dilate=dilate)

        mask_name = mo_flow.last_generated_outputs['out_mask']
        masked_name = mo_flow.last_generated_outputs['out_masked']

        masked, mask = median_otsu(volume, vol_idx=vol_idx,
                                   median_radius=median_radius,
                                   numpass=numpass, autocrop=autocrop,
                                   dilate=dilate)

        result_mask_data = load_nifti_data(join(out_dir, mask_name))
        npt.assert_array_equal(result_mask_data.astype(np.uint8), mask)

        result_masked = nib.load(join(out_dir, masked_name))
        result_masked_data = np.asanyarray(result_masked.dataobj)

        npt.assert_array_equal(np.round(result_masked_data), masked)
def make_dti_data(out_fbval, out_fbvec, out_fdata, out_shape=(5, 6, 7)):
    """
    Create a synthetic data-set with a single-shell acquisition.

    out_fbval, out_fbvec, out_fdata : str
        Full paths to the generated data and bval/bvec files
    out_shape : tuple
        The 3D shape of the output volume
    """
    fimg, fbvals, fbvecs = dpd.get_fnames('small_64D')
    img = nib.load(fimg)
    bvals, bvecs = dio.read_bvals_bvecs(fbvals, fbvecs)
    gtab = dpg.gradient_table(bvals, bvecs)

    # Simulate a signal based on the DTI model:
    signal = single_tensor(gtab, S0=100)
    DWI = np.zeros(out_shape + (len(gtab.bvals), ))
    DWI[:] = signal
    nib.save(nib.Nifti1Image(DWI, img.affine), out_fdata)
    np.savetxt(out_fbval, bvals)
    np.savetxt(out_fbvec, bvecs)
def test_sphere_scaling_csdmodel():
    """Check that mirroring the regularization sphere does not change the
    result of the model."""
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]
    S, _ = multi_tensor(gtab, mevals, 100., angles=angles,
                        fractions=[50, 50], snr=None)

    hemi = small_sphere
    sphere = hemi.mirror()

    response = (np.array([0.0015, 0.0003, 0.0003]), 100)
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore",
                                message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)
        model_full = ConstrainedSphericalDeconvModel(gtab, response,
                                                     reg_sphere=sphere)
        model_hemi = ConstrainedSphericalDeconvModel(gtab, response,
                                                     reg_sphere=hemi)
        csd_fit_full = model_full.fit(S)
        csd_fit_hemi = model_hemi.fit(S)

    assert_array_almost_equal(csd_fit_full.shm_coeff,
                              csd_fit_hemi.shm_coeff)
def test_fit_data():
    fdata, fbval, fbvec = dpd.get_fnames('small_25')
    gtab = grad.gradient_table(fbval, fbvec)
    ni_data = nib.load(fdata)
    data = ni_data.get_data()
    dtmodel = dti.TensorModel(gtab)
    dtfit = dtmodel.fit(data)
    sphere = dpd.get_sphere()
    peak_idx = dti.quantize_evecs(dtfit.evecs, sphere.vertices)
    eu = edx.EuDX(dtfit.fa.astype('f8'), peak_idx,
                  seeds=list(nd.ndindex(data.shape[:-1])),
                  odf_vertices=sphere.vertices, a_low=0)
    tensor_streamlines = [streamline for streamline in eu]
    life_model = life.FiberModel(gtab)
    life_fit = life_model.fit(data, tensor_streamlines)
    model_error = life_fit.predict() - life_fit.data
    model_rmse = np.sqrt(np.mean(model_error ** 2, -1))
    matlab_rmse, matlab_weights = dpd.matlab_life_results()
    # Lower error than the matlab implementation for these data:
    npt.assert_(np.median(model_rmse) < np.median(matlab_rmse))
    # And a moderate correlation with the Matlab implementation weights:
    npt.assert_(np.corrcoef(matlab_weights, life_fit.beta)[0, 1] > 0.6)
def test_odf_sh_to_sharp():
    SNR = None
    S0 = 1
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    S, _ = multi_tensor(gtab, mevals, S0, angles=[(10, 0), (100, 0)],
                        fractions=[50, 50], snr=SNR)

    sphere = get_sphere('symmetric724')

    qb = QballModel(gtab, sh_order=8, assume_normed=True)

    qbfit = qb.fit(S)
    odf_gt = qbfit.odf(sphere)

    Z = np.linalg.norm(odf_gt)

    odfs_gt = np.zeros((3, 1, 1, odf_gt.shape[0]))
    odfs_gt[:, :, :] = odf_gt[:]

    odfs_sh = sf_to_sh(odfs_gt, sphere, sh_order=8, basis_type=None)
    odfs_sh /= Z

    fodf_sh = odf_sh_to_sharp(odfs_sh, sphere, basis=None, ratio=3 / 15.,
                              sh_order=8, lambda_=1., tau=0.1)
    fodf = sh_to_sf(fodf_sh, sphere, sh_order=8, basis_type=None)

    directions2, _, _ = peak_directions(fodf[0, 0, 0], sphere)
    assert_equal(directions2.shape[0], 2)
def test_sfm_stick():
    fdata, fbvals, fbvecs = dpd.get_fnames()
    data = nib.load(fdata).get_data()
    gtab = grad.gradient_table(fbvals, fbvecs)
    sfmodel = sfm.SparseFascicleModel(gtab, solver='NNLS',
                                      response=[0.001, 0, 0])
    sffit1 = sfmodel.fit(data[0, 0, 0])
    sphere = dpd.get_sphere()
    sffit1.odf(sphere)
    sffit1.predict(gtab)

    SNR = 1000
    S0 = 100
    mevals = np.array(([0.001, 0, 0],
                       [0.001, 0, 0]))
    angles = [(0, 0), (60, 0)]
    S, sticks = sims.multi_tensor(gtab, mevals, S0, angles=angles,
                                  fractions=[50, 50], snr=SNR)
    sfmodel = sfm.SparseFascicleModel(gtab, solver='NNLS',
                                      response=[0.001, 0, 0])
    sffit = sfmodel.fit(S)
    pred = sffit.predict()
    npt.assert_(xval.coeff_of_determination(pred, S) > 96)
def test_fit_dki():
    fdata, fbval, fbvec = dpd.get_fnames('small_101D')
    with nbtmp.InTemporaryDirectory() as tmpdir:
        file_dict = dki.fit_dki(fdata, fbval, fbvec, out_dir=tmpdir)
        for f in file_dict.values():
            # Actually assert on existence; a bare op.exists(f) would be a
            # no-op check.
            npt.assert_(op.exists(f))
from dipy.core.gradients import gradient_table
from dipy.data import get_fnames
from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti, load_nifti_data
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                   auto_response)
from dipy.tracking import utils
from dipy.tracking.local_tracking import LocalTracking
from dipy.tracking.streamline import Streamlines
from dipy.tracking.stopping_criterion import ThresholdStoppingCriterion
from dipy.viz import window, actor, colormap, has_fury

# Enables/disables interactive visualization
interactive = False

hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi')
label_fname = get_fnames('stanford_labels')

data, affine, hardi_img = load_nifti(hardi_fname, return_img=True)
labels = load_nifti_data(label_fname)
bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
gtab = gradient_table(bvals, bvecs)

seed_mask = (labels == 2)
white_matter = (labels == 1) | (labels == 2)
seeds = utils.seeds_from_mask(seed_mask, affine, density=1)

response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
csd_fit = csd_model.fit(data, mask=white_matter)

"""
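From here, a minimal tracking sketch (an illustration, not this tutorial's
exact continuation; ``DeterministicMaximumDirectionGetter`` and
``default_sphere`` are extra imports assumed below) turns the CSD fit into
streamlines using the objects defined above:
"""

from dipy.data import default_sphere  # assumed import
from dipy.direction import DeterministicMaximumDirectionGetter  # assumed

# Stop tracking when streamlines leave the white-matter mask
stopping_criterion = ThresholdStoppingCriterion(
    white_matter.astype('float64'), 0.25)

det_dg = DeterministicMaximumDirectionGetter.from_shcoeff(
    csd_fit.shm_coeff, max_angle=30., sphere=default_sphere)
streamlines = Streamlines(LocalTracking(det_dg, stopping_criterion, seeds,
                                        affine, step_size=.5))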
import numpy as np

from dipy.data import get_fnames
# load other dipy functions that will be used for auxiliary analysis
from dipy.core.gradients import gradient_table
from dipy.io.image import load_nifti
from dipy.io.gradients import read_bvals_bvecs
from dipy.segment.mask import median_otsu
import dipy.reconst.dki as dki

"""
For this example, we use fetch to download a multi-shell dataset which was
kindly provided by Hansen and Jespersen (more details about the data are
provided in their paper [Hansen2016]_). The total size of the downloaded
data is 192 MBytes, however you only need to fetch it once.
"""

dwi_fname, dwi_bval_fname, dwi_bvec_fname, _ = get_fnames('cfin_multib')

data, affine = load_nifti(dwi_fname)

bvals, bvecs = read_bvals_bvecs(dwi_bval_fname, dwi_bvec_fname)
gtab = gradient_table(bvals, bvecs)

"""
For the sake of simplicity, we only select two non-zero b-values for this
example.
"""

bvals = gtab.bvals
bvecs = gtab.bvecs

sel_b = np.logical_or(np.logical_or(bvals == 0, bvals == 1000),
                      bvals == 2000)

data = data[..., sel_b]
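
"""
The selected volumes need a matching gradient table; a minimal sketch of that
step, reusing the boolean selector defined above:
"""

gtab = gradient_table(bvals[sel_b], bvecs[sel_b])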
def test_bundle_analysis_population_flow():
    with TemporaryDirectory() as dirpath:
        data_path = get_fnames('fornix')
        fornix = load_tractogram(data_path, 'same',
                                 bbox_valid_check=False).streamlines

        f = Streamlines(fornix)

        mb = os.path.join(dirpath, "model_bundles")
        sub = os.path.join(dirpath, "subjects")

        os.mkdir(mb)
        sft = StatefulTractogram(f, data_path, Space.RASMM)
        save_tractogram(sft, os.path.join(mb, "temp.trk"),
                        bbox_valid_check=False)

        os.mkdir(sub)
        os.mkdir(os.path.join(sub, "patient"))
        os.mkdir(os.path.join(sub, "control"))

        p = os.path.join(sub, "patient", "10001")
        os.mkdir(p)
        c = os.path.join(sub, "control", "20002")
        os.mkdir(c)

        for pre in [p, c]:
            os.mkdir(os.path.join(pre, "rec_bundles"))
            sft = StatefulTractogram(f, data_path, Space.RASMM)
            save_tractogram(sft,
                            os.path.join(pre, "rec_bundles", "temp.trk"),
                            bbox_valid_check=False)

            os.mkdir(os.path.join(pre, "org_bundles"))
            sft = StatefulTractogram(f, data_path, Space.RASMM)
            save_tractogram(sft,
                            os.path.join(pre, "org_bundles", "temp.trk"),
                            bbox_valid_check=False)
            os.mkdir(os.path.join(pre, "measures"))
            fa = np.random.rand(255, 255, 255)
            save_nifti(os.path.join(pre, "measures", "fa.nii.gz"),
                       fa, affine=np.eye(4))

        out_dir = os.path.join(dirpath, "output")
        os.mkdir(out_dir)

        ba_flow = BundleAnalysisPopulationFlow()
        ba_flow.run(mb, sub, out_dir=out_dir)

        assert_true(os.path.exists(os.path.join(out_dir, 'fa.h5')))

        dft = pd.read_hdf(os.path.join(out_dir, 'fa.h5'))

        assert_true(dft.bundle.unique() == "temp")
        assert_true(set(dft.subject.unique()) == set(['10001', '20002']))
import numpy as np
import matplotlib.pyplot as plt

from dipy.core.gradients import gradient_table
from dipy.data import get_fnames
from dipy.denoise.noise_estimate import estimate_sigma
from dipy.io.image import load_nifti, save_nifti
from dipy.io.gradients import read_bvals_bvecs
from time import time
from dipy.denoise.non_local_means import non_local_means
from dipy.denoise.adaptive_soft_matching import adaptive_soft_matching

"""
Choose one of the datasets available in dipy_.
"""

dwi_fname, dwi_bval_fname, dwi_bvec_fname = get_fnames('sherbrooke_3shell')
data, affine = load_nifti(dwi_fname)
bvals, bvecs = read_bvals_bvecs(dwi_bval_fname, dwi_bvec_fname)
gtab = gradient_table(bvals, bvecs)

mask = data[..., 0] > 80
data = data[..., 1]

print("vol size", data.shape)

t = time()

"""
In order to generate the two pre-denoised versions of the data we will use
the ``non_local_means`` denoising. For ``non_local_means`` first we need to
estimate the standard deviation of the noise. We use N=4 since the Sherbrooke
"""
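
"""
The denoising itself then follows the same pattern exercised in
``test_ascm_accuracy`` above (a minimal sketch using the variables defined in
this example; the patch and block radii are illustrative values):
"""

sigma = estimate_sigma(data, N=4)

den_small = non_local_means(data, sigma=sigma, mask=mask,
                            patch_radius=1, block_radius=1, rician=True)
den_large = non_local_means(data, sigma=sigma, mask=mask,
                            patch_radius=2, block_radius=1, rician=True)

# Combine the two pre-denoised versions with adaptive soft matching
den_final = adaptive_soft_matching(data, den_small, den_large, sigma[0])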
# The two imports below are needed for this excerpt to run on its own:
from dipy.core.gradients import gradient_table
from dipy.data import get_fnames

from dipy.io.gradients import read_bvals_bvecs
from dipy.io.image import load_nifti, load_nifti_data
from dipy.io.stateful_tractogram import Space, StatefulTractogram
from dipy.io.streamline import save_trk
from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                   auto_response)
from dipy.tracking.local_tracking import (LocalTracking,
                                          ParticleFilteringTracking)
from dipy.tracking.streamline import Streamlines
from dipy.tracking import utils
from dipy.viz import window, actor, colormap, has_fury

# Enables/disables interactive visualization
interactive = False

hardi_fname, hardi_bval_fname, hardi_bvec_fname = get_fnames('stanford_hardi')
label_fname = get_fnames('stanford_labels')
f_pve_csf, f_pve_gm, f_pve_wm = get_fnames('stanford_pve_maps')

data, affine, hardi_img = load_nifti(hardi_fname, return_img=True)
labels = load_nifti_data(label_fname)
bvals, bvecs = read_bvals_bvecs(hardi_bval_fname, hardi_bvec_fname)
gtab = gradient_table(bvals, bvecs)

pve_csf_data = load_nifti_data(f_pve_csf)
pve_gm_data = load_nifti_data(f_pve_gm)
pve_wm_data, _, voxel_size = load_nifti(f_pve_wm, return_voxsize=True)

shape = labels.shape

response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
def test_affine_transformations():
    """This tests that the input affine is properly handled by
    LocalTracking and produces reasonable streamlines in a simple example.
    """
    sphere = HemiSphere.from_sphere(unit_octahedron)

    # A simple image with three possible configurations: a vertical tract,
    # a horizontal tract and a crossing
    pmf_lookup = np.array([[0., 0., 1.],
                           [1., 0., 0.],
                           [0., 1., 0.],
                           [.4, .6, 0.]])
    simple_image = np.array([[0, 0, 0, 0, 0, 0],
                             [0, 1, 0, 0, 0, 0],
                             [0, 3, 2, 2, 2, 0],
                             [0, 1, 0, 0, 0, 0],
                             [0, 0, 0, 0, 0, 0],
                             ])

    simple_image = simple_image[..., None]
    pmf = pmf_lookup[simple_image]

    seeds = [np.array([1., 1., 0.]),
             np.array([2., 4., 0.])]

    expected = [np.array([[1., 1., 0.],
                          [2., 1., 0.],
                          [3., 1., 0.]]),
                np.array([[2., 1., 0.],
                          [2., 2., 0.],
                          [2., 3., 0.],
                          [2., 4., 0.]])]

    mask = (simple_image > 0).astype(float)
    sc = BinaryStoppingCriterion(mask)

    dg = DeterministicMaximumDirectionGetter.from_pmf(pmf, 60, sphere,
                                                      pmf_threshold=0.1)

    # TST - bad affine with wrong shape
    bad_affine = np.eye(3)
    npt.assert_raises(ValueError, LocalTracking, dg, sc, seeds,
                      bad_affine, 1.)

    # TST - bad affine with shearing
    bad_affine = np.eye(4)
    bad_affine[0, 1] = 1.
    npt.assert_raises(ValueError, LocalTracking, dg, sc, seeds,
                      bad_affine, 1.)

    # TST - bad seeds
    bad_seeds = 1000
    npt.assert_raises(ValueError, LocalTracking, dg, sc, bad_seeds,
                      np.eye(4), 1.)

    # TST - identity
    a0 = np.eye(4)
    # TST - affines with positive/negative offsets
    a1 = np.eye(4)
    a1[:3, 3] = [1, 2, 3]
    a2 = np.eye(4)
    a2[:3, 3] = [-2, 0, -1]
    # TST - affine with scaling
    a3 = np.eye(4)
    a3[0, 0] = a3[1, 1] = a3[2, 2] = 8
    # TST - affine with axes inverting (negative value)
    a4 = np.eye(4)
    a4[1, 1] = a4[2, 2] = -1
    # TST - combined affines
    a5 = a1 + a2 + a3
    a5[3, 3] = 1
    # TST - in vivo affine example
    # Sometimes data have affines with tiny shear components.
    # For example, the small_101D data-set has some of that:
    fdata, _, _ = get_fnames('small_101D')
    a6 = nib.load(fdata).affine

    for affine in [a0, a1, a2, a3, a4, a5, a6]:
        lin = affine[:3, :3]
        offset = affine[:3, 3]
        seeds_trans = [np.dot(lin, s) + offset for s in seeds]

        # We compute the voxel size to adjust the step size to one voxel
        voxel_size = np.mean(np.sqrt(np.dot(lin, lin).diagonal()))

        streamlines = LocalTracking(direction_getter=dg,
                                    stopping_criterion=sc,
                                    seeds=seeds_trans,
                                    affine=affine,
                                    step_size=voxel_size,
                                    return_all=True)

        # We apply the inverse affine transformation to the generated
        # streamlines. It should be equal to the expected streamlines
        # (generated with the identity affine matrix).
        affine_inv = np.linalg.inv(affine)
        lin = affine_inv[:3, :3]
        offset = affine_inv[:3, 3]
        streamlines_inv = []
        for line in streamlines:
            streamlines_inv.append([np.dot(pts, lin) + offset
                                    for pts in line])

        npt.assert_equal(len(streamlines_inv[0]), len(expected[0]))
        npt.assert_(np.allclose(streamlines_inv[0], expected[0], atol=0.3))
        npt.assert_equal(len(streamlines_inv[1]), len(expected[1]))
        npt.assert_(np.allclose(streamlines_inv[1], expected[1], atol=0.3))
The algorithm to suppress Gibbs oscillations can be imported from the
denoise module of dipy:
"""

from dipy.denoise.gibbs import gibbs_removal

"""
We first apply this algorithm to a T1-weighted dataset which can be fetched
using the following code:
"""

from dipy.data import get_fnames
from dipy.io.image import load_nifti_data

t1_fname, t1_denoised_fname, ap_fname = get_fnames('tissue_data')
t1 = load_nifti_data(t1_denoised_fname)

"""
Let's plot a slice of this dataset.
"""

import matplotlib.pyplot as plt
import numpy as np

axial_slice = 88
t1_slice = t1[..., axial_slice]

fig = plt.figure(figsize=(15, 4))
fig.subplots_adjust(wspace=0.2)
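
"""
Suppressing the oscillations is then a single call (a minimal sketch with
default parameters; ``slice_axis`` selects the plane in which the unringing
is applied):
"""

t1_unring = gibbs_removal(t1, slice_axis=2)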
def test_reconst_dki():
    with TemporaryDirectory() as out_dir:
        data_path, bval_path, bvec_path = get_fnames('small_101D')
        volume, affine = load_nifti(data_path)
        mask = np.ones_like(volume[:, :, :, 0])
        mask_path = pjoin(out_dir, 'tmp_mask.nii.gz')
        save_nifti(mask_path, mask.astype(np.uint8), affine)

        dki_flow = ReconstDkiFlow()

        args = [data_path, bval_path, bvec_path, mask_path]

        dki_flow.run(*args, out_dir=out_dir)

        fa_path = dki_flow.last_generated_outputs['out_fa']
        fa_data = load_nifti_data(fa_path)
        assert_equal(fa_data.shape, volume.shape[:-1])

        tensor_path = dki_flow.last_generated_outputs['out_dt_tensor']
        tensor_data = load_nifti_data(tensor_path)
        assert_equal(tensor_data.shape[-1], 6)
        assert_equal(tensor_data.shape[:-1], volume.shape[:-1])

        ga_path = dki_flow.last_generated_outputs['out_ga']
        ga_data = load_nifti_data(ga_path)
        assert_equal(ga_data.shape, volume.shape[:-1])

        rgb_path = dki_flow.last_generated_outputs['out_rgb']
        rgb_data = load_nifti_data(rgb_path)
        assert_equal(rgb_data.shape[-1], 3)
        assert_equal(rgb_data.shape[:-1], volume.shape[:-1])

        md_path = dki_flow.last_generated_outputs['out_md']
        md_data = load_nifti_data(md_path)
        assert_equal(md_data.shape, volume.shape[:-1])

        ad_path = dki_flow.last_generated_outputs['out_ad']
        ad_data = load_nifti_data(ad_path)
        assert_equal(ad_data.shape, volume.shape[:-1])

        rd_path = dki_flow.last_generated_outputs['out_rd']
        rd_data = load_nifti_data(rd_path)
        assert_equal(rd_data.shape, volume.shape[:-1])

        mk_path = dki_flow.last_generated_outputs['out_mk']
        mk_data = load_nifti_data(mk_path)
        assert_equal(mk_data.shape, volume.shape[:-1])

        ak_path = dki_flow.last_generated_outputs['out_ak']
        ak_data = load_nifti_data(ak_path)
        assert_equal(ak_data.shape, volume.shape[:-1])

        rk_path = dki_flow.last_generated_outputs['out_rk']
        rk_data = load_nifti_data(rk_path)
        assert_equal(rk_data.shape, volume.shape[:-1])

        kt_path = dki_flow.last_generated_outputs['out_dk_tensor']
        kt_data = load_nifti_data(kt_path)
        assert_equal(kt_data.shape[-1], 15)
        assert_equal(kt_data.shape[:-1], volume.shape[:-1])

        mode_path = dki_flow.last_generated_outputs['out_mode']
        mode_data = load_nifti_data(mode_path)
        assert_equal(mode_data.shape, volume.shape[:-1])

        evecs_path = dki_flow.last_generated_outputs['out_evec']
        evecs_data = load_nifti_data(evecs_path)
        assert_equal(evecs_data.shape[-2:], tuple((3, 3)))
        assert_equal(evecs_data.shape[:-2], volume.shape[:-1])

        evals_path = dki_flow.last_generated_outputs['out_eval']
        evals_data = load_nifti_data(evals_path)
        assert_equal(evals_data.shape[-1], 3)
        assert_equal(evals_data.shape[:-1], volume.shape[:-1])

        bvals, bvecs = read_bvals_bvecs(bval_path, bvec_path)
        bvals[0] = 5.
        bvecs = generate_bvecs(len(bvals))

        tmp_bval_path = pjoin(out_dir, "tmp.bval")
        tmp_bvec_path = pjoin(out_dir, "tmp.bvec")
        np.savetxt(tmp_bval_path, bvals)
        np.savetxt(tmp_bvec_path, bvecs.T)
        dki_flow._force_overwrite = True
        npt.assert_warns(UserWarning, dki_flow.run, data_path,
                         tmp_bval_path, tmp_bvec_path, mask_path,
                         out_dir=out_dir, b0_threshold=0)
def test_affine_registration():
    moving = subset_b0
    static = subset_b0
    moving_affine = static_affine = np.eye(4)

    xformed, affine_mat = affine_registration(moving, static,
                                              moving_affine=moving_affine,
                                              static_affine=static_affine,
                                              level_iters=[5, 5],
                                              sigmas=[3, 1],
                                              factors=[2, 1])

    # We don't ask for much:
    npt.assert_almost_equal(affine_mat[:3, :3], np.eye(3), decimal=1)

    # [center_of_mass] + ret_metric=True should raise an error
    with pytest.raises(ValueError):
        xformed, affine_mat = affine_registration(
            moving, static,
            moving_affine=moving_affine,
            static_affine=static_affine,
            pipeline=["center_of_mass"],
            ret_metric=True)

    # Define list of methods
    reg_methods = ["center_of_mass", "translation", "rigid",
                   "rigid_isoscaling", "rigid_scaling", "affine",
                   center_of_mass, translation, rigid,
                   rigid_isoscaling, rigid_scaling, affine]

    # Test methods individually (without returning any metric)
    for func in reg_methods:
        xformed, affine_mat = affine_registration(
            moving, static,
            moving_affine=moving_affine,
            static_affine=static_affine,
            level_iters=[5, 5],
            sigmas=[3, 1],
            factors=[2, 1],
            pipeline=[func])
        # We don't ask for much:
        npt.assert_almost_equal(affine_mat[:3, :3], np.eye(3), decimal=1)

    # Bad method
    with pytest.raises(ValueError,
                       match=r'^pipeline\[0\] must be one.*foo.*'):
        affine_registration(moving, static, moving_affine, static_affine,
                            pipeline=['foo'])

    # Test methods individually (returning quality metric)
    expected_nparams = [0, 3, 6, 7, 9, 12] * 2
    assert len(expected_nparams) == len(reg_methods)
    for i, func in enumerate(reg_methods):
        if func in ('center_of_mass', center_of_mass):
            # can't return metric
            with pytest.raises(ValueError,
                               match='cannot return any quality'):
                affine_registration(moving, static, moving_affine,
                                    static_affine, pipeline=[func],
                                    ret_metric=True)
            continue

        xformed, affine_mat, xopt, fopt = affine_registration(
            moving, static,
            moving_affine=moving_affine,
            static_affine=static_affine,
            level_iters=[5, 5],
            sigmas=[3, 1],
            factors=[2, 1],
            pipeline=[func],
            ret_metric=True)
        # Expected number of optimization parameters
        npt.assert_equal(len(xopt), expected_nparams[i])
        # Optimization metric must be a single numeric value
        npt.assert_equal(isinstance(fopt, (int, float)), True)

    with pytest.raises(ValueError):
        # For array input, must provide affines:
        xformed, affine_mat = affine_registration(moving, static)

    # Not supported transform names should raise an error
    npt.assert_raises(ValueError, affine_registration, moving, static,
                      moving_affine, static_affine,
                      pipeline=["wrong_transform"])

    # If providing nifti image objects, don't need to provide affines:
    moving_img = nib.Nifti1Image(moving, moving_affine)
    static_img = nib.Nifti1Image(static, static_affine)
    xformed, affine_mat = affine_registration(moving_img, static_img)
    npt.assert_almost_equal(affine_mat[:3, :3], np.eye(3), decimal=1)

    # Using strings with full paths as inputs also works:
    t1_name, b0_name = dpd.get_fnames('syn_data')
    moving = b0_name
    static = t1_name
    xformed, affine_mat = affine_registration(moving, static,
                                              level_iters=[5, 5],
                                              sigmas=[3, 1],
                                              factors=[4, 2])
    npt.assert_almost_equal(affine_mat[:3, :3], np.eye(3), decimal=1)
# -*- coding: utf-8 -*-
"""
Created on Wed May 1 11:39:26 2019

@author: Furkan
"""

import numpy as np
from nibabel import trackvis as tv
from dipy.data import get_fnames
from dipy.viz import window, actor

fname = get_fnames('fornix')
streams, hdr = tv.read(fname)
streamlines = [i[0] for i in streams]
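
# A minimal visualization sketch using the imports above (window.Scene,
# actor.line and window.show are dipy.viz names; interactive display is an
# assumption about how this script is meant to be run):
scene = window.Scene()
scene.add(actor.line(streamlines))
window.show(scene)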