def test_sphere_scaling_csdmodel():
    """Mirroring the regularization hemisphere must not change CSD results."""
    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]
    S, sticks = multi_tensor(gtab, mevals, 100., angles=angles,
                             fractions=[50, 50], snr=None)
    hemi = small_sphere
    full_sphere = hemi.mirror()
    response = (np.array([0.0015, 0.0003, 0.0003]), 100)
    # Fit the same signal once per regularization sphere variant.
    fits = []
    for reg_sphere in (full_sphere, hemi):
        model = ConstrainedSphericalDeconvModel(gtab, response,
                                                reg_sphere=reg_sphere)
        fits.append(model.fit(S))
    assert_array_almost_equal(fits[0].shm_coeff, fits[1].shm_coeff)
def test_csd_convergence():
    """The `convergence` keyword must be accepted and not change the fit."""
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    evals = np.array([[1.5, .3, .3]]) * [[1.], [1.]] / 1000.
    S, sticks = multi_tensor(gtab, evals, snr=None, fractions=[55., 45.])

    def _build_model(**extra):
        # The legacy SH-basis deprecation warning is irrelevant here.
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    message=descoteaux07_legacy_msg,
                                    category=PendingDeprecationWarning)
            return ConstrainedSphericalDeconvModel(gtab, (evals[0], 3.),
                                                   sh_order=8, **extra)

    model_w_conv = _build_model(convergence=50)
    model_wo_conv = _build_model()
    assert_equal(model_w_conv.fit(S).shm_coeff,
                 model_wo_conv.fit(S).shm_coeff)
def test_sphere_scaling_csdmodel():
    """CSD coefficients are identical for a hemisphere and its mirror."""
    _, fbvals, fbvecs = get_data('small_64D')
    bvals, bvecs = np.load(fbvals), np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    eigenvalues = [0.0015, 0.0003, 0.0003]
    mevals = np.array((eigenvalues, eigenvalues))
    S, _ = multi_tensor(gtab, mevals, 100., angles=[(0, 0), (60, 0)],
                        fractions=[50, 50], snr=None)
    hemi = small_sphere
    full_sphere = hemi.mirror()
    response = (np.array(eigenvalues), 100)
    fit_full = ConstrainedSphericalDeconvModel(
        gtab, response, reg_sphere=full_sphere).fit(S)
    fit_hemi = ConstrainedSphericalDeconvModel(
        gtab, response, reg_sphere=hemi).fit(S)
    assert_array_almost_equal(fit_full.shm_coeff, fit_hemi.shm_coeff)
def test_csd_predict():
    """
    Test prediction API
    """
    SNR = 100
    S0 = 1
    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    # Two fiber populations crossing at 60 degrees, equal volume fractions.
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]
    S, sticks = multi_tensor(gtab, mevals, S0, angles=angles,
                             fractions=[50, 50], snr=SNR)
    sphere = small_sphere
    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(S)

    # Predicting from a fit should give the same result as predicting from a
    # model, S0 is 1 by default
    prediction1 = csd_fit.predict()
    prediction2 = csd.predict(csd_fit.shm_coeff)
    npt.assert_array_equal(prediction1, prediction2)
    npt.assert_array_equal(prediction1[..., gtab.b0s_mask], 1.)

    # Same with a different S0
    prediction1 = csd_fit.predict(S0=123.)
    prediction2 = csd.predict(csd_fit.shm_coeff, S0=123.)
    npt.assert_array_equal(prediction1, prediction2)
    npt.assert_array_equal(prediction1[..., gtab.b0s_mask], 123.)

    # For "well behaved" coefficients, the model should be able to find the
    # coefficients from the predicted signal.
    coeff = np.random.random(csd_fit.shm_coeff.shape) - .5
    coeff[..., 0] = 10.
    S = csd.predict(coeff)
    csd_fit = csd.fit(S)
    npt.assert_array_almost_equal(coeff, csd_fit.shm_coeff)

    # Test predict on nd-data set
    S_nd = np.zeros((2, 3, 4, S.size))
    S_nd[:] = S
    fit = csd.fit(S_nd)
    predict1 = fit.predict()
    predict2 = csd.predict(fit.shm_coeff)
    npt.assert_array_almost_equal(predict1, predict2)
def test_csd_convergence():
    """CSD model must accept the `convergence` keyword without changing
    the fitted coefficients on this data."""
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    evals = np.array([[1.5, .3, .3]]) * [[1.], [1.]] / 1000.
    S, _ = multi_tensor(gtab, evals, snr=None, fractions=[55., 45.])
    response = (evals[0], 3.)
    with_conv = ConstrainedSphericalDeconvModel(gtab, response, sh_order=8,
                                                convergence=50)
    without_conv = ConstrainedSphericalDeconvModel(gtab, response,
                                                   sh_order=8)
    assert_equal(with_conv.fit(S).shm_coeff, without_conv.fit(S).shm_coeff)
def probal(Threshold=.2, data_list=None, seed='.', one_node=False,
           two_node=False):
    """Run probabilistic CSD-based tractography and save a .trk tractogram.

    Parameters
    ----------
    Threshold : float
        GFA threshold for the tracking stopping criterion.
    data_list : dict
        Preloaded inputs with keys 'DWI', 'affine', 'img', 'labels',
        'gtab' and 'head_mask'.
    seed : str or ndarray
        Any non-string value is used directly as the seed mask; otherwise
        the default mask (label 2 inside the head mask) is used.
    one_node, two_node : bool
        Forwarded to ``reduct_seed_ROI`` to filter streamlines by their
        seed-ROI endpoints.
    """
    time0 = time.time()
    print("begin loading data, time:", time.time() - time0)
    data = data_list['DWI']
    affine = data_list['affine']
    img = data_list['img']
    labels = data_list['labels']
    gtab = data_list['gtab']
    head_mask = data_list['head_mask']

    # Bug fix: use isinstance instead of `type(seed) != str` (idiomatic and
    # robust to str subclasses).
    if not isinstance(seed, str):
        seed_mask = seed
    else:
        seed_mask = (labels == 2) * (head_mask == 1)
    white_matter = (labels == 2) * (head_mask == 1)
    seeds = utils.seeds_from_mask(seed_mask, affine, density=1)

    print("begin reconstruction, time:", time.time() - time0)
    response, ratio = auto_response_ssst(gtab, data, roi_radii=10, fa_thr=0.7)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6)
    csd_fit = csd_model.fit(data, mask=white_matter)

    # GFA from a CSA fit provides the stopping criterion.
    csa_model = CsaOdfModel(gtab, sh_order=6)
    gfa = csa_model.fit(data, mask=white_matter).gfa
    stopping_criterion = ThresholdStoppingCriterion(gfa, Threshold)

    print("begin tracking, time:", time.time() - time0)
    fod = csd_fit.odf(small_sphere)
    # ODF values must be non-negative to be used as a PMF.
    pmf = fod.clip(min=0)
    prob_dg = ProbabilisticDirectionGetter.from_pmf(pmf, max_angle=30.,
                                                    sphere=small_sphere)
    streamline_generator = LocalTracking(prob_dg, stopping_criterion, seeds,
                                         affine, step_size=.5)
    streamlines = Streamlines(streamline_generator)
    sft = StatefulTractogram(streamlines, img, Space.RASMM)

    if one_node or two_node:
        sft.to_vox()
        streamlines = reduct_seed_ROI(sft.streamlines, seed_mask, one_node,
                                      two_node)
        sft = StatefulTractogram(streamlines, img, Space.VOX)
        # NOTE(review): relies on a private StatefulTractogram method;
        # prefer a public to_rasmm() equivalent if available — confirm.
        sft._vox_to_rasmm()

    print("begin saving, time:", time.time() - time0)
    output = 'tractogram_probabilistic.trk'
    save_trk(sft, output)
    print("finished, time:", time.time() - time0)
def get_csd_gfa(nii_data, gtab, mask=None):
    """Fit a CSD model to the diffusion data and return its GFA map.

    Parameters
    ----------
    nii_data : ndarray
        4D diffusion-weighted image data.
    gtab : GradientTable
        Diffusion gradient information.
    mask : ndarray, optional
        Boolean voxel mask restricting the fit (new, backward-compatible
        parameter; defaults to fitting every voxel).

    Returns
    -------
    ndarray
        Generalized fractional anisotropy of the fitted model.
    """
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel

    csd_model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=6)
    # Bug fix: the original referenced undefined names `data` and `mask`
    # (NameError at call time); use the function's own arguments.
    GFA = csd_model.fit(nii_data, mask).gfa
    print('csd_gfa ok')
    return GFA
def csd(training, category, snr, denoised, odeconv, tv, method, weight=0.1):
    """Fit a CSD model for one dataset and save its ODFs/peaks.

    `category` selects the SH order (6 for 'dti', 8 for 'hardi'); when `tv`
    is truthy the ODF field is TV-denoised with `weight` before saving.
    """
    data, affine, gtab, mask, evals, S0, prefix = prepare(training, category,
                                                          snr, denoised,
                                                          odeconv, tv,
                                                          method)

    if category == 'dti':
        csd_model = ConstrainedSphericalDeconvModel(gtab, (evals, S0),
                                                    sh_order=6)
    if category == 'hardi':
        csd_model = ConstrainedSphericalDeconvModel(gtab, (evals, S0),
                                                    sh_order=8)

    csd_fit = csd_model.fit(data, mask)
    sphere = get_sphere('symmetric724')
    odf = csd_fit.odf(sphere)

    if tv:
        # Bug fix: honor the `weight` argument (it was accepted but the
        # call hard-coded weight=0.1, silently ignoring the caller's value).
        odf = tv_denoise_4d(odf, weight=weight)

    # NOTE(review): `dres` is not defined in this function — presumably a
    # module-level constant; confirm it exists at import time.
    save_odfs_peaks(training, odf, affine, sphere, dres, prefix)
def test_csd_superres():
    """ Check the quality of csdfit with high SH order. """
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    # img, gtab = read_stanford_hardi()
    evals = np.array([[1.5, .3, .3]]) * [[1.], [1.]] / 1000.
    S, sticks = multi_tensor(gtab, evals, snr=None, fractions=[55., 45.])
    # sh_order=16 requires more parameters than the 64-direction data
    # supports, so constructing the model should raise a UserWarning.
    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings(action="always",
                                message="Number of parameters required.*",
                                category=UserWarning)
        model16 = ConstrainedSphericalDeconvModel(gtab, (evals[0], 3.),
                                                  sh_order=16)
        assert_greater_equal(len(w), 1)
        npt.assert_(issubclass(w[-1].category, UserWarning))
    fit16 = model16.fit(S)
    sphere = HemiSphere.from_sphere(get_sphere('symmetric724'))
    # print local_maxima(fit16.odf(default_sphere), default_sphere.edges)
    d, v, ind = peak_directions(fit16.odf(sphere), sphere,
                                relative_peak_threshold=.2,
                                min_separation_angle=0)
    # Check that there are two peaks
    assert_equal(len(d), 2)
    # Check that peaks line up with sticks
    cos_sim = abs((d * sticks).sum(1)) ** .5
    assert_(all(cos_sim > .99))
def calculate_fodf(gtab, images, name, sphere=default_sphere, radius=10,
                   fa_threshold=0.7):
    """Fit a CSD model, render its fiber ODFs and save a screenshot.

    Returns the fitted CSD model so callers can reuse its coefficients.
    """
    response, ratio = auto_response(gtab, images, roi_radius=radius,
                                    fa_thr=fa_threshold)
    csd_fit = ConstrainedSphericalDeconvModel(gtab, response).fit(images)
    odf = csd_fit.odf(sphere)

    spheres_actor = actor.odf_slicer(odf, sphere=sphere, scale=0.9,
                                     norm=False, colormap='plasma')
    scene = window.Scene()
    scene.add(spheres_actor)

    print('Saving illustration as csd_odfs_{}.png'.format(name))
    window.record(scene, out_path='results/csd_odfs_{}.png'.format(name),
                  size=(600, 600))
    return csd_fit
def test_csd_superres():
    """Quality of a CSD fit with a very high SH order (super-resolution)."""
    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    # img, gtab = read_stanford_hardi()
    evals = np.array([[1.5, .3, .3]]) * [[1.], [1.]] / 1000.
    S, sticks = multi_tensor(gtab, evals, snr=None, fractions=[55., 45.])
    model16 = ConstrainedSphericalDeconvModel(gtab, (evals[0], 3.),
                                              sh_order=16)
    fit16 = model16.fit(S)
    d, v, ind = peak_directions(fit16.odf(default_sphere), default_sphere,
                                relative_peak_threshold=.2,
                                min_separation_angle=0)
    # Exactly the two simulated fiber populations should be recovered...
    assert_equal(len(d), 2)
    # ...and their directions should line up with the ground-truth sticks.
    cosines = abs((d * sticks).sum(1)) ** .5
    assert_(all(cosines > .99))
def test_csd_predict_multi():
    """ Check that we can predict reasonably from multi-voxel fits: """
    S0 = 123.
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    # The legacy SH-basis deprecation warning is irrelevant to this test.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)
        model = ConstrainedSphericalDeconvModel(gtab, response)
    # "Well behaved" random coefficients with a dominant isotropic term.
    coeff = np.random.random(45) - .5
    coeff[..., 0] = 10.
    with warnings.catch_warnings():
        warnings.filterwarnings("ignore", message=descoteaux07_legacy_msg,
                                category=PendingDeprecationWarning)
        voxel_signal = model.predict(coeff, S0=123.)
    multi_S = np.array([[voxel_signal, voxel_signal],
                        [voxel_signal, voxel_signal]])
    fit = model.fit(multi_S)
    S0_map = np.mean(multi_S[..., gtab.b0s_mask], -1)
    npt.assert_array_almost_equal(fit.predict(S0=S0_map), multi_S)
def constrained_spherical_deconvolution(dir_src, dir_out, verbose=False):
    """Fit a CSD model to the DWI data found in `dir_src` and write the
    spherical-harmonic coefficients to `dir_out`.

    Relies on module-level ``par_*`` configuration constants (file tags,
    b0 threshold, auto-response ROI radius and FA threshold).
    """
    # Load data
    fbval = pjoin(dir_src, 'bvals_' + par_b_tag)
    fbvec = pjoin(dir_src, 'bvecs_' + par_b_tag)
    fdwi = pjoin(dir_src, 'data_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')
    #fmask = pjoin(dir_src, 'nodif_brain_mask_' + par_dim_tag + '.nii.gz')
    # Fit is restricted to the white-matter mask.
    fmask = pjoin(dir_src, 'wm_mask_' + par_b_tag + '_' + par_dim_tag + '.nii.gz')

    bvals, bvecs = read_bvals_bvecs(fbval, fbvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=par_b0_threshold)
    data, affine = load_nifti(fdwi, verbose)
    mask, _ = load_nifti(fmask, verbose)

    sphere = get_sphere('symmetric724')

    # Estimate the single-fiber response from a FA-thresholded ROI.
    response, ratio = auto_response(gtab, data, roi_radius=par_ar_radius,
                                    fa_thr=par_ar_fa_th)
    # print('Response function', response)

    # Model fitting
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd_model.fit(data, mask=mask)

    # Saving Spherical Harmonic Coefficient
    out_peaks = 'sh_' + par_b_tag + '_' + par_dim_tag + '.nii.gz'
    save_nifti(pjoin(dir_out, out_peaks), csd_fit.shm_coeff, affine)
def _init_odf(self, odf_mode):
    """Compute the ODF field (DTI- or CSD-based) and build its interpolator."""
    print("Initialising ODF")
    if odf_mode == "DTI":
        print("DTI-based ODF computation")
        model = dti.TensorModel(self.dataset.gtab, fit_method='LS')
        fit = model.fit(self.dataset.dwi, mask=self.dataset.binary_mask)
        odf = fit.odf(self.sphere)
    elif odf_mode == "CSD":
        print("CSD-based ODF computation")
        mask = mask_for_response_ssst(self.dataset.gtab, self.dataset.dwi,
                                      roi_radii=10, fa_thr=0.7)
        response, ratio = response_from_mask_ssst(self.dataset.gtab,
                                                  self.dataset.dwi, mask)
        model = ConstrainedSphericalDeconvModel(self.dataset.gtab, response)
        fit = model.fit(self.dataset.dwi)
        odf = fit.odf(self.sphere)
    else:
        raise NotImplementedError("ODF mode not found")

    # -- set up interpolator for odf evaluation
    odf = torch.from_numpy(odf).to(device=self.device).float()
    self.odf_interpolator = TorchGridInterpolator(odf)
def calculate_model(self, response):
    """Build CSA and CSD models; return the CSD fit plus the CSA model."""
    csa_model = CsaOdfModel(self.gtab, sh_order=self.sh_order)
    csd_fit = ConstrainedSphericalDeconvModel(
        self.gtab, response, sh_order=self.sh_order,
    ).fit(self.data, mask=self.white_mask)
    return csd_fit, csa_model
def test_csd_predict():
    """Module-level and model-level CSD prediction must agree and roughly
    round-trip the simulated signal."""
    SNR = 100
    S0 = 1
    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]
    S, sticks = multi_tensor(gtab, mevals, S0, angles=angles,
                             fractions=[50, 50], snr=SNR)
    sphere = get_sphere('symmetric362')
    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    model = ConstrainedSphericalDeconvModel(gtab, response)
    fit = model.fit(S)
    # The free function and the model method should predict identically.
    direct = csd_predict(fit.shm_coeff, gtab, response=response, S0=S0)
    npt.assert_equal(direct.shape[0], S.shape[0])
    via_model = model.predict(fit.shm_coeff)
    assert_array_almost_equal(direct, via_model)
    # Roundtrip is only coarse because of the regularization.
    assert_array_almost_equal(fit.predict(gtab, S0=S0), S, decimal=1)
    assert_array_almost_equal(model.predict(fit.shm_coeff, S0=S0), S,
                              decimal=1)
def get_csd_gfa(nii_data, gtab, mask=None):
    """Fit a CSD model to `nii_data` and return its GFA map.

    Parameters
    ----------
    nii_data : ndarray
        4D diffusion-weighted image data.
    gtab : GradientTable
        Diffusion gradient information.
    mask : ndarray, optional
        Boolean voxel mask restricting the fit (new, backward-compatible
        parameter).

    Returns
    -------
    ndarray
        Generalized fractional anisotropy of the fitted model.
    """
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel

    csd_model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=6)
    # Bug fix: the original used undefined globals `data` and `mask`
    # (guaranteed NameError); use the arguments that were passed in.
    GFA = csd_model.fit(nii_data, mask).gfa
    print('csd_gfa ok')
    return GFA
def init_odf(self, params=None, csd=False):
    """Reconstruct ODFs for the raw and ground-truth slices.

    Parameters
    ----------
    params : dict, optional
        Extra keyword arguments merged over the CSA model defaults.
        (Bug fix: the previous mutable default ``params={}`` was shared
        across calls; ``None`` is now used as the sentinel.)
    csd : bool
        When True, use CSD with ``self.csd_response`` instead of CSA.
    """
    baseparams = {
        'assume_normed': self.normed,
        'sh_order': 6,
    }
    if params:
        baseparams.update(params)
    if csd:
        logging.info("Using CSD for ground truth reconstruction.")
        basemodel = ConstrainedSphericalDeconvModel(self.gtab,
                                                    self.csd_response)
    else:
        basemodel = CsaOdfModel(self.gtab, **baseparams)
    S_data = self.raw[self.slice]
    S_data_orig = self.ground_truth[self.slice]
    # Clip negative lobes; normalize nothing (max kept per voxel).
    f = basemodel.fit(S_data).odf(self.dipy_sph)
    self.odf = np.clip(f, 0, np.max(f, -1)[..., None])
    f = basemodel.fit(S_data_orig).odf(self.dipy_sph)
    self.odf_ground_truth = np.clip(f, 0, np.max(f, -1)[..., None])
def predict_image(gtab_odf, gtab_predict, images, radius=10,
                  fa_threshold=0.7):
    """Fit CSD on (`gtab_odf`, `images`) and predict the signal that the
    `gtab_predict` acquisition scheme would measure."""
    response, ratio = auto_response(gtab_odf, images, roi_radius=radius,
                                    fa_thr=fa_threshold)
    model = ConstrainedSphericalDeconvModel(gtab_odf, response)
    return model.fit(images).predict(gtab_predict)
def csd_mod_est(gtab, data, B0_mask, sh_order=8):
    '''
    Estimate a Constrained Spherical Deconvolution (CSD) model from dwi data.

    Parameters
    ----------
    gtab : Obj
        DiPy object storing diffusion gradient information.
    data : array
        4D numpy array of diffusion image data.
    B0_mask : str
        File path to B0 brain mask.
    sh_order : int
        The order of the SH model. Default is 8.

    Returns
    -------
    csd_mod : ndarray
        Coefficients of the csd reconstruction.
    model : obj
        Fitted csd model.

    References
    ----------
    .. [1] Tournier, J.D., et al. NeuroImage 2007. Robust determination
      of the fibre orientation distribution in diffusion MRI:
      Non-negativity constrained super-resolved spherical deconvolution
    .. [2] Descoteaux, M., et al. IEEE TMI 2009. Deterministic and
      Probabilistic Tractography Based on Complex Fibre Orientation
      Distributions
    .. [3] Côté, M-A., et al. Medical Image Analysis 2013. Tractometer:
      Towards validation of tractography pipelines
    .. [4] Tournier, J.D, et al. Imaging Systems and Technology 2012.
      MRtrix: Diffusion Tractography in Crossing Fiber Regions
    '''
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, recursive_response
    print('Fitting CSD model...')
    B0_mask_data = np.asarray(nib.load(B0_mask).dataobj).astype('bool')
    print('Reconstructing...')
    # Derive the single-fiber response recursively from the data itself
    # instead of from a fixed tensor model.
    response = recursive_response(gtab, data, mask=B0_mask_data,
                                  sh_order=sh_order, peak_thr=0.01,
                                  init_fa=0.08, init_trace=0.0021, iter=8,
                                  convergence=0.001, parallel=False)
    print('CSD Reponse: ' + str(response))
    model = ConstrainedSphericalDeconvModel(gtab, response,
                                            sh_order=sh_order)
    csd_mod = model.fit(data, B0_mask_data).shm_coeff
    # Free the large intermediates before returning.
    del response, B0_mask_data
    return csd_mod, model
def create_csd_model(data, gtab, white_matter, sh_order=6):
    """Fit a CSD model to `data` restricted to the white-matter mask."""
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, auto_response_ssst

    response, ratio = auto_response_ssst(gtab, data, roi_radii=10,
                                         fa_thr=0.7)
    model = ConstrainedSphericalDeconvModel(gtab, response,
                                            sh_order=sh_order)
    return model.fit(data, mask=white_matter)
def _setup_odf(self):
    """Estimate the response, fit a CSD model and cache the ODF field."""
    print("Setting up ODF")
    gtab, dwi = self.dataset.gtab, self.dataset.dwi
    mask = mask_for_response_ssst(gtab, dwi, roi_radii=10, fa_thr=0.7)
    print("Calculating response")
    response, _ = response_from_mask_ssst(gtab, dwi, mask)
    model = ConstrainedSphericalDeconvModel(gtab, response)
    print("Fitting CSD model")
    self.odf = model.fit(dwi).odf(self.sphere)
def PFT_tracking(name=None, data_path=None, output_path='.', Threshold=.20):
    """Particle-filtering tractography pipeline: CSD for directions, CSA
    GFA for the stopping criterion, seeds from label-2 white matter."""
    time0 = time.time()
    print("begin loading data, time:", time.time() - time0)
    data, affine, img, labels, gtab, head_mask = get_data(name, data_path)
    seed_mask = (labels == 2) * (head_mask == 1)
    white_matter = (labels == 2) * (head_mask == 1)
    seeds = utils.seeds_from_mask(seed_mask, affine, density=1)

    print('begin reconstruction, time:', time.time() - time0)
    response, ratio = auto_response_ssst(gtab, data, roi_radii=10, fa_thr=0.7)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd_model.fit(data, mask=white_matter)

    # GFA from a CSA fit provides the stopping criterion.
    csa_model = CsaOdfModel(gtab, sh_order=6)
    gfa = csa_model.fit(data, mask=white_matter).gfa
    stopping_criterion = ThresholdStoppingCriterion(gfa, Threshold)

    dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                                   max_angle=20.,
                                                   sphere=default_sphere)

    #seed_mask = (labels == 2)
    #seed_mask[pve_wm_data < 0.5] = 0
    seeds = utils.seeds_from_mask(seed_mask, affine, density=1)
    #voxel_size = np.average(voxel_size[1:4])
    step_size = 0.2

    #cmc_criterion = CmcStoppingCriterion.from_pve(pve_wm_data,
    #                                              pve_gm_data,
    #                                              pve_csf_data,
    #                                              step_size=step_size,
    #                                              average_voxel_size=voxel_size)

    # Particle Filtering Tractography
    pft_streamline_generator = ParticleFilteringTracking(
        dg, stopping_criterion, seeds, affine, max_cross=1,
        step_size=step_size, maxlen=1000, pft_back_tracking_dist=2,
        pft_front_tracking_dist=1, particle_count=15, return_all=False)
    streamlines = Streamlines(pft_streamline_generator)
    sft = StatefulTractogram(streamlines, img, Space.RASMM)
    # NOTE(review): `output` is built but nothing is saved here —
    # presumably a save_trk(sft, output) call is missing; confirm.
    output = output_path + '/tractogram_pft_' + name + '.trk'
def _csd_ft(self):
    """Fit a CSD model to self.data and store the fit on self.model_fit."""
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, auto_response_ssst

    response, ratio = auto_response_ssst(self.gtab, self.data, roi_radii=10,
                                         fa_thr=0.7)
    model = ConstrainedSphericalDeconvModel(
        self.gtab, response,
        sh_order=self.parameters_dict['sh_order'])
    self.model_fit = model.fit(self.data)
def test_csdeconv():
    # Two-tensor crossing (60 degrees); CSD should recover both directions.
    SNR = 100
    S0 = 1
    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    S, sticks = multi_tensor(gtab, mevals, S0, angles=[(0, 0), (60, 0)],
                             fractions=[50, 50], snr=SNR)
    sphere = get_sphere('symmetric724')
    mevecs = [all_tensor_evecs(sticks[0]).T,
              all_tensor_evecs(sticks[1]).T]
    # NOTE(review): this is an older multi_tensor_odf signature
    # (fractions, mevals, mevecs) — confirm against the dipy version used.
    odf_gt = multi_tensor_odf(sphere.vertices, [0.5, 0.5], mevals, mevecs)
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(S)
    assert_equal(csd_fit.shm_coeff[0] > 0, True)
    fodf = csd_fit.odf(sphere)
    directions, _, _ = peak_directions(odf_gt, sphere)
    directions2, _, _ = peak_directions(fodf, sphere)
    ang_sim = angular_similarity(directions, directions2)
    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions.shape[0], 2)
    assert_equal(directions2.shape[0], 2)
    # sh_order=10 needs more gradient directions than this data provides,
    # so a warning is expected; sh_order=8 should be silent.
    with warnings.catch_warnings(record=True) as w:
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=10)
        assert_equal(len(w) > 0, True)
    with warnings.catch_warnings(record=True) as w:
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        assert_equal(len(w) > 0, False)
def quick_fodf(gtab, images, sphere=default_sphere, radius=10,
               fa_threshold=0.7):
    """Return CSD-based fiber ODFs of `images` sampled on `sphere`."""
    response, ratio = auto_response(gtab, images, roi_radius=radius,
                                    fa_thr=fa_threshold)
    model = ConstrainedSphericalDeconvModel(gtab, response)
    return model.fit(images).odf(sphere)
def test_sphere_scaling_csdmodel():
    """Check that mirroring regularization sphere does not change the
    result of the model"""
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    diffusivities = [0.0015, 0.0003, 0.0003]
    mevals = np.array((diffusivities, diffusivities))
    S, _ = multi_tensor(gtab, mevals, 100., angles=[(0, 0), (60, 0)],
                        fractions=[50, 50], snr=None)
    hemi = small_sphere
    response = (np.array(diffusivities), 100)
    # Fit once with the mirrored full sphere, once with the hemisphere.
    coeffs = []
    for reg_sphere in (hemi.mirror(), hemi):
        with warnings.catch_warnings():
            warnings.filterwarnings("ignore",
                                    message=descoteaux07_legacy_msg,
                                    category=PendingDeprecationWarning)
            model = ConstrainedSphericalDeconvModel(gtab, response,
                                                    reg_sphere=reg_sphere)
        coeffs.append(model.fit(S).shm_coeff)
    assert_array_almost_equal(coeffs[0], coeffs[1])
def _run_interface(self, runtime):
    """Fit a CSD model from the interface inputs, pickle the model to a
    gzip file, and optionally save the fiber ODFs as a NIfTI image."""
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
    from dipy.data import get_sphere

    # import marshal as pickle
    import pickle as pickle
    import gzip

    img = nb.load(self.inputs.in_file)
    imref = nb.four_to_three(img)[0]

    if isdefined(self.inputs.in_mask):
        msk = nb.load(self.inputs.in_mask).get_data()
    else:
        msk = np.ones(imref.shape)

    data = img.get_data().astype(np.float32)
    gtab = self._get_gradient_table()

    # Response file layout: first three values are the tensor eigenvalues,
    # the last one the reference S0.
    resp_file = np.loadtxt(self.inputs.response)
    response = (np.array(resp_file[0:3]), resp_file[-1])
    ratio = response[0][1] / response[0][0]
    # A usable single-fiber response should be strongly prolate (~0.2).
    if abs(ratio - 0.2) > 0.1:
        IFLOGGER.warning(
            "Estimated response is not prolate enough. "
            "Ratio=%0.3f.", ratio
        )

    csd_model = ConstrainedSphericalDeconvModel(
        gtab, response, sh_order=self.inputs.sh_order
    )

    IFLOGGER.info("Fitting CSD model")
    csd_fit = csd_model.fit(data, msk)

    f = gzip.open(self._gen_filename("csdmodel", ext=".pklz"), "wb")
    pickle.dump(csd_model, f, -1)
    f.close()

    if self.inputs.save_fods:
        sphere = get_sphere("symmetric724")
        fods = csd_fit.odf(sphere)
        nb.Nifti1Image(fods.astype(np.float32), img.affine, None).to_filename(
            self._gen_filename("fods")
        )

    return runtime
def csd_mod_est(gtab, data, wm_in_dwi):
    """Estimate CSD spherical-harmonic coefficients from dwi data.

    Tries the default (tensor-derived) response first and falls back to a
    recursive response estimate if model construction fails.

    Parameters
    ----------
    gtab : GradientTable
        Diffusion gradient information.
    data : ndarray
        4D diffusion image data.
    wm_in_dwi : str
        Path to a white-matter mask Nifti1Image.

    Returns
    -------
    ndarray
        SH coefficients of the fitted model.
    """
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, recursive_response

    print('Fitting CSD model...')
    wm_in_dwi_mask = nib.load(wm_in_dwi).get_fdata().astype('bool')
    try:
        print('Attempting to use spherical harmonic...')
        model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=6)
    # Bug fix: the bare `except:` also swallowed KeyboardInterrupt and
    # SystemExit; catch Exception instead.
    except Exception:
        print('Falling back to recursive response...')
        response = recursive_response(gtab, data, mask=wm_in_dwi_mask,
                                      sh_order=8, peak_thr=0.01,
                                      init_fa=0.08, init_trace=0.0021,
                                      iter=8, convergence=0.001,
                                      parallel=False)
        # Bug fix: corrected "Reponse" typo in the progress message.
        print('CSD Response: ' + str(response))
        model = ConstrainedSphericalDeconvModel(gtab, response)
    mod = model.fit(data, wm_in_dwi_mask)
    return mod.shm_coeff
def _run_interface(self, runtime):
    """Fit a CSD model and pickle it; optionally save fiber ODFs.

    Legacy variant using the deprecated nibabel accessors (get_affine,
    get_header, get_shape) and IFLOGGER.warn.
    """
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel
    from dipy.data import get_sphere
    # import marshal as pickle
    import pickle as pickle
    import gzip

    img = nb.load(self.inputs.in_file)
    imref = nb.four_to_three(img)[0]
    affine = img.get_affine()

    if isdefined(self.inputs.in_mask):
        msk = nb.load(self.inputs.in_mask).get_data()
    else:
        msk = np.ones(imref.get_shape())

    data = img.get_data().astype(np.float32)
    hdr = imref.get_header().copy()

    gtab = self._get_gradient_table()
    resp_file = np.loadtxt(self.inputs.response)

    # First three values: tensor eigenvalues; last value: reference S0.
    response = (np.array(resp_file[0:3]), resp_file[-1])
    ratio = response[0][1] / response[0][0]
    # Warn when the response is not prolate enough (expected ratio ~0.2).
    if abs(ratio - 0.2) > 0.1:
        IFLOGGER.warn(('Estimated response is not prolate enough. '
                       'Ratio=%0.3f.') % ratio)

    csd_model = ConstrainedSphericalDeconvModel(
        gtab, response, sh_order=self.inputs.sh_order)

    IFLOGGER.info('Fitting CSD model')
    csd_fit = csd_model.fit(data, msk)

    f = gzip.open(self._gen_filename('csdmodel', ext='.pklz'), 'wb')
    pickle.dump(csd_model, f, -1)
    f.close()

    if self.inputs.save_fods:
        sphere = get_sphere('symmetric724')
        fods = csd_fit.odf(sphere)
        nb.Nifti1Image(fods.astype(np.float32), img.get_affine(),
                       None).to_filename(self._gen_filename('fods'))

    return runtime
def test_csd_predict_multi():
    """ Check that we can predict reasonably from multi-voxel fits: """
    S0 = 123.
    _, fbvals, fbvecs = get_fnames('small_64D')
    bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs)
    gtab = gradient_table(bvals, bvecs)
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    model = ConstrainedSphericalDeconvModel(gtab, response)
    # "Well behaved" random coefficients with a dominant isotropic term.
    coeff = np.random.random(45) - .5
    coeff[..., 0] = 10.
    voxel_signal = model.predict(coeff, S0=123.)
    multi_S = np.array([[voxel_signal, voxel_signal],
                        [voxel_signal, voxel_signal]])
    fit = model.fit(multi_S)
    S0_map = np.mean(multi_S[..., gtab.b0s_mask], -1)
    npt.assert_array_almost_equal(fit.predict(S0=S0_map), multi_S)
def test_csd_predict_multi():
    """Prediction from a multi-voxel fit must reproduce the input signal."""
    S0 = 123.
    _, fbvals, fbvecs = get_data('small_64D')
    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    gtab = gradient_table(bvals, bvecs)
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)
    model = ConstrainedSphericalDeconvModel(gtab, response)
    # Random but "well behaved" coefficients: strong isotropic term.
    coeff = np.random.random(45) - .5
    coeff[..., 0] = 10.
    voxel = model.predict(coeff, S0=123.)
    multi_S = np.array([[voxel, voxel], [voxel, voxel]])
    fit = model.fit(multi_S)
    S0_map = np.mean(multi_S[..., gtab.b0s_mask], -1)
    npt.assert_array_almost_equal(fit.predict(S0=S0_map), multi_S)
def csd_mod_est(gtab, data, wm_in_dwi):
    '''
    Estimate a Constrained Spherical Deconvolution (CSD) model from dwi data.

    Parameters
    ----------
    gtab : Obj
        DiPy object storing diffusion gradient information.
    data : array
        4D numpy array of diffusion image data.
    wm_in_dwi : str
        File path to white-matter tissue segmentation Nifti1Image.

    Returns
    -------
    csd_mod : obj
        Spherical harmonics coefficients of the CSD-estimated
        reconstruction model.
    '''
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, recursive_response

    print('Fitting CSD model...')
    wm_in_dwi_mask = nib.load(wm_in_dwi).get_fdata().astype('bool')
    try:
        print('Reconstructing...')
        model = ConstrainedSphericalDeconvModel(gtab, None, sh_order=6)
    # Bug fix: narrowed the bare `except:` (it also caught
    # KeyboardInterrupt/SystemExit) to Exception.
    except Exception:
        print('Falling back to recursive response...')
        response = recursive_response(gtab, data, mask=wm_in_dwi_mask,
                                      sh_order=8, peak_thr=0.01,
                                      init_fa=0.08, init_trace=0.0021,
                                      iter=8, convergence=0.001,
                                      parallel=False)
        # Bug fix: corrected "Reponse" typo in the progress message.
        print('CSD Response: ' + str(response))
        model = ConstrainedSphericalDeconvModel(gtab, response)
    csd_mod = model.fit(data, wm_in_dwi_mask).shm_coeff
    return csd_mod
def csd_mod_est(gtab, data, B0_mask, sh_order=8):
    '''
    Estimate a Constrained Spherical Deconvolution (CSD) model from dwi data.

    Parameters
    ----------
    gtab : Obj
        DiPy object storing diffusion gradient information.
    data : array
        4D numpy array of diffusion image data.
    B0_mask : str
        File path to B0 brain mask.
    sh_order : int
        The order of the SH model. Default is 8.

    Returns
    -------
    csd_mod : obj
        Spherical harmonics coefficients of the CSD-estimated
        reconstruction model.
    '''
    from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel, recursive_response
    print('Fitting CSD model...')
    B0_mask_data = np.asarray(nib.load(B0_mask).dataobj).astype('bool')
    print('Reconstructing...')
    # Derive the single-fiber response recursively from the data itself
    # instead of from a fixed tensor model.
    response = recursive_response(gtab, data, mask=B0_mask_data,
                                  sh_order=sh_order, peak_thr=0.01,
                                  init_fa=0.08, init_trace=0.0021, iter=8,
                                  convergence=0.001, parallel=False)
    print('CSD Reponse: ' + str(response))
    model = ConstrainedSphericalDeconvModel(gtab, response,
                                            sh_order=sh_order)
    csd_mod = model.fit(data, B0_mask_data).shm_coeff
    # Free the large intermediates before returning.
    del model, response, B0_mask_data
    return csd_mod
def test_num_sls():
    """Deterministic tracking from each toy ROI must yield the expected
    number of streamlines, all of the expected length."""
    toydict = uniform_toy_data()
    csd_model = ConstrainedSphericalDeconvModel(toydict['gtab'], None,
                                                sh_order=6)
    csd_fit = csd_model.fit(toydict['toy_data'])
    # (ROI key, expected streamline count) pairs.
    sltest_list = [('toy_roi_long_plane', 121),
                   ('toy_roi_radial_plane', 121),
                   ('toy_roi_center_vox', 1)]
    classifier = ThresholdTissueClassifier(toydict['toy_tissue_classifier'],
                                           .1)
    detmax_dg = DeterministicMaximumDirectionGetter.from_shcoeff(
        csd_fit.shm_coeff, max_angle=30., sphere=default_sphere)
    expected_sl_length = 11
    for roi, num_sl in sltest_list:
        seed = utils.seeds_from_mask(toydict[roi])
        streamlines = LocalTracking(detmax_dg, classifier, seed,
                                    toydict['toy_affine'], step_size=1)
        streamlines = list(streamlines)
        npt.assert_equal(len(streamlines), num_sl)
        for sl in streamlines:
            npt.assert_equal(len(sl), expected_sl_length)
different orientations. The density of fibers along each orientation is known as the Fiber Orientation Distribution (FOD). In order to perform probabilistic fiber tracking, we pick a fiber from the FOD at random at each new location along the streamline. Note: one could use this model to perform deterministic fiber tracking by always tracking along the directions that have the most fibers. Let's begin probabilistic fiber tracking by fitting the data to the CSD model. """ from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel, auto_response) response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7) csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=6) csd_fit = csd_model.fit(data, mask=white_matter) """ Next we'll need to make a ``ProbabilisticDirectionGetter``. Because the CSD model represents the FOD using the spherical harmonic basis, we can use the ``from_shcoeff`` method to create the direction getter. This direction getter will randomly sample directions from the FOD each time the tracking algorithm needs to take another step. """ from dipy.direction import ProbabilisticDirectionGetter prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff, max_angle=30., sphere=default_sphere)
# slice out b0 b0 = data[:,:,:,0]; img2 = nib.Nifti1Image(b0,affine); nib.save(img2, "b0.nii.gz"); # load bvals bvecs fbval = "dwi.bval"; fbvec = "dwi.bvec"; bvals, bvecs = read_bvals_bvecs(fbval, fbvec); gtab = gradient_table(bvals, bvecs); # fit constrained spherical deconvolution model response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7); csd_model = CsdModel(gtab, response); csd_fit = csd_model.fit(data); # init fiber odf sphere = get_sphere('symmetric724'); csd_odf = csd_fit.odf(sphere); stepper = FixedSizeStepper(1); # load in b0 and create mask b0_img = nib.load('b0.nii.gz'); b0_data = b0_img.get_data(); b0_mask, mask = median_otsu(b0_data, 2, 1); # save mask for b0 mask_img = nib.Nifti1Image(mask.astype(np.float32), b0_img.get_affine()) b0_img = nib.Nifti1Image(b0_mask.astype(np.float32), b0_img.get_affine())
# Tutorial-style script: the bare triple-quoted strings are narrative text
# (dipy example format), not docstrings of any object.
fvtk.rm(ren, response_actor)

"""
Now, that we have the response function, we are ready to start the
deconvolution process. Let's import the CSD model and fit the datasets.
"""

from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel

csd_model = ConstrainedSphericalDeconvModel(gtab, response)

"""
For illustration purposes we will fit only a small portion of the data.
"""

data_small = data[20:50, 55:85, 38:39]
csd_fit = csd_model.fit(data_small)

"""
Show the CSD-based ODFs also known as FODFs (fiber ODFs).
"""

csd_odf = csd_fit.odf(sphere)

"""
Here we visualize only a 30x30 region.
"""

fodf_spheres = fvtk.sphere_funcs(csd_odf, sphere, scale=1.3, norm=False)

fvtk.add(ren, fodf_spheres)
def test_csdeconv():
    """Check CSD peak recovery, SH-order warnings and ``auto_response``.

    Simulates a two-fiber crossing (60 degrees apart), fits CSD, and
    verifies that both fiber directions are recovered. Also checks that an
    SH order the gradient scheme cannot support warns, and that
    ``auto_response`` recovers the simulated response with and without an
    explicit ROI center.
    """
    SNR = 100
    S0 = 1

    _, fbvals, fbvecs = get_data('small_64D')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)

    gtab = gradient_table(bvals, bvecs)
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (60, 0)]

    S, sticks = multi_tensor(gtab, mevals, S0, angles=angles,
                             fractions=[50, 50], snr=SNR)

    sphere = get_sphere('symmetric362')
    odf_gt = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
    response = (np.array([0.0015, 0.0003, 0.0003]), S0)

    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(S)
    # DC (l=0) coefficient must be positive for a physical FOD.
    assert_equal(csd_fit.shm_coeff[0] > 0, True)
    fodf = csd_fit.odf(sphere)

    directions, _, _ = peak_directions(odf_gt, sphere)
    directions2, _, _ = peak_directions(fodf, sphere)

    ang_sim = angular_similarity(directions, directions2)

    # Two crossing fibers: both peaks found, well aligned with ground truth.
    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions.shape[0], 2)
    assert_equal(directions2.shape[0], 2)

    # sh_order=10 needs more gradient directions than this scheme provides
    # and must warn; sh_order=8 is supported and must stay silent.
    with warnings.catch_warnings(record=True) as w:
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=10)
        assert_equal(len(w) > 0, True)

    with warnings.catch_warnings(record=True) as w:
        ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
        assert_equal(len(w) > 0, False)

    mevecs = []
    for s in sticks:
        mevecs += [all_tensor_evecs(s).T]

    # Build a homogeneous single-tensor volume for auto_response.
    S2 = single_tensor(gtab, 100, mevals[0], mevecs[0], snr=None)
    big_S = np.zeros((10, 10, 10, len(S2)))
    big_S[:] = S2

    aresponse, aratio = auto_response(gtab, big_S, roi_center=(5, 5, 4),
                                      roi_radius=3, fa_thr=0.5)
    assert_array_almost_equal(aresponse[0], response[0])
    assert_almost_equal(aresponse[1], 100)
    assert_almost_equal(aratio, response[0][1] / response[0][0])

    # BUG FIX: the default-ROI-center result was previously never checked --
    # the old assertion re-tested `aresponse` instead of `aresponse2`.
    aresponse2, aratio2 = auto_response(gtab, big_S, roi_radius=3, fa_thr=0.5)
    assert_array_almost_equal(aresponse2[0], response[0])
    assert_almost_equal(aratio2, response[0][1] / response[0][0])
def _run_interface(self, runtime):
    """Run CSD-based probabilistic tractography and save a .trk file.

    Loads DWI data, an FA map, a brain mask and the b-table from the
    nipype interface inputs, seeds streamlines in an FA-derived white
    matter mask, tracks with a ProbabilisticDirectionGetter driven by a
    CSD fit, and writes the result next to the input file as
    ``<base>_CSDprob.trk``.
    """
    import numpy as np
    import nibabel as nib
    from dipy.io import read_bvals_bvecs
    from dipy.core.gradients import gradient_table
    from nipype.utils.filemanip import split_filename

    # Loading the data
    fname = self.inputs.in_file
    img = nib.load(fname)
    data = img.get_data()
    affine = img.get_affine()

    FA_fname = self.inputs.FA_file
    FA_img = nib.load(FA_fname)
    fa = FA_img.get_data()
    # NOTE(review): `affine` is overwritten here -- the FA affine (rounded
    # below) is used for seeding/tracking, not the DWI affine. Presumably
    # both images are in the same space; verify upstream.
    affine = FA_img.get_affine()
    affine = np.matrix.round(affine)

    mask_fname = self.inputs.brain_mask
    mask_img = nib.load(mask_fname)
    mask = mask_img.get_data()

    bval_fname = self.inputs.bval
    bvals = np.loadtxt(bval_fname)

    bvec_fname = self.inputs.bvec
    bvecs = np.loadtxt(bvec_fname)
    # Transpose the (3, N) gradient file layout into the (N, 3) layout
    # expected by gradient_table.
    bvecs = np.vstack([bvecs[0,:],bvecs[1,:],bvecs[2,:]]).T
    gtab = gradient_table(bvals, bvecs)

    # Creating a white matter mask
    fa = fa*mask
    white_matter = fa >= 0.2

    # Creating a seed mask: 2x2x2 seeds per white-matter voxel.
    from dipy.tracking import utils
    seeds = utils.seeds_from_mask(white_matter, density=[2, 2, 2],
                                  affine=affine)

    # Fitting the CSA model (used only for the GFA stopping criterion).
    from dipy.reconst.shm import CsaOdfModel
    from dipy.data import default_sphere
    from dipy.direction import peaks_from_model
    csa_model = CsaOdfModel(gtab, sh_order=8)
    csa_peaks = peaks_from_model(csa_model, data, default_sphere,
                                 relative_peak_threshold=.8,
                                 min_separation_angle=45,
                                 mask=white_matter)

    from dipy.tracking.local import ThresholdTissueClassifier
    # Stop tracking once generalized FA drops below 0.25.
    classifier = ThresholdTissueClassifier(csa_peaks.gfa, .25)

    # CSD model provides the FOD that directions are sampled from.
    from dipy.reconst.csdeconv import (ConstrainedSphericalDeconvModel,
                                       auto_response)
    response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
    csd_model = ConstrainedSphericalDeconvModel(gtab, response, sh_order=8)
    csd_fit = csd_model.fit(data, mask=white_matter)

    from dipy.direction import ProbabilisticDirectionGetter
    prob_dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                                        max_angle=45.,
                                                        sphere=default_sphere)

    # Tracking
    from dipy.tracking.local import LocalTracking
    streamlines = LocalTracking(prob_dg, classifier, seeds, affine,
                                step_size=.5, maxlen=200, max_cross=1)

    # Compute streamlines and store as a list.
    streamlines = list(streamlines)

    # Saving the trackfile
    from dipy.io.trackvis import save_trk
    _, base, _ = split_filename(fname)
    save_trk(base + '_CSDprob.trk', streamlines, affine, fa.shape)

    return runtime
# Select a small part of it data_small = data[25:40, 65:80, 35:42] data_noisy_small = data_noisy[25:40, 65:80, 35:42] """ Fit an initial model to the data, in this case Constrained Spherical Deconvolution is used. """ # Perform CSD on the original data from dipy.reconst.csdeconv import auto_response from dipy.reconst.csdeconv import ConstrainedSphericalDeconvModel response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7) csd_model_orig = ConstrainedSphericalDeconvModel(gtab, response) csd_fit_orig = csd_model_orig.fit(data_small) csd_shm_orig = csd_fit_orig.shm_coeff # Perform CSD on the original data + noise response, ratio = auto_response(gtab, data_noisy, roi_radius=10, fa_thr=0.7) csd_model_noisy = ConstrainedSphericalDeconvModel(gtab, response) csd_fit_noisy = csd_model_noisy.fit(data_noisy_small) csd_shm_noisy = csd_fit_noisy.shm_coeff """ Inspired by [RodriguesEurographics_], a lookup-table is created, containing rotated versions of the kernel :math:`P_t` sampled over a discrete set of orientations. In order to ensure rotationally invariant processing, the discrete orientations are required to be equally distributed over a sphere. Per default, a sphere with 100 directions is used.
def test_recursive_response_calibration():
    """
    Test the recursive response calibration method.

    Builds a small synthetic dataset of 8 crossing-fiber voxels and 2
    single-fiber voxels, calibrates a response function recursively, fits
    CSD with it, and checks that the expected peaks are recovered and that
    the calibrated response behaves like a high-FA single tensor.
    """
    SNR = 100
    S0 = 1
    sh_order = 8

    _, fbvals, fbvecs = get_data('small_64D')

    bvals = np.load(fbvals)
    bvecs = np.load(fbvecs)
    sphere = get_sphere('symmetric724')

    gtab = gradient_table(bvals, bvecs)
    evals = np.array([0.0015, 0.0003, 0.0003])
    evecs = np.array([[0, 1, 0],
                      [0, 0, 1],
                      [1, 0, 0]]).T
    mevals = np.array(([0.0015, 0.0003, 0.0003],
                       [0.0015, 0.0003, 0.0003]))
    angles = [(0, 0), (90, 0)]

    # Indexer selecting only diffusion-weighted (non-b0) measurements.
    where_dwi = lazy_index(~gtab.b0s_mask)

    S_cross, sticks_cross = multi_tensor(gtab, mevals, S0, angles=angles,
                                         fractions=[50, 50], snr=SNR)
    S_single = single_tensor(gtab, S0, evals, evecs, snr=SNR)
    # Voxels 0-7: 90-degree crossing; voxels 8-9: single fiber.
    data = np.concatenate((np.tile(S_cross, (8, 1)),
                           np.tile(S_single, (2, 1))),
                          axis=0)

    odf_gt_cross = multi_tensor_odf(sphere.vertices, mevals, angles, [50, 50])
    odf_gt_single = single_tensor_odf(sphere.vertices, evals, evecs)

    # Calibrate the response recursively starting from a nearly isotropic
    # initial tensor (init_fa=0.05) until convergence.
    response = recursive_response(gtab, data, mask=None, sh_order=8,
                                  peak_thr=0.01, init_fa=0.05,
                                  init_trace=0.0021, iter=8,
                                  convergence=0.001, parallel=False)

    csd = ConstrainedSphericalDeconvModel(gtab, response)
    csd_fit = csd.fit(data)

    # All DC coefficients positive across voxels.
    assert_equal(np.all(csd_fit.shm_coeff[:, 0] >= 0), True)

    fodf = csd_fit.odf(sphere)

    directions_gt_single, _, _ = peak_directions(odf_gt_single, sphere)
    directions_gt_cross, _, _ = peak_directions(odf_gt_cross, sphere)
    directions_single, _, _ = peak_directions(fodf[8, :], sphere)
    directions_cross, _, _ = peak_directions(fodf[0, :], sphere)

    ang_sim = angular_similarity(directions_cross, directions_gt_cross)
    assert_equal(ang_sim > 1.9, True)
    assert_equal(directions_cross.shape[0], 2)
    assert_equal(directions_gt_cross.shape[0], 2)

    ang_sim = angular_similarity(directions_single, directions_gt_single)
    assert_equal(ang_sim > 0.9, True)
    assert_equal(directions_single.shape[0], 1)
    assert_equal(directions_gt_single.shape[0], 1)

    # Re-sample the calibrated response on the acquisition directions and
    # check it looks like a tensor with the ground-truth FA.
    sphere = Sphere(xyz=gtab.gradients[where_dwi])
    sf = response.on_sphere(sphere)
    S = np.concatenate(([response.S0], sf))

    tenmodel = dti.TensorModel(gtab, min_signal=0.001)

    tenfit = tenmodel.fit(S)
    FA = fractional_anisotropy(tenfit.evals)
    FA_gt = fractional_anisotropy(evals)
    assert_almost_equal(FA, FA_gt, 1)
from dipy.viz import window, actor, colormap as cmap

renderer = window.Renderer()

# Stanford HARDI dataset: partial volume estimate (PVE) maps for CSF, grey
# and white matter, plus the diffusion data and anatomical labels.
img_pve_csf, img_pve_gm, img_pve_wm = read_stanford_pve_maps()
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.affine
shape = labels.shape

# Fit CSD inside white matter only and build a probabilistic direction
# getter with a 20-degree maximum turning angle.
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
csd_fit = csd_model.fit(data, mask=img_pve_wm.get_data())

dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                               max_angle=20.,
                                               sphere=default_sphere)

"""
CMC/ACT Tissue Classifiers
--------------------------
Continuous map criterion (CMC) [Girard2014]_ and Anatomically-constrained
tractography (ACT) [Smith2012]_ both use PVE information from anatomical
images to determine when the tractography stops. Both tissue classifiers use
a trilinear interpolation at the tracking position. CMC tissue classifier
uses a probability derived from the PVE maps to determine if the streamline
reaches a 'valid' or 'invalid'
class RLTractEnvironment(gym.Env):
    """Gym environment for reinforcement-learning-based tractography.

    States are positions (plus interpolated DWI or ODF values) along a
    streamline; actions index directions on a randomly initialized,
    charge-dispersed hemisphere; rewards combine the local ODF value, the
    cosine similarity to the previous tangent, and a neuroanatomical term
    derived from reference tract masks.
    """

    def __init__(self, device, seeds=None, step_width=0.8, dataset='100307',
                 grid_dim=(3, 3, 3), max_l2_dist_to_state=0.1,
                 tracking_in_RAS=True, fa_threshold=0.1, b_val=1000,
                 odf_state=True, odf_mode="CSD", action_space=100,
                 pFolderBundles="data/gt_bundles/"):
        print("DEPRECATED! Dont use anymore.")
        # Episode bookkeeping; all initialized for real in reset().
        self.state_history = None
        self.reference_seed_point_ijk = None
        self.points_visited = None
        self.past_reward = None
        self.reward = None
        self.stepCounter = None
        self.done = None
        self.seed_index = None
        self.step_angles = None
        self.line = None
        self.na_reward_history = None
        self.av_na_reward = None
        self.past_bundle = None

        print("Loading dataset # ", dataset)
        self.device = device
        preprocessor = DataPreprocessor().normalize().crop(b_val).fa_estimate()
        if dataset == 'ISMRM':
            self.dataset = preprocessor.get_ismrm(f"data/ISMRM2015/")
        else:
            self.dataset = preprocessor.get_hcp(f"data/HCP/{dataset}/")
        self.step_width = step_width
        self.dtype = torch.FloatTensor  # vs. torch.cuda.FloatTensor
        self.dti_model = None
        self.dti_fit = None
        self.odf_interpolator = None
        self.sh_coefficient = None
        self.odf_mode = odf_mode

        # Build the action space: `action_space` random directions on a
        # hemisphere, relaxed by charge dispersion towards uniformity.
        # Fixed seed keeps the direction set reproducible across runs.
        np.random.seed(42)
        action_space = action_space
        phi = np.pi * np.random.rand(action_space)
        theta = 2 * np.pi * np.random.rand(action_space)
        sphere = HemiSphere(theta=theta, phi=phi)  #Sphere(theta=theta, phi=phi)
        sphere, _ = disperse_charges(
            sphere, 5000)  # enforce uniform distribution of our points
        self.sphere = sphere
        self.sphere_odf = sphere

        # -- interpolation function of state's value --
        self.state_interpol_func = self.interpolate_dwi_at_state
        if odf_state:
            print("Interpolating ODF as state Value")
            self.state_interpol_func = self.interpolate_odf_at_state

        self.directions = torch.from_numpy(self.sphere.vertices).to(device)
        no_actions, _ = self.directions.shape
        self.directions_odf = torch.from_numpy(
            self.sphere_odf.vertices).to(device)

        self.action_space = Discrete(
            no_actions)  # spaces.Discrete(no_actions+1)
        self.dwi_postprocessor = Resample(
            sphere=get_sphere('repulsion100'))  # resample(sphere=sphere)
        self.referenceStreamline_ijk = None
        self.grid = get_grid(np.array(grid_dim))
        self.maxL2dist_to_State = max_l2_dist_to_state
        self.tracking_in_RAS = tracking_in_RAS

        # -- load streamlines --
        self.fa_threshold = fa_threshold
        self.maxSteps = 2000

        # -- init seeds --
        # Without explicit seeds, seed one point per voxel whose DTI-derived
        # FA is at least 0.2 (i.e. a crude white-matter mask).
        self.seeds = seeds
        if self.seeds is None:
            if self.dti_fit is None:
                self._init_odf()

            dti_model = dti.TensorModel(self.dataset.gtab, fit_method='LS')
            dti_fit = dti_model.fit(self.dataset.dwi,
                                    mask=self.dataset.binary_mask)

            fa_img = dti_fit.fa
            seed_mask = fa_img.copy()
            seed_mask[seed_mask >= 0.2] = 1
            seed_mask[seed_mask < 0.2] = 0

            seeds = utils.seeds_from_mask(seed_mask, affine=np.eye(4),
                                          density=1)  # tracking in IJK
            self.seeds = torch.from_numpy(seeds)

        # -- init bundles for neuroanatomical reward --
        print("Init tract masks for neuroanatomical reward")
        fibers = []
        self.bundleNames = os.listdir(pFolderBundles)
        for fibFile in self.bundleNames:
            pFibre = pFolderBundles + fibFile
            #print(" @ " + pFibre)
            fibers.append(
                FiberBundleDataset(path_to_files=pFibre,
                                   dataset=self.dataset).tractMask)

        self.tractMasksAllBundles = torch.stack(fibers, dim=0).to(self.device)

        # -- set default values --
        self.reset()

        # -- init observation space --
        obs_shape = self.get_observation_from_state(self.state).shape
        self.observation_space = Box(low=0, high=150, shape=obs_shape)

    def _init_odf(self):
        """Compute the ODF volume (DTI- or CSD-based) and its interpolator."""
        print("Initialising ODF")
        # fit DTI model to data
        if self.odf_mode == "DTI":
            print("DTI-based ODF computation")
            self.dti_model = dti.TensorModel(self.dataset.gtab,
                                             fit_method='LS')
            self.dti_fit = self.dti_model.fit(self.dataset.dwi,
                                              mask=self.dataset.binary_mask)
            # compute ODF
            odf = self.dti_fit.odf(self.sphere_odf)
        elif self.odf_mode == "CSD":
            print("CSD-based ODF computation")
            mask = mask_for_response_ssst(self.dataset.gtab,
                                          self.dataset.dwi,
                                          roi_radii=10, fa_thr=0.7)
            num_voxels = np.sum(mask)
            print(num_voxels)
            response, ratio = response_from_mask_ssst(self.dataset.gtab,
                                                      self.dataset.dwi, mask)
            print(response)
            self.dti_model = ConstrainedSphericalDeconvModel(
                self.dataset.gtab, response)
            self.dti_fit = self.dti_model.fit(self.dataset.dwi)
            odf = self.dti_fit.odf(self.sphere_odf)
        # NOTE(review): any other odf_mode leaves `odf` undefined and raises
        # NameError below -- confirm only "DTI"/"CSD" are ever passed.

        # -- set up interpolator for odf evaluation
        x_range = np.arange(odf.shape[0])
        y_range = np.arange(odf.shape[1])
        z_range = np.arange(odf.shape[2])

        self.odf_interpolator = RegularGridInterpolator(
            (x_range, y_range, z_range), odf)

        # print("Computing pmf")
        # self.pmf = odf.clip(min=0)

    def interpolate_dwi_at_state(self, stateCoordinates):
        """Interpolate resampled DWI values on the grid around a position.

        Returns None when the position falls outside the DWI volume.
        """
        # TODO: maybe stay in RAS all the time then no need to transfer to IJK
        ras_points = self.dataset.to_ras(
            stateCoordinates
        )  # Transform state to World RAS+ coordinate system

        ras_points = self.grid + ras_points

        try:
            interpolated_dwi = self.dataset.get_interpolated_dwi(
                ras_points, postprocessing=self.dwi_postprocessor)
        except PointOutsideOfDWIError:
            return None
        interpolated_dwi = np.rollaxis(interpolated_dwi, 3)  # CxWxHxD
        # interpolated_dwi = self.dtype(interpolated_dwi).to(self.device)
        return interpolated_dwi

    def interpolate_odf_at_state(self, stateCoordinates):
        """Evaluate the precomputed ODF interpolator on the local grid."""
        if self.odf_interpolator is None:
            self._init_odf()

        ijk_pts = self.grid + stateCoordinates.cpu().detach().numpy()
        interpol_odf = self.odf_interpolator(ijk_pts)
        interpol_odf = np.rollaxis(interpol_odf, 3)
        return interpol_odf

    def step(self, action, direction="forward"):
        """Advance one tracking step; returns (observation, reward, done, info)."""
        self.stepCounter += 1

        # -- Termination conditions --
        # I. number of steps larger than maximum
        if self.stepCounter >= self.maxSteps:
            return self.get_observation_from_state(self.state), 0., True, {}

        # II. fa below threshold? stop tracking
        if self.dataset.get_fa(
                self.state.getCoordinate().cpu()) < self.fa_threshold:
            return self.get_observation_from_state(self.state), 0., True, {}

        #@todo: III. leaving brain mask
        #if self.dataset.get_fa(self.state.getCoordinate()) < self.fa_threshold:
        #    return self.get_observation_from_state(self.state), 0., True, {}

        # -- Tracking --
        cur_tangent = self.directions[action].view(
            -1, 3)  # action space = Hemisphere
        cur_position = self.state.getCoordinate().view(-1, 3).to(self.device)
        # Backward tracking reuses the hemisphere directions, negated.
        if (direction == "backward"):
            cur_tangent = cur_tangent * -1
        next_position = cur_position + self.step_width * cur_tangent
        next_state = TractographyState(next_position,
                                       self.state_interpol_func)

        local_reward_na = interpolate3dAt(self.tractMasksAllBundles,
                                          cur_position.view(1, -1)).squeeze()
        self.na_reward_history[self.stepCounter - 1, :] = local_reward_na
        #print("mu hist", self.stepCounter, torch.mean(self.na_reward_history[0:self.stepCounter-1, :], dim = 0))

        # -- REWARD --
        # compute reward based on
        # I. We basically take the normalized odf value corresponding to the encoded (action) tangent as reward
        # It is normalized in a way such that its maximum equals 1 Crucial assumption is that self.directions ==
        # self.directions_odf
        odf_cur = torch.from_numpy(
            self.interpolate_odf_at_state(
                stateCoordinates=cur_position))[:, 1, 1, 1].view(
                    self.directions_odf.shape[0])
        if not torch.count_nonzero(
                odf_cur
        ):  # if all elements in odf_cur are zero, terminate episode
            return self.get_observation_from_state(next_state), 0., True, {}
        reward = odf_cur / torch.max(odf_cur)
        reward = reward[action]

        # II. cosine similarity of current tangent to previous tangent
        # => Agent should prefer going straight
        prev_tangent = None
        if self.stepCounter > 1:
            # DIRTY: .to(self.device) in next line... one element in history is not on device
            prev_tangent = self.state_history[-1].getCoordinate().to(
                self.device) - self.state_history[-2].getCoordinate().to(
                    self.device)
            prev_tangent = prev_tangent.view(-1, 3)
            prev_tangent = prev_tangent / torch.sqrt(
                torch.sum(prev_tangent**2, dim=1))  # normalize to unit vector
            cos_similarity = abs(
                torch.nn.functional.cosine_similarity(prev_tangent,
                                                      cur_tangent))
            reward = (reward * cos_similarity).squeeze()
            # Terminate on turns of 90 degrees or more.
            if cos_similarity <= 0.:
                return self.get_observation_from_state(
                    next_state), reward, True, {}

        #@todo: replace the following lines by a call to reward_for_state_action_pair(-)
        #reward_sap = self.reward_for_state_action_pair(self.state, prev_tangent, action)

        # -- book keeping --
        self.state_history.append(next_state)
        self.state = next_state

        return self.get_observation_from_state(next_state), reward, False, {}

    def reward_for_state(self, state, prev_direction):
        """Per-action reward vector at `state` (ODF peaks + anatomy term)."""
        my_position = state.getCoordinate().squeeze(0).to(self.device)
        # -- main peak from ODF --
        # Center voxel of the interpolated 3x3x3 neighbourhood.
        pmf_cur = self.interpolate_odf_at_state(my_position)[:, 1, 1, 1]
        pmf_cur = torch.from_numpy(pmf_cur).to(self.device)
        reward = pmf_cur / torch.max(pmf_cur)
        # Keep only local ODF maxima; everything else gets zero reward.
        peak_indices = self._get_odf_peaks(reward,
                                           window_width=int(
                                               self.action_space.n / 3))
        mask = torch.zeros_like(reward).to(self.device)
        mask[peak_indices] = 1
        reward *= mask
        if prev_direction is not None:
            reward = reward * abs(
                torch.nn.functional.cosine_similarity(
                    self.directions, prev_direction.view(1, -1))).view(1, -1)

        # neuroanatomical reward
        next_pos = my_position.view(
            1, -1
        ) + self.directions  # gets next positions for all directions actions X 3
        local_reward_na = interpolate3dAt(
            self.tractMasksAllBundles,
            next_pos).squeeze()  # no_tracts X actions
        # NOTE(review): torch.max(..., dim=0) returns a (values, indices)
        # namedtuple; assigning it into a tensor row likely fails or drops
        # the indices -- compare with step(), which stores raw values.
        # Confirm intent before relying on na_reward_history here.
        self.na_reward_history[self.stepCounter - 1, :] = torch.max(
            local_reward_na, dim=0)  # store local reward
        #self.na_reward_history[self.stepCounter, :] = local_reward_na
        #reward_na = torch.mean(self.na_reward_history[0:self.stepCounter, :], dim = 0)
        #reward_na = torch.max(reward_na)
        #reward_na_idx = torch.argmax(reward_na)
        #print("[%d] dominant bundle: %s" % (self.stepCounter, self.bundleNames[reward_na_idx]))
        reward_na_mu_hist = torch.mean(
            self.na_reward_history[0:self.stepCounter - 1, :],
            dim=0).view(-1, 1)  # no_tracts X 1
        local_reward_na = local_reward_na + reward_na_mu_hist
        reward_na, _ = torch.max(local_reward_na, dim=0)
        reward_na = reward_na.view(
            1, -1)  # marginalize tracts, to get 1 x noActions
        reward_na_arg = torch.argmax(local_reward_na,
                                     dim=0)  # get dominant tract per action

        return (reward + reward_na).cpu().numpy()

    def reward_for_state_action_pair(self, state, prev_direction, action):
        """Scalar reward of taking `action` in `state`."""
        reward = self.reward_for_state(state, prev_direction)
        return reward[action]

    def _get_best_action(self, state, prev_direction):
        """Greedy action: argmax of the per-action reward vector."""
        reward = self.reward_for_state(state, prev_direction)
        return np.argmax(reward)  #best_action_d

    def _get_odf_peaks(self, odf, window_width=31):
        """Indices of local maxima of a 1-D ODF via max-pooling."""
        odf = odf.squeeze(0)
        # Strict local maxima w.r.t. both neighbours.
        odf_diff = ((odf[:-2] < odf[1:-1]) & (odf[2:] < odf[1:-1])).type(
            torch.uint8).to(self.device)
        # max_pool1d needs an odd window for symmetric padding.
        if window_width % 2 == 0.:
            window_width += 1
        peak_mask = torch.cat([
            torch.zeros(1, dtype=torch.uint8).to(self.device), odf_diff,
            torch.zeros(1, dtype=torch.uint8).to(self.device)
        ],
                              dim=0)
        peaks = torch.nn.functional.max_pool1d_with_indices(
            odf.view(1, 1, -1), window_width, 1,
            padding=window_width // 2)[1].unique()
        # Keep only pooled maxima that are also strict local maxima.
        peak_indices = peaks[peak_mask[peaks].nonzero()]
        return peak_indices

    def track(self, action_function=None):
        """Track streamlines from all seeds (forward then backward).

        `action_function` maps an observation to an action; defaults to the
        environment's greedy policy. Returns a list of coordinate lists.
        """
        streamlines = []
        for i in trange(len(self.seeds)):
            all_states = []
            self.reset(seed_index=i)
            state = self.state  # reset function now returns dwi values --> due to compatibility to rainbow agent or stable baselines
            seed_position = state.getCoordinate().squeeze().to(self.device)
            current_direction = None
            all_states.append(seed_position)

            # -- forward tracking --
            terminal = False
            eval_steps = 0
            while not terminal:
                # current position
                # get the best choice from environment
                if action_function is None:
                    action = self._get_best_action(state, current_direction)
                else:
                    action = action_function(
                        self.get_observation_from_state(state))
                # store tangent for next time step
                current_direction = self.directions[action]  #.numpy()
                # take a step
                _, reward, terminal, _ = self.step(action)
                state = self.state  # step function now returns dwi values --> due to compatibility to rainbow agent or stable baselines
                if not terminal:
                    all_states.append(state.getCoordinate().squeeze(0))
                eval_steps = eval_steps + 1

            # -- backward tracking --
            self.reset(seed_index=i, terminal_F=True)
            state = self.state  # reset function now returns dwi values --> due to compatibility to rainbow agent or stable baselines
            current_direction = None  # potentially take tangent of first step of forward tracker
            terminal = False
            # Reverse the forward half so the backward half appends in order.
            all_states = all_states[::-1]
            while not terminal:
                # current position
                my_position = state.getCoordinate().double().squeeze(0)
                # get the best choice from environment
                if action_function is None:
                    action = self._get_best_action(state, current_direction)
                else:
                    action = action_function(
                        self.get_observation_from_state(state))
                # store tangent for next time step
                current_direction = self.directions[action]  #.numpy()
                # take a step
                _, reward, terminal, _ = self.step(action,
                                                   direction="backward")
                state = self.state
                my_position = my_position.to(self.device)  # DIRTY!!!
                my_coord = state.getCoordinate().squeeze(0).to(self.device)
                # Only record the point if the step actually moved us.
                if (False in torch.eq(my_coord, my_position)) & (not terminal):
                    all_states.append(my_coord)

            streamlines.append((all_states))

        return streamlines

    def get_observation_from_state(self, state):
        """Flattened interpolated values at `state` (the agent observation)."""
        dwi_values = state.getValue().flatten()
        # TODO -> currently only on dwi values, not on past states
        #past_coordinates = np.array(list(self.state_history)).flatten()
        #return np.concatenate((dwi_values, past_coordinates))
        return dwi_values

    # reset the game and returns the observed data from the last episode
    def reset(self, seed_index=None, terminal_F=False, terminal_B=False):
        # self.seed_index = seed_index
        # Pick a random seed only when neither (or both) of the directional
        # terminal flags is set; otherwise keep the current seed for the
        # second (backward) tracking pass.
        if seed_index is not None:
            self.seed_index = seed_index
        elif not terminal_F and not terminal_B or terminal_F and terminal_B:
            self.seed_index = np.random.randint(len(self.seeds))

        if self.tracking_in_RAS:
            reference_seed_point_ras = self.seeds[self.seed_index]
            reference_seed_point_ijk = self.dataset.to_ijk(
                reference_seed_point_ras)
        else:
            reference_seed_point_ijk = self.seeds[self.seed_index]

        self.done = False
        self.stepCounter = 0
        self.reward = 0
        self.past_reward = 0
        self.points_visited = 1  # position_index

        self.reference_seed_point_ijk = reference_seed_point_ijk
        self.state = TractographyState(self.reference_seed_point_ijk,
                                       self.state_interpol_func)
        self.state_history = deque([self.state] * 4, maxlen=4)
        self.na_reward_history = torch.zeros(
            (self.maxSteps,
             self.tractMasksAllBundles.shape[0])).to(self.device)

        return self.get_observation_from_state(self.state)

    def render(self, mode="human"):
        pass


'''@deprecated
from dipy.tracking import utils
from dipy.viz import window, actor, colormap as cmap

renderer = window.Renderer()

# Stanford HARDI dataset: PVE maps (CSF/GM/WM), diffusion data and labels.
img_pve_csf, img_pve_gm, img_pve_wm = read_stanford_pve_maps()
hardi_img, gtab, labels_img = read_stanford_labels()
data = hardi_img.get_data()
labels = labels_img.get_data()
affine = hardi_img.affine
shape = labels.shape

# Fit CSD within white matter and build a probabilistic direction getter
# limited to 20-degree turns between steps.
response, ratio = auto_response(gtab, data, roi_radius=10, fa_thr=0.7)
csd_model = ConstrainedSphericalDeconvModel(gtab, response)
csd_fit = csd_model.fit(data, mask=img_pve_wm.get_data())

dg = ProbabilisticDirectionGetter.from_shcoeff(csd_fit.shm_coeff,
                                               max_angle=20.,
                                               sphere=default_sphere)

"""
CMC/ACT Tissue Classifiers
--------------------------
Continuous map criterion (CMC) [Girard2014]_ and Anatomically-constrained
tractography (ACT) [Smith2012]_ both use PVE information from anatomical
images to determine when the tractography stops. Both tissue classifiers use
a trilinear interpolation at the tracking position. CMC tissue classifier
uses a probability derived from the PVE maps to determine if the streamline
reaches a 'valid' or 'invalid' region. ACT uses a fixed threshold on the
PVE maps. Both tissue classifiers can be used in conjunction with PFT. In
this example, we used CMC.
def compCsdPeaks(basename, output, mask=None, sh_only=False, invert=[1]):
    # Fit a CSD model to <basename>.nii.gz and pickle either the raw SH fit
    # (sh_only=True) or the extracted peaks to <output>_*.dipy.
    # NOTE: Python 2 code (print statements, cPickle).
    # NOTE(review): mutable default `invert=[1]` is shared across calls --
    # harmless here since it is only read, but worth confirming.
    # `invert` lists bvec components to flip (default: y, for GE scanners).
    home = os.getcwd()

    fbase = basename

    fdwi = fbase+".nii.gz"
    fbval = fbase+".bvals"
    fbvec = fbase+".bvecs"

    print fdwi,fbval,fbvec

    img = nib.load(fdwi)
    data = img.get_data()
    zooms = img.get_header().get_zooms()[:3]
    affine = img.get_affine()

    # reslice image into 1x1x1 iso voxel

    # new_zooms = (1., 1., 1.)
    # data, affine = resample(data, affine, zooms, new_zooms)
    # img = nib.Nifti1Image(data, affine)
    #
    # print data.shape
    # print img.get_header().get_zooms()
    # print "###"
    #
    # nib.save(img, 'C5_iso.nii.gz')

    bval, bvec = dio.read_bvals_bvecs(fbval, fbvec)

    # invert bvec z for GE scanner
    for i in invert:
        bvec[:,i]*= -1

    gtab = dgrad.gradient_table(bval, bvec)

    if mask is None:
        print 'generate mask'
        maskdata, mask = median_otsu(data, 3, 1, False,
                                     vol_idx=range(10, 50), dilate=2)
    else:
        mask = nib.load(mask).get_data()
        maskdata = applymask(data, mask)

    # tenmodel = dti.TensorModel(gtab)
    # tenfit = tenmodel.fit(data)
    # print('Computing anisotropy measures (FA, MD, RGB)')
    #
    #
    # FA = fractional_anisotropy(tenfit.evals)
    # FA[np.isnan(FA)] = 0
    #
    # fa_img = nib.Nifti1Image(FA.astype(np.float32), img.get_affine())
    # nib.save(fa_img, 'FA.nii.gz')
    #
    # return

    # estimate response function, ratio should be ~0.2
    response, ratio = auto_response(gtab, maskdata, roi_radius=10, fa_thr=0.7)
    print response, ratio

    # reconstruct csd model
    print "estimate csd_model"
    csd_model = ConstrainedSphericalDeconvModel(gtab, response)

    #a_data = maskdata[40:80, 40:80, 60:61]
    #c_data = maskdata[40:80, 59:60, 50:80]
    #s_data = maskdata[59:60, 40:70, 30:80]
    data_small = maskdata

    #
    # evals = response[0]
    # evecs = np.array([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).T

    #sphere = get_sphere('symmetric362')

    if sh_only:
        # Fit and pickle only the spherical-harmonic coefficients.
        print 'fitting csd spherical harmonics to data'
        csd_fit = csd_model.fit(data_small)

        outfile = output+'_shfit.dipy'
        with open(outfile, 'wb') as fout:
            cPickle.dump(csd_fit, fout, -1)

        print "done writing to file %s"% (outfile)
        return csd_fit

    #csd_odf = csd_fit.odf(sphere)
    #
    #
    #fodf_spheres = fvtk.sphere_funcs(csd_odf, sphere, scale=1, norm=False)
    ##fodf_spheres.GetProperty().SetOpacity(0.4)
    ##
    #fvtk.add(ren, fodf_spheres)
    ##fvtk.add(ren, fodf_peaks)
    #fvtk.show(ren)
    #
    #sys.exit()

    # fit csd peaks in parallel on all cores but one
    print "fit csd peaks"
    proc_num = multiprocessing.cpu_count()-1
    print "peaks_from_model using core# =" + str(proc_num)

    sphere = get_sphere('symmetric724')
    csd_peaks = peaks_from_model(model=csd_model,
                                 data=data,
                                 sphere=sphere,
                                 mask=mask,
                                 relative_peak_threshold=.5,
                                 min_separation_angle=25,
                                 parallel=True,
                                 nbr_processes=proc_num)

    #fodf_peaks = fvtk.peaks(csd_peaks.peak_dirs, csd_peaks.peak_values, scale=1)
    # fd, fname = mkstemp()
    # pickle.save_pickle(fname, csd_peaks)
    #
    # os.close(fd)
    #pickle.dump(csd_peaks, open("csd.p", "wb"))

    outfile = output+'_csdpeaks.dipy'
    print 'writing peaks to file...'
    with open(outfile, 'wb') as fout:
        cPickle.dump(csd_peaks, fout, -1)

    print "done writing to file %s"% (outfile)
    return (csd_peaks, outfile)