def seg_setup(alg):
    """
    Load everything needed to run bundle segmentation.

    Reads the MNI T2 template, the subject's DWI image ("dwi.nii") and the
    precomputed DWI-to-template mapping ("mapping.nii.gz"), then builds the
    bundle dictionary for the requested algorithm.

    Parameters
    ----------
    alg : str
        "waypoint" selects a small waypoint-ROI bundle dict; any other value
        selects a RecoBundles-80 bundle dict.

    Returns
    -------
    dict with keys "MNI_T2_img", "img", "mapping", "bundles".
    """
    template = afd.read_mni_template()
    dwi_img = nib.load("dwi.nii")
    dwi_to_mni = reg.read_mapping("mapping.nii.gz", dwi_img, template)

    # Only the bundle list (and, for reco80, the seg_algo) differs between
    # the two modes; the resample target is the same.
    if alg == "waypoint":
        bundle_kwargs = dict(bundle_names=["CST", "ARC"])
    else:
        bundle_kwargs = dict(
            bundle_names=["CST", "UF", "CC_ForcepsMajor",
                          "CC_ForcepsMinor", "OR", "VOF"],
            seg_algo="reco80")
    bundles = api.make_bundle_dict(resample_to=template, **bundle_kwargs)

    return {
        "MNI_T2_img": template,
        "img": dwi_img,
        "mapping": dwi_to_mni,
        "bundles": bundles,
    }
def prepare_map(self, mapping=None, reg_prealign=None, reg_template=None):
    """
    Set mapping between DWI space and a template.

    Parameters
    ----------
    mapping : DiffeomorphicMap object, str or nib.Nifti1Image, optional.
        A mapping between DWI space and a template. If None, mapping will
        be registered from data used in prepare_img. Default: None.
    reg_prealign : array, optional.
        The linear transformation to be applied to align input images to
        the reference space before warping under the deformation field.
        Default: None.
    reg_template : str or nib.Nifti1Image, optional.
        Template to use for registration (defaults to the MNI T2).
        Default: None.
    """
    # Fall back on the MNI T2 template when none was supplied:
    template = reg_template if reg_template is not None else afd.read_mni_template()
    self.reg_template = template

    if mapping is None:
        # No mapping given: compute one with SyN registration when the
        # configuration calls for it; otherwise leave it unset.
        if self.seg_algo == "afq" or self.reg_algo == "syn":
            gtab = dpg.gradient_table(self.fbval, self.fbvec)
            _, warp = reg.syn_register_dwi(self.fdata, gtab,
                                           template=template)
            self.mapping = warp
        else:
            self.mapping = None
        return

    if isinstance(mapping, (str, nib.Nifti1Image)):
        # A path or image: read the mapping from disk. read_mapping
        # receives the *inverse* of the pre-alignment matrix.
        prealign = reg_prealign if reg_prealign is not None else np.eye(4)
        if self.img is None:
            self.img, _, _, _ = ut.prepare_data(
                self.fdata, self.fbval, self.fbvec,
                b0_threshold=self.b0_threshold)
        self.mapping = reg.read_mapping(
            mapping, self.img, template,
            prealign=np.linalg.inv(prealign))
    else:
        # Anything else is assumed to already be a usable mapping object.
        self.mapping = mapping
def syn_register_dwi(dwi, gtab, template=None, **syn_kwargs):
    """
    Register DWI data to a template using SyN registration of the mean b0.

    Parameters
    ----------
    dwi : nifti image or str
        Image containing DWI data, or full path to a nifti file with DWI.
    gtab : GradientTable or list of strings
        The gradients associated with the DWI data, or a string with
        [fbcal, ]
    template : nifti image or str, optional
        Registration target; defaults to the MNI T2 template.
    syn_kwargs : key-word arguments for :func:`syn_registration`

    Returns
    -------
    warped_b0 : ndarray
        The mean b0 image warped into template space.
    mapping : DiffeomorphicMap object
        The transform between DWI space and the template.
    """
    # Resolve the template: default to MNI T2, and load from disk if a
    # path was given.
    if template is None:
        import AFQ.data as afd
        template = afd.read_mni_template()
    if isinstance(template, str):
        template = nib.load(template)

    # Resolve the moving image and gradient table likewise.
    if isinstance(dwi, str):
        dwi = nib.load(dwi)
    if not isinstance(gtab, dpg.GradientTable):
        gtab = dpg.gradient_table(*gtab)

    # Register the average of the b0 volumes to the template.
    dwi_data = dwi.get_fdata()
    mean_b0 = dwi_data[..., gtab.b0s_mask].mean(-1)
    warped_b0, mapping = syn_registration(mean_b0,
                                          template.get_fdata(),
                                          moving_affine=dwi.affine,
                                          static_affine=template.affine,
                                          **syn_kwargs)
    return warped_b0, mapping
# ------------------------------------------- # For the purpose of bundle segmentation, the individual brain is registered to # the MNI T2 template. The waypoint ROIs used in segmentation are then each # brought into each subject's native space to test streamlines for whether they # fulfill the segmentation criteria. # # .. note:: # # To find the right place for the waypoint ROIs, we calculate a non-linear # transformation between the individual's brain DWI measurement (the b0 # measurements) and the MNI T2 template. # Before calculating this non-linear warping, we perform a pre-alignment # using an affine transformation. print("Registering to template...") MNI_T2_img = afd.read_mni_template() if not op.exists(op.join(working_dir, 'mapping.nii.gz')): import dipy.core.gradients as dpg gtab = dpg.gradient_table(hardi_fbval, hardi_fbvec) b0 = np.mean(img.get_fdata()[..., gtab.b0s_mask], -1) # Prealign using affine registration _, prealign = affine_registration(b0, MNI_T2_img.get_fdata(), img.affine, MNI_T2_img.affine) # Then register using a non-linear registration using the affine for # prealignment warped_hardi, mapping = reg.syn_register_dwi(hardi_fdata, gtab, prealign=prealign) reg.write_mapping(mapping, op.join(working_dir, 'mapping.nii.gz'))
import dipy.data as dpd
import dipy.core.gradients as dpg
from AFQ.registration import (syn_registration, register_series, register_dwi,
                              c_of_mass, translation, rigid, affine,
                              streamline_registration, write_mapping,
                              read_mapping, syn_register_dwi,
                              DiffeomorphicMap, slr_registration)
import AFQ.data as afd
from dipy.tracking.utils import transform_tracking_output
from dipy.io.streamline import load_trk, save_trk, load_tractogram
from dipy.io.stateful_tractogram import StatefulTractogram, Space

# Module-level fixtures shared by the tests in this file.
# NOTE(review): `np` and `nib` are used below but not imported in this chunk —
# presumably imported earlier in the file; confirm.
MNI_T2 = afd.read_mni_template()
hardi_img, gtab = dpd.read_stanford_hardi()
MNI_T2_data = MNI_T2.get_fdata()
MNI_T2_affine = MNI_T2.affine
hardi_data = hardi_img.get_fdata()
hardi_affine = hardi_img.affine
# Average the b0 volumes into a single 3D image:
b0 = hardi_data[..., gtab.b0s_mask]
mean_b0 = np.mean(b0, -1)

# We select some arbitrary chunk of data so this goes quicker:
subset_b0 = mean_b0[40:50, 40:50, 40:50]
subset_dwi_data = nib.Nifti1Image(hardi_data[40:50, 40:50, 40:50],
                                  hardi_affine)
# NOTE(review): the template subset is 20 voxels per axis while the b0
# subset is 10 — presumably intentional (static/moving need not match);
# confirm.
subset_t2 = MNI_T2_data[40:60, 40:60, 40:60]
subset_b0_img = nib.Nifti1Image(subset_b0, hardi_affine)
subset_t2_img = nib.Nifti1Image(subset_t2, MNI_T2_affine)
def test_matlab_mori_groups():
    """Read a MATLAB Mori-groups file: expect 20 groups, 2 CST_R streamlines."""
    mat_file = op.join(DATA_PATH, "MoriGroups_Test.mat")
    groups = matlab_mori_groups(mat_file, afd.read_mni_template())
    npt.assert_equal(len(groups.keys()), 20)
    npt.assert_equal(len(groups['CST_R'].streamlines), 2)
def test_matlab_tractography():
    """Read a MATLAB whole-brain tractography file: expect 2 streamlines."""
    mat_file = op.join(DATA_PATH, "WholeBrainFG_test.mat")
    sft = matlab_tractography(mat_file, afd.read_mni_template())
    npt.assert_equal(len(sft.streamlines), 2)
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for track segmentation.

    End-to-end integration test: builds an api.AFQ object on the Stanford
    HARDI test data, swaps in precomputed mapping/streamlines, checks the
    derived outputs on disk, then reruns the same configuration through the
    pyAFQ CLI and checks that it regenerates them.
    """
    tmpdir, bids_path, _ = get_temp_hardi()
    # Save a T1-weighted MNI template to use as a template scalar below:
    t1_path = op.join(tmpdir.name, "T1.nii.gz")
    nib.save(
        afd.read_mni_template(mask=True, weight="T1w"),
        t1_path)

    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="dti",
                           seed_mask=RoiMask(),
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)
    clean_params = dict(return_idx=True)

    myafq = api.AFQ(bids_path=bids_path,
                    dmriprep='vistasoft',
                    bundle_info=bundle_names,
                    scalars=["dti_FA",
                             "dti_MD",
                             TemplateScalar("T1", t1_path)],
                    robust_tensor_fitting=True,
                    tracking_params=tracking_params,
                    segmentation_params=segmentation_params,
                    clean_params=clean_params)

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    # Keep only longer streamlines and move them into voxel space:
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine[0])))

    # Write the precomputed mapping and an identity pre-alignment into the
    # results directory so the AFQ object picks them up instead of
    # recomputing:
    mapping_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_ses-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_ses-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))

    # Segmentation should produce a non-empty left CST:
    tgram = load_tractogram(myafq.bundles[0], myafq.dwi_img[0])
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict, myafq.dwi_img[0])
    npt.assert_(len(bundles['CST_L']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa

    # Test creation of file with bundle indices:
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-clean_tractography_idx.json'))  # noqa

    # 4 bundles x 100 nodes = 400 rows expected in the profiles:
    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (400, 6)

    myafq.plot_tract_profiles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_fa_profile_plots.png'))  # noqa

    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_md_profile_plots.png'))  # noqa

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'bundles'))
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'ROIs'))

    # Test the CLI:
    print("Running the CLI:")

    # Set up config to use the same parameters as above:
    # ROI mask needs to be put in quotes in config
    tracking_params = dict(odf_model="DTI",
                           seed_mask="RoiMask()",
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    config = dict(BIDS=dict(bids_path=bids_path,
                            dmriprep='vistasoft'),
                  REGISTRATION=dict(
                      robust_tensor_fitting=True),
                  BUNDLES=dict(
                      bundle_info=bundle_names,
                      scalars=["dti_fa",
                               "dti_md",
                               f"TemplateScalar('T1', '{t1_path}')"]),
                  VIZ=dict(
                      viz_backend="plotly_no_gif"),
                  TRACTOGRAPHY=tracking_params,
                  SEGMENTATION=segmentation_params,
                  CLEANING=clean_params)

    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)

    # Run the CLI on the config we just wrote; 0 means success:
    cmd = "pyAFQ " + config_file
    out = os.system(cmd)
    assert out == 0

    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(
        myafq._get_fname(myafq.data_frame.iloc[0], '_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (400, 8)
    assert_series_equal(combined_profiles['dti_fa'],
                        from_file['dti_fa'])

    # Make sure the CLI did indeed generate these:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa
# For the purpose of bundle segmentation, the individual brain is registered to # the MNI T1 template. The waypoint ROIs used in segmentation are then each # brought into each subject's native space to test streamlines for whether they # fulfill the segmentation criteria. # # .. note:: # # To find the right place for the waypoint ROIs, we calculate a non-linear # transformation between the individual's brain DWI measurement (the b0 # measurements) and the MNI T1 template. # Before calculating this non-linear warping, we perform a pre-alignment # using an affine transformation. print("Registering to template...") MNI_T1w_img = afd.read_mni_template(weight="T1w") if not op.exists(op.join(working_dir, 'mapping.nii.gz')): import dipy.core.gradients as dpg gtab = dpg.gradient_table(hardi_fbval, hardi_fbvec) # Prealign using affine registration _, prealign = reg.affine_registration(apm, MNI_T1w_img.get_fdata(), img.affine, MNI_T1w_img.affine) # Then register using a non-linear registration using the affine for # prealignment warped_hardi, mapping = reg.syn_register_dwi(hardi_fdata, gtab, prealign=prealign) reg.write_mapping(mapping, op.join(working_dir, 'mapping.nii.gz')) else: