def test_bundles_to_tgram():
    """Round-trip check: converting bundles to a single tractogram and back
    must preserve every streamline in every bundle."""
    affine = np.array([[2., 0., 0., -80.],
                       [0., 2., 0., -120.],
                       [0., 0., 2., -60.],
                       [0., 0., 0., 1.]])
    img = nib.Nifti1Image(np.ones((10, 10, 10, 30)), affine)
    bundles = {
        'b1': StatefulTractogram(
            [np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
             np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])],
            img, Space.VOX),
        'b2': StatefulTractogram(
            [np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 2], [0, 0, 2.5]]),
             np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 2, 2]])],
            img, Space.VOX)}
    bundle_dict = {'b1': {'uid': 1}, 'b2': {'uid': 2}}
    tgram = aus.bundles_to_tgram(bundles, bundle_dict, img)
    new_bundles = aus.tgram_to_bundles(tgram, bundle_dict, img)
    for bundle_name in bundles:
        # Fix: the original nested a loop over `__dict__.keys()` whose key was
        # never used, re-running the identical streamline comparison once per
        # attribute. Compare streamlines exactly once per bundle, and assert
        # equal lengths first so `zip` cannot silently truncate a mismatch.
        npt.assert_equal(len(bundles[bundle_name].streamlines),
                         len(new_bundles[bundle_name].streamlines))
        for sl_orig, sl_round in zip(bundles[bundle_name].streamlines,
                                     new_bundles[bundle_name].streamlines):
            npt.assert_equal(sl_orig, sl_round)
def test_AFQ_slr():
    """Check that the API runs segmentation using the SLR mapping."""
    _, bids_path, _ = get_temp_hardi()
    myafq = api.AFQ(bids_path=bids_path,
                    dmriprep='vistasoft',
                    reg_subject='subject_sls',
                    reg_template='hcp_atlas')
    # Load the cleaned bundles and split them back out by bundle name.
    sft = load_tractogram(myafq.get_clean_bundles()[0], myafq.dwi_img[0])
    seg_bundles = aus.tgram_to_bundles(
        sft, myafq.bundle_dict, myafq.dwi_img[0])
    # At least one streamline must have been assigned to the left CST.
    npt.assert_(len(seg_bundles['CST_L']) > 0)
def test_AFQ_reco80():
    """Exercise API segmentation with the 80-bundle RecoBundles atlas."""
    _, bids_path, _ = get_temp_hardi()
    seg_params = {'seg_algo': 'reco80', 'rng': 42}
    myafq = api.AFQ(bids_path=bids_path,
                    dmriprep='vistasoft',
                    segmentation_params=seg_params)
    cleaned = load_tractogram(myafq.get_clean_bundles()[0], myafq.dwi_img[0])
    per_bundle = aus.tgram_to_bundles(
        cleaned, myafq.bundle_dict, myafq.dwi_img[0])
    # The mid-callosal bundle should be non-empty after segmentation.
    npt.assert_(len(per_bundle['CCMid']) > 0)
def test_AFQ_reco():
    """Run API segmentation with RecoBundles, then export every output."""
    _, bids_path, _ = get_temp_hardi()
    seg_params = {'seg_algo': 'reco', 'rng': 42}
    myafq = api.AFQ(bids_path=bids_path,
                    dmriprep='vistasoft',
                    viz_backend="plotly",
                    segmentation_params=seg_params)
    cleaned = load_tractogram(myafq.get_clean_bundles()[0], myafq.dwi_img[0])
    per_bundle = aus.tgram_to_bundles(
        cleaned, myafq.bundle_dict, myafq.dwi_img[0])
    npt.assert_(len(per_bundle['CCMid']) > 0)
    # Also make sure the full export pipeline runs end-to-end.
    myafq.export_all()
def test_bundles_to_tgram():
    """Round-trip plain-list bundles through a tractogram, expecting
    exact equality on the way back."""
    affine = np.array([[2., 0., 0., -80.],
                       [0., 2., 0., -120.],
                       [0., 0., 2., -60.],
                       [0., 0., 0., 1.]])
    bundle_dict = {'b1': {'uid': 1}, 'b2': {'uid': 2}}
    bundles = {
        'b1': [np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
               np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])],
        'b2': [np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
               np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])]}
    tg = aus.bundles_to_tgram(bundles, bundle_dict, affine)
    round_tripped = aus.tgram_to_bundles(tg, bundle_dict)
    npt.assert_equal(round_tripped, bundles)
def test_bundles_to_tgram():
    """Round-trip dipy Streamlines bundles through a tractogram and
    compare every internal attribute of the results."""
    affine = np.array([[2., 0., 0., -80.],
                       [0., 2., 0., -120.],
                       [0., 0., 2., -60.],
                       [0., 0., 0., 1.]])
    bundle_dict = {'b1': {'uid': 1}, 'b2': {'uid': 2}}
    bundles = {
        'b1': dts.Streamlines([
            np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
            np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])]),
        'b2': dts.Streamlines([
            np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 2], [0, 0, 2.5]]),
            np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 2, 2]])])}
    tg = aus.bundles_to_tgram(bundles, bundle_dict, affine)
    round_tripped = aus.tgram_to_bundles(tg, bundle_dict)
    # Compare attribute-by-attribute so internal Streamlines state
    # (offsets, data buffers, etc.) is checked, not just the public view.
    for name in bundles:
        for attr in bundles[name].__dict__:
            npt.assert_equal(round_tripped[name].__dict__[attr],
                             bundles[name].__dict__[attr])
def export_sl_counts(subses_dict, bundle_dict, clean_bundles_file,
                     bundles_file, tracking_params, segmentation_params):
    """Count streamlines per bundle for the raw and cleaned tractograms.

    Returns a DataFrame indexed by bundle name (plus "whole_brain") with
    columns `n_streamlines` (pre-cleaning) and `n_streamlines_clean`, and a
    dict recording the source tractogram files.

    NOTE(review): `tracking_params` and `segmentation_params` are unused here;
    presumably required by the pipeline's task signature — confirm.
    """
    img = nib.load(subses_dict['dwi_file'])
    sl_counts_clean = []
    sl_counts = []
    # Fix: the original reused the name `bundles` for both this name list and
    # the per-bundle dict produced inside the loop. The rebinding meant the
    # "whole_brain" branch could never fire (the seg dict has no such key)
    # and the DataFrame index received the seg dict, not the name list.
    bundle_names = list(bundle_dict.keys())
    if "whole_brain" not in bundle_names:
        bundle_names.append("whole_brain")
    bundles_files = [clean_bundles_file, bundles_file]
    lists = [sl_counts_clean, sl_counts]
    for bundle_file, count in zip(bundles_files, lists):
        tg = load_tractogram(bundle_file, img)
        seg_bundles = aus.tgram_to_bundles(tg, bundle_dict, img)
        for bundle_name in bundle_names:
            if bundle_name == "whole_brain":
                # Whole-brain count is simply every streamline in the file.
                count.append(len(tg.streamlines))
            else:
                count.append(len(seg_bundles[bundle_name].streamlines))
    counts_df = pd.DataFrame(
        data=dict(n_streamlines=sl_counts,
                  n_streamlines_clean=sl_counts_clean),
        index=bundle_names)
    return counts_df, dict(sources=bundles_files)
def test_AFQ_data2():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    preafq_path = op.join(tmpdir.name, 'stanford_hardi',
                          'derivatives', 'preafq')
    myafq = api.AFQ(preafq_path=preafq_path,
                    sub_prefix='sub',
                    bundle_list=["SLF", "ARC", "CST", "FP"])
    # Replace the mapping and streamlines with precomputed:
    # (avoids running registration/tractography in the test itself)
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    # Keep only streamlines with >100 points and move them into the
    # subject's voxel frame before writing them where the API expects them.
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))
    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])
    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
    # Identity prealign: the precomputed mapping already matches the data.
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0],
                                'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))
    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_equal(len(bundles['CST_R']), 2)
    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                             'ROIs', 'CST_R_roi1_include.nii.gz'))
    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                             'bundles', 'CST_R.trk'))
    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)
    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))
    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + preafq_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)
    # Make sure the CLI did indeed generate these:
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                             'ROIs', 'CST_R_roi1_include.nii.gz'))
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                             'bundles', 'CST_R.trk'))
def test_AFQ_data_planes():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi', 'derivatives',
                            'dmriprep')
    seg_algo = "planes"
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    seg_algo=seg_algo,
                    bundle_names=bundle_names,
                    odf_model="DTI")
    # Replace the mapping and streamlines with precomputed:
    # (avoids running registration/tractography in the test itself)
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    # Keep only streamlines with >100 points, moved into the voxel frame.
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))
    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])
    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
    # Identity prealign: the precomputed mapping already matches the data.
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0],
                                'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))
    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_(len(bundles['CST_L']) > 0)
    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))
    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))
    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)
    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))
    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + dmriprep_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)
    # Make sure the CLI did indeed generate these:
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))


# NOTE(review): disabled test below requires HCP S3 credentials; kept as-is.
# def test_AFQ_data_recobundles():
#     tmpdir = nbtmp.InTemporaryDirectory()
#     afd.fetch_hcp(["100206"], hcp_bucket='hcp-openaccess',
#                   profile_name="hcp", path=tmpdir.name)
#     dmriprep_path = op.join(tmpdir.name, 'HCP', 'derivatives', 'dmriprep')
#     seg_algo = "recobundles"
#     bundle_names = ["F", "CST", "AF", "CC_ForcepsMajor"]
#     myafq = api.AFQ(dmriprep_path=dmriprep_path,
#                     sub_prefix='sub',
#                     seg_algo=seg_algo,
#                     bundle_names=bundle_names,
#                     odf_model="DTI",
#                     b0_threshold=15)
#     # Replace the streamlines with precomputed:
#     path_to_trk = dpd.fetcher.fetch_target_tractogram_hcp()
#     path_to_trk = dpd.fetcher.get_target_tractogram_hcp()
#     sl_file = op.join(myafq.data_frame.results_dir[0],
#                       'sub-100206_sess-01_dwiDTI_det_streamlines.trk')
#     shutil.copy(path_to_trk, sl_file)
#     myafq.data_frame["streamlines_file"] = sl_file
#     print("here")
#     tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
#     print("here")
#     bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
#     npt.assert_(len(bundles['CST_L']) > 0)
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir, bids_path, _ = get_temp_hardi()
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="dti",
                           seed_mask=RoiMask(),
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)
    clean_params = dict(return_idx=True)
    myafq = api.AFQ(bids_path=bids_path,
                    dmriprep='vistasoft',
                    bundle_info=bundle_names,
                    scalars=["dti_FA", "dti_MD"],
                    robust_tensor_fitting=True,
                    tracking_params=tracking_params,
                    segmentation_params=segmentation_params,
                    clean_params=clean_params)
    # Replace the mapping and streamlines with precomputed:
    # (avoids running registration in the test itself)
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    # Keep only streamlines with >100 points, moved into the voxel frame.
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine[0])))
    mapping_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_ses-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    # Identity prealign: the precomputed mapping already matches the data.
    reg_prealign_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_ses-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))
    tgram = load_tractogram(myafq.bundles[0], myafq.dwi_img[0])
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict,
                                   myafq.dwi_img[0])
    npt.assert_(len(bundles['CST_L']) > 0)
    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))
    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0], 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa
    # Test creation of file with bundle indices:
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0],
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-clean_tractography_idx.json'
        ))  # noqa
    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (400, 5)
    myafq.plot_tract_profiles()
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0],
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_fa_profile_plots.png'
        ))  # noqa
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0],
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_md_profile_plots.png'
        ))  # noqa
    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))
    # Test the CLI:
    print("Running the CLI:")
    # Set up config to use the same parameters as above:
    # ROI mask needs to be put in quotes in config
    tracking_params = dict(odf_model="DTI",
                           seed_mask="RoiMask()",
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    config = dict(BIDS=dict(bids_path=bids_path, dmriprep='vistasoft'),
                  REGISTRATION=dict(robust_tensor_fitting=True),
                  BUNDLES=dict(bundle_info=bundle_names,
                               scalars=["dti_fa", "dti_md"]),
                  VIZ=dict(viz_backend="plotly_no_gif"),
                  TRACTOGRAPHY=tracking_params,
                  SEGMENTATION=segmentation_params,
                  CLEANING=clean_params)
    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)
    cmd = "pyAFQ " + config_file
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(
        myafq._get_fname(myafq.data_frame.iloc[0], '_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (400, 7)
    assert_series_equal(combined_profiles['dti_fa'], from_file['dti_fa'])
    # Make sure the CLI did indeed generate these:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))
    myafq.export_bundles()
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0], 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi', 'derivatives',
                            'dmriprep')
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="DTI")
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)
    clean_params = dict(return_idx=True)
    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    bundle_names=bundle_names,
                    scalars=["dti_fa", "dti_md"],
                    tracking_params=tracking_params,
                    segmentation_params=segmentation_params,
                    clean_params=clean_params)
    # Replace the mapping and streamlines with precomputed:
    # (avoids running registration/tractography in the test itself)
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    # Keep only streamlines with >100 points, moved into the voxel frame.
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine[0])))
    sl_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det_tractography.trk')
    sft = StatefulTractogram(streamlines, myafq.data_frame.dwi_file[0],
                             Space.VOX)
    # bbox_valid_check=False: subsampled streamlines may leave the bounding
    # box; we only need the file on disk for the API to pick up.
    save_tractogram(sft, sl_file, bbox_valid_check=False)
    mapping_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    # Identity prealign: the precomputed mapping already matches the data.
    reg_prealign_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))
    tgram = load_tractogram(myafq.bundles[0], myafq.dwi_img[0])
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict,
                                   myafq.dwi_img[0])
    npt.assert_(len(bundles['CST_R']) > 0)
    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0], 'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))
    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0], 'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa
    # Test creation of file with bundle indices:
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-clean_tractography_idx.json'))  # noqa
    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)
    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))
    # Test the CLI:
    print("Running the CLI:")
    # Bare bones config only points to the files
    config = dict(files=dict(dmriprep_path=dmriprep_path))
    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)
    cmd = "pyAFQ " + config_file
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)
    # Make sure the CLI did indeed generate these:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0], 'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))
    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0], 'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir, bids_path, _ = get_temp_hardi()
    t1_path = op.join(tmpdir.name, "T1.nii.gz")
    nib.save(afd.read_mni_template(mask=True, weight="T1w"), t1_path)
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="dti",
                           seed_mask=RoiMask(),
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)
    clean_params = dict(return_idx=True)
    myafq = api.AFQ(
        bids_path=bids_path,
        dmriprep='vistasoft',
        bundle_info=bundle_names,
        scalars=["dti_FA", "dti_MD", TemplateScalar("T1", t1_path)],
        robust_tensor_fitting=True,
        tracking_params=tracking_params,
        segmentation_params=segmentation_params,
        clean_params=clean_params)
    # Replace the mapping and streamlines with precomputed:
    # (avoids running registration in the test itself)
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    # Keep only streamlines with >100 points, moved into the voxel frame.
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine["01"])))
    mapping_file = op.join(
        myafq.results_dir["01"],
        'sub-01_ses-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    # Identity prealign: the precomputed mapping already matches the data.
    reg_prealign_file = op.join(
        myafq.results_dir["01"],
        'sub-01_ses-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))
    tgram = load_tractogram(myafq.bundles["01"], myafq.img["01"])
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict,
                                   myafq.img["01"])
    npt.assert_(len(bundles['CST_L']) > 0)
    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.results_dir["01"], 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))
    # Test bundles exporting:
    myafq.export_indiv_bundles()
    assert op.exists(
        op.join(
            myafq.results_dir["01"], 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa
    tract_profile_fname = myafq.profiles["01"]
    tract_profiles = pd.read_csv(tract_profile_fname)
    assert tract_profiles.shape == (500, 6)
    # Property access triggers plot generation as a side effect.
    myafq.tract_profile_plots
    assert op.exists(
        op.join(
            myafq.results_dir["01"], "tract_profile_plots",
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_fa_profile_plots.png'
        ))  # noqa
    assert op.exists(
        op.join(
            myafq.results_dir["01"], "tract_profile_plots",
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_md_profile_plots.png'
        ))  # noqa
    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.results_dir["01"], 'bundles'))
    shutil.rmtree(op.join(myafq.results_dir["01"], 'ROIs'))
    os.remove(tract_profile_fname)
    # Test the CLI:
    print("Running the CLI:")
    # Set up config to use the same parameters as above:
    # ROI mask needs to be put in quotes in config
    tracking_params = dict(odf_model="DTI",
                           seed_mask="RoiMask()",
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    config = dict(BIDS=dict(bids_path=bids_path, dmriprep='vistasoft'),
                  DATA=dict(robust_tensor_fitting=True),
                  BUNDLES=dict(bundle_info=bundle_names,
                               scalars=[
                                   "dti_fa", "dti_md",
                                   f"TemplateScalar('T1', '{t1_path}')"
                               ]),
                  VIZ=dict(viz_backend="plotly_no_gif"),
                  TRACTOGRAPHY=tracking_params,
                  SEGMENTATION=segmentation_params,
                  CLEANING=clean_params)
    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)
    # save memory
    # (keep the paths we still need, then drop the AFQ object before the
    # CLI subprocess runs, so both don't hold the data at once)
    results_dir = myafq.results_dir["01"]
    del myafq
    gc.collect()
    cmd = "pyAFQ " + config_file
    completed_process = subprocess.run(cmd, shell=True, capture_output=True)
    if completed_process.returncode != 0:
        print(completed_process.stdout)
        print(completed_process.stderr)
    assert completed_process.returncode == 0
    # The tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(tract_profile_fname)
    assert from_file.shape == (500, 6)
    assert_series_equal(tract_profiles['dti_fa'], from_file['dti_fa'])
    # Make sure the CLI did indeed generate these:
    assert op.exists(
        op.join(results_dir, 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))
    assert op.exists(
        op.join(
            results_dir, 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa