Example #1
def test_segment():
    dpd.fetch_stanford_hardi()
    hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
    hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
    hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
    hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    templates = afd.read_templates()
    bundles = {'CST_L': {'ROIs': [templates['CST_roi1_L'],
                                  templates['CST_roi2_L']],
                         'rules': [True, True]},
               'CST_R': {'ROIs': [templates['CST_roi1_R'],
                                  templates['CST_roi1_R']],
                         'rules': [True, True]}}

    fiber_groups = seg.segment(hardi_fdata,
                               hardi_fbval,
                               hardi_fbvec,
                               streamlines,
                               bundles,
                               mapping=mapping,
                               as_generator=True)

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # There happen to be 8 fibers in the right CST:
    CST_R_sl = list(fiber_groups['CST_R'])
    npt.assert_equal(len(CST_R_sl), 8)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]),
        CST_R_sl)
    npt.assert_equal(tract_profile, np.ones(100))
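The snippets on this page are excerpted from pyAFQ's test suite and omit their import preambles. A minimal sketch of what this example assumes, with the AFQ aliases taken from Example #4 below and the rest inferred from the calls (an assumption, not the original file header):

import os.path as op

import numpy as np
import numpy.testing as npt
import nibabel as nib

import dipy.data as dpd
from dipy.data import fetcher

import AFQ.data as afd
import AFQ.segmentation as seg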
Example #2
def test_density_map():
    file_dict = afd.read_stanford_hardi_tractography()

    # subsample even more
    subsampled_tractography = file_dict["tractography_subsampled.trk"][441:444]
    sft = StatefulTractogram(subsampled_tractography,
                             file_dict["mapping.nii.gz"], Space.VOX)
    density_map = afv.density_map(sft)
    npt.assert_equal(int(np.sum(density_map.get_fdata())), 69)
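A density map counts, for each voxel, how many streamline points fall inside it, which is why summing the returned volume gives a small integer for these three subsampled streamlines. A rough sketch of the same idea using dipy's public API (assumed comparable to the pyAFQ helper afv.density_map, not its actual implementation):

import numpy as np
import nibabel as nib
from dipy.tracking.utils import density_map

def density_map_sketch(sft):
    # Work in voxel coordinates so an identity affine maps points to voxels
    sft.to_vox()
    counts = density_map(sft.streamlines, np.eye(4), sft.dimensions)
    return nib.Nifti1Image(counts.astype(np.float32), sft.affine)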
Example #3
def test_slr_registration():
    # have to import subject sls
    file_dict = afd.read_stanford_hardi_tractography()
    streamlines = file_dict['tractography_subsampled.trk']

    # have to import sls atlas
    afd.fetch_hcp_atlas_16_bundles()
    atlas_fname = op.join(afd.afq_home, 'hcp_atlas_16_bundles',
                          'Atlas_in_MNI_Space_16_bundles', 'whole_brain',
                          'whole_brain_MNI.trk')
    hcp_atlas = load_tractogram(atlas_fname, 'same', bbox_valid_check=False)

    with nbtmp.InTemporaryDirectory() as tmpdir:
        mapping = slr_registration(streamlines,
                                   hcp_atlas.streamlines,
                                   moving_affine=subset_b0_img.affine,
                                   static_affine=subset_t2_img.affine,
                                   moving_shape=subset_b0_img.shape,
                                   static_shape=subset_t2_img.shape,
                                   progressive=False,
                                   greater_than=10,
                                   rm_small_clusters=1,
                                   rng=np.random.RandomState(seed=8))
        warped_moving = mapping.transform(subset_b0)

        npt.assert_equal(warped_moving.shape, subset_t2.shape)
        mapping_fname = op.join(tmpdir, 'mapping.npy')
        write_mapping(mapping, mapping_fname)
        file_mapping = read_mapping(mapping_fname, subset_b0_img,
                                    subset_t2_img)

        # Test that it has the same effect on the data:
        warped_from_file = file_mapping.transform(subset_b0)
        npt.assert_equal(warped_from_file, warped_moving)

        # Test that it is, attribute by attribute, identical:
        for k in mapping.__dict__:
            assert (np.all(
                mapping.__getattribute__(k) == file_mapping.__getattribute__(
                    k)))
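This test depends on module-level fixtures that the excerpt does not show: subset_b0/subset_b0_img (the moving volume) and subset_t2/subset_t2_img (the static volume). Hypothetical stand-ins, just to make the shapes of the calls clear (the real test module builds these from actual data; the paths below are placeholders):

import nibabel as nib

subset_b0_img = nib.load('subset_b0.nii.gz')  # moving image
subset_t2_img = nib.load('subset_t2.nii.gz')  # static image
subset_b0 = subset_b0_img.get_fdata()
subset_t2 = subset_t2_img.get_fdata()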
Example #4
import os.path as op

import nibabel as nib

import dipy.data as dpd
from dipy.data import fetcher
from dipy.io.stateful_tractogram import StatefulTractogram, Space

import AFQ.data as afd
import AFQ.tractography as aft
import AFQ.registration as reg
import AFQ.segmentation as seg
import AFQ.dti as dti
from AFQ.utils.volume import patch_up_roi

dpd.fetch_stanford_hardi()
hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
hardi_img = nib.load(hardi_fdata)
hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
file_dict = afd.read_stanford_hardi_tractography()
mapping = file_dict['mapping.nii.gz']
streamlines = file_dict['tractography_subsampled.trk']
tg = StatefulTractogram(streamlines, hardi_img, Space.RASMM)
tg.to_vox()
streamlines = tg.streamlines

# streamlines = dts.Streamlines(
#     dtu.transform_tracking_output(
#         streamlines[streamlines._lengths > 10],
#         np.linalg.inv(hardi_img.affine)))


def test_segment():

    templates = afd.read_templates()
Example #5
def test_AFQ_data2():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    preafq_path = op.join(tmpdir.name, 'stanford_hardi',
                          'derivatives', 'preafq')
    myafq = api.AFQ(preafq_path=preafq_path,
                    sub_prefix='sub',
                    bundle_list=["SLF", "ARC", "CST", "FP"])

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])

    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0],
                                'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_equal(len(bundles['CST_R']), 2)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'ROIs',
                     'CST_R_roi1_include.nii.gz'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'bundles',
                     'CST_R.trk'))

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                  'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                  'ROIs'))

    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + preafq_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'ROIs',
                     'CST_R_roi1_include.nii.gz'))

    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'bundles',
                     'CST_R.trk'))
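os.system only surfaces the exit status, so a failing CLI run leaves nothing to inspect. Example #11 below switches to subprocess.run for that reason; the equivalent call here would be:

import subprocess

completed = subprocess.run("pyAFQ " + preafq_path, shell=True,
                           capture_output=True)
if completed.returncode != 0:
    print(completed.stdout)
    print(completed.stderr)
assert completed.returncode == 0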
Example #6
def test_AFQ_data_planes():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi', 'derivatives',
                            'dmriprep')
    seg_algo = "planes"
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    seg_algo=seg_algo,
                    bundle_names=bundle_names,
                    odf_model="DTI")

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])

    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0],
                                'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_(len(bundles['CST_L']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))

    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + dmriprep_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))

    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))


# def test_AFQ_data_recobundles():
#     tmpdir = nbtmp.InTemporaryDirectory()
#     afd.fetch_hcp(["100206"], hcp_bucket='hcp-openaccess', profile_name="hcp",
#                   path=tmpdir.name)
#     dmriprep_path = op.join(tmpdir.name, 'HCP', 'derivatives', 'dmriprep')
#     seg_algo = "recobundles"
#     bundle_names = ["F", "CST", "AF", "CC_ForcepsMajor"]
#     myafq = api.AFQ(dmriprep_path=dmriprep_path,
#                     sub_prefix='sub',
#                     seg_algo=seg_algo,
#                     bundle_names=bundle_names,
#                     odf_model="DTI",
#                     b0_threshold=15)

#     # Replace the streamlines with precomputed:
#     path_to_trk = dpd.fetcher.fetch_target_tractogram_hcp()
#     path_to_trk = dpd.fetcher.get_target_tractogram_hcp()
#     sl_file = op.join(myafq.data_frame.results_dir[0], 'sub-100206_sess-01_dwiDTI_det_streamlines.trk')
#     shutil.copy(path_to_trk, sl_file)
#     myafq.data_frame["streamlines_file"] = sl_file
#     print("here")
#     tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
#     print("here")
#     bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
#     npt.assert_(len(bundles['CST_L']) > 0)
Example #7
def test_segment():
    dpd.fetch_stanford_hardi()
    hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
    hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
    hardi_img = nib.load(hardi_fdata)
    hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
    hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines(streamlines[streamlines._lengths > 10],
                             np.linalg.inv(hardi_img.affine)))

    templates = afd.read_templates()
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'prob_map': templates['CST_L_prob_map'],
            'cross_midline': None
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi1_R']],
            'rules': [True, True],
            'prob_map': templates['CST_R_prob_map'],
            'cross_midline': None
        }
    }

    fiber_groups = seg.segment(hardi_fdata, hardi_fbval, hardi_fbvec,
                               streamlines, bundles, mapping)

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # Here's one of them:
    CST_R_sl = fiber_groups['CST_R']
    # Let's make sure there are streamlines in there:
    npt.assert_(len(CST_R_sl) > 0)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]), CST_R_sl)
    npt.assert_almost_equal(tract_profile, np.ones(100))

    # Test providing an array input to calculate_tract_profile:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]),
        seg._resample_bundle(CST_R_sl, 100))

    npt.assert_almost_equal(tract_profile, np.ones(100))
    clean_sl = seg.clean_fiber_group(CST_R_sl)
    # Since there are only 8 streamlines here, nothing should happen:
    npt.assert_equal(clean_sl, CST_R_sl)

    # Setting minimum number of streamlines to a smaller number and
    # threshold to a relatively small number will exclude some streamlines:
    clean_sl = seg.clean_fiber_group(CST_R_sl, min_sl=2, clean_threshold=2)
    npt.assert_equal(len(clean_sl), 3)

    # What if you don't have probability maps?
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'cross_midline': False
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi1_R']],
            'rules': [True, True],
            'cross_midline': False
        }
    }

    fiber_groups = seg.segment(hardi_fdata, hardi_fbval, hardi_fbvec,
                               streamlines, bundles, mapping)

    # This condition should still hold
    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']) > 0)
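The all-ones volume makes the expected profile trivial: the tract profile samples the scalar volume at 100 nodes along the bundle, so every node reads 1.0. A sketch of that computation with dipy primitives (assumed to mirror the idea behind seg.calculate_tract_profile, not its exact implementation, which may weight streamlines differently):

import numpy as np
from dipy.tracking.streamline import set_number_of_points, values_from_volume

def tract_profile_sketch(volume, streamlines, n_points=100):
    # Resample every streamline to the same number of nodes
    resampled = set_number_of_points(streamlines, n_points)
    # Sample the volume at each node, then average across streamlines
    values = np.array(values_from_volume(volume, resampled, np.eye(4)))
    return values.mean(axis=0)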
Example #8
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir, bids_path, _ = get_temp_hardi()
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="dti",
                           seed_mask=RoiMask(),
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)

    clean_params = dict(return_idx=True)

    myafq = api.AFQ(bids_path=bids_path,
                    dmriprep='vistasoft',
                    bundle_info=bundle_names,
                    scalars=["dti_FA", "dti_MD"],
                    robust_tensor_fitting=True,
                    tracking_params=tracking_params,
                    segmentation_params=segmentation_params,
                    clean_params=clean_params)

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine[0])))

    mapping_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_ses-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_ses-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = load_tractogram(myafq.bundles[0], myafq.dwi_img[0])

    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict, myafq.dwi_img[0])
    npt.assert_(len(bundles['CST_L']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0], 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa

    # Test creation of file with bundle indices:
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0],
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-clean_tractography_idx.json'
        ))  # noqa

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (400, 5)

    myafq.plot_tract_profiles()
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0],
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_fa_profile_plots.png'
        ))  # noqa

    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0],
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_md_profile_plots.png'
        ))  # noqa

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))

    # Test the CLI:
    print("Running the CLI:")

    # Set up config to use the same parameters as above:
    # ROI mask needs to be put in quotes in config
    tracking_params = dict(odf_model="DTI",
                           seed_mask="RoiMask()",
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    config = dict(BIDS=dict(bids_path=bids_path, dmriprep='vistasoft'),
                  REGISTRATION=dict(robust_tensor_fitting=True),
                  BUNDLES=dict(bundle_info=bundle_names,
                               scalars=["dti_fa", "dti_md"]),
                  VIZ=dict(viz_backend="plotly_no_gif"),
                  TRACTOGRAPHY=tracking_params,
                  SEGMENTATION=segmentation_params,
                  CLEANING=clean_params)

    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)

    cmd = "pyAFQ " + config_file
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(
        myafq._get_fname(myafq.data_frame.iloc[0], '_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (400, 7)
    assert_series_equal(combined_profiles['dti_fa'], from_file['dti_fa'])

    # Make sure the CLI did indeed generate these:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    myafq.export_bundles()
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0], 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa
Example #9
def test_segment():
    dpd.fetch_stanford_hardi()
    hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
    hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
    hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
    hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    templates = afd.read_templates()
    bundles = {'CST_L': {'ROIs': [templates['CST_roi1_L'],
                                  templates['CST_roi2_L']],
                         'rules': [True, True],
                         'prob_map': templates['CST_L_prob_map'],
                         'cross_midline': False},
               'CST_R': {'ROIs': [templates['CST_roi1_R'],
                                  templates['CST_roi1_R']],
                         'rules': [True, True],
                         'prob_map': templates['CST_R_prob_map'],
                         'cross_midline': False}}

    fiber_groups = seg.segment(hardi_fdata,
                               hardi_fbval,
                               hardi_fbvec,
                               streamlines,
                               bundles,
                               mapping=mapping,
                               as_generator=True)

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # There happen to be 5 fibers in the right CST (materialize the
    # generator before measuring it):
    CST_R_sl = list(fiber_groups['CST_R'])
    npt.assert_equal(len(CST_R_sl), 5)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]),
        CST_R_sl)
    npt.assert_almost_equal(tract_profile, np.ones(100))

    # Test providing an array input to calculate_tract_profile:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]),
        seg._resample_bundle(CST_R_sl, 100))

    npt.assert_almost_equal(tract_profile, np.ones(100))
    clean_sl = seg.clean_fiber_group(CST_R_sl)
    # Since there are only 5 streamlines here, nothing should happen:
    npt.assert_equal(clean_sl, CST_R_sl)

    # Setting minimum number of streamlines to a smaller number and
    # threshold to a relatively small number will exclude some streamlines:
    clean_sl = seg.clean_fiber_group(CST_R_sl, min_sl=2, clean_threshold=2)
    npt.assert_equal(len(clean_sl), 3)

    # What if you don't have probability maps?
    bundles = {'CST_L': {'ROIs': [templates['CST_roi1_L'],
                                  templates['CST_roi2_L']],
                         'rules': [True, True],
                         'cross_midline': False},
               'CST_R': {'ROIs': [templates['CST_roi1_R'],
                                  templates['CST_roi1_R']],
                         'rules': [True, True],
                         'cross_midline': False}}

    fiber_groups = seg.segment(hardi_fdata,
                               hardi_fbval,
                               hardi_fbvec,
                               streamlines,
                               bundles,
                               mapping=mapping,
                               as_generator=True)

    # This condition should still hold
    npt.assert_equal(len(fiber_groups), 2)
    # But one of the streamlines has switched identities without the
    # probability map to guide selection:
    npt.assert_equal(len(list(fiber_groups['CST_R'])), 6)
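With as_generator=True each fiber group is a generator, which is why the assertions above first materialize a group with list() before calling len() (compare Example #1). If the groups need to be reused, convert them all up front:

fiber_groups = {name: list(group) for name, group in fiber_groups.items()}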
Example #10
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi',
                            'derivatives', 'dmriprep')
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="DTI")
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)

    clean_params = dict(return_idx=True)

    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    bundle_names=bundle_names,
                    scalars=["dti_fa", "dti_md"],
                    tracking_params=tracking_params,
                    segmentation_params=segmentation_params,
                    clean_params=clean_params)

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det_tractography.trk')
    sft = StatefulTractogram(streamlines, myafq.data_frame.dwi_file[0],
                             Space.VOX)
    save_tractogram(sft, sl_file, bbox_valid_check=False)

    mapping_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = load_tractogram(myafq.bundles[0], myafq.dwi_img[0])

    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict, myafq.dwi_img[0])
    npt.assert_(len(bundles['CST_R']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa

    # Test creation of file with bundle indices:
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-clean_tractography_idx.json'))  # noqa

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'ROIs'))

    # Test the CLI:
    print("Running the CLI:")

    # Bare bones config only points to the files
    config = dict(files=dict(dmriprep_path=dmriprep_path))

    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)

    cmd = "pyAFQ " + config_file
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))

    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa
Example #11
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for track segmentation
    """
    tmpdir, bids_path, _ = get_temp_hardi()
    t1_path = op.join(tmpdir.name, "T1.nii.gz")
    nib.save(afd.read_mni_template(mask=True, weight="T1w"), t1_path)

    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="dti",
                           seed_mask=RoiMask(),
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)

    clean_params = dict(return_idx=True)

    myafq = api.AFQ(
        bids_path=bids_path,
        dmriprep='vistasoft',
        bundle_info=bundle_names,
        scalars=["dti_FA", "dti_MD",
                 TemplateScalar("T1", t1_path)],
        robust_tensor_fitting=True,
        tracking_params=tracking_params,
        segmentation_params=segmentation_params,
        clean_params=clean_params)

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine["01"])))

    mapping_file = op.join(
        myafq.results_dir["01"],
        'sub-01_ses-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(
        myafq.results_dir["01"],
        'sub-01_ses-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = load_tractogram(myafq.bundles["01"], myafq.img["01"])

    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict, myafq.img["01"])
    npt.assert_(len(bundles['CST_L']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.results_dir["01"], 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    # Test bundles exporting:
    myafq.export_indiv_bundles()
    assert op.exists(
        op.join(
            myafq.results_dir["01"], 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa

    tract_profile_fname = myafq.profiles["01"]
    tract_profiles = pd.read_csv(tract_profile_fname)
    assert tract_profiles.shape == (500, 6)

    myafq.tract_profile_plots
    assert op.exists(
        op.join(
            myafq.results_dir["01"], "tract_profile_plots",
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_fa_profile_plots.png'
        ))  # noqa

    assert op.exists(
        op.join(
            myafq.results_dir["01"], "tract_profile_plots",
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_md_profile_plots.png'
        ))  # noqa

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.results_dir["01"], 'bundles'))

    shutil.rmtree(op.join(myafq.results_dir["01"], 'ROIs'))
    os.remove(tract_profile_fname)

    # Test the CLI:
    print("Running the CLI:")

    # Set up config to use the same parameters as above:
    # ROI mask needs to be put in quotes in config
    tracking_params = dict(odf_model="DTI",
                           seed_mask="RoiMask()",
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    config = dict(BIDS=dict(bids_path=bids_path, dmriprep='vistasoft'),
                  DATA=dict(robust_tensor_fitting=True),
                  BUNDLES=dict(bundle_info=bundle_names,
                               scalars=[
                                   "dti_fa", "dti_md",
                                   f"TemplateScalar('T1', '{t1_path}')"
                               ]),
                  VIZ=dict(viz_backend="plotly_no_gif"),
                  TRACTOGRAPHY=tracking_params,
                  SEGMENTATION=segmentation_params,
                  CLEANING=clean_params)

    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)

    # save memory
    results_dir = myafq.results_dir["01"]
    del myafq
    gc.collect()

    cmd = "pyAFQ " + config_file
    completed_process = subprocess.run(cmd, shell=True, capture_output=True)
    if completed_process.returncode != 0:
        print(completed_process.stdout)
    print(completed_process.stderr)
    assert completed_process.returncode == 0
    # The tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(tract_profile_fname)

    assert from_file.shape == (500, 6)
    assert_series_equal(tract_profiles['dti_fa'], from_file['dti_fa'])

    # Make sure the CLI did indeed generate these:
    assert op.exists(
        op.join(results_dir, 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    assert op.exists(
        op.join(
            results_dir, 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa