Example 1
def test_split_streamline():
    streamlines = dts.Streamlines([
        np.array([[1., 2., 3.], [4., 5., 6.]]),
        np.array([[7., 8., 9.], [10., 11., 12.], [13., 14., 15.]])
    ])
    sl_to_split = 1
    split_idx = 1
    new_streamlines = aus.split_streamline(streamlines, sl_to_split, split_idx)
    test_streamlines = dts.Streamlines([
        np.array([[1., 2., 3.], [4., 5., 6.]]),
        np.array([[7., 8., 9.]]),
        np.array([[10., 11., 12.], [13., 14., 15.]])
    ])

    # Test equality of the underlying dict items:
    for k in new_streamlines.__dict__.keys():
        if isinstance(new_streamlines.__dict__[k], np.ndarray):
            npt.assert_array_equal(new_streamlines.__dict__[k],
                                   test_streamlines.__dict__[k])
        else:
            assert new_streamlines.__dict__[k] == test_streamlines.__dict__[k]
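For context, here is a minimal sketch of what a split_streamline helper consistent with this test could look like. This is an illustration written for this page, not pyAFQ's actual implementation:

import numpy as np
import dipy.tracking.streamline as dts


def split_streamline_sketch(streamlines, sl_to_split, split_idx):
    # Hypothetical re-implementation: split streamline number
    # `sl_to_split` at point `split_idx`, keeping all other
    # streamlines unchanged.
    new_sls = []
    for idx, sl in enumerate(streamlines):
        if idx == sl_to_split:
            new_sls.append(np.asarray(sl[:split_idx]))
            new_sls.append(np.asarray(sl[split_idx:]))
        else:
            new_sls.append(np.asarray(sl))
    return dts.Streamlines(new_sls)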
Example 2
def test_bundles_to_tgram():
    bundles = {
        'b1':
        dts.Streamlines([
            np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 1], [0, 0, 1.5]]),
            np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 1, 1]])
        ]),
        'b2':
        dts.Streamlines([
            np.array([[0, 0, 0], [0, 0, 0.5], [0, 0, 2], [0, 0, 2.5]]),
            np.array([[0, 0, 0], [0, 0.5, 0.5], [0, 2, 2]])
        ])
    }

    bundle_dict = {'b1': {'uid': 1}, 'b2': {'uid': 2}}
    affine = np.array([[2., 0., 0., -80.], [0., 2., 0., -120.],
                       [0., 0., 2., -60.], [0., 0., 0., 1.]])
    tgram = aus.bundles_to_tgram(bundles, bundle_dict, affine)
    new_bundles = aus.tgram_to_bundles(tgram, bundle_dict)
    for k1 in bundles.keys():
        for k2 in bundles[k1].__dict__.keys():
            npt.assert_equal(new_bundles[k1].__dict__[k2],
                             bundles[k1].__dict__[k2])
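The round trip exercised here can be sketched by tagging each streamline with its bundle's uid in data_per_streamline, so that bundles are recoverable from the tractogram. A sketch using nibabel's Tractogram directly, not pyAFQ's actual implementation:

import numpy as np
import nibabel as nib
import dipy.tracking.streamline as dts


def bundles_to_tgram_sketch(bundles, bundle_dict, affine):
    # Hypothetical helper: concatenate the bundles, tagging each
    # streamline with its bundle's uid.
    sls, uids = [], []
    for name in bundles:
        for sl in bundles[name]:
            sls.append(sl)
            uids.append(bundle_dict[name]['uid'])
    return nib.streamlines.Tractogram(
        sls,
        data_per_streamline={'bundle': np.asarray(uids)[:, None]},
        affine_to_rasmm=affine)


def tgram_to_bundles_sketch(tgram, bundle_dict):
    # Hypothetical inverse: group the streamlines back by uid.
    uids = np.squeeze(np.asarray(tgram.data_per_streamline['bundle']))
    return {name: dts.Streamlines(
                [s for s, u in zip(tgram.streamlines, uids)
                 if u == bundle_dict[name]['uid']])
            for name in bundle_dict}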
Example 3
    def move_streamlines(self, tg=None, reg_algo=None):
        """Streamline-based registration of a whole-brain tractogram to
        the MNI whole-brain atlas.

        Parameters
        ----------
        tg : StatefulTractogram, optional
            The tractogram to register. Defaults to the one already
            associated with this object.
        reg_algo : str, optional
            "slr" or "syn". If None, "syn" is used when a volumetric
            mapping is available and "slr" otherwise.
        """
        tg = self._read_tg(tg=tg)
        if reg_algo is None:
            if self.mapping is None:
                reg_algo = 'slr'
            else:
                reg_algo = 'syn'

        if reg_algo == "slr":
            self.logger.info("Registering tractogram with SLR")
            atlas = self.bundle_dict['whole_brain']
            self.moved_sl, _, _, _ = whole_brain_slr(
                atlas,
                self.tg.streamlines,
                x0='affine',
                verbose=False,
                progressive=self.progressive,
                greater_than=self.greater_than,
                rm_small_clusters=self.rm_small_clusters,
                rng=self.rng)
        elif reg_algo == "syn":
            self.logger.info("Registering tractogram based on syn")
            self.tg.to_rasmm()
            delta = dts.values_from_volume(self.mapping.forward,
                                           self.tg.streamlines, np.eye(4))
            self.moved_sl = dts.Streamlines(
                [d + s for d, s in zip(delta, self.tg.streamlines)])
            self.tg.to_vox()

        if self.save_intermediates is not None:
            moved_sft = StatefulTractogram(self.moved_sl, self.reg_template,
                                           Space.RASMM)
            save_tractogram(moved_sft,
                            op.join(self.save_intermediates, 'sls_in_mni.trk'),
                            bbox_valid_check=False)
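For reference, a minimal standalone sketch of the "slr" branch, calling dipy's whole_brain_slr directly. Both arguments are assumed to be Streamlines objects in a shared RASMM space:

from dipy.align.streamlinear import whole_brain_slr


def slr_register_sketch(atlas_sl, subject_sl):
    # Streamline-linear registration of the subject streamlines to the
    # atlas, mirroring the call above. Returns the moved streamlines,
    # the 4x4 transform, and the two sets of QuickBundles centroids
    # used in the fit.
    return whole_brain_slr(atlas_sl, subject_sl,
                           x0='affine',
                           progressive=True,
                           verbose=False)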
Example 4
def _local_tracking(seeds,
                    dg,
                    threshold_classifier,
                    affine,
                    step_size=0.5,
                    min_length=10,
                    max_length=250):
    """
    Helper function
    """
    if len(seeds.shape) == 1:
        seeds = seeds[None, ...]
    tracker = LocalTracking(dg,
                            threshold_classifier,
                            seeds,
                            affine,
                            step_size=step_size)

    streamlines = dts.Streamlines(tracker)
    streamlines = streamlines[streamlines._lengths * step_size > min_length]
    streamlines = streamlines[streamlines._lengths * step_size < max_length]
    return streamlines
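A note on the filter above: `_lengths` is the ArraySequence's private per-streamline point count, so n_points * step_size approximates the physical length under a fixed step size. An equivalent sketch using dipy's public length function (exact arc length) instead of the private attribute:

import numpy as np
import dipy.tracking.streamline as dts
from dipy.tracking.streamline import length


def filter_by_length(streamlines, min_length=10, max_length=250):
    # Keep streamlines whose arc length falls strictly between the
    # bounds, in the units of the streamline coordinates.
    lengths = np.array([length(sl) for sl in streamlines])
    keep = (lengths > min_length) & (lengths < max_length)
    return dts.Streamlines(
        [sl for sl, k in zip(streamlines, keep) if k])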
Example 5
def _local_tracking(seeds,
                    dg,
                    threshold_classifier,
                    affine,
                    step_size=0.5,
                    min_length=10,
                    max_length=250):
    """
    Helper function
    """
    if len(seeds.shape) == 1:
        seeds = seeds[None, ...]
    tracker = LocalTracking(dg,
                            threshold_classifier,
                            seeds,
                            affine,
                            step_size=step_size)

    return dts.Streamlines([
        sl for sl in tracker
        if min_length < sl.shape[0] * step_size < max_length
    ])
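This variant applies the same length filter as Example 4, but does so lazily as the tracker yields each streamline, rather than first materializing the full Streamlines object and indexing it through the private `_lengths` attribute. The results are identical for fixed-step tracking; this version simply avoids holding rejected streamlines in memory.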
Example 6
                                                 gtab,
                                                 template=MNI_T2_img)
    reg.write_mapping(mapping, './mapping.nii.gz')
else:
    mapping = reg.read_mapping('./mapping.nii.gz', img, MNI_T2_img)

bundle_names = ["CST", "UF", "CC_ForcepsMajor", "CC_ForcepsMinor"]
bundles = api.make_bundle_dict(bundle_names=bundle_names, seg_algo="reco")

print("Tracking...")
if not op.exists('dti_streamlines_reco.trk'):
    seed_roi = np.zeros(img.shape[:-1])
    for bundle in bundles:
        if bundle != 'whole_brain':
            sl_xform = dts.Streamlines(
                dtu.transform_tracking_output(bundles[bundle]['sl'],
                                              MNI_T2_img.affine))

            delta = dts.values_from_volume(mapping.backward, sl_xform,
                                           np.eye(4))
            sl_xform = [d + s for d, s in zip(delta, sl_xform)]

            sl_xform = dts.Streamlines(
                dtu.transform_tracking_output(sl_xform,
                                              np.linalg.inv(
                                                  MNI_T2_img.affine)))

            sft = StatefulTractogram(sl_xform, img, Space.RASMM)
            save_tractogram(sft, f'./{bundle}_atlas.trk')

            sl_xform = dts.Streamlines(
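The snippet is cut off by the source at this point. For context, the nonlinear warp pattern it applies (sample a displacement field at every streamline coordinate, then shift each point by its sampled displacement) can be sketched in isolation. Here `disp_field` is assumed to be an (X, Y, Z, 3) displacement volume on the same grid as the streamline coordinates:

import numpy as np
import dipy.tracking.streamline as dts


def apply_displacement(streamlines, disp_field):
    # Sample the displacement vector at every streamline point:
    delta = dts.values_from_volume(disp_field, streamlines, np.eye(4))
    # Shift each point by its displacement:
    return dts.Streamlines([np.asarray(d) + s
                            for d, s in zip(delta, streamlines)])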
Example 7
def test_AFQ_data2():
    """
    Test with some actual data again, this time for tract segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    preafq_path = op.join(tmpdir.name, 'stanford_hardi',
                          'derivatives', 'preafq')
    myafq = api.AFQ(preafq_path=preafq_path,
                    sub_prefix='sub',
                    bundle_list=["SLF", "ARC", "CST", "FP"])

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])

    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0],
                                'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_equal(len(bundles['CST_R']), 2)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'ROIs',
                     'CST_R_roi1_include.nii.gz'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'bundles',
                     'CST_R.trk'))

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)


    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                  'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                  'ROIs'))

    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + preafq_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'ROIs',
                     'CST_R_roi1_include.nii.gz'))

    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'bundles',
                     'CST_R.trk'))
Example 8
def test_AFQ_data_planes():
    """
    Test with some actual data again, this time for tract segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi', 'derivatives',
                            'dmriprep')
    seg_algo = "planes"
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    seg_algo=seg_algo,
                    bundle_names=bundle_names,
                    odf_model="DTI")

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])

    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0],
                                'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_(len(bundles['CST_L']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))

    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + dmriprep_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))

    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))


# def test_AFQ_data_recobundles():
#     tmpdir = nbtmp.InTemporaryDirectory()
#     afd.fetch_hcp(["100206"], hcp_bucket='hcp-openaccess', profile_name="hcp",
#                   path=tmpdir.name)
#     dmriprep_path = op.join(tmpdir.name, 'HCP', 'derivatives', 'dmriprep')
#     seg_algo = "recobundles"
#     bundle_names = ["F", "CST", "AF", "CC_ForcepsMajor"]
#     myafq = api.AFQ(dmriprep_path=dmriprep_path,
#                     sub_prefix='sub',
#                     seg_algo=seg_algo,
#                     bundle_names=bundle_names,
#                     odf_model="DTI",
#                     b0_threshold=15)

#     # Replace the streamlines with precomputed:
#     path_to_trk = dpd.fetcher.fetch_target_tractogram_hcp()
#     path_to_trk = dpd.fetcher.get_target_tractogram_hcp()
#     sl_file = op.join(myafq.data_frame.results_dir[0], 'sub-100206_sess-01_dwiDTI_det_streamlines.trk')
#     shutil.copy(path_to_trk, sl_file)
#     myafq.data_frame["streamlines_file"] = sl_file
#     print("here")
#     tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
#     print("here")
#     bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
#     npt.assert_(len(bundles['CST_L']) > 0)
Example 9
    dti_params = dti.fit_dti(hardi_fdata, hardi_fbval, hardi_fbvec,
                             out_dir='.')
else:
    dti_params = {'FA': './dti_FA.nii.gz',
                  'params': './dti_params.nii.gz'}

print("Tracking...")
if not op.exists('dti_streamlines.trk'):
    streamlines = list(aft.track(dti_params['params']))
    aus.write_trk('./dti_streamlines.trk', streamlines, affine=img.affine)
else:
    tg = nib.streamlines.load('./dti_streamlines.trk').tractogram
    streamlines = tg.apply_affine(np.linalg.inv(img.affine)).streamlines

streamlines = dts.Streamlines(dtu.move_streamlines(
    [s for s in streamlines if s.shape[0] > 100],
    np.linalg.inv(img.affine)))

templates = afd.read_templates()
bundle_names = ["CST", "ILF"]

bundles = {}
for name in bundle_names:
    for hemi in ['_R', '_L']:
        bundles[name + hemi] = {
            'ROIs': [templates[name + '_roi1' + hemi],
                     templates[name + '_roi2' + hemi]],
            'rules': [True, True],
            'prob_map': templates[name + hemi + '_prob_map'],
            'cross_midline': False}
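For illustration, after this loop each entry has the same shape as the hand-written bundle definitions used elsewhere on this page. For example, this assertion holds for the right CST (assuming the template keys exist):

assert bundles['CST_R'] == {
    'ROIs': [templates['CST_roi1_R'], templates['CST_roi2_R']],
    'rules': [True, True],
    'prob_map': templates['CST_R_prob_map'],
    'cross_midline': False}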
Example 10
def test_segment():
    dpd.fetch_stanford_hardi()
    hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
    hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
    hardi_img = nib.load(hardi_fdata)
    hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
    hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines(streamlines[streamlines._lengths > 10],
                             np.linalg.inv(hardi_img.affine)))

    templates = afd.read_templates()
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'prob_map': templates['CST_L_prob_map'],
            'cross_midline': None
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi2_R']],
            'rules': [True, True],
            'prob_map': templates['CST_R_prob_map'],
            'cross_midline': None
        }
    }

    fiber_groups = seg.segment(hardi_fdata, hardi_fbval, hardi_fbvec,
                               streamlines, bundles, mapping)

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # Here's one of them:
    CST_R_sl = fiber_groups['CST_R']
    # Let's make sure there are streamlines in there:
    npt.assert_(len(CST_R_sl) > 0)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]), CST_R_sl)
    npt.assert_almost_equal(tract_profile, np.ones(100))

    # Test providing an array input to calculate_tract_profile:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]),
        seg._resample_bundle(CST_R_sl, 100))

    npt.assert_almost_equal(tract_profile, np.ones(100))
    clean_sl = seg.clean_fiber_group(CST_R_sl)
    # Since there are only 8 streamlines here, nothing should happen:
    npt.assert_equal(clean_sl, CST_R_sl)

    # Setting minimum number of streamlines to a smaller number and
    # threshold to a relatively small number will exclude some streamlines:
    clean_sl = seg.clean_fiber_group(CST_R_sl, min_sl=2, clean_threshold=2)
    npt.assert_equal(len(clean_sl), 3)

    # What if you don't have probability maps?
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'cross_midline': False
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi2_R']],
            'rules': [True, True],
            'cross_midline': False
        }
    }

    fiber_groups = seg.segment(hardi_fdata, hardi_fbval, hardi_fbvec,
                               streamlines, bundles, mapping)

    # This condition should still hold
    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']) > 0)
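As an aside, recent dipy versions ship a comparable routine, dipy.stats.analysis.afq_profile. A sketch of the same all-ones profile computed with it; the identity affine assumes, as in the test above, that the streamlines are in voxel space:

import numpy as np
import nibabel as nib
from dipy.stats.analysis import afq_profile


def ones_profile_sketch(fdata, bundle):
    # Sample an all-ones volume along `bundle`, averaged at 100 nodes;
    # the result should be a vector of 100 ones.
    data = np.ones(nib.load(fdata).shape[:3])
    return afq_profile(data, bundle, np.eye(4))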
Example 11
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for tract segmentation
    """
    tmpdir, bids_path, _ = get_temp_hardi()
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="dti",
                           seed_mask=RoiMask(),
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)

    clean_params = dict(return_idx=True)

    myafq = api.AFQ(bids_path=bids_path,
                    dmriprep='vistasoft',
                    bundle_info=bundle_names,
                    scalars=["dti_FA", "dti_MD"],
                    robust_tensor_fitting=True,
                    tracking_params=tracking_params,
                    segmentation_params=segmentation_params,
                    clean_params=clean_params)

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine[0])))

    mapping_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_ses-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_ses-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = load_tractogram(myafq.bundles[0], myafq.dwi_img[0])

    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict, myafq.dwi_img[0])
    npt.assert_(len(bundles['CST_L']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0], 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa

    # Test creation of file with bundle indices:
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0],
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-clean_tractography_idx.json'
        ))  # noqa

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (400, 5)

    myafq.plot_tract_profiles()
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0],
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_fa_profile_plots.png'
        ))  # noqa

    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0],
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_md_profile_plots.png'
        ))  # noqa

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))

    # Test the CLI:
    print("Running the CLI:")

    # Set up config to use the same parameters as above:
    # ROI mask needs to be put in quotes in config
    tracking_params = dict(odf_model="DTI",
                           seed_mask="RoiMask()",
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    config = dict(BIDS=dict(bids_path=bids_path, dmriprep='vistasoft'),
                  REGISTRATION=dict(robust_tensor_fitting=True),
                  BUNDLES=dict(bundle_info=bundle_names,
                               scalars=["dti_fa", "dti_md"]),
                  VIZ=dict(viz_backend="plotly_no_gif"),
                  TRACTOGRAPHY=tracking_params,
                  SEGMENTATION=segmentation_params,
                  CLEANING=clean_params)

    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)

    cmd = "pyAFQ " + config_file
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(
        myafq._get_fname(myafq.data_frame.iloc[0], '_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (400, 7)
    assert_series_equal(combined_profiles['dti_fa'], from_file['dti_fa'])

    # Make sure the CLI did indeed generate these:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    myafq.export_bundles()
    assert op.exists(
        op.join(
            myafq.data_frame['results_dir'][0], 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa
Example 12
def segment(fdata, fbval, fbvec, streamlines, bundle_dict, mapping,
            reg_prealign=None, b0_threshold=0, reg_template=None,
            prob_threshold=0):
    """
    Segment streamlines into bundles based on inclusion ROIs.

    Parameters
    ----------
    fdata, fbval, fbvec : str
        Full path to data, bvals, bvecs

    streamlines : list of 2D arrays
        Each array is a streamline, shape (3, N).

    bundle_dict: dict
        The format is something like::

            {'name': {'ROIs':[img1, img2],
            'rules':[True, True]},
            'prob_map': img3,
            'cross_midline': False}

    mapping : a DiffeomorphicMapping object
        Used to align the ROIs to the data.

    reg_template : str or nib.Nifti1Image, optional.
        Template to use for registration (defaults to the MNI T2)

    mapping : DiffeomorphicMap object, str or nib.Nifti1Image, optional
        A mapping between DWI space and a template. Defaults to generate
        this.

    prob_threshold : float.
        Initial cleaning of fiber groups is done using probability maps from
        [Hua2008]_. Here, we choose an average probability that needs to be
        exceeded for an individual streamline to be retained. Default: 0.

    References
    ----------
    .. [Hua2008] Hua K, Zhang J, Wakana S, Jiang H, Li X, et al. (2008)
       Tract probability maps in stereotaxic spaces: analyses of white
       matter anatomy and tract-specific quantification. Neuroimage 39:
       336-347
    """
    img, _, gtab, _ = ut.prepare_data(fdata, fbval, fbvec,
                                      b0_threshold=b0_threshold)

    tol = dts.dist_to_corner(img.affine)

    if reg_template is None:
        reg_template = dpd.read_mni_template()

    # Classify the streamlines and split those that: 1) cross the
    # midline, and 2) pass under 10 mm below the mid-point of their
    # representation in the template space:
    xform_sl, crosses = split_streamlines(streamlines, img)

    if isinstance(mapping, str) or isinstance(mapping, nib.Nifti1Image):
        if reg_prealign is None:
            reg_prealign = np.eye(4)
        mapping = reg.read_mapping(mapping, img, reg_template,
                                   prealign=reg_prealign)

    fiber_probabilities = np.zeros((len(xform_sl), len(bundle_dict)))

    # For expedience, we approximate each streamline as a 100 point curve:
    fgarray = _resample_bundle(xform_sl, 100)
    streamlines_in_bundles = np.zeros((len(xform_sl), len(bundle_dict)))
    min_dist_coords = np.zeros((len(xform_sl), len(bundle_dict), 2))

    fiber_groups = {}

    for bundle_idx, bundle in enumerate(bundle_dict):
        rules = bundle_dict[bundle]['rules']
        include_rois = []
        exclude_rois = []
        for rule_idx, rule in enumerate(rules):
            roi = bundle_dict[bundle]['ROIs'][rule_idx]
            if not isinstance(roi, np.ndarray):
                roi = roi.get_data()
            warped_roi = auv.patch_up_roi(
                (mapping.transform_inverse(
                    roi,
                    interpolation='linear')) > 0)

            if rule:
                # include ROI:
                include_rois.append(np.array(np.where(warped_roi)).T)
            else:
                # Exclude ROI:
                exclude_rois.append(np.array(np.where(warped_roi)).T)

        crosses_midline = bundle_dict[bundle]['cross_midline']

        # If no probability map is provided, default to all ones with
        # the same shape as the ROIs:
        prob_map = bundle_dict[bundle].get('prob_map', np.ones(roi.shape))

        if not isinstance(prob_map, np.ndarray):
            prob_map = prob_map.get_data()
        warped_prob_map = mapping.transform_inverse(prob_map,
                                                    interpolation='nearest')
        fiber_probabilities = dts.values_from_volume(warped_prob_map,
                                                     fgarray)
        fiber_probabilities = np.mean(fiber_probabilities, -1)

        for sl_idx, sl in enumerate(xform_sl):
            if fiber_probabilities[sl_idx] > prob_threshold:
                if crosses_midline is not None:
                    if crosses[sl_idx]:
                        # This means that the streamline does
                        # cross the midline:
                        if crosses_midline:
                            # This is what we want, keep going
                            pass
                        else:
                            # This is not what we want, skip to next streamline
                            continue

                is_close, dist = _check_sl_with_inclusion(sl, include_rois,
                                                          tol)
                if is_close:
                    is_far = _check_sl_with_exclusion(sl, exclude_rois,
                                                      tol)
                    if is_far:
                        min_dist_coords[sl_idx, bundle_idx, 0] =\
                            np.argmin(dist[0], 0)[0]
                        min_dist_coords[sl_idx, bundle_idx, 1] =\
                            np.argmin(dist[1], 0)[0]
                        streamlines_in_bundles[sl_idx, bundle_idx] =\
                            fiber_probabilities[sl_idx]

    # Eliminate any fibers not selected using the plane ROIs:
    possible_fibers = np.sum(streamlines_in_bundles, -1) > 0
    xform_sl = xform_sl[possible_fibers]
    streamlines_in_bundles = streamlines_in_bundles[possible_fibers]
    min_dist_coords = min_dist_coords[possible_fibers]
    bundle_choice = np.argmax(streamlines_in_bundles, -1)

    # We do another round through, so that we can orient all the
    # streamlines within a bundle in the same orientation with respect to
    # the ROIs. This order is ARBITRARY but CONSISTENT (going from ROI0
    # to ROI1).
    for bundle_idx, bundle in enumerate(bundle_dict):
        select_idx = np.where(bundle_choice == bundle_idx)
        # Use a list here, because Streamlines don't support item assignment:
        select_sl = list(xform_sl[select_idx])
        if len(select_sl) == 0:
            fiber_groups[bundle] = dts.Streamlines([])
            # There's nothing here, move to the next bundle:
            continue

        # Sub-sample min_dist_coords:
        min_dist_coords_bundle = min_dist_coords[select_idx]
        for idx in range(len(select_sl)):
            min0 = min_dist_coords_bundle[idx, bundle_idx, 0]
            min1 = min_dist_coords_bundle[idx, bundle_idx, 1]
            if min0 > min1:
                select_sl[idx] = select_sl[idx][::-1]
        # Set this to nibabel.Streamlines object for output:
        select_sl = dts.Streamlines(select_sl)
        fiber_groups[bundle] = select_sl

    return fiber_groups
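For context, the inclusion test used above can be sketched as a nearest-distance check of the streamline against each ROI's voxel coordinates. This is a hypothetical stand-in for the private helper, shown only for illustration:

from scipy.spatial.distance import cdist


def check_sl_with_inclusion_sketch(sl, include_rois, tol):
    # A streamline passes if it comes within `tol` of every inclusion
    # ROI. The distance matrices are returned so that the caller can
    # orient each streamline from ROI0 to ROI1, as done above.
    dist_matrices = []
    for roi_coords in include_rois:
        d = cdist(sl, roi_coords, 'euclidean')
        if d.min() > tol:
            return False, []
        dist_matrices.append(d)
    return True, dist_matrices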
Example 13
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for tract segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi',
                            'derivatives', 'dmriprep')
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="DTI")
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)

    clean_params = dict(return_idx=True)

    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    bundle_names=bundle_names,
                    scalars=["dti_fa", "dti_md"],
                    tracking_params=tracking_params,
                    segmentation_params=segmentation_params,
                    clean_params=clean_params)

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det_tractography.trk')
    sft = StatefulTractogram(streamlines, myafq.data_frame.dwi_file[0],
                             Space.VOX)
    save_tractogram(sft, sl_file, bbox_valid_check=False)

    mapping_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = load_tractogram(myafq.bundles[0], myafq.dwi_img[0])

    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict, myafq.dwi_img[0])
    npt.assert_(len(bundles['CST_R']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa

    # Test creation of file with bundle indices:
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-clean_tractography_idx.json'))  # noqa

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'ROIs'))

    # Test the CLI:
    print("Running the CLI:")

    # Bare bones config only points to the files
    config = dict(files=dict(dmriprep_path=dmriprep_path))

    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)

    cmd = "pyAFQ " + config_file
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))

    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa
Example 14
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for tract segmentation
    """
    tmpdir, bids_path, _ = get_temp_hardi()
    t1_path = op.join(tmpdir.name, "T1.nii.gz")
    nib.save(afd.read_mni_template(mask=True, weight="T1w"), t1_path)

    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="dti",
                           seed_mask=RoiMask(),
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)

    clean_params = dict(return_idx=True)

    myafq = api.AFQ(
        bids_path=bids_path,
        dmriprep='vistasoft',
        bundle_info=bundle_names,
        scalars=["dti_FA", "dti_MD",
                 TemplateScalar("T1", t1_path)],
        robust_tensor_fitting=True,
        tracking_params=tracking_params,
        segmentation_params=segmentation_params,
        clean_params=clean_params)

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine["01"])))

    mapping_file = op.join(
        myafq.results_dir["01"],
        'sub-01_ses-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(
        myafq.results_dir["01"],
        'sub-01_ses-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = load_tractogram(myafq.bundles["01"], myafq.img["01"])

    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict, myafq.img["01"])
    npt.assert_(len(bundles['CST_L']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.results_dir["01"], 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    # Test bundles exporting:
    myafq.export_indiv_bundles()
    assert op.exists(
        op.join(
            myafq.results_dir["01"], 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa

    tract_profile_fname = myafq.profiles["01"]
    tract_profiles = pd.read_csv(tract_profile_fname)
    assert tract_profiles.shape == (500, 6)

    myafq.tract_profile_plots  # accessing this attribute generates the plots
    assert op.exists(
        op.join(
            myafq.results_dir["01"], "tract_profile_plots",
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_fa_profile_plots.png'
        ))  # noqa

    assert op.exists(
        op.join(
            myafq.results_dir["01"], "tract_profile_plots",
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ_dti_md_profile_plots.png'
        ))  # noqa

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.results_dir["01"], 'bundles'))

    shutil.rmtree(op.join(myafq.results_dir["01"], 'ROIs'))
    os.remove(tract_profile_fname)

    # Test the CLI:
    print("Running the CLI:")

    # Set up config to use the same parameters as above:
    # ROI mask needs to be put in quotes in config
    tracking_params = dict(odf_model="DTI",
                           seed_mask="RoiMask()",
                           n_seeds=100,
                           random_seeds=True,
                           rng_seed=42)
    config = dict(BIDS=dict(bids_path=bids_path, dmriprep='vistasoft'),
                  DATA=dict(robust_tensor_fitting=True),
                  BUNDLES=dict(bundle_info=bundle_names,
                               scalars=[
                                   "dti_fa", "dti_md",
                                   f"TemplateScalar('T1', '{t1_path}')"
                               ]),
                  VIZ=dict(viz_backend="plotly_no_gif"),
                  TRACTOGRAPHY=tracking_params,
                  SEGMENTATION=segmentation_params,
                  CLEANING=clean_params)

    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)

    # save memory
    results_dir = myafq.results_dir["01"]
    del myafq
    gc.collect()

    cmd = "pyAFQ " + config_file
    completed_process = subprocess.run(cmd, shell=True, capture_output=True)
    if completed_process.returncode != 0:
        print(completed_process.stdout)
    print(completed_process.stderr)
    assert completed_process.returncode == 0
    # The tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(tract_profile_fname)

    assert from_file.shape == (500, 6)
    assert_series_equal(tract_profiles['dti_fa'], from_file['dti_fa'])

    # Make sure the CLI did indeed generate these:
    assert op.exists(
        op.join(results_dir, 'ROIs',
                'sub-01_ses-01_dwi_desc-ROI-CST_R-1-include.json'))

    assert op.exists(
        op.join(
            results_dir, 'bundles',
            'sub-01_ses-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'
        ))  # noqa
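Note the CLI invocation in this version of the test: unlike the earlier variants, which used os.system and could only check the exit code, it runs pyAFQ through subprocess.run with captured output, so stdout and stderr can be printed when the run fails. It also deletes the AFQ object and forces garbage collection before launching the CLI, to keep the test's peak memory use down.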