Example #1
def make_bundle_dict(bundle_names=BUNDLES):
    """
    Create a bundle dictionary, needed for the segmentation

    Parameters
    ----------
    bundle_names : list, optional
        A list of the bundles to be used in this case. Default: all of them
    """
    templates = afd.read_templates()
    # For the arcuate, we need to rename a few of these and duplicate the SLF
    # ROI:
    templates['ARC_roi1_L'] = templates['SLF_roi1_L']
    templates['ARC_roi1_R'] = templates['SLF_roi1_R']
    templates['ARC_roi2_L'] = templates['SLFt_roi2_L']
    templates['ARC_roi2_R'] = templates['SLFt_roi2_R']

    afq_bundles = {}
    for name in bundle_names:
        # Consider hard coding since we might have different rules for
        # some tracts
        for hemi in ['_R', '_L']:
            afq_bundles[name + hemi] = {'ROIs': [templates[name + '_roi1' +
                                                           hemi],
                                                 templates[name + '_roi2' +
                                                           hemi]],
                                        'rules': [True, True]}
    return afq_bundles
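For orientation, a minimal usage sketch of the function above follows; it is only an illustration, assuming the module-level names used in the example (the `afd` alias for pyAFQ's data module and the default `BUNDLES` list) are already in place, and the bundle names passed here are chosen for demonstration.

# Hedged usage sketch for make_bundle_dict (assumes the imports and the
# BUNDLES constant defined by the surrounding module, as in the example):
bundle_dict = make_bundle_dict(bundle_names=["CST", "ILF"])
for bundle_name, bundle_info in bundle_dict.items():
    # Each entry pairs two inclusion ROIs with their inclusion rules:
    print(bundle_name, len(bundle_info['ROIs']), bundle_info['rules'])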
Example #2
def test_segment():
    dpd.fetch_stanford_hardi()
    hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
    hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
    hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
    hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    templates = afd.read_templates()
    bundles = {'CST_L': {'ROIs': [templates['CST_roi1_L'],
                                  templates['CST_roi2_L']],
                         'rules': [True, True]},
               'CST_R': {'ROIs': [templates['CST_roi1_R'],
                                  templates['CST_roi1_R']],
                         'rules': [True, True]}}

    fiber_groups = seg.segment(hardi_fdata,
                               hardi_fbval,
                               hardi_fbvec,
                               streamlines,
                               bundles,
                               mapping=mapping,
                               as_generator=True)

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # There happen to be 8 fibers in the right CST:
    CST_R_sl = list(fiber_groups['CST_R'])
    npt.assert_equal(len(CST_R_sl), 8)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]),
        CST_R_sl)
    npt.assert_equal(tract_profile, np.ones(100))
Example #3
def make_bundle_dict(bundle_names=BUNDLES):
    """
    Create a bundle dictionary, needed for the segmentation

    Parameters
    ----------
    bundle_names : list, optional
        A list of the bundles to be used in this case. Default: all of them
    """
    templates = afd.read_templates()
    # For the arcuate, we need to rename a few of these and duplicate the SLF
    # ROI:
    templates['ARC_roi1_L'] = templates['SLF_roi1_L']
    templates['ARC_roi1_R'] = templates['SLF_roi1_R']
    templates['ARC_roi2_L'] = templates['SLFt_roi2_L']
    templates['ARC_roi2_R'] = templates['SLFt_roi2_R']

    afq_bundles = {}
    # Each bundle gets a numeric identifier (to be stored in the tractogram)
    uid = 1
    for name in bundle_names:
        # Consider hard coding since we might have different rules for
        # some tracts
        for hemi in ['_R', '_L']:
            afq_bundles[name + hemi] = {'ROIs': [templates[name + '_roi1' +
                                                           hemi],
                                                 templates[name + '_roi2' +
                                                           hemi]],
                                        'rules': [True, True],
                                        'uid': uid}
            uid += 1

    return afq_bundles
Example #4
def test_BundleDict():
    """
    Tests bundle dict
    """

    # test defaults
    afq_bundles = api.BundleDict()

    # bundles restricted to a single hemisphere
    # NOTE: FA and FP cross the midline, so they are not counted here
    # NOTE: all other bundles generate one bundle per hemisphere
    num_hemi_bundles = (len(api.BUNDLES) - 2) * 2

    # bundles that cross the midline
    num_whole_bundles = 2

    assert len(afq_bundles) == num_hemi_bundles + num_whole_bundles

    # Arcuate Fasciculus
    afq_bundles = api.BundleDict(["ARC"])

    assert len(afq_bundles) == 2

    # Forceps Minor
    afq_bundles = api.BundleDict(["FA"])

    assert len(afq_bundles) == 1

    # Cingulum Hippocampus
    # not included but exists in templates
    afq_bundles = api.BundleDict(["HCC"])

    assert len(afq_bundles) == 2

    # Test "custom" bundle
    afq_templates = afd.read_templates()
    afq_bundles = api.BundleDict({
        "custom_bundle": {
            "ROIs": [afq_templates["FA_L"], afq_templates["FP_R"]],
            "rules": [True, True],
            "cross_midline": False,
            "uid": 1
        }
    })
    afq_bundles.get("custom_bundle")

    assert len(afq_bundles) == 1

    # Vertical Occipital Fasciculus
    # not included and does not exist in afq templates
    with pytest.raises(ValueError, match="VOF_R is not in AFQ templates"):
        afq_bundles = api.BundleDict(["VOF"])
        afq_bundles["VOF_R"]

    afq_bundles = api.BundleDict(["VOF"], seg_algo="reco80")
    assert len(afq_bundles) == 2

    afq_bundles = api.BundleDict(["whole_brain"], seg_algo="reco80")
    assert len(afq_bundles) == 1
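For context, here is a short hedged sketch of dict-style access into a BundleDict, as exercised by the test above; it assumes the same `api` import used by the test (pyAFQ's api module) and that each entry is a dict of ROI/rule fields like the custom-bundle case, with the exact keys depending on the pyAFQ version.

# Hedged sketch (assumption: `api` is pyAFQ's api module, as in the test):
from AFQ import api

afq_bundles = api.BundleDict(["CST"])   # one entry per hemisphere
for name in ["CST_L", "CST_R"]:
    bundle = afq_bundles[name]          # dict-style access, as in the test
    print(name, sorted(bundle.keys()))  # e.g. 'ROIs', 'rules', ...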
Example #5
def main():
    with open('config.json') as config_json:
        config = json.load(config_json)

    data_file = str(config['data_file'])
    data_bval = str(config['data_bval'])
    data_bvec = str(config['data_bvec'])

    img = nib.load(data_file)

    print("Calculating DTI...")
    if not op.exists('./dti_FA.nii.gz'):
        dti_params = dti.fit_dti(data_file, data_bval, data_bvec, out_dir='.')
    else:
        dti_params = {'FA': './dti_FA.nii.gz', 'params': './dti_params.nii.gz'}

    tg = nib.streamlines.load('csa_prob.trk').tractogram
    streamlines = tg.apply_affine(np.linalg.inv(img.affine)).streamlines

    # Use only a small portion of the streamlines, for expedience:
    streamlines = streamlines[::100]

    templates = afd.read_templates()
    bundle_names = ["CST", "ILF"]

    bundles = {}
    for name in bundle_names:
        for hemi in ['_R', '_L']:
            bundles[name + hemi] = {
                'ROIs': [
                    templates[name + '_roi1' + hemi],
                    templates[name + '_roi2' + hemi]
                ],
                'rules': [True, True]
            }

    print("Registering to template...")
    MNI_T2_img = dpd.read_mni_template()
    bvals, bvecs = read_bvals_bvecs(data_bval, data_bvec)
    gtab = gradient_table(bvals, bvecs, b0_threshold=100)
    mapping = reg.syn_register_dwi(data_file, gtab)
    reg.write_mapping(mapping, './mapping.nii.gz')

    print("Segmenting fiber groups...")
    fiber_groups = seg.segment(data_file,
                               data_bval,
                               data_bvec,
                               streamlines,
                               bundles,
                               reg_template=MNI_T2_img,
                               mapping=mapping,
                               as_generator=False,
                               affine=img.affine)
    """
Example #6
def make_bundle_dict(bundle_names=BUNDLES, seg_algo="afq", resample_to=False):
    """
    Create a bundle dictionary, needed for the segmentation

    Parameters
    ----------
    bundle_names : list, optional
        A list of the bundles to be used in this case. Default: all of them

    resample_to : Nifti1Image, optional
        If set, templates will be resampled to the affine and shape of this
        image.
    """
    if seg_algo == "afq":
        templates = afd.read_templates(resample_to=resample_to)
        callosal_templates = afd.read_callosum_templates(
            resample_to=resample_to)
        # For the arcuate, we need to rename a few of these and duplicate the
        # SLF ROI:
        templates['ARC_roi1_L'] = templates['SLF_roi1_L']
        templates['ARC_roi1_R'] = templates['SLF_roi1_R']
        templates['ARC_roi2_L'] = templates['SLFt_roi2_L']
        templates['ARC_roi2_R'] = templates['SLFt_roi2_R']

        afq_bundles = {}
        # Each bundle gets a numeric identifier (to be stored in the tractogram)
        uid = 1
        for name in bundle_names:
            # Consider hard coding since we might have different rules for
            # some tracts
            if name in ["FA", "FP"]:
                afq_bundles[name] = {
                    'ROIs': [
                        templates[name + "_L"], templates[name + "_R"],
                        callosal_templates["Callosum_midsag"]
                    ],
                    'rules': [True, True, True],
                    'prob_map':
                    templates[name + "_prob_map"],
                    'cross_midline':
                    True,
                    'uid':
                    uid
                }
                uid += 1
            # SLF is a special case, because it has an exclusion ROI:
            elif name == "SLF":
                for hemi in ['_R', '_L']:
                    afq_bundles[name + hemi] = {
                        'ROIs': [
                            templates[name + '_roi1' + hemi],
                            templates[name + '_roi2' + hemi],
                            templates["SLFt_roi2" + hemi]
                        ],
                        'rules': [True, True, False],
                        'prob_map':
                        templates[name + hemi + '_prob_map'],
                        'cross_midline':
                        False,
                        'uid':
                        uid
                    }
                    uid += 1
            else:
                for hemi in ['_R', '_L']:
                    afq_bundles[name + hemi] = {
                        'ROIs': [
                            templates[name + '_roi1' + hemi],
                            templates[name + '_roi2' + hemi]
                        ],
                        'rules': [True, True],
                        'prob_map':
                        templates[name + hemi + '_prob_map'],
                        'cross_midline':
                        False,
                        'uid':
                        uid
                    }

                    uid += 1

    elif seg_algo == "reco":
        afq_bundles = {}
        uid = 1
        bundle_dict = afd.read_hcp_atlas_16_bundles()
        afq_bundles["whole_brain"] = bundle_dict["whole_brain"]
        for name in bundle_names:
            if name in ['CCMid', 'CC_ForcepsMajor', 'CC_ForcepsMinor', 'MCP']:
                afq_bundles[name] = bundle_dict[name]
                afq_bundles[name]['uid'] = uid
                uid += 1
            else:
                for hemi in ["_R", "_L"]:
                    afq_bundles[name + hemi] = bundle_dict[name + hemi]
                    afq_bundles[name + hemi]['uid'] = uid
                    uid += 1
    else:
        raise ValueError("Input: %s is not a valid `seg_algo` input" % seg_algo)

    return afq_bundles
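For reference, a hedged usage sketch of this variant with its two extra parameters; the subject image path and bundle names below are placeholders, and the "reco" names are assumed to follow the HCP 16-bundle atlas returned by afd.read_hcp_atlas_16_bundles(), as in the reco branch above.

# Hedged usage sketch (assumes `nib` is nibabel and a subject image exists
# at the hypothetical path below):
subject_img = nib.load("subject_dwi.nii.gz")

# AFQ waypoint-ROI bundles, with templates resampled to the subject grid:
afq_bundles = make_bundle_dict(bundle_names=["CST", "SLF"],
                               seg_algo="afq",
                               resample_to=subject_img)

# RecoBundles atlas bundles; a "whole_brain" entry is added automatically:
reco_bundles = make_bundle_dict(bundle_names=["CST", "CCMid"],
                                seg_algo="reco")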
Example #7
def test_segment():

    templates = afd.read_templates()
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'prob_map': templates['CST_L_prob_map'],
            'cross_midline': None
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi1_R']],
            'rules': [True, True],
            'prob_map': templates['CST_R_prob_map'],
            'cross_midline': None
        }
    }

    segmentation = seg.Segmentation()
    segmentation.segment(bundles,
                         tg,
                         hardi_fdata,
                         hardi_fbval,
                         hardi_fbvec,
                         mapping=mapping)
    fiber_groups = segmentation.fiber_groups

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # Here's one of them:
    CST_R_sl = fiber_groups['CST_R']
    # Let's make sure there are streamlines in there:
    npt.assert_(len(CST_R_sl) > 0)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = afq_profile(np.ones(nib.load(hardi_fdata).shape[:3]),
                                CST_R_sl.streamlines, np.eye(4))
    npt.assert_almost_equal(tract_profile, np.ones(100))

    clean_sl = seg.clean_bundle(CST_R_sl)
    npt.assert_equal(len(clean_sl), len(CST_R_sl))

    # What if you don't have probability maps?
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'cross_midline': False
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi1_R']],
            'rules': [True, True],
            'cross_midline': False
        }
    }

    segmentation.segment(bundles,
                         tg,
                         hardi_fdata,
                         hardi_fbval,
                         hardi_fbvec,
                         mapping=mapping)
    fiber_groups = segmentation.fiber_groups

    # This condition should still hold
    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']) > 0)

    # Test with the return_idx kwarg set to True:
    segmentation = seg.Segmentation(return_idx=True)
    segmentation.segment(bundles,
                         tg,
                         hardi_fdata,
                         hardi_fbval,
                         hardi_fbvec,
                         mapping=mapping)
    fiber_groups = segmentation.fiber_groups

    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']['sl']) > 0)
    npt.assert_(len(fiber_groups['CST_R']['idx']) > 0)

    # get bundles for reco method
    bundles = afd.read_hcp_atlas_16_bundles()
    bundle_names = ['whole_brain', 'CST_R', 'CST_L']
    for key in list(bundles):
        if key not in bundle_names:
            bundles.pop(key, None)

    # Try recobundles method
    segmentation = seg.Segmentation(seg_algo='Reco',
                                    progressive=False,
                                    greater_than=10,
                                    rm_small_clusters=1,
                                    rng=np.random.RandomState(seed=8))
    fiber_groups = segmentation.segment(bundles, tg, hardi_fdata, hardi_fbval,
                                        hardi_fbvec)

    # This condition should still hold
    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']) > 0)

    # Test with the return_idx kwarg set to True:
    segmentation = seg.Segmentation(seg_algo='Reco',
                                    progressive=False,
                                    greater_than=10,
                                    rm_small_clusters=1,
                                    rng=np.random.RandomState(seed=8),
                                    return_idx=True)

    fiber_groups = segmentation.segment(bundles, tg, hardi_fdata, hardi_fbval,
                                        hardi_fbvec)
    fiber_groups = segmentation.fiber_groups

    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']['sl']) > 0)
    npt.assert_(len(fiber_groups['CST_R']['idx']) > 0)
Example #8
                             out_dir='.')
else:
    dti_params = {'FA': './dti_FA.nii.gz', 'params': './dti_params.nii.gz'}

print("Tracking...")
if not op.exists('dti_streamlines.trk'):
    streamlines = list(aft.track(dti_params['params']))
    aus.write_trk('./dti_streamlines.trk', streamlines, affine=img.affine)
else:
    tg = nib.streamlines.load('./dti_streamlines.trk').tractogram
    streamlines = tg.apply_affine(np.linalg.inv(img.affine)).streamlines

# Use only a small portion of the streamlines, for expedience:
streamlines = streamlines[::100]

templates = afd.read_templates()
bundle_names = ["CST", "ILF"]

bundles = {}
for name in bundle_names:
    for hemi in ['_R', '_L']:
        bundles[name + hemi] = {
            'ROIs': [
                templates[name + '_roi1' + hemi],
                templates[name + '_roi2' + hemi]
            ],
            'rules': [True, True]
        }

print("Registering to template...")
MNI_T2_img = dpd.read_mni_template()
Example #9
        aft.track(dti_params['params'],
                  directions="det",
                  seed_mask=wm_mask,
                  seeds=2,
                  stop_mask=FA,
                  stop_threshold=0.2,
                  step_size=step_size,
                  min_length=min_length_mm / step_size))
    aus.write_trk('./dti_streamlines.trk', streamlines, affine=img.affine)
else:
    tg = nib.streamlines.load('./dti_streamlines.trk').tractogram
    streamlines = tg.apply_affine(np.linalg.inv(img.affine)).streamlines

print("We're looking at: %s streamlines" % len(streamlines))

templates = afd.read_templates()
templates['ARC_roi1_L'] = templates['SLF_roi1_L']
templates['ARC_roi1_R'] = templates['SLF_roi1_R']
templates['ARC_roi2_L'] = templates['SLFt_roi2_L']
templates['ARC_roi2_R'] = templates['SLFt_roi2_R']


bundle_names = ["ATR", "CGC", "CST", "HCC", "IFO", "ILF", "SLF", "ARC", "UNC"]

bundles = {}
for name in bundle_names:
    for hemi in ['_R', '_L']:
        bundles[name + hemi] = {
            'ROIs': [templates[name + '_roi1' + hemi],
                     templates[name + '_roi2' + hemi]],
            'rules': [True, True],
Example #10
def test_segment():
    dpd.fetch_stanford_hardi()
    hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
    hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
    hardi_img = nib.load(hardi_fdata)
    hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
    hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines(streamlines[streamlines._lengths > 10],
                             np.linalg.inv(hardi_img.affine)))

    templates = afd.read_templates()
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'prob_map': templates['CST_L_prob_map'],
            'cross_midline': None
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi1_R']],
            'rules': [True, True],
            'prob_map': templates['CST_R_prob_map'],
            'cross_midline': None
        }
    }

    fiber_groups = seg.segment(hardi_fdata, hardi_fbval, hardi_fbvec,
                               streamlines, bundles, mapping)

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # Here's one of them:
    CST_R_sl = fiber_groups['CST_R']
    # Let's make sure there are streamlines in there:
    npt.assert_(len(CST_R_sl) > 0)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]), CST_R_sl)
    npt.assert_almost_equal(tract_profile, np.ones(100))

    # Test providing an array input to calculate_tract_profile:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]),
        seg._resample_bundle(CST_R_sl, 100))

    npt.assert_almost_equal(tract_profile, np.ones(100))
    clean_sl = seg.clean_fiber_group(CST_R_sl)
    # Since there are only 8 streamlines here, nothing should happen:
    npt.assert_equal(clean_sl, CST_R_sl)

    # Setting minimum number of streamlines to a smaller number and
    # threshold to a relatively small number will exclude some streamlines:
    clean_sl = seg.clean_fiber_group(CST_R_sl, min_sl=2, clean_threshold=2)
    npt.assert_equal(len(clean_sl), 3)

    # What if you don't have probability maps?
    bundles = {
        'CST_L': {
            'ROIs': [templates['CST_roi1_L'], templates['CST_roi2_L']],
            'rules': [True, True],
            'cross_midline': False
        },
        'CST_R': {
            'ROIs': [templates['CST_roi1_R'], templates['CST_roi1_R']],
            'rules': [True, True],
            'cross_midline': False
        }
    }

    fiber_groups = seg.segment(hardi_fdata, hardi_fbval, hardi_fbvec,
                               streamlines, bundles, mapping)

    # This condition should still hold
    npt.assert_equal(len(fiber_groups), 2)
    npt.assert_(len(fiber_groups['CST_R']) > 0)
Example #11
def main():
    with open('config.json') as config_json:
        config = json.load(config_json)

    # Paths to data
    data_file = str(config['data_file'])
    data_bval = str(config['data_bval'])
    data_bvec = str(config['data_bvec'])

    img = nib.load(data_file)
    """
	print("Calculating DTI...")
	if not op.exists('./dti_FA.nii.gz'):
	    dti_params = dti.fit_dti(data_file, data_bval, data_bvec, out_dir='.')
	else:
	    dti_params = {'FA': './dti_FA.nii.gz',
			  'params': './dti_params.nii.gz'}
	"""
    #tg = nib.streamlines.load('track.trk').tractogram

    tg = nib.streamlines.load(config['tck_data']).tractogram
    streamlines = tg.apply_affine(np.linalg.inv(img.affine)).streamlines

    # Use only a small portion of the streamlines, for expedience:
    #streamlines = streamlines[::100]

    templates = afd.read_templates()
    bundle_names = ["CST", "ILF"]

    bundles = {}
    for name in bundle_names:
        for hemi in ['_R', '_L']:
            bundles[name + hemi] = {
                'ROIs': [
                    templates[name + '_roi1' + hemi],
                    templates[name + '_roi2' + hemi]
                ],
                'rules': [True, True]
            }

    print("Registering to template...")
    if not op.exists('mapping.nii.gz'):
        gtab = gradient_table(data_bval, data_bvec)
        mapping = reg.syn_register_dwi(data_file, gtab)
        reg.write_mapping(mapping, './mapping.nii.gz')
    else:
        mapping = reg.read_mapping('./mapping.nii.gz', img, MNI_T2_img)
    """
	MNI_T2_img = dpd.read_mni_template()
	bvals, bvecs = read_bvals_bvecs(data_bval, data_bvec)
	gtab = gradient_table(bvals, bvecs, b0_threshold=100)
	mapping = reg.syn_register_dwi(data_file, gtab)
	reg.write_mapping(mapping, './mapping.nii.gz')
	"""

    print("Segmenting fiber groups...")
    fiber_groups = seg.segment(data_file,
                               data_bval,
                               data_bvec,
                               streamlines,
                               bundles,
                               reg_template=MNI_T2_img,
                               mapping=mapping,
                               as_generator=False,
                               affine=img.affine)

    path = os.getcwd() + '/tract1/'
    if not os.path.exists(path):
        os.makedirs(path)

    for fg in fiber_groups:
        streamlines = fiber_groups[fg]
        fname = fg + ".tck"
        trg = nib.streamlines.Tractogram(streamlines,
                                         affine_to_rasmm=img.affine)
        nib.streamlines.save(trg, path + fname)
Example #12
def make_bundle_dict(bundle_names=BUNDLES):
    """
    Create a bundle dictionary, needed for the segmentation

    Parameters
    ----------
    bundle_names : list, optional
        A list of the bundles to be used in this case. Default: all of them
    """
    templates = afd.read_templates()
    callosal_templates = afd.read_callosum_templates()
    # For the arcuate, we need to rename a few of these and duplicate the SLF
    # ROI:
    templates['ARC_roi1_L'] = templates['SLF_roi1_L']
    templates['ARC_roi1_R'] = templates['SLF_roi1_R']
    templates['ARC_roi2_L'] = templates['SLFt_roi2_L']
    templates['ARC_roi2_R'] = templates['SLFt_roi2_R']

    afq_bundles = {}
    # Each bundle gets a numeric identifier (to be stored in the tractogram)
    uid = 1
    for name in bundle_names:
        # Consider hard coding since we might have different rules for
        # some tracts
        if name in ["FA", "FP"]:
            afq_bundles[name] = {
                'ROIs': [
                    templates[name + "_L"], templates[name + "_R"],
                    callosal_templates["Callosum_midsag"]
                ],
                'rules': [True, True, True],
                'prob_map': templates[name + "_prob_map"],
                'cross_midline': True,
                'uid': uid
            }
            uid += 1
        # SLF is a special case, because it has an exclusion ROI:
        elif name == "SLF":
            for hemi in ['_R', '_L']:
                afq_bundles[name + hemi] = {
                    'ROIs': [
                        templates[name + '_roi1' + hemi],
                        templates[name + '_roi2' + hemi],
                        templates["SLFt_roi2" + hemi]
                    ],
                    'rules': [True, True, False],
                    'prob_map': templates[name + hemi + '_prob_map'],
                    'cross_midline': False,
                    'uid': uid
                }
                uid += 1
        else:
            for hemi in ['_R', '_L']:
                afq_bundles[name + hemi] = {
                    'ROIs': [
                        templates[name + '_roi1' + hemi],
                        templates[name + '_roi2' + hemi]
                    ],
                    'rules': [True, True],
                    'prob_map': templates[name + hemi + '_prob_map'],
                    'cross_midline': False,
                    'uid': uid
                }
                uid += 1

    return afq_bundles
Example #13
def test_segment():
    dpd.fetch_stanford_hardi()
    hardi_dir = op.join(fetcher.dipy_home, "stanford_hardi")
    hardi_fdata = op.join(hardi_dir, "HARDI150.nii.gz")
    hardi_fbval = op.join(hardi_dir, "HARDI150.bval")
    hardi_fbvec = op.join(hardi_dir, "HARDI150.bvec")
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    templates = afd.read_templates()
    bundles = {'CST_L': {'ROIs': [templates['CST_roi1_L'],
                                  templates['CST_roi2_L']],
                         'rules': [True, True],
                         'prob_map': templates['CST_L_prob_map'],
                         'cross_midline': False},
               'CST_R': {'ROIs': [templates['CST_roi1_R'],
                                  templates['CST_roi1_R']],
                         'rules': [True, True],
                         'prob_map': templates['CST_R_prob_map'],
                         'cross_midline': False}}

    fiber_groups = seg.segment(hardi_fdata,
                               hardi_fbval,
                               hardi_fbvec,
                               streamlines,
                               bundles,
                               mapping=mapping,
                               as_generator=True)

    # We asked for 2 fiber groups:
    npt.assert_equal(len(fiber_groups), 2)
    # There happen to be 5 fibers in the right CST:
    CST_R_sl = fiber_groups['CST_R']
    npt.assert_equal(len(CST_R_sl), 5)
    # Calculate the tract profile for a volume of all-ones:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]),
        CST_R_sl)
    npt.assert_almost_equal(tract_profile, np.ones(100))

    # Test providing an array input to calculate_tract_profile:
    tract_profile = seg.calculate_tract_profile(
        np.ones(nib.load(hardi_fdata).shape[:3]),
        seg._resample_bundle(CST_R_sl, 100))


    npt.assert_almost_equal(tract_profile, np.ones(100))
    clean_sl = seg.clean_fiber_group(CST_R_sl)
    # Since there are only 5 streamlines here, nothing should happen:
    npt.assert_equal(clean_sl, CST_R_sl)

    # Setting minimum number of streamlines to a smaller number and
    # threshold to a relatively small number will exclude some streamlines:
    clean_sl = seg.clean_fiber_group(CST_R_sl, min_sl=2, clean_threshold=2)
    npt.assert_equal(len(clean_sl), 3)

    # What if you don't have probability maps?
    bundles = {'CST_L': {'ROIs': [templates['CST_roi1_L'],
                                  templates['CST_roi2_L']],
                         'rules': [True, True],
                         'cross_midline': False},
               'CST_R': {'ROIs': [templates['CST_roi1_R'],
                                  templates['CST_roi1_R']],
                         'rules': [True, True],
                         'cross_midline': False}}

    fiber_groups = seg.segment(hardi_fdata,
                               hardi_fbval,
                               hardi_fbvec,
                               streamlines,
                               bundles,
                               mapping=mapping,
                               as_generator=True)

    # This condition should still hold
    npt.assert_equal(len(fiber_groups), 2)
    # But one of the streamlines has switched identities without the
    # probability map to guide selection
    npt.assert_equal(len(fiber_groups['CST_R']), 6)