Example #1
File: test_api.py  Project: gkiar/pyAFQ
def get_temp_hardi():
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    bids_path = op.join(tmpdir.name, 'stanford_hardi')

    sub_path = op.join(tmpdir.name, 'stanford_hardi', 'derivatives',
                       'vistasoft', 'sub-01', 'ses-01', 'dwi')

    return tmpdir, bids_path, sub_path
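
A minimal usage sketch (not part of the original snippet; only the unpacking is guaranteed by the return statement above):

# Fetch the data once and reuse the paths in a test; keep a reference to
# tmpdir so the temporary directory is not cleaned up prematurely.
tmpdir, bids_path, sub_path = get_temp_hardi()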
Example #2
def test_AFQ_no_prealign():
    """
    Test that the API can run without prealignment
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    myafq = api.AFQ(dmriprep_path=op.join(tmpdir.name, 'stanford_hardi',
                                          'derivatives', 'dmriprep'),
                    sub_prefix='sub',
                    use_prealign=False)
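    # Exporting the ROIs exercises the template-to-subject mapping,
    # here computed without the affine prealignment step: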
    myafq.export_rois()
Example #3
def test_AFQ_data():
    """
    Test with some actual data
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    myafq = api.AFQ(preproc_path=op.join(tmpdir.name, 'stanford_hardi'),
                    sub_prefix='sub')
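    # The brain mask should share the spatial grid of the DWI data
    # and of the DTI model fit: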
    npt.assert_equal(nib.load(myafq.brain_mask[0]).shape,
                     nib.load(myafq['dwi_file'][0]).shape[:3])
    npt.assert_equal(nib.load(myafq.brain_mask[0]).shape,
                     nib.load(myafq.dti[0]).shape[:3])
Example #4
File: test_api.py  Project: jyeatman/pyAFQ
(Identical to Example #3.)
Example #5
def main():
    parser = ArgumentParser()
    parser.add_argument("--run-id", "-i", default=None)
    parser.add_argument("--data-path", "-p", default=None)
    parser.add_argument("--dataset", "-d", default="stanford_hardi")
    parser.add_argument("--setup", "-s", action="store_true")

    results = parser.parse_args()

    if results.setup:
        afd.organize_stanford_data()
        afd.fetch_templates()
        afd.fetch_hcp_atlas_16_bundles()
        afd.fetch_hcp_atlas_80_bundles()
        return 0

    orig_dp = op.join(afd.afq_home, results.dataset)
    bp = results.data_path if results.data_path else afd.afq_home

    dp = op.join(bp, results.dataset)
    if results.run_id:
        dp = op.join(dp, results.run_id)

    if dp != orig_dp:
        try:
            # Create the data directory
            os.makedirs(op.join(dp, 'derivatives'))

            # Make a symbolic link to the original
            os.symlink(op.join(orig_dp, 'derivatives', 'freesurfer'),
                       op.join(dp, 'derivatives', 'freesurfer'))
            os.symlink(op.join(orig_dp, 'derivatives', 'vistasoft'),
                       op.join(dp, 'derivatives', 'vistasoft'))
            os.symlink(op.join(orig_dp, 'dataset_description.json'),
                       op.join(dp, 'dataset_description.json'))
        except FileExistsError:
            pass

    # Run AFQ
    myafq = api.AFQ(bids_path=dp,
                    reg_template="mni_T2",
                    reg_subject="b0",
                    dmriprep='vistasoft',
                    viz_backend='plotly_no_gif')

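    # Visualize the bundles interactively and export tract-profile figures: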
    bundle_html = myafq.viz_bundles(export=True, n_points=50)
    plotly.io.show(bundle_html[0])

    myafq.plot_tract_profiles()
    fig_files = myafq.data_frame['tract_profiles_viz'][0]
Example #6
import os.path as op

import plotly

from AFQ import api
from AFQ.mask import RoiMask
import AFQ.data as afd

##########################################################################
# Get some example data
# ---------------------
#
# Retrieves the `Stanford HARDI dataset <https://purl.stanford.edu/ng782rw8378>`_.
#

afd.organize_stanford_data(clear_previous_afq=True)

##########################################################################
# Set tractography parameters (optional)
# ---------------------------------------
# We create a ``tracking_params`` dict, which we will pass to the AFQ
# object; it specifies that we want 10,000 seeds randomly distributed
# in the ROIs of every bundle.
#
# We only do this to make the example run faster and use less disk space.

tracking_params = dict(seed_mask=RoiMask(),
                       n_seeds=10000,
                       random_seeds=True,
                       rng_seed=42)
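
##########################################################################
# The dict would then be handed to the AFQ object. A minimal sketch,
# assuming the ``bids_path`` and ``dmriprep`` values used in Example #5:

myafq = api.AFQ(bids_path=op.join(afd.afq_home, 'stanford_hardi'),
                dmriprep='vistasoft',
                tracking_params=tracking_params)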
Example #7
def test_AFQ_data2():
    """
    Test with some actual data again, this time for tract segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    preafq_path = op.join(tmpdir.name, 'stanford_hardi',
                          'derivatives', 'preafq')
    myafq = api.AFQ(preafq_path=preafq_path,
                    sub_prefix='sub',
                    bundle_list=["SLF", "ARC", "CST", "FP"])

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
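    # Keep streamlines with more than 100 points and map them into
    # the subject's voxel space: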
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])

    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
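    # An identity matrix stands in for the prealign transform: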
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0],
                                'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))

    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_equal(len(bundles['CST_R']), 2)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'ROIs',
                     'CST_R_roi1_include.nii.gz'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'bundles',
                     'CST_R.trk'))

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)


    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                  'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                  'ROIs'))

    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + preafq_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'ROIs',
                     'CST_R_roi1_include.nii.gz'))

    assert op.exists(op.join(myafq.data_frame['results_dir'][0],
                     'bundles',
                     'CST_R.trk'))
Example #8
def test_AFQ_data_planes():
    """
    Test with some actual data again, this time for tract segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi', 'derivatives',
                            'dmriprep')
    seg_algo = "planes"
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    seg_algo=seg_algo,
                    bundle_names=bundle_names,
                    odf_model="DTI")

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.move_streamlines([s for s in streamlines if s.shape[0] > 100],
                             np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(myafq.data_frame.results_dir[0],
                      'sub-01_sess-01_dwiDTI_det_streamlines.trk')
    aus.write_trk(sl_file, streamlines, affine=myafq.dwi_affine[0])

    mapping_file = op.join(myafq.data_frame.results_dir[0],
                           'sub-01_sess-01_dwi_mapping.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(myafq.data_frame.results_dir[0],
                                'sub-01_sess-01_dwi_reg_prealign.npy')
    np.save(reg_prealign_file, np.eye(4))

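    # Load the tractogram and split it into the segmented bundles: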
    tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
    npt.assert_(len(bundles['CST_L']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0], 'ROIs'))

    # Test the CLI:
    print("Running the CLI:")
    cmd = "pyAFQ " + dmriprep_path
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'ROIs',
                'CST_R_roi1_include.nii.gz'))

    assert op.exists(
        op.join(myafq.data_frame['results_dir'][0], 'bundles', 'CST_R.trk'))


# def test_AFQ_data_recobundles():
#     tmpdir = nbtmp.InTemporaryDirectory()
#     afd.fetch_hcp(["100206"], hcp_bucket='hcp-openaccess', profile_name="hcp",
#                   path=tmpdir.name)
#     dmriprep_path = op.join(tmpdir.name, 'HCP', 'derivatives', 'dmriprep')
#     seg_algo = "recobundles"
#     bundle_names = ["F", "CST", "AF", "CC_ForcepsMajor"]
#     myafq = api.AFQ(dmriprep_path=dmriprep_path,
#                     sub_prefix='sub',
#                     seg_algo=seg_algo,
#                     bundle_names=bundle_names,
#                     odf_model="DTI",
#                     b0_threshold=15)

#     # Replace the streamlines with precomputed:
#     path_to_trk = dpd.fetcher.fetch_target_tractogram_hcp()
#     path_to_trk = dpd.fetcher.get_target_tractogram_hcp()
#     sl_file = op.join(myafq.data_frame.results_dir[0], 'sub-100206_sess-01_dwiDTI_det_streamlines.trk')
#     shutil.copy(path_to_trk, sl_file)
#     myafq.data_frame["streamlines_file"] = sl_file
#     print("here")
#     tgram = nib.streamlines.load(myafq.bundles[0]).tractogram
#     print("here")
#     bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict)
#     npt.assert_(len(bundles['CST_L']) > 0)
Example #9
"""
import os.path as op

import matplotlib.pyplot as plt
import nibabel as nib
import pandas as pd

from AFQ import api
import AFQ.data as afd


##########################################################################
# Get some example data
# ---------------------

afd.organize_stanford_data()
base_dir = op.join(op.expanduser('~'), 'AFQ_data', 'stanford_hardi')

##########################################################################
# Initialize an AFQ object:
# -------------------------

myafq = api.AFQ(op.join(afd.afq_home,
                        'stanford_hardi',
                        'derivatives',
                        'dmriprep'),
                sub_prefix='sub')

##########################################################################
# Reading in DTI FA
# -----------------
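#
# The snippet is truncated here; a minimal sketch of the next step,
# mirroring Example #10 (``dti_fa`` is the FA map computed by AFQ):

FA = nib.load(myafq.dti_fa[0]).get_data()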
Example #10
"""
==========================
AFQ API
==========================

An example using the AFQ API

"""
import os.path as op

import matplotlib.pyplot as plt
import nibabel as nib

from AFQ import api
import AFQ.data as afd

afd.organize_stanford_data()
base_dir = op.join(op.expanduser('~'), 'AFQ_data', 'stanford_hardi')
myafq = api.AFQ(preproc_path=op.join(afd.afq_home, 'stanford_hardi'),
                sub_prefix='sub')

FA = nib.load(myafq.dti_fa[0]).get_data()
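# (get_data() is deprecated in newer nibabel; get_fdata() is the replacement.)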

fig, ax = plt.subplots(1)
ax.matshow(FA[:, :, FA.shape[-1] // 2], cmap='viridis')
ax.axis("off")
plt.show()
Example #11
def test_AFQ_data_waypoint():
    """
    Test with some actual data again, this time for tract segmentation
    """
    tmpdir = nbtmp.InTemporaryDirectory()
    afd.organize_stanford_data(path=tmpdir.name)
    dmriprep_path = op.join(tmpdir.name, 'stanford_hardi',
                            'derivatives', 'dmriprep')
    bundle_names = ["SLF", "ARC", "CST", "FP"]
    tracking_params = dict(odf_model="DTI")
    segmentation_params = dict(filter_by_endpoints=False,
                               seg_algo="AFQ",
                               return_idx=True)

    clean_params = dict(return_idx=True)

    myafq = api.AFQ(dmriprep_path=dmriprep_path,
                    sub_prefix='sub',
                    bundle_names=bundle_names,
                    scalars=["dti_fa", "dti_md"],
                    tracking_params=tracking_params,
                    segmentation_params=segmentation_params,
                    clean_params=clean_params)

    # Replace the mapping and streamlines with precomputed:
    file_dict = afd.read_stanford_hardi_tractography()
    mapping = file_dict['mapping.nii.gz']
    streamlines = file_dict['tractography_subsampled.trk']
    streamlines = dts.Streamlines(
        dtu.transform_tracking_output(
            [s for s in streamlines if s.shape[0] > 100],
            np.linalg.inv(myafq.dwi_affine[0])))

    sl_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det_tractography.trk')
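    # Save in voxel space, skipping dipy's bounding-box validity check: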
    sft = StatefulTractogram(streamlines, myafq.data_frame.dwi_file[0],
                             Space.VOX)
    save_tractogram(sft, sl_file, bbox_valid_check=False)

    mapping_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_mapping_from-DWI_to_MNI_xfm.nii.gz')
    nib.save(mapping, mapping_file)
    reg_prealign_file = op.join(
        myafq.data_frame.results_dir[0],
        'sub-01_sess-01_dwi_prealign_from-DWI_to-MNI_xfm.npy')
    np.save(reg_prealign_file, np.eye(4))

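    # Load the segmented bundles and check that the right CST was found: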
    tgram = load_tractogram(myafq.bundles[0], myafq.dwi_img[0])

    bundles = aus.tgram_to_bundles(tgram, myafq.bundle_dict, myafq.dwi_img[0])
    npt.assert_(len(bundles['CST_R']) > 0)

    # Test ROI exporting:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))

    # Test bundles exporting:
    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa

    # Test creation of file with bundle indices:
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-clean_tractography_idx.json'))  # noqa

    tract_profiles = pd.read_csv(myafq.tract_profiles[0])
    assert tract_profiles.shape == (800, 5)

    # Before we run the CLI, we'll remove the bundles and ROI folders, to see
    # that the CLI generates them
    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'bundles'))

    shutil.rmtree(op.join(myafq.data_frame['results_dir'][0],
                          'ROIs'))

    # Test the CLI:
    print("Running the CLI:")

    # Bare bones config only points to the files
    config = dict(files=dict(dmriprep_path=dmriprep_path))

    config_file = op.join(tmpdir.name, "afq_config.toml")
    with open(config_file, 'w') as ff:
        toml.dump(config, ff)

    cmd = "pyAFQ " + config_file
    out = os.system(cmd)
    assert out == 0
    # The combined tract profiles should already exist from the CLI Run:
    from_file = pd.read_csv(op.join(myafq.afq_dir, 'tract_profiles.csv'))
    # And should be identical to what we would get by rerunning this:
    combined_profiles = myafq.combine_profiles()
    assert combined_profiles.shape == (800, 7)
    assert_frame_equal(combined_profiles, from_file)

    # Make sure the CLI did indeed generate these:
    myafq.export_rois()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'ROIs',
        'sub-01_sess-01_dwi_desc-ROI-CST_R-1-include.json'))

    myafq.export_bundles()
    assert op.exists(op.join(
        myafq.data_frame['results_dir'][0],
        'bundles',
        'sub-01_sess-01_dwi_space-RASMM_model-DTI_desc-det-AFQ-CST_L_tractography.trk'))  # noqa