Esempio n. 1
0
def responders(l2_dir,
               roi="DSURQEc_ctx",
               data_root="~/ni_data/ofM.dr",
               roi_root="~/ni_data/templates/roi"):
    """Collect per-voxel t-statistics inside an ROI for every subject of a
    level-2 analysis and write them to ``ctx_responders.csv`` in that
    analysis directory.

    Parameters
    ----------
    l2_dir : str
        Name of the level-2 analysis directory under ``<data_root>/l2/``.
    roi : str
        Basename (without ``.nii.gz`` extension) of the ROI mask located
        under ``roi_root``.
    data_root : str
        Root of the data tree; ``~`` is expanded.
    roi_root : str
        Directory containing ROI masks; ``~`` is expanded.
    """
    # Raw string with escaped dots so the pattern matches the literal
    # filename "tstat1.nii.gz" instead of any character in those positions.
    data_regex = r"(?P<subject>.+)/tstat1\.nii\.gz"
    data_path = "{data_root}/l2/{l2_dir}/".format(data_root=data_root,
                                                  l2_dir=l2_dir)
    data_path = path.expanduser(data_path)
    roi_path = "{roi_root}/{roi}.nii.gz".format(roi_root=roi_root, roi=roi)
    roi_path = path.expanduser(roi_path)

    data_find = DataFinder()
    data_find.inputs.root_paths = data_path
    data_find.inputs.match_regex = path.join(data_path, data_regex)
    found_data = data_find.run().outputs

    masker = NiftiMasker(mask_img=roi_path)
    # Accumulate one frame per subject and concatenate once at the end;
    # the previous per-voxel pd.concat was quadratic in the total row count.
    frames = []
    for subject, data_file in zip(found_data.subject, found_data.out_paths):
        print(subject, data_file)
        img = nib.load(data_file)
        voxels = masker.fit_transform(img).flatten()
        # index=[None]*len(...) reproduces the all-None index that the old
        # row-by-row construction produced, keeping the CSV output identical.
        frames.append(pd.DataFrame({"subject": subject, "t": voxels},
                                   index=[None] * len(voxels)))
    voxeldf = pd.concat(frames) if frames else pd.DataFrame({})
    voxeldf.to_csv('{}/ctx_responders.csv'.format(data_path))
Esempio n. 2
0
def test_datafinder_unpack():
    """unpack_single=True should collapse a one-element match list to a scalar."""
    outdir = mkdtemp()
    single_res = os.path.join(outdir, "findme.txt")
    open(single_res, 'a').close()
    open(os.path.join(outdir, "dontfindme"), 'a').close()

    from nipype.interfaces.io import DataFinder
    df = DataFinder()
    df.inputs.root_paths = outdir
    # Raw string: '\.' in a plain literal is an invalid escape sequence.
    df.inputs.match_regex = r'.+/(?P<basename>.+)\.txt'
    df.inputs.unpack_single = True
    result = df.run()
    # Call form works on both Python 2 and 3 (original was a py2 statement).
    print(result.outputs.out_paths)
    yield assert_equal, result.outputs.out_paths, single_res

    # Clean up the temp dir (runs after the yielded assertion is consumed),
    # consistent with the other tests in this file.
    shutil.rmtree(outdir)
Esempio n. 3
0
def test_datafinder_unpack():
    """unpack_single=True should collapse a one-element match list to a scalar."""
    outdir = mkdtemp()
    single_res = os.path.join(outdir, "findme.txt")
    open(single_res, 'a').close()
    open(os.path.join(outdir, "dontfindme"), 'a').close()

    from nipype.interfaces.io import DataFinder
    df = DataFinder()
    df.inputs.root_paths = outdir
    # Raw string: '\.' in a plain literal is an invalid escape sequence.
    df.inputs.match_regex = r'.+/(?P<basename>.+)\.txt'
    df.inputs.unpack_single = True
    result = df.run()
    # Call form works on both Python 2 and 3 (original was a py2 statement).
    print(result.outputs.out_paths)
    yield assert_equal, result.outputs.out_paths, single_res

    # Clean up the temp dir (runs after the yielded assertion is consumed),
    # consistent with the other tests in this file.
    shutil.rmtree(outdir)
def test_DataFinder_outputs():
    """Verify metadata on DataFinder's output traits (none is expected)."""
    expected = {}
    outputs = DataFinder.output_spec()

    for trait_name, meta in expected.items():
        for meta_key, meta_value in meta.items():
            yield assert_equal, getattr(outputs.traits()[trait_name],
                                        meta_key), meta_value
Esempio n. 5
0
def test_DataFinder_outputs():
    """Verify metadata on DataFinder's output traits (none is expected)."""
    expected = {}
    outputs = DataFinder.output_spec()

    for trait_name, meta in expected.items():
        for meta_key, meta_value in meta.items():
            yield assert_equal, getattr(outputs.traits()[trait_name],
                                        meta_key), meta_value
Esempio n. 6
0
def test_datafinder_depth():
    """min_depth/max_depth should bound which nesting levels are returned."""
    outdir = mkdtemp()
    os.makedirs(os.path.join(outdir, '0', '1', '2', '3'))

    from nipype.interfaces.io import DataFinder
    df = DataFinder()
    df.inputs.root_paths = os.path.join(outdir, '0')
    # Sweep every valid (min, max) depth pair over the 0/1/2/3 hierarchy.
    for lo in range(4):
        for hi in range(lo, 4):
            df.inputs.min_depth = lo
            df.inputs.max_depth = hi
            result = df.run()
            expected = [str(level) for level in range(lo, hi + 1)]
            for found, want in zip(result.outputs.out_paths, expected):
                yield assert_equal, os.path.split(found)[1], want

    shutil.rmtree(outdir)
Esempio n. 7
0
def test_datafinder_depth():
    """min_depth/max_depth should bound which nesting levels are returned."""
    outdir = mkdtemp()
    os.makedirs(os.path.join(outdir, '0', '1', '2', '3'))

    from nipype.interfaces.io import DataFinder
    df = DataFinder()
    df.inputs.root_paths = os.path.join(outdir, '0')
    # Sweep every valid (min, max) depth pair over the 0/1/2/3 hierarchy.
    for lo in range(4):
        for hi in range(lo, 4):
            df.inputs.min_depth = lo
            df.inputs.max_depth = hi
            result = df.run()
            expected = [str(level) for level in range(lo, hi + 1)]
            for found, want in zip(result.outputs.out_paths, expected):
                yield assert_equal, os.path.split(found)[1], want

    shutil.rmtree(outdir)
Esempio n. 8
0
def responders(
    l2_dir,
    roi="dsurqec_200micron_roi-dr",
    data_root="~/ni_data/ofM.dr",
    roi_root='/usr/share/mouse-brain-atlases',
    save_inplace=True,
    save_as='',
):
    """Collect per-voxel t-statistics inside an ROI for every subject of a
    level-2 analysis and save them as a CSV table.

    Parameters
    ----------
    l2_dir : str
        Name of the level-2 analysis directory under ``<data_root>/l2/``.
    roi : str
        Basename (without ``.nii`` extension) of the ROI mask under ``roi_root``.
    data_root : str
        Root of the data tree; ``~`` is expanded.
    roi_root : str
        Directory containing ROI masks; ``~`` is expanded.
    save_inplace : bool
        If True, write ``ctx_responders.csv`` into the l2 directory;
        otherwise write to ``save_as``.
    save_as : str
        Output CSV path used when ``save_inplace`` is False.
    """
    # Raw string so the '\.' escapes reach the regex engine instead of being
    # invalid string-literal escapes.
    data_regex = r"(?P<subject>.+)/.*?_tstat\.nii\.gz"
    data_path = "{data_root}/l2/{l2_dir}/".format(data_root=data_root,
                                                  l2_dir=l2_dir)
    data_path = path.expanduser(data_path)
    roi_path = "{roi_root}/{roi}.nii".format(roi_root=roi_root, roi=roi)
    roi_path = path.expanduser(roi_path)

    data_find = DataFinder()
    data_find.inputs.root_paths = data_path
    data_find.inputs.match_regex = path.join(data_path, data_regex)
    found_data = data_find.run().outputs
    print(found_data)

    masker = NiftiMasker(mask_img=roi_path)
    # Accumulate one frame per subject and concatenate once at the end;
    # the previous per-voxel pd.concat was quadratic in the total row count.
    frames = []
    for subject, data_file in zip(found_data.subject, found_data.out_paths):
        print(subject, data_file)
        img = nib.load(data_file)
        voxels = masker.fit_transform(img).flatten()
        # index=[None]*len(...) reproduces the all-None index that the old
        # row-by-row construction produced, keeping the CSV output identical.
        frames.append(pd.DataFrame({"subject": subject, "t": voxels},
                                   index=[None] * len(voxels)))
    voxeldf = pd.concat(frames) if frames else pd.DataFrame({})
    if save_inplace:
        voxeldf.to_csv('{}/ctx_responders.csv'.format(data_path))
    else:
        # NOTE(review): save_as defaults to '' — callers must pass a real
        # path when save_inplace is False.
        voxeldf.to_csv(path.abspath(path.expanduser(save_as)))
Esempio n. 9
0
def test_datafinder_copydir():
    """match_regex should select only .txt files and ignore_regexes should
    drop any match whose path contains an ignore pattern."""
    outdir = mkdtemp()
    for fname in ("findme.txt", "dontfindme", "dontfindmealsotxt",
                  "findmetoo.txt", "ignoreme.txt", "alsoignore.txt"):
        open(os.path.join(outdir, fname), 'a').close()

    from nipype.interfaces.io import DataFinder
    df = DataFinder()
    df.inputs.root_paths = outdir
    # Raw string: '\.' in a plain literal is an invalid escape sequence.
    df.inputs.match_regex = r'.+/(?P<basename>.+)\.txt'
    df.inputs.ignore_regexes = ['ignore']
    result = df.run()
    expected = ["findme.txt", "findmetoo.txt"]
    for found, expected_fname in zip(result.outputs.out_paths, expected):
        yield assert_equal, os.path.split(found)[1], expected_fname

    yield assert_equal, result.outputs.basename, ["findme", "findmetoo"]

    shutil.rmtree(outdir)
Esempio n. 10
0
def test_datafinder_copydir():
    """match_regex should select only .txt files and ignore_regexes should
    drop any match whose path contains an ignore pattern."""
    outdir = mkdtemp()
    for fname in ("findme.txt", "dontfindme", "dontfindmealsotxt",
                  "findmetoo.txt", "ignoreme.txt", "alsoignore.txt"):
        open(os.path.join(outdir, fname), 'a').close()

    from nipype.interfaces.io import DataFinder
    df = DataFinder()
    df.inputs.root_paths = outdir
    # Raw string: '\.' in a plain literal is an invalid escape sequence.
    df.inputs.match_regex = r'.+/(?P<basename>.+)\.txt'
    df.inputs.ignore_regexes = ['ignore']
    result = df.run()
    expected = ["findme.txt", "findmetoo.txt"]
    for found, expected_fname in zip(result.outputs.out_paths, expected):
        yield assert_equal, os.path.split(found)[1], expected_fname

    yield assert_equal, result.outputs.basename, ["findme", "findmetoo"]

    shutil.rmtree(outdir)
Esempio n. 11
0
def rawdataChecker(input_file):
    """Resolve a structural input to the file path(s) recon-all should get.

    If ``input_file`` is a single DICOM slice (``.dcm``), return the paths of
    every DICOM file in the same directory (the rest of the series).
    Otherwise (e.g. a multi-dimensional NIfTI) return the path unchanged.
    """
    # Guard clause: non-DICOM inputs pass straight through.
    if not input_file.endswith('.dcm'):
        return input_file

    from nipype.interfaces.io import DataFinder
    from os import path
    from nipype import Node

    # Find every DICOM file alongside the given slice.
    finder = Node(DataFinder(), name='t1Finder')
    # Raw string: '\.' in a plain literal is an invalid escape sequence.
    finder.inputs.match_regex = r'.*\.dcm'
    finder.inputs.root_paths = path.split(input_file)[0]
    return finder.run().outputs.out_paths
def test_DataFinder_inputs():
    """Verify metadata on DataFinder's input traits."""
    expected = {
        'ignore_exception': {'nohash': True, 'usedefault': True},
        'ignore_regexes': {},
        'match_regex': {'usedefault': True},
        'max_depth': {},
        'min_depth': {},
        'root_paths': {'mandatory': True},
        'unpack_single': {'usedefault': True},
    }
    inputs = DataFinder.input_spec()

    for trait_name, meta in expected.items():
        for meta_key, meta_value in meta.items():
            yield assert_equal, getattr(inputs.traits()[trait_name],
                                        meta_key), meta_value
Esempio n. 13
0
def test_DataFinder_inputs():
    """Verify metadata on DataFinder's input traits."""
    expected = {
        'ignore_exception': {'nohash': True, 'usedefault': True},
        'ignore_regexes': {},
        'match_regex': {'usedefault': True},
        'max_depth': {},
        'min_depth': {},
        'root_paths': {'mandatory': True},
        'unpack_single': {'usedefault': True},
    }
    inputs = DataFinder.input_spec()

    for trait_name, meta in expected.items():
        for meta_key, meta_value in meta.items():
            yield assert_equal, getattr(inputs.traits()[trait_name],
                                        meta_key), meta_value
Esempio n. 14
0
                 mandatory_inputs=True,
                 name='input_node')

# Merge the path-building node's outputs with the extra file-name fields so
# the output node can expose all of them downstream.
mergedOutputs = pathBuildingNode.outputs.copyable_trait_names()
mergedOutputs.extend(fileNames.keys())

# mandatory_inputs=False: not every merged field is necessarily connected.
outputNode = Node(IdentityInterface(fields=mergedOutputs),
                  mandatory_inputs=False,
                  name='output_node')

# ### Structural Data (T1) preprocessing

# In[ ]:

# Setup a datafinder to find the paths to the specific DICOM files
t1FinderNode = Node(DataFinder(), name='t1Finder')
# NOTE(review): '\.' in a non-raw string is an invalid escape sequence —
# consider r'.*\.dcm'.
t1FinderNode.inputs.match_regex = '.*\.dcm'
#df = DataFinder(root_paths = T1RawFolder, match_regex = '.*\.dcm')
#firstFile = df.run().outputs.out_paths[0]

# Set recon-all parameters
reconallNode = Node(freesurfer.preprocess.ReconAll(), name='reconall')
#reconallNode.inputs.T1_files = firstFile
#reconallNode.inputs.subjects_dir = subPath
reconallNode.inputs.subject_id = reconallFolderName
# 'all' runs the complete FreeSurfer recon-all pipeline.
reconallNode.inputs.directive = 'all'
# Use every available core for the OpenMP-parallel recon-all stages.
reconallNode.inputs.openmp = cpu_count()

# Convert the T1 mgz image to nifti format for later usage
mriConverter = Node(freesurfer.preprocess.MRIConvert(),
                    name='convertAparcAseg')
Esempio n. 15
0
    return SC_cap_row_filename, SC_dist_row_filename


import numpy as np

# Debug driver: recompute one connectivity row for a single hard-coded
# subject using precomputed intermediates stored as .npy files.
debugPath = '/Users/srothmei/Desktop/charite/toronto/Adalberto/debug/'

roi = 68
subid = 'Adalberto'
tracksPath = debugPath

# Precomputed white-matter border data.
wmBorder_file = debugPath + 'wmborder.npy'

wmborder = np.load(wmBorder_file)

# Precomputed affine transformation matrix.
affine_matrix_file = debugPath + 'affine_matrix.npy'

affine_matrix = np.load(affine_matrix_file)

from nipype import Node
from nipype.interfaces.io import DataFinder
# Collect every .npy track file below tracksPath.
# NOTE(review): '\.' in a non-raw string is an invalid escape sequence —
# consider r'.*\.npy'.
tckFinder = Node(DataFinder(match_regex='.*\.npy', root_paths=tracksPath),
                 name='tckFinder')

res = tckFinder.run()
track_files = res.outputs.out_paths

#
compute_connectivity_row(roi, subid, affine_matrix, wmborder, tracksPath,
                         track_files)
Esempio n. 16
0
# Demo: find all series/basename pairs of .nii.gz files under /tmp/exp_01.
from nipype.interfaces.io import DataFinder

df = DataFinder()
df.inputs.root_paths = '/tmp/exp_01'
# Raw string so '\.' reaches the regex engine, and both dots escaped so the
# pattern matches the literal ".nii.gz" suffix only (the original's
# unescaped second dot also matched e.g. "niiXgz").
df.inputs.match_regex = r'.+/(?P<series_dir>.+)/(?P<basename>.+)\.nii\.gz'
result = df.run()
print(result.outputs.out_paths)