Code example #1
def test_SelectFiles_outputs():
    output_map = dict()
    outputs = SelectFiles.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
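# For context, a minimal sketch (paths and the 'anat'/'subject_id' names are
# assumed) of what SelectFiles does with a template at runtime; the template
# fields become input traits, which is the kind of metadata the test above
# inspects via output_spec/input_spec:
from nipype.interfaces.io import SelectFiles

templates = {'anat': '{subject_id}/anat/{subject_id}_T1w.nii.gz'}
sf = SelectFiles(templates, base_directory='/data')  # placeholder directory
sf.inputs.subject_id = 'sub-01'
# sf.run().outputs.anat would glob /data/sub-01/anat/sub-01_T1w.nii.gz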
Code example #3
def define_workflow(subject_list, run_list, experiment_dir, output_dir):
    """run the smooth workflow given subject and runs"""
    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=4, t_size=-1, output_type='NIFTI'),
                   name="extract")

    # Smooth - image smoothing
    smooth = Node(Smooth(fwhm=[8, 8, 8]), name="smooth")

    # Mask - applying mask to smoothed
    # mask_func = Node(ApplyMask(output_type='NIFTI'),
    # name="mask_func")

    # Infosource - a function-free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'run_num']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list),
                            ('run_num', run_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    func_file = opj(
        'sub-{subject_id}', 'func',
        'sub-{subject_id}_task-tsl_run-{run_num}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
    )
    templates = {'func': func_file}
    selectfiles = Node(SelectFiles(templates, base_directory=data_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    ## Use the following DataSink output substitutions
    substitutions = [('_subject_id_', 'sub-'), ('ssub', 'sub'),
                     ('_space-MNI152NLin2009cAsym_desc-preproc_', '_fwhm-8_'),
                     ('_fwhm_', ''), ('_roi', '')]
    substitutions += [('_run_num_%s' % r, '') for r in run_list]
    datasink.inputs.substitutions = substitutions

    # Create a preprocessing workflow
    preproc = Workflow(name='preproc')
    preproc.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the preprocessing workflow (spm smooth)
    preproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                                ('run_num', 'run_num')]),
                     (selectfiles, extract, [('func', 'in_file')]),
                     (extract, smooth, [('roi_file', 'in_files')]),
                     (smooth, datasink, [('smoothed_files', 'preproc.@smooth')
                                         ])])
    return preproc
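# Hedged usage sketch (subject/run values and paths assumed; note that
# data_dir and working_dir must exist as module-level globals for
# define_workflow to run as written):
# preproc = define_workflow(subject_list=['001', '002'], run_list=[1, 2],
#                           experiment_dir='/output/experiment',
#                           output_dir='datasink')
# preproc.run('MultiProc', plugin_args={'n_procs': 4})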
Code example #4
def test_SelectFiles_inputs():
    input_map = dict(
        base_directory=dict(),
        force_lists=dict(usedefault=True, ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        raise_on_empty=dict(usedefault=True, ),
        sort_filelist=dict(usedefault=True, ),
    )
    inputs = SelectFiles.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Code example #5
def test_SelectFiles_inputs():
    input_map = dict(
        base_directory=dict(),
        force_lists=dict(usedefault=True, ),
        ignore_exception=dict(
            nohash=True,
            usedefault=True,
        ),
        raise_on_empty=dict(usedefault=True, ),
        sort_filelist=dict(usedefault=True, ),
    )
    inputs = SelectFiles.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Code example #6
def runNipypeBet(controller, subject_list, anatomical_id, proj_directory):

    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list)]

    #anat_file = opj('{subject_id}','{subject_id}_{anatomical_id}.nii')
    separator = ''
    concat_words = ('{subject_id}_', anatomical_id, '.nii.gz')
    anat_file_name = separator.join(concat_words)

    if controller.b_radiological_convention.get():
        anat_file = opj('{subject_id}', anat_file_name)
    else:
        anat_file = opj('{subject_id}', 'Intermediate_Files', 'Original_Files',
                        anat_file_name)

    templates = {'anat': anat_file}

    selectfiles = Node(SelectFiles(templates, base_directory=proj_directory),
                       name="selectfiles")

    skullstrip = Node(BET(robust=True,
                          frac=0.5,
                          vertical_gradient=0,
                          output_type='NIFTI_GZ'),
                      name="skullstrip")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=proj_directory), name="datasink")

    wf_sub = Workflow(name="wf_sub")
    wf_sub.base_dir = proj_directory
    wf_sub.connect(infosource, "subject_id", selectfiles, "subject_id")
    wf_sub.connect(selectfiles, "anat", skullstrip, "in_file")
    wf_sub.connect(skullstrip, "out_file", datasink, "bet.@out_file")

    substitutions = [('%s_brain' % (anatomical_id), 'brain')]
    # Feed the substitution strings to the DataSink node
    datasink.inputs.substitutions = substitutions
    # Run the workflow again with the substitutions in place
    wf_sub.run(plugin='MultiProc')

    return 'brain'
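# Hypothetical invocation (the controller object comes from the calling GUI;
# subject ids, anatomical id and project directory are assumed):
# runNipypeBet(controller, ['subj01', 'subj02'], 'mprage', '/project/dir')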
Code example #7
                            write_composite_transform=True),
               name='antsreg')

###
# Input & Output Stream

# Infosource - a function-free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# SelectFiles - to grab the data (alternative to DataGrabber)
anat_file = opj('{subject_id}', 'ses-test', 'anat',
                '{subject_id}_ses-test_T1w.nii.gz')
templates = {'anat': anat_file}

selectfiles = Node(SelectFiles(templates, base_directory='/data/ds000114'),
                   name="selectfiles")

# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory=experiment_dir, container=output_dir),
                name="datasink")

# Use the following DataSink output substitutions
substitutions = [('_subject_id_', '')]
datasink.inputs.substitutions = substitutions

###
# Specify Normalization Workflow & Connect Nodes

# Initiation of the ANTS normalization workflow
regflow = Workflow(name='regflow')
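# Hedged continuation sketch (the snippet is cut off here): one plausible
# wiring of the nodes above, where 'antsreg' is the ANTs Registration node
# whose tail appears at the top of this example:
regflow.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),
                 (selectfiles, antsreg, [('anat', 'moving_image')]),
                 (antsreg, datasink, [('warped_image', 'antsreg.@warped_image'),
                                      ('composite_transform',
                                       'antsreg.@transform')])])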
Code example #8
min_clust_size = 25

# In[2]:

## File handling
# Identity node- select subjects
infosource = Node(IdentityInterface(fields=['subject_id', 'ROIs']),
                  name='infosource')
infosource.iterables = [('subject_id', subjects_list), ('ROIs', ROIs)]

# Data grabber- select fMRI and ROIs
templates = {
    'orig_func':
    preproc_dir + '/smoothed_filt_func/{subject_id}/func_filtered_smooth.nii'
}
selectfiles = Node(SelectFiles(templates), name='selectfiles')

# Datasink- where our select outputs will go
datasink = Node(DataSink(), name='datasink')
datasink.inputs.base_directory = output_dir
datasink.inputs.container = output_dir
substitutions = [('_subject_id_', ''),
                 ('_ROIs_..home..camachocm2..Box_home..CARS_rest..ROIs..', '')]
datasink.inputs.substitutions = substitutions

# In[3]:

## Seed-based level 1

# Extract ROI timeseries
ROI_timeseries = MapNode(ImageMeants(), name='ROI_timeseries',
                         iterfield=['mask'])  # iterfield requires a MapNode
Code example #9
                #'sub-MCS_RB',
                #'sub-MCS_RP',
                #'sub-MCS_SC',
                #'sub-MCS_ST',
                #'sub-MCS_TP',
                #'sub-MCS_VW']

# Infosource - a function-free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# SelectFiles - to grab the data (alternative to DataGrabber)
time_serie_file = opj('{subject_id}', 'parcellation_from_lasso/time_series.txt')

templates = {'time_series': time_serie_file}
selectfiles = Node(SelectFiles(templates, base_directory=preprocessing_path), name="selectfiles")

# experiment_dir must be defined before the datasink that uses it
experiment_dir = opj(preprocessing_path, 'output/')

datasink = Node(DataSink(base_directory=experiment_dir, container=output_dir), name="datasink")

preproc = Workflow(name='preproc')
preproc.base_dir = opj(experiment_dir, working_dir)

compute_correlation_matrix = Node(
    c.ComputeCorrelationMatrix(hypergraph_path=hypergraph_path, correlation_matrix_out_file=correlation_matrix_out_file,
                             correlation_matrix_plot_out_file=correlation_matrix_plot_out_file),
    name='compute_correlation_matrix')

preproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id')]),
                 (selectfiles, compute_correlation_matrix, [('time_series', 'time_series_path')]),
                 (compute_correlation_matrix, datasink,
                  # assumed completion; the original snippet is truncated here
                  [('correlation_matrix_out_file',
                    'preproc.@correlation_matrix')])])
Code example #10
def create_preprocessing_workflow(analysis_params, name='yesno_3T'):
    import os.path as op
    import nipype.pipeline as pe
    from nipype.interfaces import fsl
    from nipype.interfaces.utility import Function, Merge, IdentityInterface
    from nipype.interfaces.io import SelectFiles, DataSink
    from IPython import embed as shell

    # Importing of custom nodes from spynoza packages; assumes that spynoza is installed:
    # pip install git+https://github.com/spinoza-centre/spynoza.git@develop
    from spynoza.utils import get_scaninfo, pickfirst, average_over_runs, set_nifti_intercept_slope
    from spynoza.uniformization.workflows import create_non_uniformity_correct_4D_file
    from spynoza.unwarping.b0.workflows import create_B0_workflow
    from spynoza.motion_correction.workflows import create_motion_correction_workflow
    from spynoza.registration.workflows import create_registration_workflow
    from spynoza.filtering.nodes import sgfilter
    from spynoza.conversion.nodes import psc
    from spynoza.denoising.retroicor.workflows import create_retroicor_workflow
    from spynoza.masking.workflows import create_masks_from_surface_workflow
    from spynoza.glm.nodes import fit_nuisances

    ########################################################################################
    # nodes
    ########################################################################################

    input_node = pe.Node(
        IdentityInterface(fields=[
            'task',  # main
            'sub_id',  # main
            'ses_id',  # main
            'raw_data_dir',  # main
            'output_directory',  # main
            'sub_FS_id',  # main
            'FS_subject_dir',  # motion correction
            'RepetitionTime',  # motion correction
            'which_file_is_EPI_space',  # motion correction
            'standard_file',  # registration
            'topup_conf_file',  # unwarping
            'EchoTimeDiff',  # unwarping
            'EpiFactor',  # unwarping
            'SenseFactor',  # unwarping
            'WaterFatShift',  # unwarping
            'PhaseEncodingDirection',  # unwarping
            'EchoSpacing',  # unwarping (missing comma would merge with 'psc_func')
            'psc_func',  # percent signal change
            'sg_filter_window_length',  # temporal filtering
            'sg_filter_order',  # temporal filtering
            'SliceEncodingDirection',  # retroicor
            'PhysiologySampleRate',  # retroicor
            'SliceTiming',  # retroicor
            'SliceOrder',  # retroicor
            'NumberDummyScans',  # retroicor
            'MultiBandFactor',  # retroicor
            'hr_rvt',  # retroicor
            'av_func',  # extra
            'EchoTime',  # extra
            'bd_design_matrix_file',  # extra
        ]),
        name='inputspec')

    for param in analysis_params:
        setattr(input_node.inputs, param, analysis_params[param])

    # i/o node
    datasource_templates = dict(
        func=
        '{sub_id}/{ses_id}/func/{sub_id}_{ses_id}_task-{task}*_bold.nii.gz',
        magnitude='{sub_id}/{ses_id}/fmap/{sub_id}_{ses_id}*magnitude.nii.gz',
        phasediff='{sub_id}/{ses_id}/fmap/{sub_id}_{ses_id}*phasediff.nii.gz',
        #physio='{sub_id}/{ses_id}/func/*{task}*physio.*',
        #events='{sub_id}/{ses_id}/func/*{task}*_events.pickle',
        #eye='{sub_id}/{ses_id}/func/*{task}*_eyedata.edf'
    )
    datasource = pe.Node(SelectFiles(datasource_templates,
                                     sort_filelist=True,
                                     raise_on_empty=False),
                         name='datasource')

    output_node = pe.Node(IdentityInterface(
        fields=(['temporal_filtered_files', 'percent_signal_change_files'])),
                          name='outputspec')

    # nodes for setting the slope/intercept of incoming niftis to (1, 0)
    # this is apparently necessary for the B0 map files
    int_slope_B0_magnitude = pe.Node(Function(
        input_names=['in_file'],
        output_names=['out_file'],
        function=set_nifti_intercept_slope),
                                     name='int_slope_B0_magnitude')
    int_slope_B0_phasediff = pe.Node(Function(
        input_names=['in_file'],
        output_names=['out_file'],
        function=set_nifti_intercept_slope),
                                     name='int_slope_B0_phasediff')

    # reorient nodes
    reorient_epi = pe.MapNode(interface=fsl.Reorient2Std(),
                              name='reorient_epi',
                              iterfield=['in_file'])
    reorient_B0_magnitude = pe.Node(interface=fsl.Reorient2Std(),
                                    name='reorient_B0_magnitude')
    reorient_B0_phasediff = pe.Node(interface=fsl.Reorient2Std(),
                                    name='reorient_B0_phasediff')

    # bet_epi = pe.MapNode(interface=
    #     fsl.BET(frac=analysis_parameters['bet_f_value'], vertical_gradient = analysis_parameters['bet_g_value'],
    #             functional=True, mask = True), name='bet_epi', iterfield=['in_file'])

    datasink = pe.Node(DataSink(), name='sinker')
    datasink.inputs.parameterization = False

    ########################################################################################
    # workflow
    ########################################################################################

    # the actual top-level workflow
    preprocessing_workflow = pe.Workflow(name=name)
    preprocessing_workflow.base_dir = op.join(analysis_params['base_dir'],
                                              'temp/')

    # data source
    preprocessing_workflow.connect(input_node, 'raw_data_dir', datasource,
                                   'base_directory')
    preprocessing_workflow.connect(input_node, 'sub_id', datasource, 'sub_id')
    preprocessing_workflow.connect(input_node, 'ses_id', datasource, 'ses_id')
    preprocessing_workflow.connect(input_node, 'task', datasource, 'task')

    # and data sink
    preprocessing_workflow.connect(input_node, 'output_directory', datasink,
                                   'base_directory')

    # BET (we don't do this, because we expect the raw data in the bids folder to be betted
    # already for anonymization purposes)
    # preprocessing_workflow.connect(datasource, 'func', bet_epi, 'in_file')

    # non-uniformity correction
    # preprocessing_workflow.connect(bet_epi, 'out_file', nuc, 'in_file')
    # preprocessing_workflow.connect(datasource, 'func', nuc, 'in_file')

    # reorient images
    preprocessing_workflow.connect(datasource, 'func', reorient_epi, 'in_file')
    preprocessing_workflow.connect(datasource, 'magnitude',
                                   reorient_B0_magnitude, 'in_file')
    preprocessing_workflow.connect(datasource, 'phasediff',
                                   reorient_B0_phasediff, 'in_file')
    preprocessing_workflow.connect(reorient_epi, 'out_file', datasink,
                                   'reorient')

    #B0 field correction:
    if analysis_params['B0_or_topup'] == 'B0':
        # set slope/intercept to unity for B0 map
        preprocessing_workflow.connect(reorient_B0_magnitude, 'out_file',
                                       int_slope_B0_magnitude, 'in_file')
        preprocessing_workflow.connect(reorient_B0_phasediff, 'out_file',
                                       int_slope_B0_phasediff, 'in_file')
        #B0 field correction:
        if 'EchoSpacing' in analysis_params:
            B0_wf = create_B0_workflow(name='B0', scanner='siemens')
            preprocessing_workflow.connect(input_node, 'EchoSpacing', B0_wf,
                                           'inputspec.echo_spacing')
        else:
            B0_wf = create_B0_workflow(name='B0', scanner='philips')
            preprocessing_workflow.connect(input_node, 'WaterFatShift', B0_wf,
                                           'inputspec.wfs')
            preprocessing_workflow.connect(input_node, 'EpiFactor', B0_wf,
                                           'inputspec.epi_factor')
        preprocessing_workflow.connect(input_node, 'SenseFactor', B0_wf,
                                       'inputspec.acceleration')
        preprocessing_workflow.connect(reorient_epi, 'out_file', B0_wf,
                                       'inputspec.in_files')
        preprocessing_workflow.connect(int_slope_B0_magnitude, 'out_file',
                                       B0_wf, 'inputspec.fieldmap_mag')
        preprocessing_workflow.connect(int_slope_B0_phasediff, 'out_file',
                                       B0_wf, 'inputspec.fieldmap_pha')
        preprocessing_workflow.connect(input_node, 'EchoTimeDiff', B0_wf,
                                       'inputspec.te_diff')
        preprocessing_workflow.connect(input_node, 'PhaseEncodingDirection',
                                       B0_wf,
                                       'inputspec.phase_encoding_direction')
        preprocessing_workflow.connect(B0_wf, 'outputspec.field_coefs',
                                       datasink, 'B0.fieldcoef')
        preprocessing_workflow.connect(B0_wf, 'outputspec.out_files', datasink,
                                       'B0')

    # motion correction
    motion_proc = create_motion_correction_workflow(
        'moco', method=analysis_params['moco_method'])
    if analysis_params['B0_or_topup'] == 'B0':
        preprocessing_workflow.connect(B0_wf, 'outputspec.out_files',
                                       motion_proc, 'inputspec.in_files')
    elif analysis_params['B0_or_topup'] == 'neither':
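        # NOTE: bet_epi exists only in the commented-out block above, so this
        # branch raises NameError as written; re-enable the BET MapNode before
        # selecting B0_or_topup == 'neither'.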
        preprocessing_workflow.connect(bet_epi, 'out_file', motion_proc,
                                       'inputspec.in_files')
    preprocessing_workflow.connect(input_node, 'RepetitionTime', motion_proc,
                                   'inputspec.tr')
    preprocessing_workflow.connect(input_node, 'output_directory', motion_proc,
                                   'inputspec.output_directory')
    preprocessing_workflow.connect(input_node, 'which_file_is_EPI_space',
                                   motion_proc,
                                   'inputspec.which_file_is_EPI_space')

    # registration
    reg = create_registration_workflow(analysis_params, name='reg')
    preprocessing_workflow.connect(input_node, 'output_directory', reg,
                                   'inputspec.output_directory')
    preprocessing_workflow.connect(motion_proc, 'outputspec.EPI_space_file',
                                   reg, 'inputspec.EPI_space_file')
    preprocessing_workflow.connect(input_node, 'sub_FS_id', reg,
                                   'inputspec.freesurfer_subject_ID')
    preprocessing_workflow.connect(input_node, 'FS_subject_dir', reg,
                                   'inputspec.freesurfer_subject_dir')
    preprocessing_workflow.connect(input_node, 'standard_file', reg,
                                   'inputspec.standard_file')

    # temporal filtering
    preprocessing_workflow.connect(input_node, 'sg_filter_window_length',
                                   sgfilter, 'window_length')
    preprocessing_workflow.connect(input_node, 'sg_filter_order', sgfilter,
                                   'polyorder')
    preprocessing_workflow.connect(motion_proc,
                                   'outputspec.motion_corrected_files',
                                   sgfilter, 'in_file')
    preprocessing_workflow.connect(sgfilter, 'out_file', datasink, 'tf')

    # node for percent signal change
    preprocessing_workflow.connect(input_node, 'psc_func', psc, 'func')
    preprocessing_workflow.connect(sgfilter, 'out_file', psc, 'in_file')
    preprocessing_workflow.connect(psc, 'out_file', datasink, 'psc')

    # # retroicor functionality
    # if analysis_params['perform_physio'] == 1:
    #     retr = create_retroicor_workflow(name = 'retroicor', order_or_timing = analysis_params['retroicor_order_or_timing'])
    #
    #     # # retroicor can take the crudest form of epi file, so that it proceeds quickly
    #     preprocessing_workflow.connect(datasource, 'func', retr, 'inputspec.in_files')
    #     preprocessing_workflow.connect(datasource, 'physio', retr, 'inputspec.phys_files')
    #     preprocessing_workflow.connect(input_node, 'analysis_params.nr_dummies', retr, 'inputspec.nr_dummies')
    #     preprocessing_workflow.connect(input_node, 'analysis_params.MultiBandFactor', retr, 'inputspec.MB_factor')
    #     preprocessing_workflow.connect(input_node, 'analysis_params.tr', retr, 'inputspec.tr')
    #     preprocessing_workflow.connect(input_node, 'analysis_params.SliceEncodingDirection', retr, 'inputspec.slice_direction')
    #     preprocessing_workflow.connect(input_node, 'analysis_params.SliceTiming', retr, 'inputspec.slice_timing')
    #     preprocessing_workflow.connect(input_node, 'analysis_params.SliceOrder', retr, 'inputspec.slice_order')
    #     preprocessing_workflow.connect(input_node, 'analysis_params.PhysiologySampleRate', retr, 'inputspec.phys_sample_rate')
    #     preprocessing_workflow.connect(input_node, 'analysis_params.hr_rvt', retr, 'inputspec.hr_rvt')
    #
    #     # fit nuisances from retroicor
    #     # preprocessing_workflow.connect(retr, 'outputspec.evs', fit_nuis, 'slice_regressor_list')
    #     # preprocessing_workflow.connect(motion_proc, 'outputspec.extended_motion_correction_parameters', fit_nuis, 'vol_regressors')
    #     # preprocessing_workflow.connect(psc, 'out_file', fit_nuis, 'in_file')
    #
    #     # preprocessing_workflow.connect(fit_nuis, 'res_file', av_r, 'in_files')
    #
    #     preprocessing_workflow.connect(retr, 'outputspec.new_phys', datasink, 'phys.log')
    #     preprocessing_workflow.connect(retr, 'outputspec.fig_file', datasink, 'phys.figs')
    #     preprocessing_workflow.connect(retr, 'outputspec.evs', datasink, 'phys.evs')
    #     # preprocessing_workflow.connect(fit_nuis, 'res_file', datasink, 'phys.res')
    #     # preprocessing_workflow.connect(fit_nuis, 'rsq_file', datasink, 'phys.rsq')
    #     # preprocessing_workflow.connect(fit_nuis, 'beta_file', datasink, 'phys.betas')
    #
    #     # preprocessing_workflow.connect(av_r, 'out_file', datasink, 'av_r')

    #
    # ########################################################################################
    # # masking stuff if doing mri analysis
    # ########################################################################################
    #
    #     all_mask_opds = ['dc'] + analysis_parameters[u'avg_subject_RS_label_folders']
    #     all_mask_lds = [''] + analysis_parameters[u'avg_subject_RS_label_folders']
    #
    #     # loop across different folders to mask
    #     # untested as yet.
    #     masking_list = []
    #     dilate_list = []
    #     for opd, label_directory in zip(all_mask_opds,all_mask_lds):
    #         dilate_list.append(
    #             pe.MapNode(interface=fsl.maths.DilateImage(
    #                 operation = 'mean', kernel_shape = 'sphere', kernel_size = analysis_parameters['dilate_kernel_size']),
    #                 name='dilate_'+label_directory, iterfield=['in_file']))
    #
    #         masking_list.append(create_masks_from_surface_workflow(name = 'masks_from_surface_'+label_directory))
    #
    #         masking_list[-1].inputs.inputspec.label_directory = label_directory
    #         masking_list[-1].inputs.inputspec.fill_thresh = 0.005
    #         masking_list[-1].inputs.inputspec.re = '*.label'
    #
    #         preprocessing_workflow.connect(motion_proc, 'outputspec.EPI_space_file', masking_list[-1], 'inputspec.EPI_space_file')
    #         preprocessing_workflow.connect(input_node, 'output_directory', masking_list[-1], 'inputspec.output_directory')
    #         preprocessing_workflow.connect(input_node, 'FS_subject_dir', masking_list[-1], 'inputspec.freesurfer_subject_dir')
    #         preprocessing_workflow.connect(input_node, 'FS_ID', masking_list[-1], 'inputspec.freesurfer_subject_ID')
    #         preprocessing_workflow.connect(reg, 'rename_register.out_file', masking_list[-1], 'inputspec.reg_file')
    #
    #         preprocessing_workflow.connect(masking_list[-1], 'outputspec.masks', dilate_list[-1], 'in_file')
    #         preprocessing_workflow.connect(dilate_list[-1], 'out_file', datasink, 'masks.'+opd)
    #
    #     # # surface-based label import in to EPI space, but now for RS labels
    #     # these should have been imported to the subject's FS folder,
    #     # see scripts/annot_conversion.sh
    #     RS_masks_from_surface = create_masks_from_surface_workflow(name = 'RS_masks_from_surface')
    #     RS_masks_from_surface.inputs.inputspec.label_directory = analysis_parameters['avg_subject_label_folder']
    #     RS_masks_from_surface.inputs.inputspec.fill_thresh = 0.005
    #     RS_masks_from_surface.inputs.inputspec.re = '*.label'
    #
    #     preprocessing_workflow.connect(motion_proc, 'outputspec.EPI_space_file', RS_masks_from_surface, 'inputspec.EPI_space_file')
    #     preprocessing_workflow.connect(input_node, 'output_directory', RS_masks_from_surface, 'inputspec.output_directory')
    #     preprocessing_workflow.connect(input_node, 'FS_subject_dir', RS_masks_from_surface, 'inputspec.freesurfer_subject_dir')
    #     preprocessing_workflow.connect(input_node, 'FS_ID', RS_masks_from_surface, 'inputspec.freesurfer_subject_ID')
    #     preprocessing_workflow.connect(reg, 'rename_register.out_file', RS_masks_from_surface, 'inputspec.reg_file')
    #
    #     preprocessing_workflow.connect(RS_masks_from_surface, 'outputspec.masks', RS_dilate_cortex, 'in_file')
    #     preprocessing_workflow.connect(RS_dilate_cortex, 'out_file', datasink, 'masks.'+analysis_parameters['avg_subject_label_folder'])

    ########################################################################################
    # wrapping up, sending data to datasink
    ########################################################################################

    # preprocessing_workflow.connect(bet_epi, 'out_file', datasink, 'bet.epi')
    # preprocessing_workflow.connect(bet_epi, 'mask_file', datasink, 'bet.epimask')
    # preprocessing_workflow.connect(bet_topup, 'out_file', datasink, 'bet.topup')
    # preprocessing_workflow.connect(bet_topup, 'mask_file', datasink, 'bet.topupmask')

    # preprocessing_workflow.connect(nuc, 'out_file', datasink, 'nuc')
    # preprocessing_workflow.connect(sgfilter, 'out_file', datasink, 'tf')
    # preprocessing_workflow.connect(psc, 'out_file', datasink, 'psc')
    # preprocessing_workflow.connect(datasource, 'physio', datasink, 'phys')

    return preprocessing_workflow
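# Hedged usage sketch (keys and values assumed; analysis_params must at least
# provide 'base_dir', 'B0_or_topup' and 'moco_method' for the branches above):
# params = {'base_dir': '/data', 'B0_or_topup': 'B0', 'moco_method': 'AFNI'}
# wf = create_preprocessing_workflow(params, name='yesno_3T')
# wf.run('MultiProc')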
Code example #11
# create the nipype workflow
wf = Workflow(name='connectivity')
wf.config['execution']['crashfile_format'] = 'txt'

# define inputs to the workflow
infosource = Node(IdentityInterface(fields=['subject_id', 'roi']),
                  name='infosource')
infosource.iterables = [('subject_id', list(subj_rois.keys())), ('roi', rois)]

# grab data
#templates = {'trk': 'analysis/mrtrix/{subject_id}/tracks.trk'}
templates = {
    'trk':
    'analysis/fathresh-0.5/{subject_id}/recon/{subject_id}_csd_streamline.trk'
}
grabber = Node(SelectFiles(templates), name='grabber')
grabber.inputs.base_directory = proj_dir
grabber.inputs.sort_filelist = True

wf.connect(infosource, 'subject_id', grabber, 'subject_id')
''' define ROI mask files '''


# get subject-specific list of ROI filenames:
def rois_fetcher(subj_rois, subj):
    return subj_rois[subj], subj


fetch_rois = Node(Function(input_names=['subj_rois', 'subj'],
                           output_names=['target_roi_filenames', 'subj'],
                           function=rois_fetcher),
                  name='fetch_rois')  # assumed node name; snippet truncated
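# Hedged continuation (the snippet is cut off above): feed the ROI dict and
# the iterated subject id into the fetcher node; subj_rois is the mapping
# referenced in infosource.iterables further up.
fetch_rois.inputs.subj_rois = subj_rois
wf.connect(infosource, 'subject_id', fetch_rois, 'subj')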
Code example #12
def create_all_calcarine_reward_2_h5_workflow(
        analysis_info, name='all_calcarine_reward_nii_2_h5'):
    import os.path as op
    import tempfile
    import nipype.pipeline as pe
    from nipype.interfaces import fsl
    from nipype.interfaces.utility import Function, Merge, IdentityInterface
    from spynoza.nodes.utils import get_scaninfo, dyns_min_1, topup_scan_params, apply_scan_params
    from nipype.interfaces.io import SelectFiles, DataSink

    # Importing of custom nodes from spynoza packages; assumes that spynoza is installed:
    # pip install git+https://github.com/spinoza-centre/spynoza.git@develop
    from utils.utils import mask_nii_2_hdf5, combine_eye_hdfs_to_nii_hdf

    input_node = pe.Node(
        IdentityInterface(fields=['sub_id', 'preprocessed_data_dir']),
        name='inputspec')

    # i/o node
    datasource_templates = dict(mcf='{sub_id}/mcf/*.nii.gz',
                                psc='{sub_id}/psc/*.nii.gz',
                                tf='{sub_id}/tf/*.nii.gz',
                                GLM='{sub_id}/GLM/*.nii.gz',
                                eye='{sub_id}/eye/h5/*.h5',
                                rois='{sub_id}/roi/*_vol.nii.gz')
    datasource = pe.Node(SelectFiles(datasource_templates,
                                     sort_filelist=True,
                                     raise_on_empty=False),
                         name='datasource')

    hdf5_psc_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                              name='hdf5_psc_masker')
    hdf5_psc_masker.inputs.folder_alias = 'psc'
    hdf5_psc_masker.inputs.hdf5_file = op.join(tempfile.mkdtemp(), 'roi.h5')

    hdf5_tf_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                             name='hdf5_tf_masker')
    hdf5_tf_masker.inputs.folder_alias = 'tf'
    # hdf5_tf_masker receives its hdf5_file via the workflow connection below;
    # the original duplicated the psc masker's assignment here.

    hdf5_mcf_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                              name='hdf5_mcf_masker')
    hdf5_mcf_masker.inputs.folder_alias = 'mcf'

    hdf5_GLM_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                              name='hdf5_GLM_masker')
    hdf5_GLM_masker.inputs.folder_alias = 'GLM'

    eye_hdfs_to_nii_masker = pe.Node(Function(
        input_names=['nii_hdf5_file', 'eye_hdf_filelist', 'new_alias'],
        output_names=['nii_hdf5_file'],
        function=combine_eye_hdfs_to_nii_hdf),
                                     name='eye_hdfs_to_nii_masker')
    eye_hdfs_to_nii_masker.inputs.new_alias = 'eye'

    # node for datasinking
    datasink = pe.Node(DataSink(), name='sinker')
    datasink.inputs.parameterization = False

    all_calcarine_reward_nii_2_h5_workflow = pe.Workflow(name=name)

    all_calcarine_reward_nii_2_h5_workflow.connect(input_node,
                                                   'preprocessed_data_dir',
                                                   datasink, 'base_directory')
    all_calcarine_reward_nii_2_h5_workflow.connect(input_node, 'sub_id',
                                                   datasink, 'container')

    all_calcarine_reward_nii_2_h5_workflow.connect(input_node,
                                                   'preprocessed_data_dir',
                                                   datasource,
                                                   'base_directory')
    all_calcarine_reward_nii_2_h5_workflow.connect(input_node, 'sub_id',
                                                   datasource, 'sub_id')

    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'psc',
                                                   hdf5_psc_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_psc_masker,
                                                   'mask_files')

    # the hdf5_file is created by the psc node, and then passed from masker to masker on into the datasink.
    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_psc_masker,
                                                   'hdf5_file', hdf5_tf_masker,
                                                   'hdf5_file')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'tf',
                                                   hdf5_tf_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_tf_masker,
                                                   'mask_files')

    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_tf_masker, 'hdf5_file',
                                                   hdf5_mcf_masker,
                                                   'hdf5_file')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'mcf',
                                                   hdf5_mcf_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_mcf_masker,
                                                   'mask_files')

    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'GLM',
                                                   hdf5_GLM_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_GLM_masker,
                                                   'mask_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_mcf_masker,
                                                   'hdf5_file',
                                                   hdf5_GLM_masker,
                                                   'hdf5_file')

    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_GLM_masker,
                                                   'hdf5_file',
                                                   eye_hdfs_to_nii_masker,
                                                   'nii_hdf5_file')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'eye',
                                                   eye_hdfs_to_nii_masker,
                                                   'eye_hdf_filelist')

    all_calcarine_reward_nii_2_h5_workflow.connect(eye_hdfs_to_nii_masker,
                                                   'nii_hdf5_file', datasink,
                                                   'h5')

    return all_calcarine_reward_nii_2_h5_workflow
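# Hedged usage sketch (directory layout assumed to match the templates above):
# wf = create_all_calcarine_reward_2_h5_workflow(analysis_info={})
# wf.inputs.inputspec.preprocessed_data_dir = '/preprocessed'
# wf.inputs.inputspec.sub_id = 'sub-01'
# wf.run()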
Code example #13
# Infosource - a function-free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['map_id']), name="infosource")
infosource.iterables = [('map_id', map_list)]

#-----------------------------------------------------------------------------------------------------
# In[4]:

templates = {
    'all_skeleton': 'Waxholm_Template/*/{map_id}/All_*_skeletonised.nii.gz',
    'skeleton_mask':
    'Waxholm_Template/*/{map_id}/mean_FA_skeleton_mask.nii.gz',
    'all_image': 'Waxholm_Template/*/{map_id}/All_{map_id}_WAX.nii.gz',
    'mean_FA': 'Waxholm_Template/*/{map_id}/mean_FA.nii.gz',
}

selectfiles = Node(SelectFiles(templates, base_directory=experiment_dir),
                   name="selectfiles")
#-----------------------------------------------------------------------------------------------------
# In[5]:

datasink = Node(DataSink(), name='datasink')
datasink.inputs.container = output_dir
datasink.inputs.base_directory = experiment_dir

substitutions = [('_map_id_', ' ')]

datasink.inputs.substitutions = substitutions

#-----------------------------------------------------------------------------------------------------
#Design with two contrasts only
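# A hedged sketch of the "two contrasts" design the comment above refers to,
# using FSL's MultipleRegressDesign (group sizes and regressor names assumed):
from nipype.interfaces.fsl import MultipleRegressDesign

design = Node(MultipleRegressDesign(), name='design')
design.inputs.regressors = dict(group1=[1, 1, 0, 0], group2=[0, 0, 1, 1])
design.inputs.contrasts = [
    ('group1 > group2', 'T', ['group1', 'group2'], [1.0, -1.0]),
    ('group2 > group1', 'T', ['group1', 'group2'], [-1.0, 1.0])
]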
Code example #14
                name="mr_convertwmparc")
mr_convertwm = Node(MRIConvert(out_type=u'niigz'),
                    name="mr_convertwm")
###############################
#specify input output
###############################
# Infosource - a function-free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list)]

t1_file = opj('sub_{subject_id}','t1_mprage.nii.gz')

templates = {'t1': t1_file}

selectfiles = Node(SelectFiles(templates,
                               base_directory='/home/luiscp/Documents/Data/ADRC_90Plus'),
                   name="selectfiles")

datasink = Node(DataSink(base_directory=experiment_dir,
                         container=output_dir),
                name="datasink")
substitutions = [('_subject_id_', 'sub_'),
                 ('_out',''),
                 ('_mr_convertaparc_aseg0/',''),
                 ('_mr_convertaparc_aseg1/',''),
                 ('_mr_convertaparc_aseg2/','')]

datasink.inputs.substitutions = substitutions

###############################
#Specify workflow
Code example #15
# specify input and output stream

# Infosource - a function-free node to iterate over the list of subject names

infosource = Node(IdentityInterface(fields=['subject_id', 'task_name']),
                  name="infosource")
infosource.iterables = [('subject_id', patient_list), ('task_name', task_list)]

# SelectFiles - to grab the data
anat_file = opj('NIFTI_Renamed_test', '{subject_id}', 'T1.nii.gz')
extra_file = opj('NIFTI_Renamed_test', '{subject_id}', '{task_name}.nii.gz')

templates = {'anat': anat_file, 'extra': extra_file}

selectfiles = Node(SelectFiles(templates, base_directory=working_dir),
                   name="selectfiles")

# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory=experiment_dir, container=output_dir),
                name="datasink")

## Use the following DataSink output substitutions
substitutions = [
    ('_subject_id_', 'sub-'),
    ('_task_name_', '/task-'),
    ('_fwhm_', 'fwhm-'),
    ('_roi', ''),
    ('_mcf', ''),
    ('_st', ''),
    ('_flirt', ''),
Code example #16
def create_pupil_workflow(analysis_info, name='pupil'):
    import nipype.pipeline as pe
    from nipype.interfaces.utility import Function, Merge, IdentityInterface
    from nipype.interfaces.io import SelectFiles, DataSink

    from utils.pupil import fit_FIR_pupil_files

    imports = [
        'from utils.behavior import behavior_timing',
        'from utils.plotting import plot_fir_results_unpredictable',
        'from utils.plotting import plot_fir_results_predictable',
        'from utils.plotting import plot_fir_results_variable',
    ]

    input_node = pe.Node(
        IdentityInterface(fields=['preprocessed_directory', 'sub_id']),
        name='inputspec')

    # i/o node
    datasource_templates = dict(
        all_roi_file='{sub_id}/h5/roi.h5',
        # predictable reward experiment needs behavior files and moco but no physio
        predictable_in_files='{sub_id}/psc/*-predictable_reward_*.nii.gz',
        predictable_behavior_tsv_files=
        '{sub_id}/events/tsv/*-predictable_reward_*.tsv',
        predictable_eye_h5_files='{sub_id}/eye/h5/*-predictable_reward_*.h5',
        # unpredictable reward experiment needs behavior files, moco and physio
        unpredictable_in_files='{sub_id}/psc/*-unpredictable_reward_*.nii.gz',
        unpredictable_behavior_tsv_files=
        '{sub_id}/events/tsv/*-unpredictable_reward_*.tsv',
        unpredictable_eye_h5_files=
        '{sub_id}/eye/h5/*-unpredictable_reward_*.h5',
        # variable reward experiment needs behavior files, moco and physio
        variable_in_files='{sub_id}/psc/*-variable_*_reward_*.nii.gz',
        variable_behavior_tsv_files=
        '{sub_id}/events/tsv/*-variable_*_reward_*.tsv',
        variable_eye_h5_files='{sub_id}/eye/h5/*-variable_*_reward_*.h5',
    )
    datasource = pe.Node(SelectFiles(datasource_templates,
                                     sort_filelist=True,
                                     raise_on_empty=False),
                         name='datasource')

    predictable_pupil_FIR = pe.Node(Function(input_names=[
        'experiment', 'eye_h5_file_list', 'behavior_file_list', 'h5_file',
        'in_files', 'fir_frequency', 'fir_interval', 'data_type',
        'lost_signal_rate_threshold'
    ],
                                             output_names=['out_figures'],
                                             function=fit_FIR_pupil_files,
                                             imports=imports),
                                    name='predictable_pupil_FIR')
    predictable_pupil_FIR.inputs.fir_frequency = analysis_info[
        'pupil_fir_frequency']
    predictable_pupil_FIR.inputs.fir_interval = analysis_info[
        'pupil_fir_interval']
    predictable_pupil_FIR.inputs.experiment = 'predictable'
    predictable_pupil_FIR.inputs.data_type = analysis_info['pupil_data_type']
    predictable_pupil_FIR.inputs.lost_signal_rate_threshold = analysis_info[
        'pupil_lost_signal_rate_threshold']

    unpredictable_pupil_FIR = pe.Node(Function(input_names=[
        'experiment', 'eye_h5_file_list', 'behavior_file_list', 'h5_file',
        'in_files', 'fir_frequency', 'fir_interval', 'data_type',
        'lost_signal_rate_threshold'
    ],
                                               output_names=['out_figures'],
                                               function=fit_FIR_pupil_files,
                                               imports=imports),
                                      name='unpredictable_pupil_FIR')
    unpredictable_pupil_FIR.inputs.fir_frequency = analysis_info[
        'pupil_fir_frequency']
    unpredictable_pupil_FIR.inputs.fir_interval = analysis_info[
        'pupil_fir_interval']
    unpredictable_pupil_FIR.inputs.experiment = 'unpredictable'
    unpredictable_pupil_FIR.inputs.data_type = analysis_info['pupil_data_type']
    unpredictable_pupil_FIR.inputs.lost_signal_rate_threshold = analysis_info[
        'pupil_lost_signal_rate_threshold']

    variable_pupil_FIR = pe.Node(Function(input_names=[
        'experiment', 'eye_h5_file_list', 'behavior_file_list', 'h5_file',
        'in_files', 'fir_frequency', 'fir_interval', 'data_type',
        'lost_signal_rate_threshold'
    ],
                                          output_names=['out_figures'],
                                          function=fit_FIR_pupil_files,
                                          imports=imports),
                                 name='variable_pupil_FIR')
    variable_pupil_FIR.inputs.fir_frequency = analysis_info[
        'pupil_fir_frequency']
    variable_pupil_FIR.inputs.fir_interval = analysis_info[
        'pupil_fir_interval']
    variable_pupil_FIR.inputs.experiment = 'variable'
    variable_pupil_FIR.inputs.data_type = analysis_info['pupil_data_type']
    variable_pupil_FIR.inputs.lost_signal_rate_threshold = analysis_info[
        'pupil_lost_signal_rate_threshold']

    # the actual top-level workflow
    pupil_analysis_workflow = pe.Workflow(name=name)

    pupil_analysis_workflow.connect(input_node, 'preprocessed_directory',
                                    datasource, 'base_directory')
    pupil_analysis_workflow.connect(input_node, 'sub_id', datasource, 'sub_id')

    # variable reward pupil FIR
    pupil_analysis_workflow.connect(datasource, 'variable_eye_h5_files',
                                    variable_pupil_FIR, 'eye_h5_file_list')
    pupil_analysis_workflow.connect(datasource, 'variable_behavior_tsv_files',
                                    variable_pupil_FIR, 'behavior_file_list')
    pupil_analysis_workflow.connect(datasource, 'all_roi_file',
                                    variable_pupil_FIR, 'h5_file')
    pupil_analysis_workflow.connect(datasource, 'variable_in_files',
                                    variable_pupil_FIR, 'in_files')

    # predictable reward pupil FIR
    pupil_analysis_workflow.connect(datasource, 'predictable_eye_h5_files',
                                    predictable_pupil_FIR, 'eye_h5_file_list')
    pupil_analysis_workflow.connect(datasource,
                                    'predictable_behavior_tsv_files',
                                    predictable_pupil_FIR,
                                    'behavior_file_list')
    pupil_analysis_workflow.connect(datasource, 'all_roi_file',
                                    predictable_pupil_FIR, 'h5_file')
    pupil_analysis_workflow.connect(datasource, 'predictable_in_files',
                                    predictable_pupil_FIR, 'in_files')

    # unpredictable reward pupil FIR
    pupil_analysis_workflow.connect(datasource, 'unpredictable_eye_h5_files',
                                    unpredictable_pupil_FIR,
                                    'eye_h5_file_list')
    pupil_analysis_workflow.connect(datasource,
                                    'unpredictable_behavior_tsv_files',
                                    unpredictable_pupil_FIR,
                                    'behavior_file_list')
    pupil_analysis_workflow.connect(datasource, 'all_roi_file',
                                    unpredictable_pupil_FIR, 'h5_file')
    pupil_analysis_workflow.connect(datasource, 'unpredictable_in_files',
                                    unpredictable_pupil_FIR, 'in_files')

    # datasink
    datasink = pe.Node(DataSink(), name='sinker')
    datasink.inputs.parameterization = False

    pupil_analysis_workflow.connect(input_node, 'preprocessed_directory',
                                    datasink, 'base_directory')
    pupil_analysis_workflow.connect(input_node, 'sub_id', datasink,
                                    'container')

    pupil_analysis_workflow.connect(unpredictable_pupil_FIR, 'out_figures',
                                    datasink, 'pupil.@unpredictable_pupil_FIR')
    pupil_analysis_workflow.connect(predictable_pupil_FIR, 'out_figures',
                                    datasink, 'pupil.@predictable_pupil_FIR')
    pupil_analysis_workflow.connect(variable_pupil_FIR, 'out_figures',
                                    datasink, 'pupil.@variable_pupil_FIR')

    return pupil_analysis_workflow
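# Hedged usage sketch (analysis_info keys inferred from the lookups above;
# values are placeholders):
# analysis_info = {'pupil_fir_frequency': 4, 'pupil_fir_interval': [-2, 14],
#                  'pupil_data_type': 'pupil_bp',
#                  'pupil_lost_signal_rate_threshold': 0.1}
# wf = create_pupil_workflow(analysis_info)
# wf.inputs.inputspec.preprocessed_directory = '/preprocessed'
# wf.inputs.inputspec.sub_id = 'sub-01'
# wf.run()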
Code example #17
                  name="infosource")
infosource.iterables = [('subject', subject_list), ('ses', ses_list)]


templates = {#tse
             'tse' : '{subject}/{ses}/anat/{subject}_{ses}*run-1_T2w.nii.gz',
             #mprage
             'mprage' : '{subject}/{ses}/anat/{subject}_{ses}*run-1_T1w.nii.gz',
             }
# change and add more strings to include all necessary templates for histmatch
histmatch_files = {
    'ashs_t1_template': 'template/template.nii.gz',
    'ashs_t2_template': 'train/train000/tse.nii.gz',
}

selectfiles = Node(SelectFiles(templates, base_directory=data_dir),
                   name='selectfiles')

selecttemplates = Node(SelectFiles(histmatch_files, base_directory=atlas_dir),
                       name='selecttemplates')

wf.connect([(infosource, selectfiles, [('subject', 'subject'),
                                       ('ses', 'ses')])])

#wf.connect([(infosource, selecttemplates, [('ses','ses')])])

############
## Step 1 ##
############
# Bias correct the T1 and TSE
#input_image not input
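# A hedged sketch of the bias-correction step the comments above describe;
# ANTs' N4BiasFieldCorrection names its input 'input_image' (hence the note):
from nipype.interfaces.ants import N4BiasFieldCorrection

n4_t1 = Node(N4BiasFieldCorrection(dimension=3), name='n4_t1')
n4_tse = Node(N4BiasFieldCorrection(dimension=3), name='n4_tse')
wf.connect([(selectfiles, n4_t1, [('mprage', 'input_image')]),
            (selectfiles, n4_tse, [('tse', 'input_image')])])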
Code example #18
def create_DWI_workflow(
    subject_list,
    bids_dir,
    work_dir,
    out_dir,
    bids_templates,
):

    # create initial workflow
    wf = Workflow(name='DWI', base_dir=work_dir)

    # use infosource to iterate workflow across subject list
    n_infosource = Node(interface=IdentityInterface(fields=['subject_id']),
                        name="subject_source"
                        # input: 'subject_id'
                        # output: 'subject_id'
                        )
    # runs the node with subject_id = each element in subject_list
    n_infosource.iterables = ('subject_id', subject_list)

    # select matching files from bids_dir
    n_selectfiles = Node(interface=SelectFiles(templates=bids_templates,
                                               base_directory=bids_dir),
                         name='get_subject_data')
    wf.connect([(n_infosource, n_selectfiles, [('subject_id', 'subject_id_p')])
                ])

    # DWIDenoise
    # https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.mrtrix3.preprocess.html
    n_denoise = Node(interface=mrt.DWIDenoise(), name='n_denoise')
    wf.connect([(n_selectfiles, n_denoise, [('DWI_all', 'in_file')])])

    # datasink
    n_datasink = Node(interface=DataSink(base_directory=out_dir),
                      name='datasink')

    wf.connect([(n_selectfiles, n_datasink, [('all_b0_PA',
                                              'all_b0_PA_unchanged')]),
                (n_denoise, n_datasink, [('out_file', 'DWI_all_denoised')])])

    ########## I'VE ADDED IN ##########################################################################
    # MRDeGibbs
    # https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.mrtrix3.preprocess.html
    n_degibbs = Node(
        interface=mrt.MRDeGibbs(out_file='DWI_all_denoised_degibbs.mif'),
        name='n_degibbs')
    wf.connect([(n_denoise, n_degibbs, [('out_file', 'in_file')])])

    wf.connect([(n_degibbs, n_datasink, [('out_file',
                                          'DWI_all_denoised_degibbs.mif')])])

    # DWI Extract
    n_dwiextract = Node(interface=mrt.DWIExtract(bzero=True,
                                                 out_file='b0vols.mif'),
                        name='n_dwiextract')

    wf.connect([(n_degibbs, n_dwiextract, [('out_file', 'in_file')])])

    wf.connect([(n_dwiextract, n_datasink, [('out_file', 'noddi_b0_degibbs')])
                ])

    # MRcat
    n_mrcat = Node(
        interface=mrcatfunc.MRCat(
            #axis=3,
            out_file='b0s.mif'),
        name='n_mrcat')

    # Connect DTI_B0_PA to mrcat node
    wf.connect([(n_selectfiles, n_mrcat, [('DTI_B0_PA', 'in_file1')])])

    wf.connect([(n_dwiextract, n_mrcat, [('out_file', 'in_file2')])])

    # Output the mrcat result into 'noddi_and_PA_b0s.mif'
    wf.connect([(n_mrcat, n_datasink, [('out_file', 'noddi_and_PA_b0s.mif')])])

    # DWIfslpreproc
    n_dwifslpreproc = Node(interface=preprocfunc.DWIFslPreProc(
        out_file='preprocessedDWIs.mif', use_header=True),
                           name='n_dwifslpreproc')

    # Connect output of degibbs to dwifslpreproc node
    wf.connect([(n_degibbs, n_dwifslpreproc, [('out_file', 'in_file')])])
    # Connect output of mrcat to se_epi input
    wf.connect([(n_mrcat, n_dwifslpreproc, [('out_file', 'se_epi_file')])])
    # Put output of dwifslpreproc into 'preprocessedDWIs.mif'
    wf.connect([(n_dwifslpreproc, n_datasink, [('out_file',
                                                'preprocessedDWIs.mif')])])

    # DWI bias correct
    n_dwibiascorrect = Node(
        interface=preprocess.DWIBiasCorrect(use_ants=True),
        name='n_dwibiascorrect',
    )

    wf.connect([(n_dwifslpreproc, n_dwibiascorrect, [('out_file', 'in_file')])
                ])
    wf.connect([(n_dwibiascorrect, n_datasink,
                 [('out_file', 'ANTSpreprocessedDWIs.mif')])])

    #DWI2mask
    n_dwi2mask = Node(interface=mrt.BrainMask(out_file='mask.mif'),
                      name='n_dwi2mask')
    wf.connect([(n_dwibiascorrect, n_dwi2mask, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2mask, n_datasink, [('out_file', 'mask.mif')])])

    ## A) Fixel-based analysis
    #DWI2response
    n_dwi2response = Node(interface=mrt.ResponseSD(algorithm='dhollander',
                                                   wm_file='wm_res.txt',
                                                   gm_file='gm_res.txt',
                                                   csf_file='csf_res.txt'),
                          name='n_dwi2response')

    wf.connect([(n_dwibiascorrect, n_dwi2response, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2response, n_datasink, [('wm_file', 'wm_res.txt')])])
    wf.connect([(n_dwi2response, n_datasink, [('gm_file', 'gm_res.txt')])])
    wf.connect([(n_dwi2response, n_datasink, [('csf_file', 'csf_res.txt')])])

    #DWI2fod
    n_dwi2fod = Node(interface=mrt.ConstrainedSphericalDeconvolution(
        algorithm='msmt_csd',
        wm_odf='wmfod.mif',
        gm_odf='gmfod.mif',
        csf_odf='csffod.mif'),
                     name='n_dwi2fod')
    # connect outputs of dwi2fod into dwi2response
    wf.connect([(n_dwibiascorrect, n_dwi2fod, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('wm_file', 'wm_txt')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('gm_file', 'gm_txt')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('csf_file', 'csf_txt')])])
    # output wmfod file from dwi2fod
    wf.connect([(n_dwi2fod, n_datasink, [('wm_odf', 'wmfod.mif')])])
    wf.connect([(n_dwi2fod, n_datasink, [('gm_odf', 'gmfod.mif')])])
    wf.connect([(n_dwi2fod, n_datasink, [('csf_odf', 'csffod.mif')])])

    #mrconvert to extract Z component of wmfod
    n_mrconvert_fod = Node(interface=utils.MRConvert(out_file='Zwmfod.mif',
                                                     coord=[3, 0]),
                           name='n_mrconvert_fod')

    wf.connect([(n_dwi2fod, n_mrconvert_fod, [('wm_odf', 'in_file')])])

    wf.connect([(n_mrconvert_fod, n_datasink, [('out_file', 'Zwmfod.mif')])])

    # Concatenate all wm, gm, csf fod files to see their distribution throughout Brain
    n_mrcat_fod = Node(interface=mrcatfunc.MRCat(out_file='vf.mif'),
                       name='n_mrcat_fod')
    # Connect Zwmfod, gmfod and csffod as inputs
    wf.connect([(n_mrconvert_fod, n_mrcat_fod, [('out_file', 'in_file1')])])
    wf.connect([(n_dwi2fod, n_mrcat_fod, [('gm_odf', 'in_file2')])])
    wf.connect([(n_dwi2fod, n_mrcat_fod, [('csf_odf', 'in_file3')])])
    # Output the concatenated file into 'vf.mif'
    wf.connect([(n_mrcat_fod, n_datasink, [('out_file', 'vf.mif')])])

    #fod2fixel wmfod.mif wmfixels -fmls_peak_value 0 -fmls_integral 0.10 -afd afd.mif -peak peak.mif -disp disp.mif
    # OUTPUTS: -afd afd.mif -peak peak.mif -disp disp.mif
    n_fod2fixel = Node(
        interface=fod2fixelfunc.fod2fixel(
            out_file='wmfixels',
            #afd_file = 'afd.mif',
            peak_file='peak.mif',
            disp_file='disp.mif'),
        name='n_fod2fixel')
    # trial multiple peak-value and integral thresholds; both must go in one
    # iterables list (a second assignment would overwrite the first)
    n_fod2fixel.iterables = [('fmls_peak_value', [0, 0.10, 0.50]),
                             ('fmls_integral', [0, 0.10, 0.50])]

    # obtain wm fibre image as input
    wf.connect([(n_dwi2fod, n_fod2fixel, [('wm_odf', 'in_file')])])
    # outputs of fod2fixel
    wf.connect([(n_fod2fixel, n_datasink, [('out_file', 'wmfixels')])])
    wf.connect([(n_fod2fixel, n_datasink, [('afd_file', 'afd.mif')])])
    wf.connect([(n_fod2fixel, n_datasink, [('peak_file', 'peak.mif')])])
    wf.connect([(n_fod2fixel, n_datasink, [('disp_file', 'disp.mif')])])

    ## Fixel2peaks
    n_fixel2peaks = Node(interface=fixel2peaksfunc.fixel2peaks(
        out_file='peaks_wmdirections.mif'),
                         name='n_fixel2peaks')

    n_fixel2peaks.iterables = ('number', [1, 2, 3])

    # obtain directions file in output folder of fod2fixel, as input
    wf.connect([(n_fod2fixel, n_fixel2peaks, [('out_file', 'in_file')])])
    # outputs of fixel2peaks
    wf.connect([(n_fixel2peaks, n_datasink, [('out_file',
                                              'peaks_wmdirections.mif')])])

    #mrmath to find normalised value of peak WM directions
    n_mrmath = Node(interface=mrt.MRMath(
        axis=3, operation='norm', out_file='norm_peaks_wmdirections.mif'),
                    name='n_mrmath')

    wf.connect([(n_fixel2peaks, n_mrmath, [('out_file', 'in_file')])])

    wf.connect([(n_mrmath, n_datasink, [('out_file',
                                         'norm_peaks_wmdirections.mif')])])

    # mrcalc to divide peak WM direction by normalised value
    n_mrcalc = Node(interface=mrcalcfunc.MRCalc(operation='divide',
                                                out_file='wm_peak_dir.mif'),
                    name='n_mrcalc')

    wf.connect([(n_fixel2peaks, n_mrcalc, [('out_file', 'in_file1')])])

    wf.connect([(n_mrmath, n_mrcalc, [('out_file', 'in_file2')])])

    wf.connect([(n_mrcalc, n_datasink, [('out_file', 'WM_peak_dir.mif')])])

    #mrconvert to extract Z component of peak directions
    n_mrconvert2 = Node(interface=utils.MRConvert(
        out_file='Zpeak_WM_Directions.mif', coord=[3, 2]),
                        name='n_mrconvert2')
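    # coord=[3, 2] selects index 2 along axis 3, i.e. the Z component of the
    # normalised peak-direction vectors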

    wf.connect([(n_mrcalc, n_mrconvert2, [('out_file', 'in_file')])])

    wf.connect([(n_mrconvert2, n_datasink, [('out_file',
                                             'Zpeak_WM_Directions.mif')])])

    # mrcalc to find absolute value
    n_mrcalc2 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='absZpeak_WM_Directions.mif'),
                     name='n_mrcalc2')

    wf.connect([(n_mrconvert2, n_mrcalc2, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc2, n_datasink, [('out_file',
                                          'absZpeak_WM_Directions.mif')])])

    # mrcalc to get angle by doing inverse cosine
    n_mrcalc3 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acosZpeak_WM_Directions.mif'),
                     name='n_mrcalc3')

    wf.connect([(n_mrcalc2, n_mrcalc3, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc3, n_datasink, [('out_file',
                                          'acosZpeak_WM_Directions.mif')])])

    # mrcalc to convert angle to degrees
    n_mrcalc4 = Node(interface=mrcalcfunc.MRCalc(
        operation='multiply', operand=180, out_file='Fixel1_Z_angle.mif'),
                     name='n_mrcalc4')

    wf.connect([(n_mrcalc3, n_mrcalc4, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc4, n_datasink, [('out_file', 'Fixel1_Z_angle.mif')])])

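    # dividing by pi completes the radians-to-degrees conversion (acos * 180 / pi)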
    n_mrcalc5 = Node(interface=mrcalcfunc.MRCalc(
        operation='divide',
        operand=3.14159265,
        out_file='Fixel1_Z_cos_deg.mif'),
                     name='n_mrcalc5')

    wf.connect([(n_mrcalc4, n_mrcalc5, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc5, n_datasink, [('out_file', 'Fixel1_Z_cos_deg.mif')])
                ])

    ## B) Tensor-based analysis
    #dwi2tensor
    n_dwi2tensor = Node(interface=mrt.FitTensor(out_file='dti.mif'),
                        name='n_dwi2tensor')

    wf.connect([(n_dwibiascorrect, n_dwi2tensor, [('out_file', 'in_file')])])

    wf.connect([(n_dwi2mask, n_dwi2tensor, [('out_file', 'in_mask')])])

    wf.connect([(n_dwi2tensor, n_datasink, [('out_file', 'dt.mif')])])

    #tensor2metric
    n_tensor2metric = Node(interface=tensor2metricfunc.tensor2metric(
        modulate='none', num=1, vector_file='eigenvector.mif'),
                           name='n_tensor2metric')

    wf.connect([(n_dwi2tensor, n_tensor2metric, [('out_file', 'input_file')])])

    wf.connect([(n_tensor2metric, n_datasink, [('vector_file',
                                                'eigenvector.mif')])])

    #mrconvert to get Z eigenvector
    n_mrconvert3 = Node(interface=utils.MRConvert(coord=[3, 2],
                                                  out_file='eigenvectorZ.mif'),
                        name='n_mrconvert3')

    wf.connect([(n_tensor2metric, n_mrconvert3, [('vector_file', 'in_file')])])

    wf.connect([(n_mrconvert3, n_datasink, [('out_file', 'eigenvectorZ.mif')])
                ])

    #ALL SUBSEQUENT STEPS GET ANGLE IN DEGREES
    # mrcalc to find absolute value
    n_mrcalc6 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='abs_eigenvectorZ.mif'),
                     name='n_mrcalc6')

    wf.connect([(n_mrconvert3, n_mrcalc6, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc6, n_datasink, [('out_file', 'abs_eigenvectorZ.mif')])
                ])

    # mrcalc to get angle by doing inverse cosine
    n_mrcalc7 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acos_eigenvectorZ.mif'),
                     name='n_mrcalc7')

    wf.connect([(n_mrcalc6, n_mrcalc7, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc7, n_datasink, [('out_file', 'acos_eigenvectorZ.mif')
                                         ])])

    # mrcalc to convert angle to degrees
    n_mrcalc8 = Node(
        interface=mrcalcfunc.MRCalc(operation='multiply',
                                    operand=180,
                                    out_file='degrees_eigenvectorZ.mif'),
        name='n_mrcalc8')

    wf.connect([(n_mrcalc7, n_mrcalc8, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc8, n_datasink, [('out_file',
                                          'degrees_eigenvectorZ.mif')])])

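    # as above, dividing by pi completes the radians-to-degrees conversion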
    n_mrcalc9 = Node(interface=mrcalcfunc.MRCalc(operation='divide',
                                                 operand=3.14159265,
                                                 out_file='dti_z_cos_deg.mif'),
                     name='n_mrcalc9')

    wf.connect([(n_mrcalc8, n_mrcalc9, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc9, n_datasink, [('out_file', 'dti_z_cos_deg.mif')])])

    # Difference image between fixel based and tensor based outputs
    n_mrcalc10 = Node(interface=mrcalcfunc.MRCalc(
        operation='subtract', out_file='diff_imag_tensor_minus_fixel.mif'),
                      name='n_mrcalc10')

    wf.connect([(n_mrcalc9, n_mrcalc10, [('out_file', 'in_file1')])])

    wf.connect([(n_mrcalc5, n_mrcalc10, [('out_file', 'in_file2')])])

    wf.connect([(n_mrcalc10, n_datasink,
                 [('out_file', 'diff_imag_tensor_minus_fixel.mif')])])

    #################################################################################
    return wf
# Different images used in this pipeline
templates = {#tse
             'umc_tse_native' : 'ashs_atlas_umcutrecht/train/{shorter_id}/tse_native_chunk_{side_id}.nii.gz',
             'umc_tse_whole' : 'ashs_atlas_umcutrecht/train/{shorter_id}/tse.nii.gz',
             #seg
             'umc_seg_native' : 'ashs_atlas_umcutrecht/train/{shorter_id}/tse_native_chunk_{side_id}_seg.nii.gz',
             #mprage
             'umc_mprage_chunk' : 'ashs_atlas_umcutrecht/train/{shorter_id}/mprage_to_chunktemp_{side_id}.nii.gz',
             }

# Different templates used in this pipeline
bespoke_files = {'mprage_inthist_template' : '{side_id}_mprage_template_resampled-0.35mmIso_rescaled_0meanUv_pad-176x144x128.nii.gz',
                 'tse_inthist_template' : '{side_id}_tse_template_resampled-0.35mmIso_rescaled_0meanUv_pad-176x144x128.nii.gz'
                 }

selectfiles = Node(SelectFiles(templates, base_directory=src_path), name='selectfiles')

selecttemplates = Node(SelectFiles(bespoke_files, base_directory=atlas_dir), name='selecttemplates')

wf.connect([(infosource, selectfiles, [('shorter_id', 'shorter_id'),
                                       ('side_id', 'side_id')])]) 

wf.connect([(infosource, selecttemplates, [('side_id','side_id')])])

## Overview of the UMC preprocessing steps ##
# Step 1 Trim and pad the umc_TSE_native chunks
# Step 2 Reslice umc_TSE_native chunks
# Step 3 Reslice the umc segmentation
# Step 4 Register umc_MPRAGE to umc_TSE_native chunks (see the FLIRT sketch below)
# Step 5 Reslice umc_MPRAGE
# Step 6 Normalize TSE and MPRAGE images to respective TSE and MPRAGE templates
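
# A minimal, hypothetical sketch of Step 4 with FSL FLIRT: the actual
# registration node is not part of this excerpt, so the node name, the FLIRT
# parameters, and the assumed `from nipype.interfaces.fsl import FLIRT`
# import are illustrative assumptions rather than the original implementation.
register_mprage = Node(FLIRT(dof=6, output_type='NIFTI_GZ'),
                       name='register_mprage')
wf.connect([(selectfiles, register_mprage, [('umc_mprage_chunk', 'in_file'),
                                            ('umc_tse_native', 'reference')])])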
Code Example #20
highpass_filtering = True

# Specify subjects used for analysis
subject_list = [
    d for d in os.listdir(DATA_DIR) if os.path.isdir(os.path.join(DATA_DIR, d))
]

# Infosource - function free node to iterate over the list of subject names (and/or sessions)
infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# SelectFiles - uses format-string templates and glob to find your files
templates = dict(struct='{subject_id}/structural/structural.nii.gz',
                 func='{subject_id}/functional/*.nii.gz')
selectfiles = Node(SelectFiles(templates), "selectfiles")
selectfiles.inputs.base_directory = DATA_DIR

# Gunzip - comes in handy when piping data to interfaces that are incompatible with the gzipped format
gunzip_struct = Node(Gunzip(), name="gunzip_struct")

# Reorient images to match approximate orientation of the standard template images (MNI152)
reorient_func = Node(fsl.Reorient2Std(output_type='NIFTI_GZ'),
                     name='reorient_func')
reorient_struct = Node(fsl.Reorient2Std(output_type='NIFTI_GZ'),
                       name='reorient_struct')

# Convert functional images to float representation (FLOAT32)
img2float = Node(fsl.ImageMaths(out_data_type='float',
                                op_string='',
                                suffix='_dtype'),
Code Example #21
def run(base_dir):
    template = '/home/brainlab/Desktop/Rudas/Data/Parcellation/TPM.nii'
    matlab_cmd = '/home/brainlab/Desktop/Rudas/Tools/spm12_r7487/spm12/run_spm12.sh /home/brainlab/Desktop/Rudas/Tools/MCR/v713/ script'
    spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True)

    print('SPM version: ' + str(spm.SPMCommand().version))

    structural_dir = '/home/brainlab/Desktop/Rudas/Data/Propofol/Structurals/'
    experiment_dir = opj(base_dir, 'output/')
    output_dir = 'datasink'
    working_dir = 'workingdir'
    '''

    subject_list = ['2014_05_02_02CB',
                    '2014_05_16_16RA',
                    '2014_05_30_30AQ',
                    '2014_07_04_04HD']
    '''
    subject_list = [
        '2014_05_02_02CB', '2014_05_16_16RA', '2014_05_30_30AQ',
        '2014_07_04_04HD', '2014_07_04_04SG', '2014_08_13_13CA',
        '2014_10_08_08BC', '2014_10_08_08VR', '2014_10_22_22CY',
        '2014_10_22_22TK', '2014_11_17_17EK', '2014_11_17_17NA',
        '2014_11_19_19SA', '2014_11_19_AK', '2014_11_25.25JK',
        '2014_11_27_27HF', '2014_12_10_10JR'
    ]

    # list of subject identifiers

    fwhm = 8  # smoothing width to apply (Gaussian kernel FWHM, in mm)
    TR = 2  # Repetition time
    init_volume = 0  # first volume index to use in the pipeline
    iso_size = 2  # isotropic resampling of functional images to this voxel size (in mm)

    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=init_volume,
                              t_size=-1,
                              output_type='NIFTI'),
                   name="extract")

    # MCFLIRT - motion correction
    mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True,
                           output_type='NIFTI'),
                   name="motion_correction")

    # SliceTimer - correct for slice wise acquisition
    slicetimer = Node(SliceTimer(index_dir=False,
                                 interleaved=True,
                                 output_type='NIFTI',
                                 time_repetition=TR),
                      name="slice_timing_correction")

    # Smooth - image smoothing
    smooth = Node(spm.Smooth(fwhm=fwhm), name="smooth")

    n4bias = Node(N4Bias(out_file='t1_n4bias.nii.gz'), name='n4bias')

    descomposition = Node(Descomposition(n_components=20,
                                         low_pass=0.1,
                                         high_pass=0.01,
                                         tr=TR),
                          name='descomposition')

    # Artifact Detection - determines outliers in functional images
    art = Node(ArtifactDetect(norm_threshold=2,
                              zintensity_threshold=3,
                              mask_type='spm_global',
                              parameter_source='FSL',
                              use_differences=[True, False],
                              plot_type='svg'),
               name="artifact_detection")

    extract_confounds_ws_csf = Node(
        ExtractConfounds(out_file='ev_without_gs.csv'),
        name='extract_confounds_ws_csf')

    extract_confounds_gs = Node(ExtractConfounds(out_file='ev_with_gs.csv',
                                                 delimiter=','),
                                name='extract_confounds_global_signal')

    signal_extraction = Node(SignalExtraction(
        time_series_out_file='time_series.csv',
        correlation_matrix_out_file='correlation_matrix.png',
        atlas_identifier='cort-maxprob-thr25-2mm',
        tr=TR,
        plot=True),
                             name='signal_extraction')

    art_remotion = Node(ArtifacRemotion(out_file='fmri_art_removed.nii'),
                        name='artifact_remotion')

    # BET - skull-strip anatomical and functional images
    bet_t1 = Node(BET(frac=0.5, robust=True, mask=True,
                      output_type='NIFTI_GZ'),
                  name="bet_t1")

    # FAST - Image Segmentation
    segmentation = Node(FAST(output_type='NIFTI'), name="segmentation")

    # Normalize - normalizes functional and structural images to the MNI template
    normalize_fmri = Node(Normalize12(jobtype='estwrite',
                                      tpm=template,
                                      write_voxel_sizes=[2, 2, 2],
                                      write_bounding_box=[[-90, -126, -72],
                                                          [90, 90, 108]]),
                          name="normalize_fmri")

    gunzip = Node(Gunzip(), name="gunzip")

    normalize_t1 = Node(Normalize12(
        jobtype='estwrite',
        tpm=template,
        write_voxel_sizes=[iso_size, iso_size, iso_size],
        write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                        name="normalize_t1")

    normalize_masks = Node(Normalize12(
        jobtype='estwrite',
        tpm=template,
        write_voxel_sizes=[iso_size, iso_size, iso_size],
        write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                           name="normalize_masks")

    # Threshold - Threshold WM probability image
    threshold = Node(Threshold(thresh=0.5, args='-bin',
                               output_type='NIFTI_GZ'),
                     name="wm_mask_threshold")

    # FLIRT - pre-alignment of functional images to anatomical images
    coreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'),
                     name="linear_warp_estimation")

    # FLIRT - coregistration of functional images to anatomical images with BBR
    coreg_bbr = Node(FLIRT(dof=6,
                           cost='bbr',
                           schedule=opj(os.getenv('FSLDIR'),
                                        'etc/flirtsch/bbr.sch'),
                           output_type='NIFTI_GZ'),
                     name="nonlinear_warp_estimation")

    # Apply coregistration warp to functional images
    applywarp = Node(FLIRT(interp='spline',
                           apply_isoxfm=iso_size,
                           output_type='NIFTI'),
                     name="registration_fmri")

    # Apply coregistration warp to mean file
    applywarp_mean = Node(FLIRT(interp='spline',
                                apply_isoxfm=iso_size,
                                output_type='NIFTI_GZ'),
                          name="registration_mean_fmri")

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    anat_file = opj(structural_dir, '{subject_id}', 't1.nii')
    func_file = opj('{subject_id}', 'fmri.nii')

    templates = {'anat': anat_file, 'func': func_file}

    selectfiles = Node(SelectFiles(templates, base_directory=base_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    # Create a coregistration workflow
    coregwf = Workflow(name='coreg_fmri_to_t1')
    coregwf.base_dir = opj(experiment_dir, working_dir)

    # Create a preprocessing workflow
    preproc = Workflow(name='preproc')
    preproc.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the coregistration workflow

    coregwf.connect([
        (bet_t1, n4bias, [('out_file', 'in_file')]),
        (n4bias, segmentation, [('out_file', 'in_files')]),
        (segmentation, threshold, [(('partial_volume_files', get_latest),
                                    'in_file')]),
        (n4bias, coreg_pre, [('out_file', 'reference')]),
        (threshold, coreg_bbr, [('out_file', 'wm_seg')]),
        (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),
        (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),
        (n4bias, applywarp, [('out_file', 'reference')]),
        (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')]),
        (n4bias, applywarp_mean, [('out_file', 'reference')]),
    ])

    ## Use the following DataSink output substitutions
    substitutions = [('_subject_id_', 'sub-')]
    #                 ('_fwhm_', 'fwhm-'),
    #                 ('_roi', ''),
    #                 ('_mcf', ''),
    #                 ('_st', ''),
    #                 ('_flirt', ''),
    #                 ('.nii_mean_reg', '_mean'),
    #                 ('.nii.par', '.par'),
    #                 ]
    #subjFolders = [('fwhm-%s/' % f, 'fwhm-%s_' % f) for f in fwhm]

    #substitutions.extend(subjFolders)
    datasink.inputs.substitutions = substitutions

    # Connect all components of the preprocessing workflow
    preproc.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id')]),
        (selectfiles, extract, [('func', 'in_file')]),
        (extract, mcflirt, [('roi_file', 'in_file')]),
        (mcflirt, slicetimer, [('out_file', 'in_file')]),
        (selectfiles, coregwf, [('anat', 'bet_t1.in_file'),
                                ('anat', 'nonlinear_warp_estimation.reference')
                                ]),
        (mcflirt, coregwf, [('mean_img', 'linear_warp_estimation.in_file'),
                            ('mean_img', 'nonlinear_warp_estimation.in_file'),
                            ('mean_img', 'registration_mean_fmri.in_file')]),
        (slicetimer, coregwf, [('slice_time_corrected_file',
                                'registration_fmri.in_file')]),
        (coregwf, art, [('registration_fmri.out_file', 'realigned_files')]),
        (mcflirt, art, [('par_file', 'realignment_parameters')]),
        (art, art_remotion, [('outlier_files', 'outlier_files')]),
        (coregwf, art_remotion, [('registration_fmri.out_file', 'in_file')]),
        (coregwf, gunzip, [('n4bias.out_file', 'in_file')]),
        (selectfiles, normalize_fmri, [('anat', 'image_to_align')]),
        (art_remotion, normalize_fmri, [('out_file', 'apply_to_files')]),
        (selectfiles, normalize_t1, [('anat', 'image_to_align')]),
        (gunzip, normalize_t1, [('out_file', 'apply_to_files')]),
        (selectfiles, normalize_masks, [('anat', 'image_to_align')]),
        (coregwf, normalize_masks, [(('segmentation.partial_volume_files',
                                      get_wm_csf), 'apply_to_files')]),
        (normalize_fmri, smooth, [('normalized_files', 'in_files')]),
        (smooth, extract_confounds_ws_csf, [('smoothed_files', 'in_file')]),
        (normalize_masks, extract_confounds_ws_csf, [('normalized_files',
                                                      'list_mask')]),
        (mcflirt, extract_confounds_ws_csf, [('par_file', 'file_concat')]),

        #(smooth, extract_confounds_gs, [('smoothed_files', 'in_file')]),
        #(normalize_t1, extract_confounds_gs, [(('normalized_files',change_to_list), 'list_mask')]),
        #(extract_confounds_ws_csf, extract_confounds_gs, [('out_file', 'file_concat')]),
        (smooth, signal_extraction, [('smoothed_files', 'in_file')]),
        #(extract_confounds_gs, signal_extraction, [('out_file', 'confounds_file')]),
        (extract_confounds_ws_csf, signal_extraction, [('out_file',
                                                        'confounds_file')]),

        #(smooth, descomposition, [('smoothed_files', 'in_file')]),
        #(extract_confounds_ws_csf, descomposition, [('out_file', 'confounds_file')]),

        #(extract_confounds_gs, datasink, [('out_file', 'preprocessing.@confounds_with_gs')]),
        (extract_confounds_ws_csf, datasink,
         [('out_file', 'preprocessing.@confounds_without_gs')]),
        (smooth, datasink, [('smoothed_files', 'preprocessing.@smoothed')]),
        (normalize_fmri, datasink, [('normalized_files',
                                     'preprocessing.@fmri_normalized')]),
        (normalize_t1, datasink, [('normalized_files',
                                   'preprocessing.@t1_normalized')]),
        (normalize_masks, datasink, [('normalized_files',
                                      'preprocessing.@masks_normalized')]),
        (signal_extraction, datasink, [('time_series_out_file',
                                        'preprocessing.@time_serie')]),
        (signal_extraction, datasink, [('correlation_matrix_out_file',
                                        'preprocessing.@correlation_matrix')]),
        (signal_extraction, datasink,
         [('fmri_cleaned_out_file', 'preprocessing.@fmri_cleaned_out_file')]),
        #(descomposition, datasink, [('out_file', 'preprocessing.@descomposition')]),
        #(descomposition, datasink, [('plot_files', 'preprocessing.@descomposition_plot_files')])
    ])

    preproc.write_graph(graph2use='colored', format='png', simple_form=True)
    preproc.run()
Code Example #22
aal_corrmat = Node(Function(function=make_aal_corrmat,
                            input_names=['smoothed_files'],
                            output_names=[
                                'file_global', 'file_global_trans',
                                'file_noglobal', 'file_noglobal_trans',
                                'file_noclean'
                            ]),
                   name='aal_corrmat')

####Nipype script begins below####

#Set up iteration over subjects
infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = ('subject_id', subject_list)

selectfiles = Node(SelectFiles(template), name='selectfiles')
selectfiles.inputs.base_directory = rawdir
selectfiles.inputs.sort_filelist = True  # the SelectFiles input is 'sort_filelist', not 'sort_files'
#Outputs: anat, epi, flair, mask, wm_noise, csf_noise, mni_template

####EPI preprocessing####

#Convert EPI dicoms to nii (with embedded metadata)
epi_stack = Node(dcmstack.DcmStack(), name='epistack')
epi_stack.inputs.embed_meta = True
epi_stack.inputs.out_format = 'epi'
epi_stack.inputs.out_ext = '.nii'
#Outputs: out_file

#Despiking using afni (position based on Jo et al. (2013)).
despike = Node(afni.Despike(), name='despike')
Code Example #23
def preprocessing(*argu):

    argu = argu[0]
    json_file = argu[1]

    with open(json_file, 'r') as jsonfile:
        info = json.load(jsonfile, object_pairs_hook=OrderedDict)

    subject_list = info["subject_list"]
    experiment_dir = info["experiment_dir"]
    output_dir = 'datasink'
    working_dir = 'workingdir'

    task_list = info["task_list"]

    fwhm = [*map(int, info["fwhm"])]
    TR = float(info["TR"])
    iso_size = 4
    slice_list = [*map(int, info["slice order"])]

    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=int(info["dummy scans"]),
                              t_size=-1,
                              output_type='NIFTI'),
                   name="extract")

    slicetime = Node(SliceTiming(num_slices=len(slice_list),
                                 ref_slice=int(median(slice_list)),
                                 slice_order=slice_list,
                                 time_repetition=TR,
                                 time_acquisition=TR - (TR / len(slice_list))),
                     name="slicetime")

    mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True,
                           output_type='NIFTI'),
                   name="mcflirt")

    # Smooth - image smoothing
    smooth = Node(Smooth(), name="smooth")
    smooth.iterables = ("fwhm", fwhm)

    # Artifact Detection - determines outliers in functional images
    art = Node(ArtifactDetect(norm_threshold=2,
                              zintensity_threshold=3,
                              mask_type='spm_global',
                              parameter_source='FSL',
                              use_differences=[True, False],
                              plot_type='svg'),
               name="art")

    # BET - Skullstrip anatomical Image
    bet_anat = Node(BET(frac=0.5, robust=True, output_type='NIFTI_GZ'),
                    name="bet_anat")

    # FAST - Image Segmentation
    segmentation = Node(FAST(output_type='NIFTI_GZ'),
                        name="segmentation",
                        mem_gb=4)

    # Select WM segmentation file from segmentation output
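    # (FAST returns partial volume maps ordered CSF, GM, WM, so files[-1] is the WM map)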
    def get_wm(files):
        return files[-1]

    # Threshold - Threshold WM probability image
    threshold = Node(Threshold(thresh=0.5, args='-bin',
                               output_type='NIFTI_GZ'),
                     name="threshold")

    # FLIRT - pre-alignment of functional images to anatomical images
    coreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'), name="coreg_pre")

    # FLIRT - coregistration of functional images to anatomical images with BBR
    coreg_bbr = Node(FLIRT(dof=6,
                           cost='bbr',
                           schedule=opj(os.getenv('FSLDIR'),
                                        'etc/flirtsch/bbr.sch'),
                           output_type='NIFTI_GZ'),
                     name="coreg_bbr")

    # Apply coregistration warp to functional images
    applywarp = Node(FLIRT(interp='spline',
                           apply_isoxfm=iso_size,
                           output_type='NIFTI'),
                     name="applywarp")

    # Apply coregistration warp to mean file
    applywarp_mean = Node(FLIRT(interp='spline',
                                apply_isoxfm=iso_size,
                                output_type='NIFTI_GZ'),
                          name="applywarp_mean")

    # Create a coregistration workflow
    coregwf = Workflow(name='coregwf')
    coregwf.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the coregistration workflow
    coregwf.connect([
        (bet_anat, segmentation, [('out_file', 'in_files')]),
        (segmentation, threshold, [(('partial_volume_files', get_wm),
                                    'in_file')]),
        (bet_anat, coreg_pre, [('out_file', 'reference')]),
        (threshold, coreg_bbr, [('out_file', 'wm_seg')]),
        (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),
        (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),
        (bet_anat, applywarp, [('out_file', 'reference')]),
        (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')]),
        (bet_anat, applywarp_mean, [('out_file', 'reference')]),
    ])

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'task_name']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list),
                            ('task_name', task_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    anat_file = opj('sub-{subject_id}', 'anat', 'sub-{subject_id}_T1w.nii.gz')
    func_file = opj('sub-{subject_id}', 'func',
                    'sub-{subject_id}_task-{task_name}_bold.nii.gz')

    templates = {'anat': anat_file, 'func': func_file}
    selectfiles = Node(SelectFiles(templates,
                                   base_directory=info["base directory"]),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    ## Use the following DataSink output substitutions
    substitutions = [
        ('_subject_id_', 'sub-'),
        ('_task_name_', '/task-'),
        ('_fwhm_', 'fwhm-'),
        ('_roi', ''),
        ('_mcf', ''),
        ('_st', ''),
        ('_flirt', ''),
        ('.nii_mean_reg', '_mean'),
        ('.nii.par', '.par'),
    ]
    subjFolders = [('fwhm-%s/' % f, 'fwhm-%s_' % f) for f in fwhm]
    substitutions.extend(subjFolders)
    datasink.inputs.substitutions = substitutions

    # Create a preprocessing workflow
    preproc = Workflow(name='preproc')
    preproc.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the preprocessing workflow
    preproc.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id'),
                                   ('task_name', 'task_name')]),
        (selectfiles, extract, [('func', 'in_file')]),
        (extract, slicetime, [('roi_file', 'in_files')]),
        (slicetime, mcflirt, [('timecorrected_files', 'in_file')]),
        (selectfiles, coregwf, [('anat', 'bet_anat.in_file'),
                                ('anat', 'coreg_bbr.reference')]),
        (mcflirt, coregwf, [('mean_img', 'coreg_pre.in_file'),
                            ('mean_img', 'coreg_bbr.in_file'),
                            ('mean_img', 'applywarp_mean.in_file')]),
        (mcflirt, coregwf, [('out_file', 'applywarp.in_file')]),
        (coregwf, smooth, [('applywarp.out_file', 'in_files')]),
        (mcflirt, datasink, [('par_file', 'preproc.@par')]),
        (smooth, datasink, [('smoothed_files', 'preproc.@smooth')]),
        (coregwf, datasink, [('applywarp_mean.out_file', 'preproc.@mean')]),
        (coregwf, art, [('applywarp.out_file', 'realigned_files')]),
        (mcflirt, art, [('par_file', 'realignment_parameters')]),
        (coregwf, datasink, [('coreg_bbr.out_matrix_file',
                              'preproc.@mat_file'),
                             ('bet_anat.out_file', 'preproc.@brain')]),
        (art, datasink, [('outlier_files', 'preproc.@outlier_files'),
                         ('plot_files', 'preproc.@plot_files')]),
    ])
    # Create preproc output graph
    preproc.write_graph(graph2use='colored', format='png', simple_form=True)

    # Visualize the graph
    img1 = imread(opj(preproc.base_dir, 'preproc', 'graph.png'))
    plt.imshow(img1)
    plt.xticks([]), plt.yticks([])
    plt.show()

    # Visualize the detailed graph
    preproc.write_graph(graph2use='flat', format='png', simple_form=True)
    img2 = imread(opj(preproc.base_dir, 'preproc', 'graph_detailed.png'))
    plt.imshow(img2)
    plt.xticks([]), plt.yticks([])
    plt.show()

    print("Workflow all set. Check the workflow image :)")

    response = input('Run the workflow? Enter yes or no: ')

    if response == 'yes':
        preproc.run('MultiProc', plugin_args={'n_procs': 10})
    elif response == 'no':
        print('Exiting the program since you entered no')
    else:
        raise RuntimeError('Should enter either yes or no')
Code Example #24

    noclean_corr_trans_fn = os.path.join(os.getcwd(), 'noclean_correlation_aal_trans.csv')
    np.savetxt(noclean_corr_trans_fn, np.arctanh(noclean_corrmat), fmt='%.5f', delimiter=',')

    return(global_corr_fn, global_lasso_fn, global_corr_trans_fn, noglobal_corr_fn, noglobal_lasso_fn, noglobal_corr_trans_fn, noclean_corr_fn, noclean_lasso_fn, noclean_corr_trans_fn)

aal_corrmat = Node(Function(function=make_aal_corrmat,
                            input_names=['smoothed_files'],
                            output_names=[
                                'global_corr_fn', 'global_lasso_fn',
                                'global_corr_trans_fn', 'noglobal_corr_fn',
                                'noglobal_lasso_fn', 'noglobal_corr_trans_fn',
                                'noclean_corr_fn', 'noclean_lasso_fn',
                                'noclean_corr_trans_fn'
                            ]),
                   name='aal_corrmat')

####Nipype script begins below####


#Set up iteration over subjects
infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = ('subject_id', subject_list)

#Select files
selectfiles = Node(SelectFiles(template), name='selectfiles')  #Note: template is the template defined in the parameters section
selectfiles.inputs.base_directory = raw_dir
selectfiles.inputs.sort_filelist = True  # the SelectFiles input is 'sort_filelist', not 'sort_files'
#Outputs: anat, epi, flair, mask, mni_template

####EPI preprocessing####

#Convert EPI dicoms to nii (with embedded metadata)
epi_stack = Node(dcmstack.DcmStack(), name='epistack')
epi_stack.inputs.embed_meta = True
epi_stack.inputs.out_format = 'epi'
epi_stack.inputs.out_ext = '.nii'
#Outputs: out_file

#Despiking using afni (position based on Jo et al. (2013)).
despike = Node(afni.Despike(), name='despike')
Code Example #25
dofx = 12  # Degrees of freedom for the coregistration

for kx in range(2):
    subject_list = [subject_list0[kx]]
    '''-------------------------------------------------------------------------'''
    #Additional files
    CNN_H5 = pif.downloadH5(opj(experiment_dir, working_dir))
    Aropy = pif.downloadAROMA(opj(experiment_dir, working_dir))
    '''-------------------------Subject selection nodes------------------------'''
    '''#########################################################################'''
    infosource = Node(IdentityInterface(fields=['asubject_id', 'session_num']),
                      name="infosource")
    infosource.iterables = [('asubject_id', subject_list),
                            ('session_num', session)]

    selectfiles = Node(SelectFiles(templates, base_directory=Subjects_dir),
                       name="selectfiles")
    '''-------------------------Coregistration nodes---------------------------'''
    '''#########################################################################'''
    bet_anat = Node(BET(frac=0.5,
                        robust=True,
                        mask=True,
                        output_type='NIFTI_GZ'),
                    name="bet_anat")

    segmentation = Node(FAST(output_type='NIFTI_GZ'), name="segmentation")

    threshold = Node(Threshold(thresh=0.5, args='-bin',
                               output_type='NIFTI_GZ'),
                     name="threshold")
Code Example #26
def main():

    #######################
    # Commandline Arguments
    #######################
    # list of subject identifiers
    task_name = "Training" if training else "Test"
    print(project_folder, subject_list, task_name, nb_prc)

    #############################################################
    # Extracting fMRI Params (Only works with Kamitani's Dataset)
    #############################################################
    TR = 3.0
    voxel_size = (3, 3, 3)
    number_of_slices = 50
    json_file1 = opj(project_folder,
                     "dataset/ds001246-download/task-imagery_bold.json")
    json_file2 = opj(project_folder,
                     "dataset/ds001246-download/task-perception_bold.json")

    with open(json_file1) as f:
        slice_timing1 = json.load(f)['SliceTiming']

    with open(json_file2) as f:
        slice_timing2 = json.load(f)['SliceTiming']

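    # argsort of the slice timings gives slice indices in acquisition order;
    # the +1 below converts them to SPM's 1-based slice numbering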
    sorted1 = np.argsort(slice_timing1)
    sorted2 = np.argsort(slice_timing2)
    print(np.all(sorted1 == sorted2))

    slice_order = list(sorted1 + 1)
    print("Slice order:", slice_order)

    ##########################
    # Creating essential nodes
    ##########################
    # Model Spec
    modelspec_node = Node(SpecifySPMModel(concatenate_runs=True,
                                          input_units='secs',
                                          output_units='secs',
                                          time_repetition=TR,
                                          high_pass_filter_cutoff=128),
                          name='modelspec')

    # Level1Design - Generates a SPM design matrix
    level1design_node = Node(Level1Design(bases={'hrf': {
        'derivs': [0, 0]
    }},
                                          timing_units='secs',
                                          interscan_interval=TR,
                                          model_serial_correlations='AR(1)',
                                          mask_threshold='-Inf'),
                             name="level1design")

    # EstimateModel - estimate the parameters of the model (GLM)
    level1estimate_node = Node(
        EstimateModel(estimation_method={'Classical': 1}),
        name="level1estimate")

    # Infosource - a function free node to iterate over the list of subject names
    infosrc_subjects = Node(IdentityInterface(fields=['subject_id']),
                            name="infosrc_subjects")
    infosrc_subjects.iterables = [('subject_id', subject_list)]

    # SelectFiles - selects files based on template matching
    tsv_file = opj('dataset', 'ds001246-download', '{subject_id}',
                   'ses-p*' + task_name + '*', 'func',
                   '{subject_id}_ses-p*' + task_name + '*_task-*_events.tsv')
    reg_file = opj('preprocess', '_subject_id_{subject_id}',
                   '_session_id_ses-p*' + task_name + '*', 'Realign',
                   'rp_a{subject_id}_ses-p*' + task_name + '*_task-*_bold.txt')
    func_file = opj(
        'preprocess', '_subject_id_{subject_id}',
        '_session_id_ses-p*' + task_name + '*', 'Coregister',
        'rara{subject_id}_ses-p*' + task_name + '*_task-*_bold.nii')
    mask_file = opj('datasink', 'preprocessed_masks', '{subject_id}',
                    '{subject_id}_full_mask.nii')

    templates = {
        'tsv': tsv_file,
        'reg': reg_file,
        'func': func_file,
        'mask': mask_file
    }
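    # note: SelectFiles fills in the {subject_id} fields first, then globs any
    # remaining 'ses-p*' wildcards on disk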

    selectfiles = Node(SelectFiles(templates, base_directory=project_folder),
                       name="selectfiles")

    # Subject Info
    subject_info_node = Node(Function(
        input_names=['tsv_files'],
        output_names=['subject_info'],
        function=read_tsv_train if training else read_tsv_test),
                             name='subject_info')

    # Datasink - creates output folder for important outputs
    datasink_node = Node(DataSink(base_directory=project_folder,
                                  container='datasink'),
                         name="datasink")

    substitutions = [('_subject_id_', '')]
    datasink_node.inputs.substitutions = substitutions

    #####################
    # Create the workflow
    #####################
    wf_name = 'glm_train_nomod' if training else 'glm_test'
    glm = Workflow(name=wf_name)
    glm.base_dir = project_folder

    # connect infosource to selectfile
    glm.connect([(infosrc_subjects, selectfiles, [('subject_id', 'subject_id')
                                                  ])])
    glm.connect([(selectfiles, subject_info_node, [('tsv', 'tsv_files')])])

    # connect infos to modelspec
    glm.connect([(subject_info_node, modelspec_node, [('subject_info',
                                                       'subject_info')])])
    glm.connect([(selectfiles, modelspec_node, [('reg',
                                                 'realignment_parameters')])])
    glm.connect([(selectfiles, modelspec_node, [('func', 'functional_runs')])])

    # connect modelspec to level1design
    glm.connect([(modelspec_node, level1design_node, [('session_info',
                                                       'session_info')])])
    glm.connect([(selectfiles, level1design_node, [('mask', 'mask_image')])])

    # connect design to estimate
    glm.connect([(level1design_node, level1estimate_node, [('spm_mat_file',
                                                            'spm_mat_file')])])

    # keeping estimate files params
    glm.connect([(level1estimate_node, datasink_node,
                  [('mask_image', f'{wf_name}.@mask_img')])])
    glm.connect([(level1estimate_node, datasink_node,
                  [('beta_images', f'{wf_name}.@beta_imgs')])])
    glm.connect([(level1estimate_node, datasink_node,
                  [('residual_image', f'{wf_name}.@res_img')])])
    glm.connect([(level1estimate_node, datasink_node,
                  [('RPVimage', f'{wf_name}.@rpv_img')])])
    glm.connect([(level1estimate_node, datasink_node,
                  [('spm_mat_file', f'{wf_name}.@spm_mat_file')])])

    glm.write_graph(graph2use='flat', format='png', simple_form=True)
    #     from IPython.display import Image
    #     Image(filename=opj(glm.base_dir, {wf_name}, 'graph_detailed.png'))

    ##################
    # Run the workflow
    ##################
    glm.run('MultiProc', plugin_args={'n_procs': nb_prc})
Code Example #27
def create_fatsegnet_workflow(
    subject_list,
    bids_dir,
    work_dir,
    out_dir,
    bids_templates,
    n4=False
):
    # create initial workflow
    wf = Workflow(name='workflow_fatsegnet', base_dir=work_dir)

    # use infosource to iterate workflow across subject list
    n_infosource = Node(
        interface=IdentityInterface(
            fields=['subject_id']
        ),
        name="subject_source"
        # input: 'subject_id'
        # output: 'subject_id'
    )
    # runs the node with subject_id = each element in subject_list
    n_infosource.iterables = ('subject_id', subject_list)

    # select matching files from bids_dir
    n_selectfiles = Node(
        interface=SelectFiles(
            templates=bids_templates,
            base_directory=bids_dir
        ),
        name='get_subject_data'
        # output: ['fat_composed', 'water_composed']
    )
    wf.connect([
        (n_infosource, n_selectfiles, [('subject_id', 'subject_id_p')])
    ])

    if n4:
        mn_n4_fat = Node(
            interface=N4BiasFieldCorrection(),
            iterfield=['input_image'],
            name='N4_fat',
            # output: 'output_image'
        )
        wf.connect([
            (n_selectfiles, mn_n4_fat, [('fat_composed', 'input_image')]),
        ])

        # https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.ants.html
        mn_n4_water = Node(
            interface=N4BiasFieldCorrection(),
            iterfield=['input_image'],
            name='N4_water',
            # output: 'output_image'
        )
        wf.connect([
            (n_selectfiles, mn_n4_water, [('water_composed', 'input_image')]),
        ])

    # scale data
    # or better: https://intensity-normalization.readthedocs.io/en/latest/utilities.html
    def scale(min_and_max):
        # min/max come from ImageStats but are not used by the current
        # scaling, which simply multiplies intensities by a fixed factor
        min_value = min_and_max[0][0]
        max_value = min_and_max[0][1]
        fsl_cmd = ""

        # multiply intensities by a fixed factor of 4
        fsl_cmd += "-mul %.10f " % (4)

        return fsl_cmd

    mn_fat_stats = MapNode(
        # -R : <min intensity> <max intensity>
        interface=ImageStats(op_string='-R'),
        iterfield=['in_file'],
        name='get_stats_fat',
        # output: 'out_stat'
    )
    mn_water_stats = MapNode(
        # -R : <min intensity> <max intensity>
        interface=ImageStats(op_string='-R'),
        iterfield=['in_file'],
        name='get_stats_water',
        # output: 'out_stat'
    )

    if n4:
        wf.connect([
            (mn_n4_fat, mn_fat_stats, [('output_image', 'in_file')]),
            (mn_n4_water, mn_water_stats, [('output_image', 'in_file')])
        ])
    else:
        wf.connect([
            (n_selectfiles, mn_fat_stats, [('fat_composed', 'in_file')]),
            (n_selectfiles, mn_water_stats, [('water_composed', 'in_file')])
        ])
    
    mn_fat_scaled = Node(
        interface=ImageMaths(suffix="_scaled"),
        name='fat_scaled',
        iterfield=['in_file']
        # inputs: 'in_file', 'op_string'
        # output: 'out_file'
    )
    mn_water_scaled = Node(
        interface=ImageMaths(suffix="_scaled"),
        name='water_scaled',
        iterfield=['in_file']
        # inputs: 'in_file', 'op_string'
        # output: 'out_file'
    )
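
    # the (('out_stat', scale), 'op_string') connections below apply scale()
    # to the ImageStats output before it is passed to the op_string input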
    if n4:
        wf.connect([
            (mn_n4_fat, mn_fat_scaled, [('output_image', 'in_file')]),
            (mn_n4_water, mn_water_scaled, [('output_image', 'in_file')]),
            (mn_fat_stats, mn_fat_scaled, [(('out_stat', scale), 'op_string')]),
            (mn_water_stats, mn_water_scaled, [(('out_stat', scale), 'op_string')])
        ])
    else:
        wf.connect([
            (n_selectfiles, mn_fat_scaled, [('fat_composed', 'in_file')]),
            (n_selectfiles, mn_water_scaled, [('water_composed', 'in_file')]),
            (mn_fat_stats, mn_fat_scaled, [(('out_stat', scale), 'op_string')]),
            (mn_water_stats, mn_water_scaled, [(('out_stat', scale), 'op_string')])
        ])

    # FatSegNet could run here on a cluster, but under MultiProc it runs into GPU memory issues
    # mn_fatsegnet = MapNode(
    #         interface=fatsegnet.FatSegNetInterface(
    #             out_suffix='/afm02/Q2/Q2653/data/2021-01-18-fatsegnet-output/out_5'
    #         ),
    #         iterfield=['water_file', 'fat_file'],
    #         name='fatsegnet'
    #         # output: 'out_file'
    #     )

    # wf.connect([
    #         (mn_fat_scaled, mn_fatsegnet, [('out_file', 'fat_file')]),
    #         (mn_water_scaled, mn_fatsegnet, [('out_file', 'water_file')]),
    #     ])


    # datasink
    n_datasink_fat = Node(
        interface=DataSink(base_directory=bids_dir, 
            container=out_dir,
            parameterization=True, 
            substitutions=[('_subject_id_', '')],
            regexp_substitutions=[(r'sub-\w{5}_t1.*', 'FatImaging_F.nii.gz')]),
        name='datasink_fat'
    )    
    n_datasink_water = Node(
        interface=DataSink(base_directory=bids_dir, 
            container=out_dir,
            parameterization=True, 
            substitutions=[('_subject_id_', '')],
            regexp_substitutions=[(r'sub-\w{5}_t1.*', 'FatImaging_W.nii.gz')]),
        name='datasink_water'
    )
    # https://pythex.org/: match 'sub-' followed by five word characters and '_t1', then grab the rest

    wf.connect([
            (mn_fat_scaled, n_datasink_fat, [('out_file', 'preprocessed_mul4.@fat')]),
            (mn_water_scaled, n_datasink_water, [('out_file', 'preprocessed_mul4.@water')]),
        ])
    # https://nipype.readthedocs.io/en/0.11.0/users/grabbing_and_sinking.html
    # https://miykael.github.io/nipype_tutorial/notebooks/example_1stlevel.html
    # The period (.) indicates that a subfolder should be created. 
    # But if we wanted to store it in the same folder, 
    # we would use the .@ syntax. The @ tells the DataSink interface to not create the subfolder. 

    return wf
Code Example #28
infosource_func = Node(
    IdentityInterface(fields=['subject_id', 'session_id', 'frequency_id']),
    name="infosource_func")
infosource_func.iterables = [('subject_id', subject_list),
                             ('session_id', session_list),
                             ('frequency_id', frequency_list)]

# ========================================================================================================
# In[4]:
# /home/in/aeed/poldrack_gabmling/ds000005/sub-01/anat/sub-01_T1w.nii.gz
# anatomical images
templates_anat = {
    'anat':
    '/media/amr/Amr_4TB/Work/stimulation/Data_CA3/{subject_id}/Anat_{subject_id}_bet.nii.gz'
}

selectfiles_anat = Node(SelectFiles(templates_anat,
                                    base_directory=experiment_dir),
                        name="selectfiles_anat")

# sub-01_task-mixedgamblestask_run-01_bold.nii.gz
# functional runs
templates_func = {
    'bold':
    '/media/amr/Amr_4TB/Work/stimulation/Data_CA3/{subject_id}/Stim_{subject_id}_??_{frequency_id}_{session_id}.nii.gz',
    'bold_mask':
    '/media/amr/Amr_4TB/Work/stimulation/Data_CA3/{subject_id}/EPI_{subject_id}_Mask.nii.gz'
}

selectfiles_func = Node(SelectFiles(templates_func,
                                    base_directory=experiment_dir),
                        name="selectfiles_func")
# ========================================================================================================
Code Example #29
    templates = {
        'realignment_parameters_file':
        "probands/{subject_id}/preprocessed/lsd_resting/{scan_id}/realign/rest_realigned.par",
        'wm_file':
        "probands/{subject_id}/preprocessed/anat/T1_brain_wmedge.nii.gz",
        'mean_epi_file':
        "probands/{subject_id}/preprocessed/lsd_resting/{scan_id}/coregister/rest_mean2fmap_unwarped.nii.gz",
        'mean_epi_uncorrected_file':
        "probands/{subject_id}/preprocessed/lsd_resting/{scan_id}/coregister/rest_mean2fmap.nii.gz",
        'mask_file':
        "probands/{subject_id}/preprocessed/lsd_resting/{scan_id}/denoise/mask/T1_brain_mask2epi.nii.gz",
        'reg_file':
        "probands/{subject_id}/preprocessed/lsd_resting/{scan_id}/coregister/transforms2anat/rest2anat.dat",
        'mincost_file':
        "probands/{subject_id}/preprocessed/lsd_resting/{scan_id}/coregister/rest2anat.dat.mincost"
    }
    selectfiles = Node(SelectFiles(templates, base_directory=data_dir),
                       name="selectfiles")

    def make_out(out_dir, subject_id, scan_id):
        f = out_dir + "%s_%s_report.pdf" % (subject_id, scan_id)
        return f

    make_outfile = Node(Function(
        input_names=['out_dir', 'subject_id', 'scan_id'],
        output_names=['output_file'],
        function=make_out),
                        name='make_outfile')
    make_outfile.inputs.out_dir = out_dir

    report = Node(Function(input_names=[
        'subject_id', 'tsnr_file', 'realignment_parameters_file',
Code Example #30
    # SelectFiles - to grab the data (alternative to DataGrabber)
    templates = {
        'bold':
        opj('/data/wellbeing_analysis/datasink/preproc', 'sub-{subject_id}',
            'task-{task_name}',
            'fwhm-{fwhm_id}_sasub-{subject_id}_task-{task_name}_bold.nii'),
        'anat':
        opj('/data/wellbeing_analysis/datasink/preproc', 'sub-{subject_id}',
            'task-{task_name}', 'sub-{subject_id}_T1w_brain.nii.gz'),
        'transform':
        opj('/data/wellbeing_analysis/datasink/antsreg', 'sub-{subject_id}',
            'transformComposite.h5')
    }

    selectfiles = Node(SelectFiles(templates,
                                   base_directory=experiment_dir,
                                   sort_filelist=True),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    # Use the following DataSink output substitutions
    substitutions = [
        ('_fwhm_id_%s_subject_id_%s_task_name_empathy/_apply_norm_anat0' %
         (f, sub), 'sub-%s/anat/' % (sub)) for f in fwhm
        for sub in subject_list
    ]
    subjFolders = [
Code Example #31
def preproc_workflow(input_dir,
                     output_dir,
                     subject_list,
                     ses_list,
                     anat_file,
                     func_file,
                     scan_size=477,
                     bet_frac=0.37):
    """
    The preprocessing workflow used in the preparation of the psilocybin vs escitalopram rsFMRI scans.
    Workflows and notes are defined throughout. Inputs are designed to be general, and masks/a default MNI-space template are provided

    :param input_dir: The input file directory containing all scans in BIDS format
    :param output_dir: The output file directory
    :param subject_list: a list of subject numbers
    :param ses_list: a list of scan numbers (session numbers)
    :param anat_file: The format of the anatomical scan within the input directory
    :param func_file: The format of the functional scan within the input directory
    :param scan_size: The length of the scan in number of volumes; most 10-minute scans are around 400-500 depending
    upon scanner defaults and parameters - confirm by looking at your data
    :param bet_frac: brain extraction fractional intensity threshold
    :return: the preprocessing workflow
    """
    preproc = Workflow(name='preproc')
    preproc.base_dir = output_dir

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'ses']),
                      name="infosource")

    infosource.iterables = [('subject_id', subject_list), ('ses', ses_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    templates = {
        'anat': anat_file,
        'func': func_file
    }  # define the template of each file input

    selectfiles = Node(SelectFiles(templates, base_directory=input_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=output_dir, container=output_dir),
                    name="datasink")

    preproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                                ('ses', 'ses')])])
    ''' 
    This is your functional processing workflow, used to trim scans, despike the signal, slice-time correct, 
    and motion correct your data 
    '''

    fproc = Workflow(name='fproc')  # the functional processing workflow

    # ExtractROI - skip dummy scans at the beginning of the recording by removing the first three
    trim = Node(ExtractROI(t_min=3, t_size=scan_size, output_type='NIFTI_GZ'),
                name="trim")

    # 3dDespike - despike
    despike = Node(Despike(outputtype='NIFTI_GZ', args='-NEW'), name="despike")
    fproc.connect([(trim, despike, [('roi_file', 'in_file')])])
    preproc.connect([(selectfiles, fproc, [('func', 'trim.in_file')])])

    # 3dTshift - slice time correction
    slicetime = Node(TShift(outputtype='NIFTI_GZ', tpattern='alt+z2'),
                     name="slicetime")
    fproc.connect([(despike, slicetime, [('out_file', 'in_file')])])

    # 3dVolreg - correct motion and output 1d matrix
    moco = Node(Volreg(outputtype='NIFTI_GZ',
                       interp='Fourier',
                       zpad=4,
                       args='-twopass'),
                name="moco")
    fproc.connect([(slicetime, moco, [('out_file', 'in_file')])])

    moco_bpfdt = Node(
        MOCObpfdt(), name='moco_bpfdt'
    )  # use the matlab function to correct the motion regressor
    fproc.connect([(moco, moco_bpfdt, [('oned_file', 'in_file')])])
    '''
    This is the co-registration workflow using FSL and ANTs
    '''

    coreg = Workflow(name='coreg')

    # BET - structural data brain extraction
    bet_anat = Node(BET(output_type='NIFTI_GZ', frac=bet_frac, robust=True),
                    name="bet_anat")

    # FSL segmentation process to get WM map
    seg = Node(FAST(bias_iters=6,
                    img_type=1,
                    output_biascorrected=True,
                    output_type='NIFTI_GZ'),
               name="seg")
    coreg.connect([(bet_anat, seg, [('out_file', 'in_files')])])

    # functional to structural registration
    mean = Node(MCFLIRT(mean_vol=True, output_type='NIFTI_GZ'), name="mean")

    # BBR using linear methods for initial transform fit
    func2struc = Node(FLIRT(cost='bbr', dof=6, output_type='NIFTI_GZ'),
                      name='func2struc')
    coreg.connect([(seg, func2struc, [('restored_image', 'reference')])])
    coreg.connect([(mean, func2struc, [('mean_img', 'in_file')])])
    coreg.connect([(seg, func2struc, [(('tissue_class_files', pickindex, 2),
                                       'wm_seg')])])
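    # the (output, function, arg) connection form applies pickindex(tissue_class_files, 2)
    # to select the WM partial-volume map before connecting it to wm_seg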

    # convert the FSL linear transform into a C3d format for AFNI
    f2s_c3d = Node(C3dAffineTool(itk_transform=True, fsl2ras=True),
                   name='f2s_c3d')
    coreg.connect([(func2struc, f2s_c3d, [('out_matrix_file', 'transform_file')
                                          ])])
    coreg.connect([(mean, f2s_c3d, [('mean_img', 'source_file')])])
    coreg.connect([(seg, f2s_c3d, [('restored_image', 'reference_file')])])

    # Functional to structural registration via ANTs non-linear registration
    reg = Node(Registration(
        fixed_image='default_images/MNI152_T1_2mm_brain.nii.gz',
        transforms=['Affine', 'SyN'],
        transform_parameters=[(0.1, ), (0.1, 3.0, 0.0)],
        number_of_iterations=[[1500, 1000, 1000], [100, 70, 50, 20]],
        dimension=3,
        write_composite_transform=True,
        collapse_output_transforms=True,
        metric=['MI'] + ['CC'],
        metric_weight=[1] * 2,
        radius_or_number_of_bins=[32] + [4],
        convergence_threshold=[1.e-8, 1.e-9],
        convergence_window_size=[20] + [10],
        smoothing_sigmas=[[2, 1, 0], [4, 2, 1, 0]],
        sigma_units=['vox'] * 2,
        shrink_factors=[[4, 2, 1], [6, 4, 2, 1]],
        use_histogram_matching=[False] + [True],
        use_estimate_learning_rate_once=[True, True],
        output_warped_image=True),
               name='reg')

    coreg.connect([(seg, reg, [('restored_image', 'moving_image')])
                   ])  # connect segmentation node to registration node

    merge1 = Node(niu.Merge(2),
                  name='merge1')  # merge the linear and nonlinear transforms
    coreg.connect([(f2s_c3d, merge1, [('itk_transform', 'in2')])])
    coreg.connect([(reg, merge1, [('composite_transform', 'in1')])])
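    # antsApplyTransforms applies a transform list in reverse order, so listing
    # [struct->MNI (in1), func->struct (in2)] applies the linear func->struct
    # transform first, then the nonlinear struct->MNI warp.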

    # warp the functional images into MNI space using the transforms from FLIRT and SYN
    warp = Node(ApplyTransforms(
        reference_image='default_images/MNI152_T1_2mm_brain.nii.gz',
        input_image_type=3),
                name='warp')
    coreg.connect([(moco, warp, [('out_file', 'input_image')])])
    coreg.connect([(merge1, warp, [('out', 'transforms')])])

    preproc.connect([(selectfiles, coreg, [('anat', 'bet_anat.in_file')])])
    preproc.connect([(fproc, coreg, [('moco.out_file', 'mean.in_file')])])
    '''
    Scrubbing workflow - find the motion outliers, bandpass filter, re-mean the data after bpf
    '''

    scrub = Workflow(name='scrub')

    # Generate the Scrubbing Regressor
    scrub_metrics = Node(MotionOutliers(dummy=4,
                                        out_file='FD_outliers.1D',
                                        metric='fd',
                                        threshold=0.4),
                         name="scrub_metrics")

    # regress out timepoints
    scrub_frames = Node(Bandpass(highpass=0,
                                 lowpass=99999,
                                 outputtype='NIFTI_GZ'),
                        name='scrub_frames')
    scrub.connect([(scrub_metrics, scrub_frames, [('out_file',
                                                   'orthogonalize_file')])])
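    # With highpass=0 and lowpass=99999 the filter itself is a pass-through;
    # 3dBandpass is used here purely to regress out (orthogonalize against)
    # the FD outlier regressor generated above.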
    preproc.connect([(coreg, scrub, [('warp.output_image',
                                      'scrub_frames.in_file')])])
    preproc.connect([(selectfiles, scrub, [('func', 'scrub_metrics.in_file')])
                     ])

    # mean image for remeaning after bandpass
    premean = Node(TStat(args='-mean', outputtype='NIFTI_GZ'), name='premean')
    # remean the image
    remean2 = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean2')
    scrub.connect([(scrub_frames, remean2, [('out_file', 'in_file_a')])])
    scrub.connect([(premean, remean2, [('out_file', 'in_file_b')])])
    preproc.connect([(coreg, scrub, [('warp.output_image', 'premean.in_file')])
                     ])
    '''
    Regressors for final cleaning steps
    '''

    regressors = Workflow(name='regressors')

    # Using registered structural image to create the masks for both WM and CSF
    regbet = Node(BET(robust=True, frac=0.37, output_type='NIFTI_GZ'),
                  name='regbet')

    regseg = Node(FAST(img_type=1,
                       output_type='NIFTI_GZ',
                       no_pve=True,
                       no_bias=True,
                       segments=True),
                  name='regseg')
    regressors.connect([(regbet, regseg, [('out_file', 'in_files')])])
    preproc.connect([(coreg, regressors, [('reg.warped_image',
                                           'regbet.in_file')])])
    '''
    Create a cerebrospinal fluid (CSF) regressor 
    '''

    # subtract subcortical GM from the CSF mask
    subcortgm = Node(BinaryMaths(
        operation='sub',
        operand_file='default_images/subcortical_gm_mask_bin.nii.gz',
        output_type='NIFTI_GZ',
        args='-bin'),
                     name='subcortgm')
    regressors.connect([(regseg, subcortgm, [(('tissue_class_files', pickindex,
                                               0), 'in_file')])])

    # Fill the mask holes

    fillcsf = Node(MaskTool(fill_holes=True, outputtype='NIFTI_GZ'),
                   name='fillcsf')
    regressors.connect([(subcortgm, fillcsf, [('out_file', 'in_file')])])

    # Erode the mask

    erocsf = Node(MaskTool(outputtype='NIFTI_GZ', dilate_inputs='-1'),
                  name='erocsf')
    regressors.connect([(fillcsf, erocsf, [('out_file', 'in_file')])])

    # Take mean csf signal from functional image
    meancsf = Node(ImageMeants(output_type='NIFTI_GZ'), name='meancsf')
    regressors.connect([(erocsf, meancsf, [('out_file', 'mask')])])
    preproc.connect([(coreg, regressors, [('warp.output_image',
                                           'meancsf.in_file')])])

    bpf_dt_csf = Node(CSFbpfdt(), name='bpf_dt_csf')
    regressors.connect([(meancsf, bpf_dt_csf, [('out_file', 'in_file')])])
    '''
    Create a local white matter regressor
    '''

    # subtract subcortical gm
    subcortgm2 = Node(BinaryMaths(
        operation='sub',
        operand_file='default_images/subcortical_gm_mask_bin.nii.gz',
        output_type='NIFTI_GZ',
        args='-bin'),
                      name='subcortgm2')
    regressors.connect([(regseg, subcortgm2, [(('tissue_class_files',
                                                pickindex, 2), 'in_file')])])

    # fill mask
    fillwm = Node(MaskTool(fill_holes=True, outputtype='NIFTI_GZ'),
                  name='fillwm')
    regressors.connect([(subcortgm2, fillwm, [('out_file', 'in_file')])])

    # erode the mask
    erowm = Node(MaskTool(outputtype='NIFTI_GZ', dilate_inputs='-1'),
                 name='erowm')
    regressors.connect([(fillwm, erowm, [('out_file', 'in_file')])])

    # generate local wm
    localwm = Node(Localstat(neighborhood=('SPHERE', 25),
                             stat='mean',
                             nonmask=True,
                             outputtype='NIFTI_GZ'),
                   name='localwm')
    regressors.connect([(erowm, localwm, [('out_file', 'mask_file')])])
    preproc.connect([(coreg, regressors, [('warp.output_image',
                                           'localwm.in_file')])])

    # bandpass filter the local wm regressor
    localwm_bpf = Node(Fourier(highpass=0.01,
                               lowpass=0.08,
                               args='-retrend',
                               outputtype='NIFTI_GZ'),
                       name='localwm_bpf')
    regressors.connect([(localwm, localwm_bpf, [('out_file', 'in_file')])])

    # detrend the local wm regressor

    localwm_bpf_dt = Node(Detrend(args='-polort 2', outputtype='NIFTI_GZ'),
                          name='localwm_bpf_dt')
    regressors.connect([(localwm_bpf, localwm_bpf_dt, [('out_file', 'in_file')
                                                       ])])
    '''
    Clean up your functional image with the regressors you have created above
    '''

    # create a mask for blurring, filtering, and detrending

    clean = Workflow(name='clean')

    mask = Node(BET(mask=True, functional=True), name='mask')

    mean_mask = Node(MCFLIRT(mean_vol=True, output_type='NIFTI_GZ'),
                     name="mean_mask")

    dilf = Node(DilateImage(operation='max', output_type='NIFTI_GZ'),
                name='dilf')
    clean.connect([(mask, dilf, [('mask_file', 'in_file')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'mask.in_file')])])

    fill = Node(MaskTool(in_file='default_images/MNI152_T1_2mm_brain.nii.gz',
                         fill_holes=True,
                         outputtype='NIFTI_GZ'),
                name='fill')

    axb = Node(Calc(expr='a*b', outputtype='NIFTI_GZ'), name='axb')
    clean.connect([(dilf, axb, [('out_file', 'in_file_a')])])
    clean.connect([(fill, axb, [('out_file', 'in_file_b')])])

    bxc = Node(Calc(expr='ispositive(a)*b', outputtype='NIFTI_GZ'), name='bxc')
    clean.connect([(mean_mask, bxc, [('mean_img', 'in_file_a')])])
    clean.connect([(axb, bxc, [('out_file', 'in_file_b')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'mean_mask.in_file')
                                     ])])

    #### BLUR, FOURIER BPF, and DETREND

    blurinmask = Node(BlurInMask(fwhm=6, outputtype='NIFTI_GZ'),
                      name='blurinmask')
    clean.connect([(bxc, blurinmask, [('out_file', 'mask')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'blurinmask.in_file')
                                     ])])

    fourier = Node(Fourier(highpass=0.01,
                           lowpass=0.08,
                           retrend=True,
                           outputtype='NIFTI_GZ'),
                   name='fourier')
    clean.connect([(blurinmask, fourier, [('out_file', 'in_file')])])

    tstat = Node(TStat(args='-mean', outputtype='NIFTI_GZ'), name='tstat')
    clean.connect([(fourier, tstat, [('out_file', 'in_file')])])

    detrend = Node(Detrend(args='-polort 2', outputtype='NIFTI_GZ'),
                   name='detrend')
    clean.connect([(fourier, detrend, [('out_file', 'in_file')])])

    remean = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean')
    clean.connect([(detrend, remean, [('out_file', 'in_file_a')])])
    clean.connect([(tstat, remean, [('out_file', 'in_file_b')])])
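    # 3dDetrend removes the polynomial trend including the mean, so the
    # pre-detrend mean volume (tstat) is added back to restore original units.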

    concat = Node(ConcatModel(), name='concat')

    # Removes nuisance regressors via regression function
    clean_rs = Node(Bandpass(highpass=0, lowpass=99999, outputtype='NIFTI_GZ'),
                    name='clean_rs')

    clean.connect([(concat, clean_rs, [('out_file', 'orthogonalize_file')])])

    remean1 = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean1')
    clean.connect([(clean_rs, remean1, [('out_file', 'in_file_a')])])
    clean.connect([(tstat, remean1, [('out_file', 'in_file_b')])])

    preproc.connect([(regressors, clean, [('bpf_dt_csf.out_file',
                                           'concat.in_file_a')])])
    preproc.connect([(fproc, clean, [('moco_bpfdt.out_file',
                                      'concat.in_file_b')])])

    preproc.connect([(regressors, clean, [('localwm_bpf_dt.out_file',
                                           'clean_rs.orthogonalize_dset')])])
    clean.connect([(remean, clean_rs, [('out_file', 'in_file')])])
    '''
    Write graphical output detailing the workflows and nodes 
    '''

    fproc.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./fproc.dot')
    fproc.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./fproc_color.dot')

    coreg.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./coreg.dot')
    coreg.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./coreg_color.dot')

    scrub.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./scrub.dot')
    scrub.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./scrub_color.dot')

    regressors.write_graph(graph2use='flat',
                           format='png',
                           simple_form=True,
                           dotfilename='./reg.dot')
    regressors.write_graph(graph2use='colored',
                           format='png',
                           simple_form=True,
                           dotfilename='./reg_color.dot')

    preproc.write_graph(graph2use='flat',
                        format='png',
                        simple_form=True,
                        dotfilename='./preproc.dot')
    preproc.write_graph(graph2use='colored',
                        format='png',
                        simple_form=True,
                        dotfilename='./preproc_color.dot')

    return preproc
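
For reference, the workflow returned here would typically be executed with one of Nipype's execution plugins. A minimal sketch (the enclosing function's name and arguments are assumptions, since its signature appears earlier in the source file):

# hypothetical driver; function name and argument names are illustrative
preproc = build_preproc_workflow(subject_list, ses_list, experiment_dir, output_dir)
preproc.run(plugin='MultiProc', plugin_args={'n_procs': 4})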
Code example #32
from nipype import Node, Workflow
from nipype.interfaces.io import SelectFiles
from nipype.interfaces.spm import (EstimateContrast, EstimateModel,
                                   OneSampleTTestDesign)
from nipype.interfaces.utility import IdentityInterface

contrasts = ['con_0001', 'con_0002', 'con_0003']

# numeric ids corresponding to each contrast image
contrast_ids = list(range(1, len(contrasts) + 1))

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['contrast_id']), name="infosource")
infosource.iterables = [('contrast_id', contrasts)]

# Select files from derivatives.

templates = {
    'cons':
    '/data/pt_nmc002/other/narps/derivatives/first_lev/tmp/equalRange/*/contraste_estimate/{contrast_id}.nii'
}
selectderivs = Node(SelectFiles(templates, sort_filelist=True),
                    name='selectderivs')
#selectderivs.inputs.sub_id = subs

# One Sample T-Test Design - creates one sample T-Test Design
onesamplettestdes = Node(
    OneSampleTTestDesign(),
    #overwrite=True,
    name="onesampttestdes")
onesamplettestdes.inputs.explicit_mask_file = mask
# EstimateModel - estimate the parameters of the model
# Even for second level it should be 'Classical': 1.
level2estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                      name="level2estimate")
# EstimateContrast - estimates simple group contrast
level2conestimate = Node(EstimateContrast(group_contrast=True),
                         name="level2conestimate")
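
The excerpt ends mid-pipeline; in the standard Nipype/SPM second-level pattern these nodes are wired roughly as follows (a sketch of the usual connections, not the original author's code):

# 'Group' mean contrast over the collected con images
level2conestimate.inputs.contrasts = [['Group', 'T', ['mean'], [1]]]

l2analysis = Workflow(name='l2analysis')
l2analysis.connect([
    (infosource, selectderivs, [('contrast_id', 'contrast_id')]),
    (selectderivs, onesamplettestdes, [('cons', 'in_files')]),
    (onesamplettestdes, level2estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level2estimate, level2conestimate, [('spm_mat_file', 'spm_mat_file'),
                                         ('beta_images', 'beta_images'),
                                         ('residual_image', 'residual_image')]),
])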
Code example #33
import pandas as pd

from nipype.interfaces.freesurfer import Binarize
from nipype.interfaces.io import SelectFiles

subjects = pd.read_csv('/scr/ilz3/myelinconnect/subjects.csv')
subjects = list(subjects['DB'])
subjects.remove('KSMT')


labels = [11, 12, 13, 16, 18] + list(range(30, 42))
templates = {'seg': '/scr/ilz3/myelinconnect/struct/seg/{subject}*seg_merged.nii.gz'}
mask_file = '/scr/ilz3/myelinconnect/struct/myelinated_thickness/subcortex_mask/%s_subcortical_mask.nii.gz'


for subject in subjects:

    # grab this subject's merged segmentation
    select = SelectFiles(templates)
    select.inputs.subject = subject
    select.run()
    seg_file = select.aggregate_outputs().seg

    # binarize the listed subcortical labels into a single mask
    binarize = Binarize(match=labels, out_type='nii.gz')
    binarize.inputs.binary_file = mask_file % subject
    binarize.inputs.in_file = seg_file
    binarize.run()
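
As a design note, the same per-subject loop could also be expressed with Nipype iterables, letting the engine handle caching and parallelism; a rough sketch under the same templates and labels (node names and base_dir are illustrative, and the per-subject binary_file naming falls back to Nipype defaults here):

from nipype import Node, Workflow

select_node = Node(SelectFiles(templates), name='select')
select_node.iterables = [('subject', subjects)]  # one run per subject

binarize_node = Node(Binarize(match=labels, out_type='nii.gz'), name='binarize')

wf = Workflow(name='subcortical_masks', base_dir='/tmp/subcortical_masks')
wf.connect(select_node, 'seg', binarize_node, 'in_file')
wf.run()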