def create_workflow(xfm_dir,
                    xfm_pattern,
                    atlas_dir,
                    atlas_pattern,
                    source_dir,
                    source_pattern,
                    work_dir,
                    out_dir,
                    name="new_data_to_atlas_space"):

    wf = Workflow(name=name)
    wf.base_dir = work_dir

    datasource_source = Node(interface=DataGrabber(sort_filelist=True),
                             name='datasource_source')
    datasource_source.inputs.base_directory = os.path.abspath(source_dir)
    datasource_source.inputs.template = source_pattern

    datasource_xfm = Node(interface=DataGrabber(sort_filelist=True),
                          name='datasource_xfm')
    datasource_xfm.inputs.base_directory = os.path.abspath(xfm_dir)
    datasource_xfm.inputs.template = xfm_pattern

    datasource_atlas = Node(interface=DataGrabber(sort_filelist=True),
                            name='datasource_atlas')
    datasource_atlas.inputs.base_directory = os.path.abspath(atlas_dir)
    datasource_atlas.inputs.template = atlas_pattern

    resample = MapNode(interface=Resample(sinc_interpolation=True),
                       name='resample',
                       iterfield=['input_file', 'transformation'])
    wf.connect(datasource_source, 'outfiles', resample, 'input_file')
    wf.connect(datasource_xfm, 'outfiles', resample, 'transformation')
    wf.connect(datasource_atlas, 'outfiles', resample, 'like')

    # BigAverage takes the full list of resampled files, so a plain Node is
    # correct here; iterfield only applies to MapNode and was removed.
    bigaverage = Node(interface=BigAverage(output_float=True, robust=False),
                      name='bigaverage')

    wf.connect(resample, 'output_file', bigaverage, 'input_files')

    datasink = Node(interface=DataSink(base_directory=out_dir,
                                       container=out_dir),
                    name='datasink')

    wf.connect([(bigaverage, datasink, [('output_file', 'average')]),
                (resample, datasink, [('output_file', 'atlas_space')]),
                (datasource_xfm, datasink, [('outfiles', 'transforms')])])

    return wf
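A minimal driver for the factory above; the directories and glob patterns below are hypothetical placeholders for MINC images and .xfm transforms:

wf = create_workflow(xfm_dir='/data/xfms',            # placeholder paths
                     xfm_pattern='*.xfm',
                     atlas_dir='/data/atlas',
                     atlas_pattern='atlas.mnc',
                     source_dir='/data/incoming',
                     source_pattern='*.mnc',
                     work_dir='/scratch/work',
                     out_dir='/data/atlas_space')
wf.run(plugin='MultiProc')  # serial wf.run() also works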
def test_DataGrabber_outputs():
    output_map = dict()
    outputs = DataGrabber.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
Example #4
def datagrabber(infields, template, field_template, template_args,
                base_directory, name):
    assert field_template.keys() == template_args.keys(), \
        "Template keys do not match!"
    grabber = pipe.Node(interface=DataGrabber(infields=infields,
                                              outfields=list(field_template.keys())),
                        name=name)
    grabber.inputs.sort_filelist = False
    grabber.inputs.base_directory = base_directory
    grabber.inputs.template = template
    grabber.inputs.field_template = field_template
    grabber.inputs.template_args = template_args
    return grabber
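A usage sketch for this helper with made-up templates; it illustrates the invariant checked by the assert, namely that field_template and template_args carry the same keys:

grabber = datagrabber(infields=['subject_id'],
                      template='*',
                      field_template=dict(anat='%s/anat/T1w.nii.gz'),  # hypothetical layout
                      template_args=dict(anat=[['subject_id']]),
                      base_directory='/data/study',                    # placeholder path
                      name='anat_grabber')
grabber.inputs.subject_id = 'sub-01'
result = grabber.run()  # result.outputs.anat holds the matched file(s)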
Example #5
def cope_merge_wf(subject_id, sink_directory, name='cope_merge_wf'):
    cope_merge_wf = Workflow(name=name)

    info = dict(
        learning_cope=[['subject_id']],  # template args for DataGrabber
        nonlearning_cope=[['subject_id']])

    # node to grab learning and nonlearning cope files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=list(info.keys())),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/frstlvl/model_LSS2')
    datasource.inputs.field_template = dict(
        learning_cope='%s/deriv/learn/*.nii.gz',
        nonlearning_cope='%s/deriv/nonlearn/*.nii.gz')
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    #node to merge learning trials across all 6 runs
    merge_learning = Node(Merge(), name='merge_learning')
    merge_learning.inputs.dimension = 't'
    merge_learning.inputs.output_type = 'NIFTI_GZ'
    merge_learning.inputs.merged_file = 'cope_learning.nii.gz'
    merge_learning.inputs.tr = 2.00
    cope_merge_wf.connect(datasource, 'learning_cope', merge_learning,
                          'in_files')

    #node to merge nonlearning trials across all 6 runs
    merge_nonlearning = Node(Merge(), name='merge_nonlearning')
    merge_nonlearning.inputs.dimension = 't'
    merge_nonlearning.inputs.output_type = 'NIFTI_GZ'
    merge_nonlearning.inputs.merged_file = 'cope_nonlearning.nii.gz'
    merge_nonlearning.inputs.tr = 2.00
    cope_merge_wf.connect(datasource, 'nonlearning_cope', merge_nonlearning,
                          'in_files')

    #node to output data
    dsink = Node(DataSink(), name='dsink')
    dsink.inputs.base_directory = sink_directory
    dsink.inputs.container = subject_id
    cope_merge_wf.connect(merge_learning, 'merged_file', dsink,
                          'merged.@learning')
    cope_merge_wf.connect(merge_nonlearning, 'merged_file', dsink,
                          'merged.@nonlearning')

    return cope_merge_wf
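A hedged example of building and running the merge workflow for one subject; the subject ID and paths are placeholders:

wf = cope_merge_wf(subject_id='WMAZE_001',           # hypothetical subject
                   sink_directory='/scratch/sinks')  # placeholder path
wf.base_dir = '/scratch/work'
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})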
def test_DataGrabber_inputs():
    input_map = dict(ignore_exception=dict(nohash=True,
    usedefault=True,
    ),
    raise_on_empty=dict(usedefault=True,
    ),
    sort_filelist=dict(mandatory=True,
    ),
    template_args=dict(),
    template=dict(mandatory=True,
    ),
    base_directory=dict(),
    )
    inputs = DataGrabber.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Example #8
def transformGrabber(experiment):
    grabber = pipe.Node(interface=DataGrabber(
        infields=["session_id"],
        outfields=["atlasToSessionTransform", "sessionToAtlasTransform"]),
                        name="transformGrabber")
    grabber.inputs.sort_filelist = False
    grabber.inputs.base_directory = "/Shared/paulsen/Experiments"
    grabber.inputs.template = "*"
    transformRegex = transform.format(experiment=experiment)  # 'transform' is a module-level template string defined elsewhere
    grabber.inputs.field_template = dict(
        atlasToSessionTransform=transformRegex,
        sessionToAtlasTransform=transformRegex)
    grabber.inputs.template_args = dict(
        atlasToSessionTransform=[[
            "session_id", "AtlasToSubjectPostBABC_SyNComposite"
        ]],
        sessionToAtlasTransform=[[
            "session_id", "AtlasToSubjectPostBABC_SyNInverseComposite"
        ]])
    return grabber
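A minimal sketch of using the grabber; the experiment name and session ID are hypothetical, and the module-level 'transform' template must be defined:

grabber = transformGrabber(experiment='20160520_PREDICT_Results')  # hypothetical experiment
grabber.inputs.session_id = '123456789'                            # placeholder session
result = grabber.run()
print(result.outputs.atlasToSessionTransform)
print(result.outputs.sessionToAtlasTransform)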
def secondlevel_wf(subject_id, sink_directory, name='GLM1_scndlvl_wf'):
    scndlvl_wf = Workflow(name=name)
    base_dir = os.path.abspath('/home/data/madlab/data/mri/wmaze/')

    contrasts = [
        'all_before_B_corr', 'all_before_B_incorr', 'all_remaining',
        'all_corr_minus_all_incorr', 'all_incorr_minus_all_corr'
    ]

    cnt_file_list = []
    for curr_contrast in contrasts:
        cnt_file_list.append(
            glob(
                os.path.join(
                    base_dir,
                    'frstlvl/model_GLM1/{0}/modelfit/contrasts/_estimate_model*/cope??_{1}.nii.gz'
                    .format(subject_id, curr_contrast))))

    dof_runs = [[], [], [], [], []]
    for i, curr_file_list in enumerate(cnt_file_list):
        if not isinstance(curr_file_list, list):
            curr_file_list = [curr_file_list]
        for curr_file in curr_file_list:
            dof_runs[i].append(
                curr_file.split('/')[-2][-1])  # grab the _estimate_model run number

    info = dict(copes=[['subject_id', contrasts]],
                varcopes=[['subject_id', contrasts]],
                mask_file=[['subject_id', 'aparc+aseg_thresh']],
                dof_files=[['subject_id', dof_runs, 'dof']])

    # datasource node to grab cope, varcope, mask, and dof files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=list(info.keys())),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/')
    datasource.inputs.field_template = dict(
        copes='frstlvl/model_GLM1/%s/modelfit/contrasts/_estimate_model*/cope*_%s.nii.gz',
        varcopes='frstlvl/model_GLM1/%s/modelfit/contrasts/_estimate_model*/varcope*_%s.nii.gz',
        mask_file='preproc/%s/ref/_fs_threshold20/%s*_thresh.nii',
        dof_files='frstlvl/model_GLM1/%s/modelfit/dofs/_estimate_model%s/%s')
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    #inputspec to deal with copes and varcopes doublelist issues
    fixedfx_inputspec = Node(IdentityInterface(
        fields=['copes', 'varcopes', 'dof_files'], mandatory_inputs=True),
                             name='fixedfx_inputspec')
    scndlvl_wf.connect(datasource, ('copes', doublelist), fixedfx_inputspec,
                       'copes')
    scndlvl_wf.connect(datasource, ('varcopes', doublelist), fixedfx_inputspec,
                       'varcopes')
    scndlvl_wf.connect(datasource, ('dof_files', doublelist),
                       fixedfx_inputspec, 'dof_files')

    #merge all of copes into a single matrix across subject runs
    copemerge = MapNode(Merge(), iterfield=['in_files'], name='copemerge')
    copemerge.inputs.dimension = 't'
    copemerge.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    copemerge.inputs.ignore_exception = False
    copemerge.inputs.output_type = 'NIFTI_GZ'
    copemerge.inputs.terminal_output = 'stream'
    scndlvl_wf.connect(fixedfx_inputspec, 'copes', copemerge, 'in_files')

    #generate DOF volume for second level
    gendofvolume = Node(Function(input_names=['dof_files', 'cope_files'],
                                 output_names=['dof_volumes'],
                                 function=get_dofvolumes),
                        name='gendofvolume')
    gendofvolume.inputs.ignore_exception = False
    scndlvl_wf.connect(fixedfx_inputspec, 'dof_files', gendofvolume,
                       'dof_files')
    scndlvl_wf.connect(copemerge, 'merged_file', gendofvolume, 'cope_files')

    #merge all of the varcopes into a single matrix across subject runs per voxel
    varcopemerge = MapNode(Merge(),
                           iterfield=['in_files'],
                           name='varcopemerge')
    varcopemerge.inputs.dimension = 't'
    varcopemerge.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    varcopemerge.inputs.ignore_exception = False
    varcopemerge.inputs.output_type = 'NIFTI_GZ'
    varcopemerge.inputs.terminal_output = 'stream'
    scndlvl_wf.connect(fixedfx_inputspec, 'varcopes', varcopemerge, 'in_files')

    #define contrasts from the names of the copes
    getcontrasts = Node(Function(input_names=['data_inputs'],
                                 output_names=['contrasts'],
                                 function=get_contrasts),
                        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    scndlvl_wf.connect(datasource, ('copes', doublelist), getcontrasts,
                       'data_inputs')

    #rename output files to be more descriptive
    getsubs = Node(Function(input_names=['subject_id', 'cons'],
                            output_names=['subs'],
                            function=get_subs),
                   name='getsubs')
    getsubs.inputs.ignore_exception = False
    getsubs.inputs.subject_id = subject_id
    scndlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    #l2model node for fixed effects analysis (aka within subj across runs)
    l2model = MapNode(L2Model(), iterfield=['num_copes'], name='l2model')
    l2model.inputs.ignore_exception = False
    scndlvl_wf.connect(datasource, ('copes', num_copes), l2model, 'num_copes')

    #FLAMEO Node to run the fixed effects analysis
    flameo_fe = MapNode(FLAMEO(),
                        iterfield=[
                            'cope_file', 'var_cope_file', 'dof_var_cope_file',
                            'design_file', 't_con_file', 'cov_split_file'
                        ],
                        name='flameo_fe')
    flameo_fe.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    flameo_fe.inputs.ignore_exception = False
    flameo_fe.inputs.log_dir = 'stats'
    flameo_fe.inputs.output_type = 'NIFTI_GZ'
    flameo_fe.inputs.run_mode = 'fe'
    flameo_fe.inputs.terminal_output = 'stream'
    scndlvl_wf.connect(varcopemerge, 'merged_file', flameo_fe, 'var_cope_file')
    scndlvl_wf.connect(l2model, 'design_mat', flameo_fe, 'design_file')
    scndlvl_wf.connect(l2model, 'design_con', flameo_fe, 't_con_file')
    scndlvl_wf.connect(l2model, 'design_grp', flameo_fe, 'cov_split_file')
    scndlvl_wf.connect(gendofvolume, 'dof_volumes', flameo_fe,
                       'dof_var_cope_file')
    scndlvl_wf.connect(datasource, 'mask_file', flameo_fe, 'mask_file')
    scndlvl_wf.connect(copemerge, 'merged_file', flameo_fe, 'cope_file')

    #outputspec node
    scndlvl_outputspec = Node(IdentityInterface(
        fields=['res4d', 'copes', 'varcopes', 'zstats', 'tstats'],
        mandatory_inputs=True),
                              name='scndlvl_outputspec')
    scndlvl_wf.connect(flameo_fe, 'res4d', scndlvl_outputspec, 'res4d')
    scndlvl_wf.connect(flameo_fe, 'copes', scndlvl_outputspec, 'copes')
    scndlvl_wf.connect(flameo_fe, 'var_copes', scndlvl_outputspec, 'varcopes')
    scndlvl_wf.connect(flameo_fe, 'zstats', scndlvl_outputspec, 'zstats')
    scndlvl_wf.connect(flameo_fe, 'tstats', scndlvl_outputspec, 'tstats')

    #datasink node
    sinkd = Node(DataSink(), name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    scndlvl_wf.connect(scndlvl_outputspec, 'copes', sinkd, 'fixedfx.@copes')
    scndlvl_wf.connect(scndlvl_outputspec, 'varcopes', sinkd,
                       'fixedfx.@varcopes')
    scndlvl_wf.connect(scndlvl_outputspec, 'tstats', sinkd, 'fixedfx.@tstats')
    scndlvl_wf.connect(scndlvl_outputspec, 'zstats', sinkd, 'fixedfx.@zstats')
    scndlvl_wf.connect(scndlvl_outputspec, 'res4d', sinkd, 'fixedfx.@pvals')
    scndlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')

    return scndlvl_wf
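A sketch of driving the fixed-effects workflow, assuming the helper functions it references (doublelist, num_copes, get_dofvolumes, get_contrasts, get_subs) are importable; the subject ID and sink path are placeholders:

scndlvl = secondlevel_wf(subject_id='WMAZE_001',             # hypothetical subject
                         sink_directory='/scratch/scndlvl')  # placeholder path
scndlvl.base_dir = '/scratch/work'
scndlvl.run(plugin='MultiProc', plugin_args={'n_procs': 4})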
Example #10
def create_ba_maps_wf(name="Brodmann_Area_Maps", th3=True):
    # Brodmann Area Maps (BA Maps) and Hinds V1 Atlas
    inputs = [
        'lh_sphere_reg', 'rh_sphere_reg', 'lh_white', 'rh_white', 'lh_pial',
        'rh_pial', 'lh_orig', 'rh_orig', 'transform', 'lh_thickness',
        'rh_thickness', 'lh_cortex_label', 'rh_cortex_label', 'brainmask',
        'aseg', 'ribbon', 'wm', 'src_subject_id', 'src_subject_dir',
        'color_table'
    ]

    inputspec = pe.Node(IdentityInterface(fields=inputs), name="inputspec")

    ba_WF = pe.Workflow(name=name)

    ba_outputs = [
        'lh_BAMaps_stats', 'lh_color', 'lh_BAMaps_labels',
        'lh_BAMaps_annotation', 'lh_thresh_BAMaps_stats', 'lh_thresh_color',
        'lh_thresh_BAMaps_labels', 'lh_thresh_BAMaps_annotation',
        'rh_BAMaps_stats', 'rh_color', 'rh_BAMaps_labels',
        'rh_BAMaps_annotation', 'rh_thresh_BAMaps_stats', 'rh_thresh_color',
        'rh_thresh_BAMaps_labels', 'rh_thresh_BAMaps_annotation'
    ]

    outputspec = pe.Node(IdentityInterface(fields=ba_outputs),
                         name="outputspec")

    labels = [
        "BA1", "BA2", "BA3a", "BA3b", "BA4a", "BA4p", "BA6", "BA44", "BA45",
        "V1", "V2", "MT", "entorhinal", "perirhinal"
    ]
    for hemisphere in ['lh', 'rh']:
        for threshold in [True, False]:
            field_template = dict(
                sphere_reg='surf/{0}.sphere.reg'.format(hemisphere),
                white='surf/{0}.white'.format(hemisphere))

            out_files = list()
            if threshold:
                for label in labels:
                    out_file = '{0}.{1}_exvivo.thresh.label'.format(
                        hemisphere, label)
                    out_files.append(out_file)
                    field_template[label] = 'label/' + out_file
                node_name = 'BA_Maps_' + hemisphere + '_Thresh'
            else:
                for label in labels:
                    out_file = '{0}.{1}_exvivo.label'.format(hemisphere, label)
                    out_files.append(out_file)
                    field_template[label] = 'label/' + out_file
                node_name = 'BA_Maps_' + hemisphere

            source_fields = labels + ['sphere_reg', 'white']
            source_subject = pe.Node(DataGrabber(outfields=source_fields),
                                     name=node_name + "_srcsubject")
            source_subject.inputs.template = '*'
            source_subject.inputs.sort_filelist = False
            source_subject.inputs.field_template = field_template
            ba_WF.connect([(inputspec, source_subject, [('src_subject_dir',
                                                         'base_directory')])])

            merge_labels = pe.Node(Merge(len(labels)),
                                   name=node_name + "_Merge")
            for i, label in enumerate(labels):
                ba_WF.connect([(source_subject, merge_labels,
                                [(label, 'in{0}'.format(i + 1))])])

            node = pe.MapNode(Label2Label(),
                              name=node_name,
                              iterfield=['source_label', 'out_file'])
            node.inputs.hemisphere = hemisphere
            node.inputs.out_file = out_files
            node.inputs.copy_inputs = True

            ba_WF.connect([
                (merge_labels, node, [('out', 'source_label')]),
                (source_subject, node, [('sphere_reg', 'source_sphere_reg'),
                                        ('white', 'source_white')]),
                (inputspec, node, [('src_subject_id', 'source_subject')])
            ])

            label2annot = pe.Node(Label2Annot(), name=node_name + '_2_Annot')
            label2annot.inputs.hemisphere = hemisphere
            label2annot.inputs.verbose_off = True
            label2annot.inputs.keep_max = True
            label2annot.inputs.copy_inputs = True

            stats_node = pe.Node(ParcellationStats(),
                                 name=node_name + '_Stats')
            stats_node.inputs.hemisphere = hemisphere
            stats_node.inputs.mgz = True
            stats_node.inputs.th3 = th3
            stats_node.inputs.surface = 'white'
            stats_node.inputs.tabular_output = True
            stats_node.inputs.copy_inputs = True

            if threshold:
                label2annot.inputs.out_annot = "BA_exvivo.thresh"
                ba_WF.connect([
                    (stats_node, outputspec,
                     [('out_color', '{0}_thresh_color'.format(hemisphere)),
                      ('out_table',
                       '{0}_thresh_BAMaps_stats'.format(hemisphere))]),
                    (label2annot, outputspec,
                     [('out_file',
                       '{0}_thresh_BAMaps_annotation'.format(hemisphere))]),
                    (node, outputspec,
                     [('out_file',
                       '{0}_thresh_BAMaps_labels'.format(hemisphere))])
                ])
            else:
                label2annot.inputs.out_annot = "BA_exvivo"
                ba_WF.connect([
                    (stats_node, outputspec,
                     [('out_color', '{0}_color'.format(hemisphere)),
                      ('out_table', '{0}_BAMaps_stats'.format(hemisphere))]),
                    (label2annot, outputspec,
                     [('out_file', '{0}_BAMaps_annotation'.format(hemisphere))
                      ]),
                    (node, outputspec,
                     [('out_file', '{0}_BAMaps_labels'.format(hemisphere))])
                ])

            ba_WF.connect([
                (inputspec, node, [
                    ('{0}_sphere_reg'.format(hemisphere), 'sphere_reg'),
                    ('{0}_white'.format(hemisphere), 'white'),
                ]), (node, label2annot, [('out_file', 'in_labels')]),
                (inputspec, label2annot, [('{0}_orig'.format(hemisphere),
                                           'orig'),
                                          ('color_table', 'color_table')]),
                (label2annot, stats_node, [('out_file', 'in_annotation')]),
                (inputspec, stats_node,
                 [('{0}_thickness'.format(hemisphere), 'thickness'),
                  ('{0}_cortex_label'.format(hemisphere), 'cortex_label'),
                  ('lh_white', 'lh_white'), ('rh_white', 'rh_white'),
                  ('lh_pial', 'lh_pial'), ('rh_pial', 'rh_pial'),
                  ('transform', 'transform'), ('brainmask', 'brainmask'),
                  ('aseg', 'aseg'), ('wm', 'wm'), ('ribbon', 'ribbon')])
            ])

    return ba_WF, ba_outputs
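Since the factory returns the workflow together with its output field names, a caller can attach it to a larger FreeSurfer pipeline; a minimal sketch, with the parent workflow and its upstream nodes assumed to exist:

ba_wf, ba_outputs = create_ba_maps_wf(name='Brodmann_Area_Maps', th3=True)
parent = pe.Workflow(name='recon_extras')  # hypothetical parent workflow
parent.base_dir = '/scratch/work'          # placeholder path
parent.add_nodes([ba_wf])                  # inputspec fields still need upstream connections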
Example #11
def genFreesurferSBMglmWF(name='fsSBM',
                          base_dir=op.abspath('.'),
                          group_sublist=[],
                          model_dir=None,
                          model_info={'Model1': 'dods'},
                          design_input='fsgd',
                          fs_subjects_dir='/data/analyses/work_in_progress/freesurfer/fsmrishare-flair6.0/',
                          fwhm=[0.0, 10.0],
                          measure_list=['thickness', 'area'],
                          target_atlas='fsaverage',
                          target_atlas_surfreg='sphere.reg',
                          correction_method='FDR'):
    
    wf = Workflow(name)
    wf.base_dir = base_dir

    
    # Node: model List
    modelList = Node(IdentityInterface(fields=['model_name'], mandatory_inputs=True),
                    name='modelList')
    modelList.iterables = ('model_name', list(model_info.keys()))

    
    # Grab fsgd or design mat and contrast mtx files from model_dir
    fileList_temp_args = {'contrast_files': [['model_name', '*.mtx']],
                          'contrast_sign_files':  [['model_name', '*.mdtx']]}
    if design_input == 'fsgd':
        fileList_temp_args['fsgd_file'] = [['model_name', '*.fsgd']]
    elif design_input == 'design_mat':
        fileList_temp_args['design_mat'] = [['model_name', 'X.mat']]
        
    fileList = Node(DataGrabber(infields=['model_name'],
                                outfields=list(fileList_temp_args.keys())), 
                    name="fileList")
    fileList.inputs.base_directory = model_dir
    fileList.inputs.ignore_exception = False
    fileList.inputs.raise_on_empty = True
    fileList.inputs.sort_filelist = True
    fileList.inputs.template = '%s/%s'
    fileList.inputs.template_args =  fileList_temp_args
    wf.connect(modelList, "model_name", fileList, "model_name")


    # preproc for each hemisphere to produce concatenated file for glmfit and 
    # also a mean map
    
    # Define a few other iterables
    measList = Node(IdentityInterface(fields=['measure'],
                                      mandatory_inputs=True),
                    name='measList')
    measList.iterables = ('measure', measure_list)
    
    smoothList = Node(IdentityInterface(fields=['fwhm'],
                                        mandatory_inputs=True),
                      name='smoothList')
    smoothList.iterables = ('fwhm', fwhm)
    
    surfaces = ['inflated', 'pial']
    plotSurfList = Node(IdentityInterface(fields=['surf']),
                      name='plotSurfList')
    plotSurfList.iterables = ('surf', surfaces)
    
    
    # MRI_preproc
    lhSBMpreproc = MapNode(MRISPreproc(),
                           name='lhSBMpreproc',
                           iterfield=['args', 'out_file'])
    lhSBMpreproc.inputs.subjects_dir = fs_subjects_dir
    lhSBMpreproc.inputs.target = target_atlas
    lhSBMpreproc.inputs.hemi = 'lh'
    lhSBMpreproc.inputs.args = ['', '--mean']
    lhSBMpreproc.inputs.out_file = ['{}.lh.{}.mgh'.format(out_name, target_atlas) for out_name in ['stacked', 'mean']]
    lhSBMpreproc.inputs.subjects = group_sublist
    wf.connect(measList, "measure", lhSBMpreproc, "surf_measure")
    
    rhSBMpreproc = MapNode(MRISPreproc(),
                           name='rhSBMpreproc',
                           iterfield=['args', 'out_file'])
    rhSBMpreproc.inputs.subjects_dir = fs_subjects_dir
    rhSBMpreproc.inputs.target = target_atlas
    rhSBMpreproc.inputs.hemi = 'rh'
    rhSBMpreproc.inputs.args = ['', '--mean']
    rhSBMpreproc.inputs.out_file = ['{}.rh.{}.mgh'.format(out_name, target_atlas) for out_name in ['stacked', 'mean']]
    rhSBMpreproc.inputs.subjects = group_sublist
    wf.connect(measList, "measure", rhSBMpreproc, "surf_measure")
    
    
    # Create smoothed mean maps for each non-zero fwhm
    non_zero_fwhm = [val for val in fwhm if val != 0.0]
    lhSmoothMean = MapNode(SurfaceSmooth(),
                           name='lhSmoothMean',
                           iterfield=['fwhm', 'out_file'])
    lhSmoothMean.inputs.subject_id = target_atlas
    lhSmoothMean.inputs.hemi = 'lh'
    lhSmoothMean.inputs.subjects_dir = fs_subjects_dir
    lhSmoothMean.inputs.fwhm = non_zero_fwhm
    lhSmoothMean.inputs.cortex = True
    lhSmoothMean.inputs.out_file = ['mean.lh.fwhm{}.{}.mgh'.format(str(int(val)), target_atlas) for val in non_zero_fwhm]
    wf.connect(lhSBMpreproc, ('out_file', getElementFromList, 1), lhSmoothMean, 'in_file')
    
    rhSmoothMean = MapNode(SurfaceSmooth(),
                           name='rhSmoothMean',
                           iterfield=['fwhm', 'out_file'])
    rhSmoothMean.inputs.subject_id = target_atlas
    rhSmoothMean.inputs.hemi = 'rh'
    rhSmoothMean.inputs.subjects_dir = fs_subjects_dir
    rhSmoothMean.inputs.fwhm = non_zero_fwhm
    rhSmoothMean.inputs.cortex = True
    rhSmoothMean.inputs.out_file = ['mean.rh.fwhm{}.{}.mgh'.format(str(int(val)), target_atlas) for val in non_zero_fwhm]
    wf.connect(rhSBMpreproc, ('out_file', getElementFromList, 1), rhSmoothMean, 'in_file')

    
    # For each concatenated surfaces produced by the SBMpreproc, run glmfit
    
    if correction_method == 'FDR':
        save_res = False
    elif correction_method == 'perm':
        save_res = True
    else:
        raise ValueError("correction_method must be 'FDR' or 'perm'")
    
    if design_input == 'fsgd': 
        fsgdInput = Node(Function(input_names=['item1', 'item2'],
                                  output_names=['out_tuple'],
                                  function=createTuple2),
                         name='fsgdInput')
        wf.connect(fileList, 'fsgd_file', fsgdInput, 'item1')
        wf.connect(modelList, ('model_name', getValFromDict, model_info),
                   fsgdInput, 'item2')
    
    lhSBMglmfit = Node(GLMFit(),
                       name='lhSBMglmfit')
    lhSBMglmfit.inputs.subjects_dir = fs_subjects_dir
    lhSBMglmfit.inputs.surf = True
    lhSBMglmfit.inputs.subject_id = target_atlas
    lhSBMglmfit.inputs.hemi = 'lh'
    lhSBMglmfit.inputs.cortex = True
    lhSBMglmfit.inputs.save_residual = save_res
    wf.connect(smoothList, 'fwhm', lhSBMglmfit, 'fwhm')
    wf.connect(lhSBMpreproc, ('out_file', getElementFromList, 0), lhSBMglmfit, 'in_file')
    if design_input == 'fsgd':
        wf.connect(fsgdInput, 'out_tuple', lhSBMglmfit, 'fsgd')
    elif design_input == 'design_mat':
        wf.connect(fileList, 'design_mat', lhSBMglmfit, 'design')
    wf.connect(fileList, 'contrast_files', lhSBMglmfit, 'contrast')
    
    rhSBMglmfit = Node(GLMFit(),
                       name='rhSBMglmfit')
    rhSBMglmfit.inputs.subjects_dir = fs_subjects_dir
    rhSBMglmfit.inputs.surf = True
    rhSBMglmfit.inputs.subject_id = target_atlas
    rhSBMglmfit.inputs.hemi = 'rh'
    rhSBMglmfit.inputs.cortex = True
    rhSBMglmfit.inputs.save_residual = save_res
    wf.connect(smoothList, 'fwhm', rhSBMglmfit, 'fwhm')
    wf.connect(rhSBMpreproc, ('out_file', getElementFromList, 0), rhSBMglmfit, 'in_file')
    if design_input == 'fsgd':
        wf.connect(fsgdInput, 'out_tuple', rhSBMglmfit, 'fsgd')
    elif design_input == 'design_mat':
        wf.connect(fileList, 'design_mat', rhSBMglmfit, 'design')
    wf.connect(fileList, 'contrast_files', rhSBMglmfit, 'contrast')


    # perform FDR correction if 'FDR' is chosen
    if correction_method == 'FDR':
        
        mriFDR = MapNode(FDR(),
                         iterfield=['in_file1', 'in_file2', 'fdr_sign'],
                         name='mriFDR')
        mriFDR.inputs.fdr = 0.05
        mriFDR.inputs.out_thr_file = 'fdr_threshold.txt'
        mriFDR.inputs.out_file1 = 'lh.sig_corr.mgh'
        mriFDR.inputs.out_file2 = 'rh.sig_corr.mgh'
        wf.connect(lhSBMglmfit, 'sig_file', mriFDR, 'in_file1')
        wf.connect(lhSBMglmfit, 'mask_file', mriFDR, 'in_mask1')
        wf.connect(rhSBMglmfit, 'sig_file', mriFDR, 'in_file2')
        wf.connect(rhSBMglmfit, 'mask_file', mriFDR, 'in_mask2')
        wf.connect(fileList, ('contrast_sign_files', getElementsFromTxtList),
                   mriFDR, 'fdr_sign')
        

    # perform Permutation if 'perm' is chosen
    elif correction_method == 'perm':
        
#        glmSim = MapNode(GLMFitSim(),
#                         iterfield=['glm_dir', 'permutation'],
#                         name='glmSim')
#        glmSim.inputs.spaces = '2spaces'
        
        raise NotImplementedError
     
        
    ### Plotting ###
    lh_bg_map = op.join(fs_subjects_dir, target_atlas, 'surf', 'lh.sulc')
    rh_bg_map = op.join(fs_subjects_dir, target_atlas, 'surf', 'rh.sulc')
    
    # Plot the mean map
    plotMeanMaps = MapNode(Function(input_names=['lh_surf', 'lh_surf_map', 'lh_bg_map',
                                                 'rh_surf', 'rh_surf_map', 'rh_bg_map',
                                                 'out_fname'],
                                    output_names=['out_file'],
                                    function=plot_surf_map),
                           iterfield=['lh_surf_map', 'rh_surf_map', 'out_fname'],
                           name='plotMeanMaps')
    plotMeanMaps.inputs.lh_bg_map = lh_bg_map
    plotMeanMaps.inputs.rh_bg_map = rh_bg_map
    plotMeanMaps.inputs.out_fname = ['mean_fwhm{}.png'.format(s) for s in non_zero_fwhm]
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'lh.')),
               plotMeanMaps, 'lh_surf')
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'rh.')),
               plotMeanMaps, 'rh_surf')
    wf.connect(lhSmoothMean, 'out_file', plotMeanMaps, 'lh_surf_map')
    wf.connect(rhSmoothMean, 'out_file', plotMeanMaps, 'rh_surf_map')
    
      
    # Plot uncorrected maps
    plot_stat_inputs = ['lh_surf', 'lh_stat_map', 'lh_bg_map',
                        'rh_surf', 'rh_stat_map', 'rh_bg_map',
                        'out_fname', 'cmap', 'upper_lim', 'threshold']
    
    plotUncorrectedG = MapNode(Function(input_names=plot_stat_inputs,
                                        output_names=['out_file'],
                                        function=plot_surf_stat),
                               iterfield=['lh_stat_map', 'rh_stat_map', 'out_fname'],
                               name='plotUncorrectedG')
    plotUncorrectedG.inputs.lh_bg_map = lh_bg_map
    plotUncorrectedG.inputs.rh_bg_map = rh_bg_map
    plotUncorrectedG.inputs.cmap = 'jet'
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'lh.')),
               plotUncorrectedG, 'lh_surf')
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'rh.')),
               plotUncorrectedG, 'rh_surf')
    wf.connect(fileList, ('contrast_files',  makeFStringElementFromFnameList, '.mtx', '_uncorrected_gamma_map.png', True),
               plotUncorrectedG, 'out_fname')
    wf.connect(lhSBMglmfit, 'gamma_file', plotUncorrectedG, 'lh_stat_map')
    wf.connect(rhSBMglmfit, 'gamma_file', plotUncorrectedG, 'rh_stat_map')
    
    plotUncorrectedP = MapNode(Function(input_names=plot_stat_inputs,
                                        output_names=['out_file'],
                                        function=plot_surf_stat),
                               iterfield=['lh_stat_map', 'rh_stat_map', 'out_fname'],
                               name='plotUncorrectedP')
    plotUncorrectedP.inputs.lh_bg_map = lh_bg_map
    plotUncorrectedP.inputs.rh_bg_map = rh_bg_map
    plotUncorrectedP.inputs.upper_lim = 10.0
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'lh.')),
               plotUncorrectedP, 'lh_surf')
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'rh.')),
               plotUncorrectedP, 'rh_surf')
    wf.connect(fileList, ('contrast_files',  makeFStringElementFromFnameList, '.mtx', '_uncorrected_p_map.png', True),
               plotUncorrectedP, 'out_fname')
    wf.connect(lhSBMglmfit, 'sig_file', plotUncorrectedP, 'lh_stat_map')
    wf.connect(rhSBMglmfit, 'sig_file', plotUncorrectedP, 'rh_stat_map')
    
    # Plot the corrected map
    
    # For gamma first create gamma masked by corrected p
    lhMaskGamma = MapNode(MRIsCalc(),
                          iterfield=['in_file1', 'in_file2'],
                          name='lhMaskGamma')
    lhMaskGamma.inputs.action = 'masked'
    lhMaskGamma.inputs.out_file = 'lh.masked_gamma.mgh'
    wf.connect(lhSBMglmfit, 'gamma_file', lhMaskGamma, 'in_file1')
    if correction_method == 'FDR':
        wf.connect(mriFDR, 'out_file1', lhMaskGamma, 'in_file2')
        
    rhMaskGamma = MapNode(MRIsCalc(),
                          iterfield=['in_file1', 'in_file2'],
                          name='rhMaskGamma')
    rhMaskGamma.inputs.action = 'masked'
    rhMaskGamma.inputs.out_file = 'rh.masked_gamma.mgh'
    wf.connect(rhSBMglmfit, 'gamma_file', rhMaskGamma, 'in_file1')
    if correction_method == 'FDR':
        wf.connect(mriFDR, 'out_file2', rhMaskGamma, 'in_file2')
        
    # Plot masked gamma 
    plotCorrectedG = MapNode(Function(input_names=plot_stat_inputs,
                                      output_names=['out_file'],
                                      function=plot_surf_stat),
                             iterfield=['lh_stat_map', 'rh_stat_map', 'out_fname'],
                             name='plotCorrectedG')
    plotCorrectedG.inputs.lh_bg_map = lh_bg_map
    plotCorrectedG.inputs.rh_bg_map = rh_bg_map
    plotCorrectedG.inputs.cmap = 'jet'
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'lh.')),
               plotCorrectedG, 'lh_surf')
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'rh.')),
               plotCorrectedG, 'rh_surf')
    wf.connect(fileList, ('contrast_files',  makeFStringElementFromFnameList, '.mtx', '_masked_gamma_map.png', True),
               plotCorrectedG, 'out_fname')
    wf.connect(lhMaskGamma, 'out_file', plotCorrectedG, 'lh_stat_map')
    wf.connect(rhMaskGamma, 'out_file', plotCorrectedG, 'rh_stat_map')
    
    # Plot thresholded P
    plotCorrectedP = MapNode(Function(input_names=plot_stat_inputs,
                                      output_names=['out_file'],
                                      function=plot_surf_stat),
                             iterfield=['lh_stat_map', 'rh_stat_map',
                                        'threshold', 'out_fname'],
                             name='plotCorrectedP')
    plotCorrectedP.inputs.lh_bg_map = op.join(fs_subjects_dir, target_atlas, 'surf', 'lh.sulc')
    plotCorrectedP.inputs.rh_bg_map = op.join(fs_subjects_dir, target_atlas, 'surf', 'rh.sulc')
    plotCorrectedP.inputs.upper_lim = 10.0
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'lh.')),
               plotCorrectedP, 'lh_surf')
    wf.connect(plotSurfList, ('surf', prependString, op.join(fs_subjects_dir, target_atlas, 'surf', 'rh.')),
               plotCorrectedP, 'rh_surf')
    wf.connect(lhSBMglmfit, 'sig_file', plotCorrectedP, 'lh_stat_map')
    wf.connect(rhSBMglmfit, 'sig_file', plotCorrectedP, 'rh_stat_map')
    wf.connect(fileList, ('contrast_files', makeFStringElementFromFnameList, '.mtx', '_corrected_p_map.png', True),
               plotCorrectedP, 'out_fname')
    if correction_method == 'FDR':
        wf.connect(mriFDR, 'out_thr_file', plotCorrectedP, 'threshold')
    
    
#    # Datasink
#    datasink = Node(DataSink(base_directory=base_dir,
#                             container='%sSink' % name),
#                    name='Datasink')
#    
#    glm_outputs = ['gamma_file', 'gamma_var_file', 'sig_file', 'ftest_file']
#    for out in glm_outputs:
#        wf.connect(lhSBMglmfit, out, datasink, 'lhSBMglm_{}'.format(out))
#        wf.connect(rhSBMglmfit, out, datasink, 'rhSBMglm_{}'.format(out))
#    
#    if correction_method == 'FDR':
#        wf.connect(mriFDR, 'out_file1', datasink, 'lhSBM_fdr_corrected_sig')
#        wf.connect(mriFDR, 'out_file2', datasink, 'rhSBM_fdr_corrected_sig')
#        
#    wf.connect(lhMaskGamma, 'out_file', datasink, 'lhSBM_masked_gamma')
#    wf.connect(rhMaskGamma, 'out_file', datasink, 'rhSBM_masked_gamma')
#    
#    wf.connect(plotMeanMaps, 'out_file', datasink, 'mean_map_png')  
#    wf.connect(plotUncorrectedG, 'out_file', datasink, 'uncorrected_gamma_png')
#    wf.connect(plotUncorrectedP, 'out_file', datasink, 'uncorrected_p_png')
#    wf.connect(plotCorrectedG, 'out_file', datasink, 'masked_gamma_png')
#    wf.connect(plotCorrectedP, 'out_file', datasink, 'corrected_p_png')
#    
    return wf
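A hedged instantiation of the SBM GLM workflow; the subject list and model directory are placeholders, and model_dir is expected to contain one subdirectory per model holding the .mtx/.mdtx (and .fsgd) files:

wf = genFreesurferSBMglmWF(name='fsSBM',
                           base_dir='/scratch/sbm',             # placeholder
                           group_sublist=['sub-01', 'sub-02'],  # hypothetical subjects
                           model_dir='/data/models',            # placeholder
                           model_info={'Model1': 'dods'},
                           design_input='fsgd',
                           measure_list=['thickness'],
                           correction_method='FDR')
wf.run(plugin='MultiProc', plugin_args={'n_procs': 8})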
Example #12
# (tail of a convert_flow.connect([...]) call; its opening lines were truncated in this excerpt)
    (skull_stripper, dtifit, [('mask_file', 'mask')])
])

# <codecell>

fl = glob('/opt/data/NIPYPE_DATA/2475376/session*/DTI_mx_137/*-0001.dcm')
convert_flow.inputs.convert_dicom.source_names = fl
convert_flow.base_dir = '/mnt/mydir/'

# <codecell>

from nipype.interfaces.io import DataGrabber, DataSink

# <codecell>

dg = pe.Node(DataGrabber(infields=['subject_id', 'session'],
                         outfields=['diffusion']),
             name='datasource')
dg.inputs.base_directory = '/opt/data/NIPYPE_DATA/'
dg.inputs.template = '%s/session%d/DTI*/*-0001.dcm'
dg.inputs.subject_id = '2475376'
dg.inputs.session = [1, 2]

# <codecell>

convert_flow.connect(dg, 'diffusion', convert, 'source_names')

# <codecell>

ds = pe.Node(DataSink(), name='sinker')
ds.inputs.base_directory = '/mnt/mydir/outputs'
convert_flow.connect(skull_stripper, 'mask_file', ds, 'mask')
Example #13
    def create_workflow(self):
        """Create the Niype workflow of the super-resolution pipeline.

        It is composed of a succession of Nodes and their corresponding parameters,
        where the output of node i goes to the input of node i+1.

        """
        sub_ses = self.subject
        if self.session is not None:
            sub_ses = ''.join([sub_ses, '_', self.session])

        if self.session is None:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk",
                                                   __version__]), self.subject)
        else:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, self.session, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk", __version__]),
                                         self.subject, self.session)

        if not os.path.exists(wf_base_dir):
            os.makedirs(wf_base_dir)
        print("Process directory: {}".format(wf_base_dir))

        # Initialization (Not sure we can control the name of nipype log)
        if os.path.isfile(os.path.join(wf_base_dir, "pypeline.log")):
            os.unlink(os.path.join(wf_base_dir, "pypeline.log"))

        self.wf = Workflow(name=self.pipeline_name, base_dir=wf_base_dir)

        config.update_config({
            'logging': {
                'log_directory': os.path.join(wf_base_dir),
                'log_to_file': True
            },
            'execution': {
                'remove_unnecessary_outputs': False,
                'stop_on_first_crash': True,
                'stop_on_first_rerun': False,
                'crashfile_format': "txt",
                'use_relative_paths': True,
                'write_provenance': False
            }
        })

        # Update nipype logging with config
        nipype_logging.update_logging(config)
        # config.enable_provenance()

        if self.use_manual_masks:
            dg = Node(interface=DataGrabber(outfields=['T2ws', 'masks']),
                      name='data_grabber')
            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            if self.session is not None:
                t2ws_template = os.path.join(
                    self.subject, self.session, 'anat',
                    '_'.join([sub_ses, '*run-*', '*T2w.nii.gz']))
                if self.m_masks_desc is not None:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat',
                        '_'.join([sub_ses, '*run-*', '*mask.nii.gz']))
            else:
                t2ws_template = os.path.join(self.subject, 'anat',
                                             sub_ses + '*_run-*_T2w.nii.gz')

                if self.m_masks_desc is not None:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, 'anat', sub_ses + '*_run-*_*mask.nii.gz')

            dg.inputs.field_template = dict(T2ws=t2ws_template,
                                            masks=masks_template)

            brainMask = MapNode(
                interface=IdentityInterface(fields=['out_file']),
                name='brain_masks_bypass',
                iterfield=['out_file'])

            if self.m_stacks is not None:
                custom_masks_filter = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='custom_masks_filter')
                custom_masks_filter.inputs.stacks_id = self.m_stacks

        else:
            dg = Node(interface=DataGrabber(outfields=['T2ws']),
                      name='data_grabber')

            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            dg.inputs.field_template = dict(
                T2ws=os.path.join(self.subject, 'anat', sub_ses +
                                  '*_run-*_T2w.nii.gz'))
            if self.session is not None:
                dg.inputs.field_template = dict(T2ws=os.path.join(
                    self.subject, self.session, 'anat', '_'.join(
                        [sub_ses, '*run-*', '*T2w.nii.gz'])))

            if self.m_stacks is not None:
                t2ws_filter_prior_masks = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='t2ws_filter_prior_masks')
                t2ws_filter_prior_masks.inputs.stacks_id = self.m_stacks

            brainMask = MapNode(interface=preprocess.BrainExtraction(),
                                name='brainExtraction',
                                iterfield=['in_file'])

            brainMask.inputs.bids_dir = self.bids_dir
            brainMask.inputs.in_ckpt_loc = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_localization",
                             "Unet.ckpt-88000.index")).split('.index')[0]
            brainMask.inputs.threshold_loc = 0.49
            brainMask.inputs.in_ckpt_seg = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_segmentation",
                             "Unet.ckpt-20000.index")).split('.index')[0]
            brainMask.inputs.threshold_seg = 0.5

        t2ws_filtered = Node(interface=preprocess.FilteringByRunid(),
                             name='t2ws_filtered')
        masks_filtered = Node(interface=preprocess.FilteringByRunid(),
                              name='masks_filtered')

        if not self.m_skip_stacks_ordering:
            stacksOrdering = Node(interface=preprocess.StacksOrdering(),
                                  name='stackOrdering')
        else:
            stacksOrdering = Node(
                interface=IdentityInterface(fields=['stacks_order']),
                name='stackOrdering')
            stacksOrdering.inputs.stacks_order = self.m_stacks

        if not self.m_skip_nlm_denoising:
            nlmDenoise = MapNode(interface=preprocess.BtkNLMDenoising(),
                                 name='nlmDenoise',
                                 iterfield=['in_file', 'in_mask'])
            nlmDenoise.inputs.bids_dir = self.bids_dir

            # Without the mask, the first slice-intensity correction...
            srtkCorrectSliceIntensity01_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity01_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity01_nlm.inputs.bids_dir = self.bids_dir
            srtkCorrectSliceIntensity01_nlm.inputs.out_postfix = '_uni'

        srtkCorrectSliceIntensity01 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity01',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity01.inputs.bids_dir = self.bids_dir
        srtkCorrectSliceIntensity01.inputs.out_postfix = '_uni'

        srtkSliceBySliceN4BiasFieldCorrection = MapNode(
            interface=preprocess.MialsrtkSliceBySliceN4BiasFieldCorrection(),
            name='srtkSliceBySliceN4BiasFieldCorrection',
            iterfield=['in_file', 'in_mask'])
        srtkSliceBySliceN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        srtkSliceBySliceCorrectBiasField = MapNode(
            interface=preprocess.MialsrtkSliceBySliceCorrectBiasField(),
            name='srtkSliceBySliceCorrectBiasField',
            iterfield=['in_file', 'in_mask', 'in_field'])
        srtkSliceBySliceCorrectBiasField.inputs.bids_dir = self.bids_dir

        # 4-modules sequence to be defined as a stage.
        if not self.m_skip_nlm_denoising:
            srtkCorrectSliceIntensity02_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity02_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity02_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization01_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization01_nlm')
            srtkIntensityStandardization01_nlm.inputs.bids_dir = self.bids_dir

            srtkHistogramNormalization_nlm = Node(
                interface=preprocess.MialsrtkHistogramNormalization(),
                name='srtkHistogramNormalization_nlm')
            srtkHistogramNormalization_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization02_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization02_nlm')
            srtkIntensityStandardization02_nlm.inputs.bids_dir = self.bids_dir

        # 4-modules sequence to be defined as a stage.
        srtkCorrectSliceIntensity02 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity02',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity02.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization01 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization01')
        srtkIntensityStandardization01.inputs.bids_dir = self.bids_dir

        srtkHistogramNormalization = Node(
            interface=preprocess.MialsrtkHistogramNormalization(),
            name='srtkHistogramNormalization')
        srtkHistogramNormalization.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization02 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization02')
        srtkIntensityStandardization02.inputs.bids_dir = self.bids_dir

        srtkMaskImage01 = MapNode(interface=preprocess.MialsrtkMaskImage(),
                                  name='srtkMaskImage01',
                                  iterfield=['in_file', 'in_mask'])
        srtkMaskImage01.inputs.bids_dir = self.bids_dir

        srtkImageReconstruction = Node(
            interface=reconstruction.MialsrtkImageReconstruction(),
            name='srtkImageReconstruction')
        srtkImageReconstruction.inputs.bids_dir = self.bids_dir
        srtkImageReconstruction.inputs.sub_ses = sub_ses
        srtkImageReconstruction.inputs.no_reg = self.m_skip_svr

        srtkTVSuperResolution = Node(
            interface=reconstruction.MialsrtkTVSuperResolution(),
            name='srtkTVSuperResolution')
        srtkTVSuperResolution.inputs.bids_dir = self.bids_dir
        srtkTVSuperResolution.inputs.sub_ses = sub_ses
        srtkTVSuperResolution.inputs.in_loop = self.primal_dual_loops
        srtkTVSuperResolution.inputs.in_deltat = self.deltatTV
        srtkTVSuperResolution.inputs.in_lambda = self.lambdaTV
        srtkTVSuperResolution.inputs.use_manual_masks = self.use_manual_masks

        srtkN4BiasFieldCorrection = Node(
            interface=postprocess.MialsrtkN4BiasFieldCorrection(),
            name='srtkN4BiasFieldCorrection')
        srtkN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        if self.m_do_refine_hr_mask:
            srtkHRMask = Node(
                interface=postprocess.MialsrtkRefineHRMaskByIntersection(),
                name='srtkHRMask')
            srtkHRMask.inputs.bids_dir = self.bids_dir
        else:
            srtkHRMask = Node(interface=postprocess.BinarizeImage(),
                              name='srtkHRMask')

        srtkMaskImage02 = Node(interface=preprocess.MialsrtkMaskImage(),
                               name='srtkMaskImage02')
        srtkMaskImage02.inputs.bids_dir = self.bids_dir

        # Build workflow : connections of the nodes
        # Nodes ready : Linking now
        if self.use_manual_masks:
            if self.m_stacks is not None:
                self.wf.connect(dg, "masks", custom_masks_filter,
                                "input_files")
                self.wf.connect(custom_masks_filter, "output_files", brainMask,
                                "out_file")
            else:
                self.wf.connect(dg, "masks", brainMask, "out_file")
        else:
            if self.m_stacks is not None:
                self.wf.connect(dg, "T2ws", t2ws_filter_prior_masks,
                                "input_files")
                self.wf.connect(t2ws_filter_prior_masks, "output_files",
                                brainMask, "in_file")
            else:
                self.wf.connect(dg, "T2ws", brainMask, "in_file")

        if not self.m_skip_stacks_ordering:
            self.wf.connect(brainMask, "out_file", stacksOrdering,
                            "input_masks")

        self.wf.connect(stacksOrdering, "stacks_order", t2ws_filtered,
                        "stacks_id")
        self.wf.connect(dg, "T2ws", t2ws_filtered, "input_files")

        self.wf.connect(stacksOrdering, "stacks_order", masks_filtered,
                        "stacks_id")
        self.wf.connect(brainMask, "out_file", masks_filtered, "input_files")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(t2ws_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_mask")  ## Comment to match docker process

            self.wf.connect(nlmDenoise, ("out_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_mask")

        self.wf.connect(t2ws_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkCorrectSliceIntensity01_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        else:
            self.wf.connect(srtkCorrectSliceIntensity01,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceN4BiasFieldCorrection, "in_mask")

        self.wf.connect(srtkCorrectSliceIntensity01,
                        ("out_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_file")
        self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                        ("out_fld_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_field")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                            ("out_im_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_mask")
            self.wf.connect(srtkCorrectSliceIntensity02_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkIntensityStandardization01_nlm, "input_images")
            self.wf.connect(srtkIntensityStandardization01_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_masks")
            self.wf.connect(srtkHistogramNormalization_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkIntensityStandardization02_nlm, "input_images")

        self.wf.connect(srtkSliceBySliceCorrectBiasField,
                        ("out_im_file", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_mask")
        self.wf.connect(srtkCorrectSliceIntensity02,
                        ("out_file", utils.sort_ascending),
                        srtkIntensityStandardization01, "input_images")

        self.wf.connect(srtkIntensityStandardization01,
                        ("output_images", utils.sort_ascending),
                        srtkHistogramNormalization, "input_images")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkHistogramNormalization, "input_masks")
        self.wf.connect(srtkHistogramNormalization,
                        ("output_images", utils.sort_ascending),
                        srtkIntensityStandardization02, "input_images")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkIntensityStandardization02_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")
        else:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")

        self.wf.connect(srtkMaskImage01, "out_im_file",
                        srtkImageReconstruction, "input_images")
        self.wf.connect(masks_filtered, "output_files",
                        srtkImageReconstruction, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order",
                        srtkImageReconstruction, "stacks_order")

        self.wf.connect(srtkIntensityStandardization02, "output_images",
                        srtkTVSuperResolution, "input_images")
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending),
                        srtkTVSuperResolution, "input_transforms")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkTVSuperResolution, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order", srtkTVSuperResolution,
                        "stacks_order")

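        # The SDI reconstruction from the previous step initializes the TV super-resolution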
        self.wf.connect(srtkImageReconstruction, "output_sdi",
                        srtkTVSuperResolution, "input_sdi")

        if self.m_do_refine_hr_mask:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkHRMask, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), srtkHRMask,
                            "input_masks")
            self.wf.connect(srtkImageReconstruction,
                            ("output_transforms", utils.sort_ascending),
                            srtkHRMask, "input_transforms")
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_sr")
        else:
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_image")

        self.wf.connect(srtkTVSuperResolution, "output_sr", srtkMaskImage02,
                        "in_file")
        self.wf.connect(srtkHRMask, "output_srmask", srtkMaskImage02,
                        "in_mask")

        self.wf.connect(srtkTVSuperResolution, "output_sr",
                        srtkN4BiasFieldCorrection, "input_image")
        self.wf.connect(srtkHRMask, "output_srmask", srtkN4BiasFieldCorrection,
                        "input_mask")

        # Datasinker
        finalFilenamesGeneration = Node(
            interface=postprocess.FilenamesGeneration(), name='filenames_gen')
        finalFilenamesGeneration.inputs.sub_ses = sub_ses
        finalFilenamesGeneration.inputs.sr_id = self.sr_id
        finalFilenamesGeneration.inputs.use_manual_masks = self.use_manual_masks

        self.wf.connect(stacksOrdering, "stacks_order",
                        finalFilenamesGeneration, "stacks_order")

        datasink = Node(interface=DataSink(), name='data_sinker')
        datasink.inputs.base_directory = final_res_dir

        if not self.m_skip_stacks_ordering:
            self.wf.connect(stacksOrdering, "report_image", datasink,
                            'figures.@stackOrderingQC')
            self.wf.connect(stacksOrdering, "motion_tsv", datasink,
                            'anat.@motionTSV')
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        datasink, 'anat.@LRmasks')
        self.wf.connect(srtkIntensityStandardization02,
                        ("output_images", utils.sort_ascending), datasink,
                        'anat.@LRsPreproc')
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending), datasink,
                        'xfm.@transforms')
        self.wf.connect(finalFilenamesGeneration, "substitutions", datasink,
                        "substitutions")
        self.wf.connect(srtkMaskImage01, ("out_im_file", utils.sort_ascending),
                        datasink, 'anat.@LRsDenoised')
        self.wf.connect(srtkImageReconstruction, "output_sdi", datasink,
                        'anat.@SDI')
        self.wf.connect(srtkN4BiasFieldCorrection, "output_image", datasink,
                        'anat.@SR')
        self.wf.connect(srtkTVSuperResolution, "output_json_path", datasink,
                        'anat.@SRjson')
        self.wf.connect(srtkTVSuperResolution, "output_sr_png", datasink,
                        'figures.@SRpng')
        self.wf.connect(srtkHRMask, "output_srmask", datasink, 'anat.@SRmask')
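
Once wired, the pipeline above runs like any other nipype workflow; a minimal
sketch, assuming a local multiprocessing run (the plugin choice and process
count are not part of the original code):

    self.wf.write_graph(graph2use='colored')  # optional: render the DAG
    self.wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})
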
def genIshareAnat(name='iShareAnat',
                  base_dir=os.path.abspath('morphometry_tutorial/workdir'),
                  use_FLAIR=True,
                  do_recon_all=True,
                  do_fast_first=False,
                  fs_subjects_dir=os.path.abspath('morphometry_tutorial/subjects_dir'),
                  input_dir=os.path.abspath('morphometry_tutorial/data'),
                  subjects=None,
                  sinks=False,
                  spm=op.join(os.getenv('HOME'), 'matlab', 'spm12'),
                  spm_standalone=None,
                  mcr=None):
    '''
    Generates a nipype workflow for the structural parts: FreeSurfer,
    SPM NewSegment, and optionally FSL FAST/FIRST.

    Either just process the T1, or also use the FLAIR with use_FLAIR.

    Inputs:
        - name: name of the workflow
        - base_dir: the working directory
        - use_FLAIR: process the FLAIR image in addition to the T1
        - do_recon_all: run FreeSurfer recon-all
        - do_fast_first: run FSL FAST/FIRST
        - fs_subjects_dir: the freesurfer SUBJECTS_DIR to use
        - input_dir: the directory where the input files downloaded from XNAT are written
        - subjects: a subject list (names should correspond to the folders inside input_dir)
        - sinks: create output sink nodes
        - spm: path to the SPM installation
        - spm_standalone: True if using the SPM standalone version and matlab MCR
        - mcr: path to the matlab compiled runtime folder

    '''
    # Construction of the workflow
    ishanat = Workflow(name)
    ishanat.base_dir = op.abspath(base_dir)

    # List of subjects
    if subjects:
        subject_list = subjects
    else:
        subject_list = [d for d in os.listdir(input_dir)
                        if op.isdir(op.join(input_dir, d))]
    
    # List --> Nipype iterable
    subjectList = Node(IdentityInterface(fields=['subject_id'],
                                         mandatory_inputs=True),
                       name="subjectList")    
    subjectList.iterables = ('subject_id', subject_list)
    

    ## List of scans
    outfields = ['T1']
    if use_FLAIR:
        outfields.append('FLAIR')
        
    scanList = Node(DataGrabber(infields=['subject_id'],
                                outfields=outfields),
                    name="scanList")
    scanList.inputs.base_directory = input_dir
    scanList.inputs.ignore_exception = False
    scanList.inputs.raise_on_empty = True
    scanList.inputs.sort_filelist = True
    scanList.inputs.template = '%s/%s/%s'
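    # get_scanlist_BIDS builds, per subject, the template_args that fill the three %s slots above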
    ishanat.connect(subjectList, ("subject_id", get_scanlist_BIDS, input_dir, use_FLAIR, False, False),
                    scanList, "template_args")
inputnode = Node(IdentityInterface(fields=['in_sub', 'in_hand']),
                 name='inputspec')
# inputnode.inputs.in_sub = 'GK011RZJA'


###############################################################################
#
#     DATA GRABBER NODE
#
###############################################################################

from nipype.interfaces.io import DataGrabber
from os.path import abspath as opap


ds = Node(DataGrabber(infields=['subject_id', 'hand'], outfields=['func']),
          name='datasource')
ds.inputs.base_directory = opap(base_directory)
ds.inputs.template = '%s/%s_Hand/*.nii*'
ds.inputs.sort_filelist = True
# ds.inputs.subject_id = 'GK011RZJA'
# ds.inputs.hand = 'Left'

featreg_merge.connect(inputnode, 'in_hand', ds, 'hand')
featreg_merge.connect(inputnode, 'in_sub', ds, 'subject_id')

'''
    To print the list of files being grabbed, uncomment the following lines.
'''
#  functional_input = ds.run().outputs
#  input_files = functional_input.get()['func']
Exemple #16
def genNormalizeDwiWF(
        name='NormalizeDwi',
        base_dir=op.abspath('.'),
        input_dir=None,
        input_temp='%s/%s/*%s',
        input_temp_args={
            'ref_T1': [['subject_id', 'bias_corrected_images', '_mT1.nii.gz']],
            'forward_deformation_field':
            [['subject_id', 'forward_deformation_field', '_y_T1.nii.gz']],
            'denoised_dwi':
            [['subject_id', 'denoised_dwi_series', '_dwi_denoised.nii.gz']],
            'bval': [['subject_id', 'raw_bvals', '_bval.gz']],
            'bvec': [['subject_id', 'processed_bvecs', '_bvecs.gz']],
            'apply_to': [[
                'subject_id', 'apply_to_files',
                ['_ICVF.nii.gz', '_ISOVF.nii.gz', '_OD.nii.gz']
            ]]
        },
        subjects=None,
        spm_standalone=None,
        mcr=None):

    # Generate WF
    wf = Workflow(name=name)
    wf.base_dir = base_dir

    #Node: subject List
    subjectList = Node(IdentityInterface(fields=['subject_id'],
                                         mandatory_inputs=True),
                       name="subjectList")
    if subjects:
        subjectList.iterables = ('subject_id', subjects)
    else:
        subjectList.iterables = ('subject_id', [
            pth for pth in os.listdir(input_dir)
            if os.path.isdir(op.join(input_dir, pth))
        ])
        print(subjectList.iterables)

    scanList = Node(DataGrabber(infields=['subject_id'],
                                outfields=[
                                    'ref_T1', 'forward_deformation_field',
                                    'denoised_dwi', 'bval', 'bvec', 'apply_to'
                                ]),
                    name="scanList")
    scanList.inputs.base_directory = input_dir
    scanList.inputs.ignore_exception = False
    scanList.inputs.raise_on_empty = True
    scanList.inputs.sort_filelist = False
    scanList.inputs.template = input_temp
    scanList.inputs.template_args = input_temp_args
    wf.connect(subjectList, "subject_id", scanList, "subject_id")

    # Unzip everything for SPM
    gunzipT1 = Node(Gunzip(), name='gunzipT1')
    wf.connect(scanList, "ref_T1", gunzipT1, "in_file")

    gunzipDF = Node(Gunzip(), name='gunzipDF')
    wf.connect(scanList, "forward_deformation_field", gunzipDF, "in_file")

    gunzipbval = Node(Gunzip(), name='gunzipbval')
    wf.connect(scanList, "bval", gunzipbval, "in_file")

    gunzipbvec = Node(Gunzip(), name='gunzipbvec')
    wf.connect(scanList, "bvec", gunzipbvec, "in_file")

    gunzipApplyTo = MapNode(Gunzip(),
                            iterfield=["in_file"],
                            name='gunzipApplyTo')
    wf.connect(scanList, "apply_to", gunzipApplyTo, "in_file")

    # Extract b=0 frames from denoised DWI and average them to make a ref_dwi
    dwib0 = Node(DWIExtract(), name="dwib0")
    dwib0.inputs.bzero = True
    dwib0.inputs.out_file = "dwib0.nii.gz"
    wf.connect(scanList, "denoised_dwi", dwib0, "in_file")
    wf.connect(gunzipbval, "out_file", dwib0, "in_bval")
    wf.connect(gunzipbvec, "out_file", dwib0, "in_bvec")

    # Make an average image
    avgb0 = Node(MeanImage(), name="avgb0")
    avgb0.inputs.nan2zeros = True
    avgb0.inputs.output_type = "NIFTI"
    avgb0.inputs.out_file = "avg_dwib0.nii"
    avgb0.inputs.dimension = "T"
    wf.connect(dwib0, "out_file", avgb0, "in_file")

    # spm Normalize WF
    spmNormProc = genSpmNormalizeDwiWF(name="spmNormProc",
                                       spm_standalone=spm_standalone,
                                       mcr=mcr)
    wf.connect(gunzipT1, "out_file", spmNormProc, "inputNode.ref_T1")
    wf.connect(gunzipDF, "out_file", spmNormProc,
               "inputNode.forward_deformation_field")
    wf.connect(avgb0, "out_file", spmNormProc, "inputNode.ref_dwi")
    wf.connect(gunzipApplyTo, "out_file", spmNormProc, "inputNode.apply_to")

    # Datasink
    datasink = Node(DataSink(base_directory=base_dir,
                             container='%sSink' % name),
                    name='Datasink')
    wf.connect(spmNormProc, "outputNode.normalized_files", datasink,
               "normalized_files")

    return wf
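
A hypothetical invocation of the generator above; the directory layout and
subject IDs are assumptions:

    wf = genNormalizeDwiWF(base_dir=op.abspath('workdir'),
                           input_dir=op.abspath('data'),
                           subjects=['sub-01', 'sub-02'])
    wf.run()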
Exemple #17
    'roi_standard',
    get_roi_standard(base_directory + '/mvpa/ROIs_standard_space')
    )


###############################################################################
#
#     DATA GRABBER NODE
#
###############################################################################

from nipype.interfaces.io import DataGrabber
from os.path import abspath as opap

grabber = Node(DataGrabber(
    infields=['subject_id', 'hand'],
    outfields=['reference', 'matrix']),
    name='moloch'
    )
grabber.inputs.base_directory = opap(base_directory)
grabber.inputs.template = '*'
reg_dir_template = '%s/Analyzed_Data/Main*Exp_%sHand_Run-3.feat/reg/'
grabber.inputs.field_template = dict(
    reference=reg_dir_template + 'example_func.nii.gz',
    matrix=reg_dir_template + 'standard2example*'
    )
grabber.inputs.template_args = dict(
    reference=[['subject_id', 'hand']],
    matrix=[['subject_id', 'hand']]
    )
grabber.inputs.sort_filelist = True
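
# Hypothetical values to illustrate the substitution: with these inputs the
# 'reference' outfield expands (under base_directory) to
#   GK011RZJA/Analyzed_Data/Main*Exp_LeftHand_Run-3.feat/reg/example_func.nii.gz
# grabber.inputs.subject_id = 'GK011RZJA'
# grabber.inputs.hand = 'Left'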
"Test the DataGrabber interface"
from nipype.interfaces.io import DataGrabber

dg = DataGrabber()

dg.inputs.base_directory = '../../data/ds000171/'
dg.inputs.template = 'sub-*/anat/sub-*_T1w.nii.gz'
dg.inputs.sort_filelist = True

results = dg.run()
results.outputs
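# With no outfields declared, DataGrabber exposes its matches on the default
# 'outfiles' output, i.e. results.outputs.outfiles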

# Try it out for func images too
dg2 = DataGrabber()

dg2.inputs.base_directory = '../../data/ds000171/'
dg2.inputs.template = 'sub-*/func/s*.nii.gz'
dg2.inputs.sort_filelist = True

results2 = dg2.run()
results2.outputs
preproc = create_featreg_preproc(highpass=True, whichvol='mean')
preproc.inputs.inputspec.fwhm = 0
preproc.inputs.inputspec.highpass = 128. / (2 * 2.5)

###############################################################################
#
#     DATA GRABBER NODE
#
###############################################################################

from nipype.interfaces.io import DataGrabber
from os.path import abspath as opap

base_directory = '/Users/AClab/Documents/mikbuch/Maestro_Project1'

ds = Node(DataGrabber(infields=['subject_id', 'hand'], outfields=['func']),
          name='datasource')
ds.inputs.base_directory = opap(base_directory)
ds.inputs.template = '%s/%s_Hand/*.nii'
ds.inputs.sort_filelist = True
ds.inputs.subject_id = 'GK011RZJA'
ds.inputs.hand = 'Left'
'''
    To print the list of files being grabbed, uncomment the following lines.
'''
#  functional_input = ds.run().outputs
#  input_files = functional_input.get()['func']
#  print input_files

###############################################################################
#
Exemple #20
def test_pipeline():
    maskWhiteMatterFromSeeds = True
    CACHE_DIR = 'seed_Test_cache'
    preproc = pipe.Workflow(name=CACHE_DIR)
    preproc.base_dir = os.getcwd()

    labelFile = "/Shared/paulsen/Experiments/rsFMRI/rs-fMRI-pilot/ReferenceAtlas/template_ABC_labels.nii.gz"
    downsampledLabel = 'template_downsampled.nii.gz'
    nacResampleResolution = (2.0, 2.0, 2.0)
    downsampleAtlas = pipe.Node(interface=Function(
        function=resampleImage,
        input_names=['inputVolume', 'outputVolume', 'resolution'],
        output_names=['outputVolume']),
                                name="downsampleAtlas")
    downsampleAtlas.inputs.inputVolume = labelFile
    downsampleAtlas.inputs.outputVolume = downsampledLabel
    downsampleAtlas.inputs.resolution = [int(x) for x in nacResampleResolution]

    grabber = pipe.Node(interface=DataGrabber(infields=['label'],
                                              outfields=['seedfile']),
                        name='dataGrabber')
    grabber.inputs.base_directory = os.path.abspath('seeds/Atlas')
    grabber.inputs.template = '%s_mask.nii.gz'
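    # the 'label' infield, connected below, fills the %s to select one seed mask per label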

    labels, seeds = getAtlasPoints(
        '/Shared/paulsen/Experiments/rsFMRI/rs-fMRI-pilot/seeds.fcsv')
    print(labels)
    seedsIdentity = pipe.Node(interface=IdentityInterface(fields=['index']),
                              name='seedsIdentity')
    seedsIdentity.iterables = ('index', range(len(labels)))

    labelNode = pipe.Node(interface=IdentityInterface(fields=['label']),
                          name='labelNode')
    labelNode.iterables = ('label', labels)
    preproc.connect(labelNode, 'label', grabber, 'label')

    # import glob
    # seedfiles = [os.path.abspath(x) for x in glob.glob('seeds/Atlas/*mask.nii.gz')]
    # infiles.iterables = ('infile', seedfiles)

    atlas_DataSink = pipe.Node(interface=DataSink(), name="atlas_DataSink")
    atlas_DataSink.inputs.base_directory = preproc.base_dir  # '/Shared/paulsen/Experiments/20130417_rsfMRI_Results'
    atlas_DataSink.inputs.container = 'seed_Test'
    atlas_DataSink.inputs.parameterization = False
    atlas_DataSink.overwrite = True

    clipSeedWithVentriclesNode = pipe.Node(interface=Function(
        function=clipSeedWithVentricles,
        input_names=[
            'unclipped_seed_fn', 'fmriBABCSeg_fn', 'desired_out_seed_fn'
        ],
        output_names=['clipped_seed_fn']),
                                           name='clipSeedWithVentriclesNode')

    def create_label(label, _type):
        return '{0}_{1}_clipped.nii.gz'.format(label, _type)

    preproc.connect([(labelNode, clipSeedWithVentriclesNode, [
        (('label', create_label, 'csf'), 'desired_out_seed_fn')
    ])])
    preproc.connect(downsampleAtlas, 'outputVolume',
                    clipSeedWithVentriclesNode, 'fmriBABCSeg_fn')

    preproc.connect(grabber, 'seedfile', clipSeedWithVentriclesNode,
                    'unclipped_seed_fn')
    # preproc.connect(infiles, 'infile', clipSeedWithVentriclesNode, 'desired_out_seed_fn')
    preproc.connect(clipSeedWithVentriclesNode, 'clipped_seed_fn',
                    atlas_DataSink, 'CSF')

    if maskWhiteMatterFromSeeds:
        clipSeedWithWhiteMatterNode = pipe.Node(
            interface=Function(function=clipSeedWithWhiteMatter,
                               input_names=['seed', 'mask', 'outfile'],
                               output_names=['outfile']),
            name='clipSeedWithWhiteMatterNode')
        preproc.connect([(labelNode, clipSeedWithWhiteMatterNode,
                          [(('label', create_label, 'wm'), 'outfile')])])
        preproc.connect(downsampleAtlas, 'outputVolume',
                        clipSeedWithWhiteMatterNode, 'mask')

        preproc.connect(clipSeedWithVentriclesNode, 'clipped_seed_fn',
                        clipSeedWithWhiteMatterNode, 'seed')
        preproc.connect(clipSeedWithWhiteMatterNode, 'outfile', atlas_DataSink,
                        'WM')

    preproc.run()
def create_reconall(config):
    """
    This function...

    :param config:
    :return:
    """
    ar1_wf = create_autorecon1(config)
    ar2_wf, ar2_lh, ar2_rh = create_autorecon2(config)
    ar3_wf = create_autorecon3(config)

    # Connect workflows
    reconall = pe.Workflow(name="recon-all")
    if config["longitudinal"]:
        # grab files from the initial single session run
        grab_inittp_files = pe.Node(
            DataGrabber(infields=["subject_id"],
                        outfields=["inputvols", "iscales", "ltas"]),
            name="Grab_Initial_Files",
        )
        grab_inittp_files.inputs.template = "*"
        grab_inittp_files.inputs.base_directory = config["subjects_dir"]
        grab_inittp_files.inputs.field_template = dict(
            inputvols="%s/mri/orig/0*.mgz",
            iscales="%s/mri/orig/0*-iscale.txt",
            ltas="%s/mri/orig/0*.lta",
        )

        grab_inittp_files.inputs.template_args = dict(
            inputvols=[["subject_id"]],
            iscales=[["subject_id"]],
            ltas=[["subject_id"]])

        reconall.connect([(
            grab_inittp_files,
            ar1_wf,
            [
                ("inputvols", "AutoRecon1_Inputs.in_T1s"),
                ("iscales", "AutoRecon1_Inputs.iscales"),
                ("ltas", "AutoRecon1_Inputs.ltas"),
            ],
        )])

        merge_norms = pe.Node(Merge(len(config["timepoints"])),
                              name="Merge_Norms")
        merge_segs = pe.Node(Merge(len(config["timepoints"])),
                             name="Merge_Segmentations")
        merge_segs_noCC = pe.Node(Merge(len(config["timepoints"])),
                                  name="Merge_Segmentations_noCC")
        merge_template_ltas = pe.Node(Merge(len(config["timepoints"])),
                                      name="Merge_Template_ltas")

        for i, tp in enumerate(config["timepoints"]):
            # datasource timepoint files
            tp_data_source = pe.Node(FreeSurferSource(),
                                     name="{0}_DataSource".format(tp))
            tp_data_source.inputs.subject_id = tp
            tp_data_source.inputs.subjects_dir = config["subjects_dir"]

            tp_data_grabber = pe.Node(
                DataGrabber(
                    infields=["tp", "long_template"],
                    outfields=["subj_to_template_lta", "seg_noCC",
                               "seg_presurf"]),
                name="{0}_DataGrabber".format(tp),
            )
            tp_data_grabber.inputs.template = "*"
            tp_data_grabber.inputs.base_directory = config["subjects_dir"]
            tp_data_grabber.inputs.field_template = dict(
                subj_to_template_lta="%s/mri/transforms/%s_to_%s.lta",
                seg_noCC="%s/mri/aseg.auto_noCCseg.mgz",
                seg_presurf="%s/mri/aseg.presurf.mgz",
            )

            tp_data_grabber.inputs.template_args = dict(
                subj_to_template_lta=[["long_template", "tp",
                                       "long_template"]],
                seg_noCC=[["tp"]],
                seg_presurf=[["tp"]],
            )

            reconall.connect([
                (tp_data_source, merge_norms, [("norm", "in{0}".format(i))]),
                (tp_data_grabber, merge_segs, [("seg_presurf",
                                                "in{0}".format(i))]),
                (
                    tp_data_grabber,
                    merge_segs_noCC,
                    [("seg_noCC", "in{0}".format(i))],
                ),
                (
                    tp_data_grabber,
                    merge_template_ltas,
                    [("subj_to_template_lta", "in{0}".format(i))],
                ),
            ])

            if tp == config["subject_id"]:
                reconall.connect([
                    (tp_data_source, ar2_wf, [("wm",
                                               "AutoRecon2_Inputs.init_wm")]),
                    (
                        tp_data_grabber,
                        ar2_wf,
                        [(
                            "subj_to_template_lta",
                            "AutoRecon2_Inputs.subj_to_template_lta",
                        )],
                    ),
                    (
                        tp_data_grabber,
                        ar2_wf,
                        [(
                            "subj_to_template_lta",
                            "AutoRecon1_Inputs.subj_to_template_lta",
                        )],
                    ),
                ])

        reconall.connect([
            (merge_norms, ar2_wf, [("out", "AutoRecon2_Inputs.alltps_norms")]),
            (merge_segs, ar2_wf, [("out", "AutoRecon2_Inputs.alltps_segs")]),
            (
                merge_template_ltas,
                ar2_wf,
                [("out", "AutoRecon2_Inputs.alltps_to_template_ltas")],
            ),
            (
                merge_segs_noCC,
                ar2_wf,
                [("out", "AutoRecon2_Inputs.alltps_segs_noCC")],
            ),
        ])

        # datasource files from the template run
        ds_template_files = pe.Node(FreeSurferSource(),
                                    name="Datasource_Template_Files")
        ds_template_files.inputs.subject_id = config["subject_id"]
        ds_template_files.inputs.subjects_dir = config["subjects_dir"]

        reconall.connect([
            (
                ds_template_files,
                ar1_wf,
                [("brainmask", "AutoRecon1_Inputs.template_brainmask")],
            ),
            (
                ds_template_files,
                ar2_wf,
                [("aseg", "AutoRecon2_Inputs.template_aseg")],
            ),
        ])

        # grab files from template run
        grab_template_files = pe.Node(
            DataGrabber(
                infields=["subject_id", "long_template"],
                outfields=[
                    "template_talairach_xfm",
                    "template_talairach_lta",
                    "template_talairach_m3z",
                    "template_label_intensities",
                    "template_lh_white",
                    "template_rh_white",
                    "template_lh_pial",
                    "template_rh_pial",
                ],
            ),
            name="Grab_Template_Files",
        )
        grab_template_files.inputs.template = "*"
        grab_template_files.inputs.base_directory = config["subjects_dir"]
        grab_template_files.inputs.subject_id = config["subject_id"]
        grab_template_files.inputs.long_template = config["long_template"]
        grab_template_files.inputs.field_template = dict(
            template_talairach_xfm="%s/mri/transfroms/talairach.xfm",
            template_talairach_lta="%s/mri/transfroms/talairach.lta",
            template_talairach_m3z="%s/mri/transfroms/talairach.m3z",
            template_label_intensities=
            "%s/mri/aseg.auto_noCCseg.label_intensities.txt",
            template_lh_white="%s/surf/lh.white",
            template_rh_white="%s/surf/rh.white",
            template_lh_pial="%s/surf/lh.pial",
            template_rh_pial="%s/surf/rh.pial",
        )

        grab_template_files.inputs.template_args = dict(
            template_talairach_xfm=[["long_template"]],
            template_talairach_lta=[["long_template"]],
            template_talairach_m3z=[["long_template"]],
            template_label_intensities=[["long_template"]],
            template_lh_white=[["long_template"]],
            template_rh_white=[["long_template"]],
            template_lh_pial=[["long_template"]],
            template_rh_pial=[["long_template"]],
        )
        reconall.connect([
            (
                grab_template_files,
                ar1_wf,
                [(
                    "template_talairach_xfm",
                    "AutoRecon1_Inputs.template_talairach_xfm",
                )],
            ),
            (
                grab_template_files,
                ar2_wf,
                [
                    (
                        "template_talairach_lta",
                        "AutoRecon2_Inputs.template_talairach_lta",
                    ),
                    (
                        "template_talairach_m3z",
                        "AutoRecon2_Inputs.template_talairach_m3z",
                    ),
                    (
                        "template_label_intensities",
                        "AutoRecon2_Inputs.template_label_intensities",
                    ),
                    ("template_lh_white",
                     "AutoRecon2_Inputs.template_lh_white"),
                    ("template_rh_white",
                     "AutoRecon2_Inputs.template_rh_white"),
                    ("template_lh_pial", "AutoRecon2_Inputs.template_lh_pial"),
                    ("template_rh_pial", "AutoRecon2_Inputs.template_rh_pial"),
                ],
            ),
        ])
        # end longitudinal data collection

    # connect autorecon 1 - 3
    reconall.connect([
        (
            ar1_wf,
            ar3_wf,
            [
                ("AutoRecon1_Inputs.subject_id",
                 "AutoRecon3_Inputs.subject_id"),
                (
                    "AutoRecon1_Inputs.subjects_dir",
                    "AutoRecon3_Inputs.subjects_dir",
                ),
                ("Copy_Brainmask.out_file", "AutoRecon3_Inputs.brainmask"),
                ("Copy_Transform.out_file", "AutoRecon3_Inputs.transform"),
                ("Add_Transform_to_Header.out_file",
                 "AutoRecon3_Inputs.orig_mgz"),
                ("Robust_Template.out_file", "AutoRecon3_Inputs.rawavg"),
            ],
        ),
        (
            ar1_wf,
            ar2_wf,
            [
                ("Copy_Brainmask.out_file", "AutoRecon2_Inputs.brainmask"),
                ("Copy_Transform.out_file", "AutoRecon2_Inputs.transform"),
                ("Add_Transform_to_Header.out_file", "AutoRecon2_Inputs.orig"),
                ("AutoRecon1_Inputs.subject_id",
                 "AutoRecon2_Inputs.subject_id"),
                (
                    "AutoRecon1_Inputs.subjects_dir",
                    "AutoRecon2_Inputs.subjects_dir",
                ),
            ],
        ),
        (
            ar2_lh,
            ar3_wf,
            [
                ("inflate2.out_file", "AutoRecon3_Inputs.lh_inflated"),
                ("Smooth2.surface", "AutoRecon3_Inputs.lh_smoothwm"),
                ("Make_Surfaces.out_white", "AutoRecon3_Inputs.lh_white"),
                ("Make_Surfaces.out_cortex",
                 "AutoRecon3_Inputs.lh_cortex_label"),
                ("Make_Surfaces.out_area", "AutoRecon3_Inputs.lh_area"),
                ("Make_Surfaces.out_curv", "AutoRecon3_Inputs.lh_curv"),
                ("inflate2.out_sulc", "AutoRecon3_Inputs.lh_sulc"),
                (
                    "Extract_Main_Component.out_file",
                    "AutoRecon3_Inputs.lh_orig_nofix",
                ),
                ("Remove_Intersection.out_file", "AutoRecon3_Inputs.lh_orig"),
                ("Curvature1.out_mean", "AutoRecon3_Inputs.lh_white_H"),
                ("Curvature1.out_gauss", "AutoRecon3_Inputs.lh_white_K"),
            ],
        ),
        (
            ar2_rh,
            ar3_wf,
            [
                ("inflate2.out_file", "AutoRecon3_Inputs.rh_inflated"),
                ("Smooth2.surface", "AutoRecon3_Inputs.rh_smoothwm"),
                ("Make_Surfaces.out_white", "AutoRecon3_Inputs.rh_white"),
                ("Make_Surfaces.out_cortex",
                 "AutoRecon3_Inputs.rh_cortex_label"),
                ("Make_Surfaces.out_area", "AutoRecon3_Inputs.rh_area"),
                ("Make_Surfaces.out_curv", "AutoRecon3_Inputs.rh_curv"),
                ("inflate2.out_sulc", "AutoRecon3_Inputs.rh_sulc"),
                (
                    "Extract_Main_Component.out_file",
                    "AutoRecon3_Inputs.rh_orig_nofix",
                ),
                ("Remove_Intersection.out_file", "AutoRecon3_Inputs.rh_orig"),
                ("Curvature1.out_mean", "AutoRecon3_Inputs.rh_white_H"),
                ("Curvature1.out_gauss", "AutoRecon3_Inputs.rh_white_K"),
            ],
        ),
        (
            ar2_wf,
            ar3_wf,
            [
                ("Copy_CCSegmentation.out_file",
                 "AutoRecon3_Inputs.aseg_presurf"),
                (
                    "Mask_Brain_Final_Surface.out_file",
                    "AutoRecon3_Inputs.brain_finalsurfs",
                ),
                ("MRI_Pretess.out_file", "AutoRecon3_Inputs.wm"),
                ("Fill.out_file", "AutoRecon3_Inputs.filled"),
                ("CA_Normalize.out_file", "AutoRecon3_Inputs.norm"),
            ],
        ),
    ])

    return reconall
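
A hypothetical cross-sectional configuration for the function above; only the
keys read in this function are shown, and the create_autorecon1/2/3 builders
may require additional ones:

    config = {
        "longitudinal": False,
        "subjects_dir": "/data/subjects",
    }
    reconall = create_reconall(config)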
def firstlevel_wf(subject_id, sink_directory, name='wmaze_frstlvl_wf'):
    # Create the frstlvl workflow
    frstlvl_wf = Workflow(name='frstlvl_wf')

    # Dictionary holding the wildcard used in datasource
    info = dict(task_mri_files=[['subject_id', 'wmaze']],
                motion_noise_files=[['subject_id']])

    # Calls the subjectinfo function with the name, onset, duration, and amplitude info
    subject_info = Node(Function(input_names=['subject_id'],
                                 output_names=['output'],
                                 function=subjectinfo),
                        name='subject_info')
    subject_info.inputs.ignore_exception = False
    subject_info.inputs.subject_id = subject_id

    # Create another Function node to define the contrasts for the experiment
    getcontrasts = Node(
        Function(
            input_names=['subject_id', 'info'],
            output_names=['contrasts'],
            # Calls the function 'get_contrasts'
            function=get_contrasts),
        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    # Receives subject_id as input
    getcontrasts.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getcontrasts, 'info')

    #### subject_info (output) ----> getcontrasts (info)

    # Create a Function node to substitute names of folders and files created during pipeline
    getsubs = Node(
        Function(
            input_names=['subject_id', 'info', 'cons'],
            output_names=['subs'],
            # Calls the function 'get_subs'
            function=get_subs),
        name='getsubs')
    getsubs.inputs.ignore_exception = False
    # Receives subject_id as input
    getsubs.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getsubs, 'info')
    frstlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    # Create a datasource node to get the task_mri and motion-noise files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=info.keys()),
                      name='datasource')
    # Indicates the string template to match (in this case, any that match the field template)
    datasource.inputs.template = '*'
    # Receives subject_id as an input
    datasource.inputs.subject_id = subject_id
    # Base directory to allow branching pathways
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/preproc/')
    datasource.inputs.field_template = dict(
        task_mri_files='%s/func/smoothed_fullspectrum/_maskfunc2*/*%s*.nii.gz',
        # Filter regressor noise files
        motion_noise_files='%s/noise/filter_regressor*.txt')
    # Inputs from the infields argument ('subject_id') that satisfy the template
    datasource.inputs.template_args = info
    # Forces DataGrabber to return data in sorted order when using wildcards
    datasource.inputs.sort_filelist = True
    # Do not ignore exceptions
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    # MapNode to remove the last three volumes from the functional data
    # (start at the first volume and keep 197 volumes)
    fslroi_epi = MapNode(ExtractROI(t_min=0, t_size=197),
                         iterfield=['in_file'],
                         name='fslroi_epi')
    fslroi_epi.inputs.output_type = 'NIFTI_GZ'
    fslroi_epi.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(datasource, 'task_mri_files', fslroi_epi, 'in_file')

    # Function node to modify the motion and noise files to be single regressors
    motionnoise = Node(
        Function(
            input_names=['subjinfo', 'files'],
            output_names=['subjinfo'],
            # Calls the function 'motion_noise'
            function=motion_noise),
        name='motionnoise')
    motionnoise.inputs.ignore_exception = False
    # The bunch from subject_info function containing regressor names, onsets, durations, and amplitudes
    frstlvl_wf.connect(subject_info, 'output', motionnoise, 'subjinfo')
    frstlvl_wf.connect(datasource, 'motion_noise_files', motionnoise, 'files')

    # Makes a model specification compatible with spm/fsl designers
    # Requires subjectinfo to be received in the form of a Bunch of a list of Bunch
    specify_model = Node(SpecifyModel(), name='specify_model')
    # High-pass filter cutoff in seconds
    specify_model.inputs.high_pass_filter_cutoff = -1.0
    specify_model.inputs.ignore_exception = False
    # input units in either 'secs' or 'scans'
    specify_model.inputs.input_units = 'secs'
    # Time between start of one volume and the start of following volume
    specify_model.inputs.time_repetition = 2.0
    # Edited data files for model -- list of 4D files
    frstlvl_wf.connect(fslroi_epi, 'roi_file', specify_model,
                       'functional_runs')
    # List of event description files in 3 column format corresponding to onsets, durations, and amplitudes
    frstlvl_wf.connect(motionnoise, 'subjinfo', specify_model, 'subject_info')

    # Basic interface class generates identity mappings
    modelfit_inputspec = Node(IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ],
                                                mandatory_inputs=True),
                              name='modelfit_inputspec')
    # Set bases to a dictionary with a second dictionary setting the value of dgamma derivatives as 'False'
    modelfit_inputspec.inputs.bases = {'dgamma': {'derivs': False}}
    # Film threshold
    modelfit_inputspec.inputs.film_threshold = 0.0
    # Interscan_interval
    modelfit_inputspec.inputs.interscan_interval = 2.0
    # Create model serial correlations for Level1Design
    modelfit_inputspec.inputs.model_serial_correlations = True
    frstlvl_wf.connect(fslroi_epi, 'roi_file', modelfit_inputspec,
                       'functional_data')
    frstlvl_wf.connect(getcontrasts, 'contrasts', modelfit_inputspec,
                       'contrasts')
    frstlvl_wf.connect(specify_model, 'session_info', modelfit_inputspec,
                       'session_info')

    # Creates a first level SPM design matrix to demonstrate contrasts and motion/noise regressors
    level1_design = MapNode(Level1Design(),
                            iterfield=['contrasts', 'session_info'],
                            name='level1_design')
    level1_design.inputs.ignore_exception = False
    # Inputs the interscan interval (in secs)
    frstlvl_wf.connect(modelfit_inputspec, 'interscan_interval', level1_design,
                       'interscan_interval')
    # Session specific information generated by ``modelgen.SpecifyModel``
    frstlvl_wf.connect(modelfit_inputspec, 'session_info', level1_design,
                       'session_info')
    # List of contrasts, each of the form ('name', 'stat', [condition list], [weight list], [session list]).
    # If the session list is None or not provided, all sessions are used.
    frstlvl_wf.connect(modelfit_inputspec, 'contrasts', level1_design,
                       'contrasts')
    # Name of basis function and options e.g., {'dgamma': {'derivs': True}}
    frstlvl_wf.connect(modelfit_inputspec, 'bases', level1_design, 'bases')
    # Option to model serial correlations using an autoregressive estimator (order 1)
    # Setting this option is only useful in the context of the fsf file
    frstlvl_wf.connect(modelfit_inputspec, 'model_serial_correlations',
                       level1_design, 'model_serial_correlations')

    # Create a MapNode to generate a design.mat file for each run
    generate_model = MapNode(FEATModel(),
                             iterfield=['fsf_file', 'ev_files'],
                             name='generate_model')
    generate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    generate_model.inputs.ignore_exception = False
    generate_model.inputs.output_type = 'NIFTI_GZ'
    generate_model.inputs.terminal_output = 'stream'
    # File specifying the feat design spec file
    frstlvl_wf.connect(level1_design, 'fsf_files', generate_model, 'fsf_file')
    # Event spec files generated by level1design (condition information files)
    frstlvl_wf.connect(level1_design, 'ev_files', generate_model, 'ev_files')

    # Create a MapNode to estimate the model using FILMGLS -- fits the design matrix to the voxel timeseries
    estimate_model = MapNode(FILMGLS(),
                             iterfield=['design_file', 'in_file', 'tcon_file'],
                             name='estimate_model')
    estimate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    estimate_model.inputs.ignore_exception = False
    # Susan-smooth mask size
    estimate_model.inputs.mask_size = 5
    estimate_model.inputs.output_type = 'NIFTI_GZ'
    estimate_model.inputs.results_dir = 'results'
    # Smooth auto-correlation estimates
    estimate_model.inputs.smooth_autocorr = True
    estimate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(modelfit_inputspec, 'film_threshold', estimate_model,
                       'threshold')
    frstlvl_wf.connect(modelfit_inputspec, 'functional_data', estimate_model,
                       'in_file')
    # Mat file containing ascii matrix for design
    frstlvl_wf.connect(generate_model, 'design_file', estimate_model,
                       'design_file')
    # Contrast file containing contrast vectors
    frstlvl_wf.connect(generate_model, 'con_file', estimate_model, 'tcon_file')

    # Create a merge node to merge the contrasts - necessary for fsl 5.0.7 and greater
    merge_contrasts = MapNode(Merge(2),
                              iterfield=['in1'],
                              name='merge_contrasts')
    frstlvl_wf.connect(estimate_model, 'zstats', merge_contrasts, 'in1')

    # Create a MapNode to transform the z2pval
    z2pval = MapNode(ImageMaths(), iterfield=['in_file'], name='z2pval')
    z2pval.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    # Do not ignore exceptions
    z2pval.inputs.ignore_exception = False
    # Defines the operation used
    z2pval.inputs.op_string = '-ztop'
    # Set the outfile type to nii.gz
    z2pval.inputs.output_type = 'NIFTI_GZ'
    # Out-file suffix
    z2pval.inputs.suffix = '_pval'
    # Set output to stream in terminal
    z2pval.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(merge_contrasts, ('out', pop_lambda), z2pval, 'in_file')

    # Create an outputspec node using IdentityInterface() to receive information from estimate_model,
    # merge_contrasts, z2pval, generate_model, and estimate_model
    modelfit_outputspec = Node(IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates',
        'zstats', 'design_image', 'design_file', 'design_cov', 'sigmasquareds'
    ],
                                                 mandatory_inputs=True),
                               name='modelfit_outputspec')
    # All lvl1 cope files
    frstlvl_wf.connect(estimate_model, 'copes', modelfit_outputspec, 'copes')
    # All lvl1 varcope files
    frstlvl_wf.connect(estimate_model, 'varcopes', modelfit_outputspec,
                       'varcopes')
    # All zstats across runs
    frstlvl_wf.connect(merge_contrasts, 'out', modelfit_outputspec, 'zstats')
    # All p-value files (z-stats converted with -ztop)
    frstlvl_wf.connect(z2pval, 'out_file', modelfit_outputspec, 'pfiles')
    # Graphical representation of design matrix
    frstlvl_wf.connect(generate_model, 'design_image', modelfit_outputspec,
                       'design_image')
    # Mat file containing ascii matrix for design
    frstlvl_wf.connect(generate_model, 'design_file', modelfit_outputspec,
                       'design_file')
    # Graphical representation of design covariance
    frstlvl_wf.connect(generate_model, 'design_cov', modelfit_outputspec,
                       'design_cov')
    # Parameter estimates for each column of the design matrix
    frstlvl_wf.connect(estimate_model, 'param_estimates', modelfit_outputspec,
                       'parameter_estimates')
    # Degrees of freedom
    frstlvl_wf.connect(estimate_model, 'dof_file', modelfit_outputspec,
                       'dof_file')
    # Summary of residuals
    frstlvl_wf.connect(estimate_model, 'sigmasquareds', modelfit_outputspec,
                       'sigmasquareds')

    # Create a datasink node to save output from multiple points in the pipeline
    sinkd = MapNode(DataSink(),
                    iterfield=[
                        'substitutions', 'modelfit.contrasts.@copes',
                        'modelfit.contrasts.@varcopes', 'modelfit.estimates',
                        'modelfit.contrasts.@zstats'
                    ],
                    name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    frstlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')
    frstlvl_wf.connect(modelfit_outputspec, 'parameter_estimates', sinkd,
                       'modelfit.estimates')
    frstlvl_wf.connect(modelfit_outputspec, 'sigmasquareds', sinkd,
                       'modelfit.estimates.@sigsq')
    frstlvl_wf.connect(modelfit_outputspec, 'dof_file', sinkd, 'modelfit.dofs')
    frstlvl_wf.connect(modelfit_outputspec, 'copes', sinkd,
                       'modelfit.contrasts.@copes')
    frstlvl_wf.connect(modelfit_outputspec, 'varcopes', sinkd,
                       'modelfit.contrasts.@varcopes')
    frstlvl_wf.connect(modelfit_outputspec, 'zstats', sinkd,
                       'modelfit.contrasts.@zstats')
    frstlvl_wf.connect(modelfit_outputspec, 'design_image', sinkd,
                       'modelfit.design')
    frstlvl_wf.connect(modelfit_outputspec, 'design_cov', sinkd,
                       'modelfit.design.@cov')
    frstlvl_wf.connect(modelfit_outputspec, 'design_file', sinkd,
                       'modelfit.design.@matrix')
    frstlvl_wf.connect(modelfit_outputspec, 'pfiles', sinkd,
                       'modelfit.contrasts.@pstats')

    return frstlvl_wf
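
A hypothetical invocation of the workflow above; the subject ID and paths are
placeholders:

    frstlvl = firstlevel_wf('WMAZE_001',
                            '/home/data/madlab/data/mri/wmaze/frstlvl')
    frstlvl.base_dir = '/scratch/madlab/wmaze/workdir'
    frstlvl.run()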
Exemple #23
def Lesion_extractor(
    name='Lesion_Extractor',
    wf_name='Test',
    base_dir='/homes_unix/alaurent/',
    input_dir=None,
    subjects=None,
    main=None,
    acc=None,
    atlas='/homes_unix/alaurent/cbstools-public-master/atlases/brain-segmentation-prior3.0/brain-atlas-quant-3.0.8.txt'
):

    wf = Workflow(wf_name)
    wf.base_dir = base_dir

    #file = open(subjects,"r")
    #subjects = file.read().split("\n")
    #file.close()

    # Subject List
    subjectList = Node(IdentityInterface(fields=['subject_id'],
                                         mandatory_inputs=True),
                       name="subList")
    subjectList.iterables = ('subject_id', [
        sub for sub in subjects if sub != '' and sub != '\n'
    ])

    # T1w and FLAIR
    scanList = Node(DataGrabber(infields=['subject_id'],
                                outfields=['T1', 'FLAIR']),
                    name="scanList")
    scanList.inputs.base_directory = input_dir
    scanList.inputs.ignore_exception = False
    scanList.inputs.raise_on_empty = True
    scanList.inputs.sort_filelist = True
    #scanList.inputs.template = '%s/%s.nii'
    #scanList.inputs.template_args = {'T1': [['subject_id','T1*']],
    #                                 'FLAIR': [['subject_id','FLAIR*']]}
    scanList.inputs.template = '%s/anat/%s'
    scanList.inputs.template_args = {
        'T1': [['subject_id', '*_T1w.nii.gz']],
        'FLAIR': [['subject_id', '*_FLAIR.nii.gz']]
    }
    wf.connect(subjectList, "subject_id", scanList, "subject_id")

    #     # T1w and FLAIR
    #     dg = Node(DataGrabber(outfields=['T1', 'FLAIR']), name="T1wFLAIR")
    #     dg.inputs.base_directory = "/homes_unix/alaurent/LesionPipeline"
    #     dg.inputs.template = "%s/NIFTI/*.nii.gz"
    #     dg.inputs.template_args['T1']=[['7']]
    #     dg.inputs.template_args['FLAIR']=[['9']]
    #     dg.inputs.sort_filelist=True

    # Reorient Volume
    T1Conv = Node(Reorient2Std(), name="ReorientVolume")
    T1Conv.inputs.ignore_exception = False
    T1Conv.inputs.terminal_output = 'none'
    T1Conv.inputs.out_file = "T1_reoriented.nii.gz"
    wf.connect(scanList, "T1", T1Conv, "in_file")

    # Reorient Volume (2)
    T2flairConv = Node(Reorient2Std(), name="ReorientVolume2")
    T2flairConv.inputs.ignore_exception = False
    T2flairConv.inputs.terminal_output = 'none'
    T2flairConv.inputs.out_file = "FLAIR_reoriented.nii.gz"
    wf.connect(scanList, "FLAIR", T2flairConv, "in_file")

    # N3 Correction
    T1NUC = Node(N4BiasFieldCorrection(), name="N3Correction")
    T1NUC.inputs.dimension = 3
    T1NUC.inputs.environ = {'NSLOTS': '1'}
    T1NUC.inputs.ignore_exception = False
    T1NUC.inputs.num_threads = 1
    T1NUC.inputs.save_bias = False
    T1NUC.inputs.terminal_output = 'none'
    wf.connect(T1Conv, "out_file", T1NUC, "input_image")

    # N3 Correction (2)
    T2flairNUC = Node(N4BiasFieldCorrection(), name="N3Correction2")
    T2flairNUC.inputs.dimension = 3
    T2flairNUC.inputs.environ = {'NSLOTS': '1'}
    T2flairNUC.inputs.ignore_exception = False
    T2flairNUC.inputs.num_threads = 1
    T2flairNUC.inputs.save_bias = False
    T2flairNUC.inputs.terminal_output = 'none'
    wf.connect(T2flairConv, "out_file", T2flairNUC, "input_image")
    '''
    #####################
    ### PRE-NORMALIZE ###
    #####################
    To make sure there are no outlier values (negative, or very high) that would offset the initialization steps
    '''

    # Intensity Range Normalization
    getMaxT1NUC = Node(ImageStats(op_string='-r'), name="getMaxT1NUC")
    wf.connect(T1NUC, 'output_image', getMaxT1NUC, 'in_file')

    T1NUCirn = Node(AbcImageMaths(), name="IntensityNormalization")
    T1NUCirn.inputs.op_string = "-div"
    T1NUCirn.inputs.out_file = "normT1.nii.gz"
    wf.connect(T1NUC, 'output_image', T1NUCirn, 'in_file')
    wf.connect(getMaxT1NUC, ('out_stat', getElementFromList, 1), T1NUCirn,
               "op_value")

    # Intensity Range Normalization (2)
    getMaxT2NUC = Node(ImageStats(op_string='-r'), name="getMaxT2NUC")
    wf.connect(T2flairNUC, 'output_image', getMaxT2NUC, 'in_file')

    T2NUCirn = Node(AbcImageMaths(), name="IntensityNormalization2")
    T2NUCirn.inputs.op_string = "-div"
    T2NUCirn.inputs.out_file = "normT2.nii.gz"
    wf.connect(T2flairNUC, 'output_image', T2NUCirn, 'in_file')
    wf.connect(getMaxT2NUC, ('out_stat', getElementFromList, 1), T2NUCirn,
               "op_value")
    '''
    ########################
    #### COREGISTRATION ####
    ########################
    '''

    # Optimized Automated Registration
    T2flairCoreg = Node(FLIRT(), name="OptimizedAutomatedRegistration")
    T2flairCoreg.inputs.output_type = 'NIFTI_GZ'
    wf.connect(T2NUCirn, "out_file", T2flairCoreg, "in_file")
    wf.connect(T1NUCirn, "out_file", T2flairCoreg, "reference")
    '''    
    #########################
    #### SKULL-STRIPPING ####
    #########################
    '''

    # SPECTRE
    T1ss = Node(BET(), name="SPECTRE")
    T1ss.inputs.frac = 0.45  #0.4
    T1ss.inputs.mask = True
    T1ss.inputs.outline = True
    T1ss.inputs.robust = True
    wf.connect(T1NUCirn, "out_file", T1ss, "in_file")

    # Image Calculator
    T2ss = Node(ApplyMask(), name="ImageCalculator")
    wf.connect(T1ss, "mask_file", T2ss, "mask_file")
    wf.connect(T2flairCoreg, "out_file", T2ss, "in_file")
    '''
    ####################################
    #### 2nd LAYER OF N3 CORRECTION ####
    ####################################
    This time without the skull: there was a significant amount of inhomogeneity left over.
    '''

    # N3 Correction (3)
    T1ssNUC = Node(N4BiasFieldCorrection(), name="N3Correction3")
    T1ssNUC.inputs.dimension = 3
    T1ssNUC.inputs.environ = {'NSLOTS': '1'}
    T1ssNUC.inputs.ignore_exception = False
    T1ssNUC.inputs.num_threads = 1
    T1ssNUC.inputs.save_bias = False
    T1ssNUC.inputs.terminal_output = 'none'
    wf.connect(T1ss, "out_file", T1ssNUC, "input_image")

    # N3 Correction (4)
    T2ssNUC = Node(N4BiasFieldCorrection(), name="N3Correction4")
    T2ssNUC.inputs.dimension = 3
    T2ssNUC.inputs.environ = {'NSLOTS': '1'}
    T2ssNUC.inputs.ignore_exception = False
    T2ssNUC.inputs.num_threads = 1
    T2ssNUC.inputs.save_bias = False
    T2ssNUC.inputs.terminal_output = 'none'
    wf.connect(T2ss, "out_file", T2ssNUC, "input_image")
    '''
    ####################################
    ####    NORMALIZE FOR MGDM      ####
    ####################################
    This normalization is a bit aggressive: it is only useful to give MGDM
    a cropped dynamic range, and possibly harmful to further processing,
    so the unprocessed images are passed to the subsequent steps.
    '''

    # Intensity Range Normalization
    getMaxT1ssNUC = Node(ImageStats(op_string='-r'), name="getMaxT1ssNUC")
    wf.connect(T1ssNUC, 'output_image', getMaxT1ssNUC, 'in_file')

    T1ssNUCirn = Node(AbcImageMaths(), name="IntensityNormalization3")
    T1ssNUCirn.inputs.op_string = "-div"
    T1ssNUCirn.inputs.out_file = "normT1ss.nii.gz"
    wf.connect(T1ssNUC, 'output_image', T1ssNUCirn, 'in_file')
    wf.connect(getMaxT1ssNUC, ('out_stat', getElementFromList, 1), T1ssNUCirn,
               "op_value")

    # Intensity Range Normalization (2)
    getMaxT2ssNUC = Node(ImageStats(op_string='-r'), name="getMaxT2ssNUC")
    wf.connect(T2ssNUC, 'output_image', getMaxT2ssNUC, 'in_file')

    T2ssNUCirn = Node(AbcImageMaths(), name="IntensityNormalization4")
    T2ssNUCirn.inputs.op_string = "-div"
    T2ssNUCirn.inputs.out_file = "normT2ss.nii.gz"
    wf.connect(T2ssNUC, 'output_image', T2ssNUCirn, 'in_file')
    wf.connect(getMaxT2ssNUC, ('out_stat', getElementFromList, 1), T2ssNUCirn,
               "op_value")
    '''
    ####################################
    ####      ESTIMATE CSF PV       ####
    ####################################
    Here we try to get a better handle on CSF voxels to help the segmentation step
    '''

    # Recursive Ridge Diffusion
    CSF_pv = Node(RecursiveRidgeDiffusion(), name='estimate_CSF_pv')
    CSF_pv.plugin_args = {'sbatch_args': '--mem 6000'}
    CSF_pv.inputs.ridge_intensities = "dark"
    CSF_pv.inputs.ridge_filter = "2D"
    CSF_pv.inputs.orientation = "undefined"
    CSF_pv.inputs.ang_factor = 1.0
    CSF_pv.inputs.min_scale = 0
    CSF_pv.inputs.max_scale = 3
    CSF_pv.inputs.propagation_model = "diffusion"
    CSF_pv.inputs.diffusion_factor = 0.5
    CSF_pv.inputs.similarity_scale = 0.1
    CSF_pv.inputs.neighborhood_size = 4
    CSF_pv.inputs.max_iter = 100
    CSF_pv.inputs.max_diff = 0.001
    CSF_pv.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, CSF_pv.name),
        CSF_pv, 'output_dir')
    wf.connect(T1ssNUCirn, 'out_file', CSF_pv, 'input_image')
    '''
    ####################################
    ####            MGDM            ####
    ####################################
    '''

    # Multi-contrast Brain Segmentation
    MGDM = Node(MGDMSegmentation(), name='MGDM')
    MGDM.plugin_args = {'sbatch_args': '--mem 7000'}
    MGDM.inputs.contrast_type1 = "Mprage3T"
    MGDM.inputs.contrast_type2 = "FLAIR3T"
    MGDM.inputs.contrast_type3 = "PVDURA"
    MGDM.inputs.save_data = True
    MGDM.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, MGDM.name), MGDM,
        'output_dir')
    wf.connect(T1ssNUCirn, 'out_file', MGDM, 'contrast_image1')
    wf.connect(T2ssNUCirn, 'out_file', MGDM, 'contrast_image2')
    wf.connect(CSF_pv, 'ridge_pv', MGDM, 'contrast_image3')

    # Enhance Region Contrast
    ERC = Node(EnhanceRegionContrast(), name='ERC')
    ERC.plugin_args = {'sbatch_args': '--mem 7000'}
    ERC.inputs.enhanced_region = "crwm"
    ERC.inputs.contrast_background = "crgm"
    ERC.inputs.partial_voluming_distance = 2.0
    ERC.inputs.save_data = True
    ERC.inputs.atlas_file = atlas
    wf.connect(subjectList,
               ('subject_id', createOutputDir, wf.base_dir, wf.name, ERC.name),
               ERC, 'output_dir')
    wf.connect(T1ssNUC, 'output_image', ERC, 'intensity_image')
    wf.connect(MGDM, 'segmentation', ERC, 'segmentation_image')
    wf.connect(MGDM, 'distance', ERC, 'levelset_boundary_image')

    # Enhance Region Contrast (2)
    ERC2 = Node(EnhanceRegionContrast(), name='ERC2')
    ERC2.plugin_args = {'sbatch_args': '--mem 7000'}
    ERC2.inputs.enhanced_region = "crwm"
    ERC2.inputs.contrast_background = "crgm"
    ERC2.inputs.partial_voluming_distance = 2.0
    ERC2.inputs.save_data = True
    ERC2.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, ERC2.name), ERC2,
        'output_dir')
    wf.connect(T2ssNUC, 'output_image', ERC2, 'intensity_image')
    wf.connect(MGDM, 'segmentation', ERC2, 'segmentation_image')
    wf.connect(MGDM, 'distance', ERC2, 'levelset_boundary_image')

    # Define Multi-Region Priors
    DMRP = Node(DefineMultiRegionPriors(), name='DefineMultRegPriors')
    DMRP.plugin_args = {'sbatch_args': '--mem 6000'}
    #DMRP.inputs.defined_region = "ventricle-horns"
    #DMRP.inputs.definition_method = "closest-distance"
    DMRP.inputs.distance_offset = 3.0
    DMRP.inputs.save_data = True
    DMRP.inputs.atlas_file = atlas
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, DMRP.name), DMRP,
        'output_dir')
    wf.connect(MGDM, 'segmentation', DMRP, 'segmentation_image')
    wf.connect(MGDM, 'distance', DMRP, 'levelset_boundary_image')
    '''
    ###############################################
    ####      REMOVE VENTRICLE POSTERIOR       ####
    ###############################################
    Due to topology constraints, the ventricles are often not fully segmented:
    here we add back all ventricle voxels from the posterior probability map
    (computed without the topology constraints).
    '''

    # Posterior label
    PostLabel = Node(Split(), name='PosteriorLabel')
    PostLabel.inputs.dimension = "t"
    wf.connect(MGDM, 'labels', PostLabel, 'in_file')

    # Posterior proba
    PostProba = Node(Split(), name='PosteriorProba')
    PostProba.inputs.dimension = "t"
    wf.connect(MGDM, 'memberships', PostProba, 'in_file')

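    # The two thresholds below keep label values in (10.5, 13.5); in the
    # atlas used here these are presumably the ventricle labels (11-13)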
    # Threshold binary mask : ventricle label part 1
    VentLabel1 = Node(Threshold(), name="VentricleLabel1")
    VentLabel1.inputs.thresh = 10.5
    VentLabel1.inputs.direction = "below"
    wf.connect(PostLabel, ("out_files", getFirstElement), VentLabel1,
               "in_file")

    # Threshold binary mask : ventricle label part 2
    VentLabel2 = Node(Threshold(), name="VentricleLabel2")
    VentLabel2.inputs.thresh = 13.5
    VentLabel2.inputs.direction = "above"
    wf.connect(VentLabel1, "out_file", VentLabel2, "in_file")

    # Image calculator : ventricle proba
    VentProba = Node(ImageMaths(), name="VentricleProba")
    VentProba.inputs.op_string = "-mul"
    VentProba.inputs.out_file = "ventproba.nii.gz"
    wf.connect(PostProba, ("out_files", getFirstElement), VentProba, "in_file")
    wf.connect(VentLabel2, "out_file", VentProba, "in_file2")

    # Image calculator : remove inter ventricles
    RmInterVent = Node(ImageMaths(), name="RemoveInterVent")
    RmInterVent.inputs.op_string = "-sub"
    RmInterVent.inputs.out_file = "rmintervent.nii.gz"
    wf.connect(ERC, "region_pv", RmInterVent, "in_file")
    wf.connect(DMRP, "inter_ventricular_pv", RmInterVent, "in_file2")

    # Image calculator : add horns
    AddHorns = Node(ImageMaths(), name="AddHorns")
    AddHorns.inputs.op_string = "-add"
    AddHorns.inputs.out_file = "addhorns.nii.gz"
    wf.connect(RmInterVent, "out_file", AddHorns, "in_file")
    wf.connect(DMRP, "ventricular_horns_pv", AddHorns, "in_file2")

    # Image calculator : remove ventricles
    RmVent = Node(ImageMaths(), name="RemoveVentricles")
    RmVent.inputs.op_string = "-sub"
    RmVent.inputs.out_file = "rmvent.nii.gz"
    wf.connect(AddHorns, "out_file", RmVent, "in_file")
    wf.connect(VentProba, "out_file", RmVent, "in_file2")

    # Image calculator : remove internal capsule
    RmIC = Node(ImageMaths(), name="RemoveInternalCap")
    RmIC.inputs.op_string = "-sub"
    RmIC.inputs.out_file = "rmic.nii.gz"
    wf.connect(RmVent, "out_file", RmIC, "in_file")
    wf.connect(DMRP, "internal_capsule_pv", RmIC, "in_file2")

    # Intensity Range Normalization (3)
    getMaxRmIC = Node(ImageStats(op_string='-r'), name="getMaxRmIC")
    wf.connect(RmIC, 'out_file', getMaxRmIC, 'in_file')

    RmICirn = Node(AbcImageMaths(), name="IntensityNormalization5")
    RmICirn.inputs.op_string = "-div"
    RmICirn.inputs.out_file = "normRmIC.nii.gz"
    wf.connect(RmIC, 'out_file', RmICirn, 'in_file')
    wf.connect(getMaxRmIC, ('out_stat', getElementFromList, 1), RmICirn,
               "op_value")

    # Probability To Levelset : WM orientation
    WM_Orient = Node(ProbabilityToLevelset(), name='WM_Orientation')
    WM_Orient.plugin_args = {'sbatch_args': '--mem 6000'}
    WM_Orient.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, WM_Orient.name),
        WM_Orient, 'output_dir')
    wf.connect(RmICirn, 'out_file', WM_Orient, 'probability_image')

    # Recursive Ridge Diffusion : PVS in WM only
    WM_pvs = Node(RecursiveRidgeDiffusion(), name='PVS_in_WM')
    WM_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    WM_pvs.inputs.ridge_intensities = "bright"
    WM_pvs.inputs.ridge_filter = "1D"
    WM_pvs.inputs.orientation = "orthogonal"
    WM_pvs.inputs.ang_factor = 1.0
    WM_pvs.inputs.min_scale = 0
    WM_pvs.inputs.max_scale = 3
    WM_pvs.inputs.propagation_model = "diffusion"
    WM_pvs.inputs.diffusion_factor = 1.0
    WM_pvs.inputs.similarity_scale = 1.0
    WM_pvs.inputs.neighborhood_size = 2
    WM_pvs.inputs.max_iter = 100
    WM_pvs.inputs.max_diff = 0.001
    WM_pvs.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, WM_pvs.name),
        WM_pvs, 'output_dir')
    wf.connect(ERC, 'background_proba', WM_pvs, 'input_image')
    wf.connect(WM_Orient, 'levelset', WM_pvs, 'surface_levelset')
    wf.connect(RmICirn, 'out_file', WM_pvs, 'loc_prior')

    # Extract Lesions : extract WM PVS
    extract_WM_pvs = Node(LesionExtraction(), name='ExtractPVSfromWM')
    extract_WM_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_WM_pvs.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_WM_pvs.inputs.csf_boundary_partial_vol_dist = 3.0
    extract_WM_pvs.inputs.lesion_clust_dist = 1.0
    extract_WM_pvs.inputs.prob_min_thresh = 0.1
    extract_WM_pvs.inputs.prob_max_thresh = 0.33
    extract_WM_pvs.inputs.small_lesion_size = 4.0
    extract_WM_pvs.inputs.save_data = True
    extract_WM_pvs.inputs.atlas_file = atlas
    wf.connect(subjectList, ('subject_id', createOutputDir, wf.base_dir,
                             wf.name, extract_WM_pvs.name), extract_WM_pvs,
               'output_dir')
    wf.connect(WM_pvs, 'propagation', extract_WM_pvs, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_WM_pvs, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_WM_pvs, 'levelset_boundary_image')
    wf.connect(RmICirn, 'out_file', extract_WM_pvs, 'location_prior_image')
    '''
    2nd branch
    '''

    # Image calculator : internal capsule without ventricles
    ICwoVent = Node(ImageMaths(), name="ICWithoutVentricules")
    ICwoVent.inputs.op_string = "-sub"
    ICwoVent.inputs.out_file = "icwovent.nii.gz"
    wf.connect(DMRP, "internal_capsule_pv", ICwoVent, "in_file")
    wf.connect(DMRP, "inter_ventricular_pv", ICwoVent, "in_file2")

    # Image calculator : remove ventricles IC
    RmVentIC = Node(ImageMaths(), name="RmVentIC")
    RmVentIC.inputs.op_string = "-sub"
    RmVentIC.inputs.out_file = "RmVentIC.nii.gz"
    wf.connect(ICwoVent, "out_file", RmVentIC, "in_file")
    wf.connect(VentProba, "out_file", RmVentIC, "in_file2")

    # Intensity Range Normalization (4)
    getMaxRmVentIC = Node(ImageStats(op_string='-r'), name="getMaxRmVentIC")
    wf.connect(RmVentIC, 'out_file', getMaxRmVentIC, 'in_file')

    RmVentICirn = Node(AbcImageMaths(), name="IntensityNormalization6")
    RmVentICirn.inputs.op_string = "-div"
    RmVentICirn.inputs.out_file = "normRmVentIC.nii.gz"
    wf.connect(RmVentIC, 'out_file', RmVentICirn, 'in_file')
    wf.connect(getMaxRmVentIC, ('out_stat', getElementFromList, 1),
               RmVentICirn, "op_value")

    # Probability To Levelset : IC orientation
    IC_Orient = Node(ProbabilityToLevelset(), name='IC_Orientation')
    IC_Orient.plugin_args = {'sbatch_args': '--mem 6000'}
    IC_Orient.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, IC_Orient.name),
        IC_Orient, 'output_dir')
    wf.connect(RmVentICirn, 'out_file', IC_Orient, 'probability_image')

    # Recursive Ridge Diffusion : PVS in IC only
    IC_pvs = Node(RecursiveRidgeDiffusion(), name='RecursiveRidgeDiffusion2')
    IC_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    IC_pvs.inputs.ridge_intensities = "bright"
    IC_pvs.inputs.ridge_filter = "1D"
    IC_pvs.inputs.orientation = "undefined"
    IC_pvs.inputs.ang_factor = 1.0
    IC_pvs.inputs.min_scale = 0
    IC_pvs.inputs.max_scale = 3
    IC_pvs.inputs.propagation_model = "diffusion"
    IC_pvs.inputs.diffusion_factor = 1.0
    IC_pvs.inputs.similarity_scale = 1.0
    IC_pvs.inputs.neighborhood_size = 2
    IC_pvs.inputs.max_iter = 100
    IC_pvs.inputs.max_diff = 0.001
    IC_pvs.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, IC_pvs.name),
        IC_pvs, 'output_dir')
    wf.connect(ERC, 'background_proba', IC_pvs, 'input_image')
    wf.connect(IC_Orient, 'levelset', IC_pvs, 'surface_levelset')
    wf.connect(RmVentICirn, 'out_file', IC_pvs, 'loc_prior')

    # Extract Lesions : extract IC PVS
    extract_IC_pvs = Node(LesionExtraction(), name='ExtractPVSfromIC')
    extract_IC_pvs.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_IC_pvs.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_IC_pvs.inputs.csf_boundary_partial_vol_dist = 4.0
    extract_IC_pvs.inputs.lesion_clust_dist = 1.0
    extract_IC_pvs.inputs.prob_min_thresh = 0.25
    extract_IC_pvs.inputs.prob_max_thresh = 0.5
    extract_IC_pvs.inputs.small_lesion_size = 4.0
    extract_IC_pvs.inputs.save_data = True
    extract_IC_pvs.inputs.atlas_file = atlas
    wf.connect(subjectList, ('subject_id', createOutputDir, wf.base_dir,
                             wf.name, extract_IC_pvs.name), extract_IC_pvs,
               'output_dir')
    wf.connect(IC_pvs, 'propagation', extract_IC_pvs, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_IC_pvs, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_IC_pvs, 'levelset_boundary_image')
    wf.connect(RmVentICirn, 'out_file', extract_IC_pvs, 'location_prior_image')
    '''
    3rd branch
    '''

    # Image calculator : remove the inter-ventricular region
    RmInter = Node(ImageMaths(), name="RemoveInterVentricules")
    RmInter.inputs.op_string = "-sub"
    RmInter.inputs.out_file = "rminter.nii.gz"
    wf.connect(ERC2, 'region_pv', RmInter, "in_file")
    wf.connect(DMRP, "inter_ventricular_pv", RmInter, "in_file2")

    # Image calculator : add the ventricular horns back
    AddVentHorns = Node(ImageMaths(), name="AddVentHorns")
    AddVentHorns.inputs.op_string = "-add"
    AddVentHorns.inputs.out_file = "addventhorns.nii.gz"
    wf.connect(RmInter, 'out_file', AddVentHorns, "in_file")
    wf.connect(DMRP, "ventricular_horns_pv", AddVentHorns, "in_file2")

    # Intensity Range Normalization (5)
    getMaxAddVentHorns = Node(ImageStats(op_string='-r'),
                              name="getMaxAddVentHorns")
    wf.connect(AddVentHorns, 'out_file', getMaxAddVentHorns, 'in_file')

    AddVentHornsirn = Node(AbcImageMaths(), name="IntensityNormalization7")
    AddVentHornsirn.inputs.op_string = "-div"
    AddVentHornsirn.inputs.out_file = "normAddVentHorns.nii.gz"
    wf.connect(AddVentHorns, 'out_file', AddVentHornsirn, 'in_file')
    wf.connect(getMaxAddVentHorns, ('out_stat', getElementFromList, 1),
               AddVentHornsirn, "op_value")

    # Extract Lesions : extract White Matter Hyperintensities
    extract_WMH = Node(LesionExtraction(), name='Extract_WMH')
    extract_WMH.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_WMH.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_WMH.inputs.csf_boundary_partial_vol_dist = 2.0
    extract_WMH.inputs.lesion_clust_dist = 1.0
    extract_WMH.inputs.prob_min_thresh = 0.84
    extract_WMH.inputs.prob_max_thresh = 0.84
    extract_WMH.inputs.small_lesion_size = 4.0
    extract_WMH.inputs.save_data = True
    extract_WMH.inputs.atlas_file = atlas
    wf.connect(subjectList, ('subject_id', createOutputDir, wf.base_dir,
                             wf.name, extract_WMH.name), extract_WMH,
               'output_dir')
    wf.connect(ERC2, 'background_proba', extract_WMH, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_WMH, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_WMH, 'levelset_boundary_image')
    wf.connect(AddVentHornsirn, 'out_file', extract_WMH,
               'location_prior_image')

    #===========================================================================
    # extract_WMH2 = extract_WMH.clone(name='Extract_WMH2')
    # extract_WMH2.inputs.gm_boundary_partial_vol_dist = 2.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_WMH2.name),extract_WMH2,'output_dir')
    # wf.connect(ERC2,'background_proba',extract_WMH2,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_WMH2,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_WMH2,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_WMH2,'location_prior_image')
    #
    # extract_WMH3 = extract_WMH.clone(name='Extract_WMH3')
    # extract_WMH3.inputs.gm_boundary_partial_vol_dist = 3.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_WMH3.name),extract_WMH3,'output_dir')
    # wf.connect(ERC2,'background_proba',extract_WMH3,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_WMH3,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_WMH3,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_WMH3,'location_prior_image')
    #===========================================================================
    '''
    ####################################
    ####     FINDING SMALL WMHs     ####
    ####################################
    Small round WMHs near the cortex are often missed by the main algorithm,
    so we add this step to catch them.
    '''

    # Recursive Ridge Diffusion : round WMH detection
    round_WMH = Node(RecursiveRidgeDiffusion(), name='round_WMH')
    round_WMH.plugin_args = {'sbatch_args': '--mem 6000'}
    round_WMH.inputs.ridge_intensities = "bright"
    round_WMH.inputs.ridge_filter = "0D"
    round_WMH.inputs.orientation = "undefined"
    round_WMH.inputs.ang_factor = 1.0
    round_WMH.inputs.min_scale = 1
    round_WMH.inputs.max_scale = 4
    round_WMH.inputs.propagation_model = "none"
    round_WMH.inputs.diffusion_factor = 1.0
    round_WMH.inputs.similarity_scale = 0.1
    round_WMH.inputs.neighborhood_size = 4
    round_WMH.inputs.max_iter = 100
    round_WMH.inputs.max_diff = 0.001
    round_WMH.inputs.save_data = True
    wf.connect(
        subjectList,
        ('subject_id', createOutputDir, wf.base_dir, wf.name, round_WMH.name),
        round_WMH, 'output_dir')
    wf.connect(ERC2, 'background_proba', round_WMH, 'input_image')
    wf.connect(AddVentHornsirn, 'out_file', round_WMH, 'loc_prior')

    # Extract Lesions : extract round WMH
    extract_round_WMH = Node(LesionExtraction(), name='Extract_round_WMH')
    extract_round_WMH.plugin_args = {'sbatch_args': '--mem 6000'}
    extract_round_WMH.inputs.gm_boundary_partial_vol_dist = 1.0
    extract_round_WMH.inputs.csf_boundary_partial_vol_dist = 2.0
    extract_round_WMH.inputs.lesion_clust_dist = 1.0
    extract_round_WMH.inputs.prob_min_thresh = 0.33
    extract_round_WMH.inputs.prob_max_thresh = 0.33
    extract_round_WMH.inputs.small_lesion_size = 6.0
    extract_round_WMH.inputs.save_data = True
    extract_round_WMH.inputs.atlas_file = atlas
    wf.connect(subjectList, ('subject_id', createOutputDir, wf.base_dir,
                             wf.name, extract_round_WMH.name),
               extract_round_WMH, 'output_dir')
    wf.connect(round_WMH, 'ridge_pv', extract_round_WMH, 'probability_image')
    wf.connect(MGDM, 'segmentation', extract_round_WMH, 'segmentation_image')
    wf.connect(MGDM, 'distance', extract_round_WMH, 'levelset_boundary_image')
    wf.connect(AddVentHornsirn, 'out_file', extract_round_WMH,
               'location_prior_image')

    #===========================================================================
    # extract_round_WMH2 = extract_round_WMH.clone(name='Extract_round_WMH2')
    # extract_round_WMH2.inputs.gm_boundary_partial_vol_dist = 2.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_round_WMH2.name),extract_round_WMH2,'output_dir')
    # wf.connect(round_WMH,'ridge_pv',extract_round_WMH2,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_round_WMH2,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_round_WMH2,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_round_WMH2,'location_prior_image')
    #
    # extract_round_WMH3 = extract_round_WMH.clone(name='Extract_round_WMH3')
    # extract_round_WMH3.inputs.gm_boundary_partial_vol_dist = 2.0
    # wf.connect(subjectList,('subject_id',createOutputDir,wf.base_dir,wf.name,extract_round_WMH3.name),extract_round_WMH3,'output_dir')
    # wf.connect(round_WMH,'ridge_pv',extract_round_WMH3,'probability_image')
    # wf.connect(MGDM,'segmentation',extract_round_WMH3,'segmentation_image')
    # wf.connect(MGDM,'distance',extract_round_WMH3,'levelset_boundary_image')
    # wf.connect(AddVentHornsirn,'out_file',extract_round_WMH3,'location_prior_image')
    #===========================================================================
    '''
    ####################################
    ####     COMBINE BOTH TYPES     ####
    ####################################
    Small round WMHs and regular WMH together before thresholding
    +
    PVS from white matter and internal capsule
    '''

    # Image calculator : combine WM and IC PVS scores (voxelwise max) into the DVRS map
    DVRS = Node(ImageMaths(), name="DVRS")
    DVRS.inputs.op_string = "-max"
    DVRS.inputs.out_file = "DVRS_map.nii.gz"
    wf.connect(extract_WM_pvs, 'lesion_score', DVRS, "in_file")
    wf.connect(extract_IC_pvs, "lesion_score", DVRS, "in_file2")

    # Image calculator : WMH + round
    WMH = Node(ImageMaths(), name="WMH")
    WMH.inputs.op_string = "-max"
    WMH.inputs.out_file = "WMH_map.nii.gz"
    wf.connect(extract_WMH, 'lesion_score', WMH, "in_file")
    wf.connect(extract_round_WMH, "lesion_score", WMH, "in_file2")

    #===========================================================================
    # WMH2 = Node(ImageMaths(), name="WMH2")
    # WMH2.inputs.op_string = "-max"
    # WMH2.inputs.out_file = "WMH2_map.nii.gz"
    # wf.connect(extract_WMH2,'lesion_score',WMH2,"in_file")
    # wf.connect(extract_round_WMH2,"lesion_score", WMH2, "in_file2")
    #
    # WMH3 = Node(ImageMaths(), name="WMH3")
    # WMH3.inputs.op_string = "-max"
    # WMH3.inputs.out_file = "WMH3_map.nii.gz"
    # wf.connect(extract_WMH3,'lesion_score',WMH3,"in_file")
    # wf.connect(extract_round_WMH3,"lesion_score", WMH3, "in_file2")
    #===========================================================================

    # Image calculator : multiply by the boundary partial volume
    WMH_mul = Node(ImageMaths(), name="WMH_mul")
    WMH_mul.inputs.op_string = "-mul"
    WMH_mul.inputs.out_file = "final_mask.nii.gz"
    wf.connect(WMH, "out_file", WMH_mul, "in_file")
    wf.connect(MGDM, "distance", WMH_mul, "in_file2")

    #===========================================================================
    # WMH2_mul = Node(ImageMaths(), name="WMH2_mul")
    # WMH2_mul.inputs.op_string = "-mul"
    # WMH2_mul.inputs.out_file = "final_mask.nii.gz"
    # wf.connect(WMH2,"out_file", WMH2_mul,"in_file")
    # wf.connect(MGDM,"distance", WMH2_mul, "in_file2")
    #
    # WMH3_mul = Node(ImageMaths(), name="WMH3_mul")
    # WMH3_mul.inputs.op_string = "-mul"
    # WMH3_mul.inputs.out_file = "final_mask.nii.gz"
    # wf.connect(WMH3,"out_file", WMH3_mul,"in_file")
    # wf.connect(MGDM,"distance", WMH3_mul, "in_file2")
    #===========================================================================
    '''
    ##########################################
    ####      SEGMENTATION THRESHOLD      ####
    ##########################################
    A threshold of 0.5 is very conservative, because the final lesion score is the product of two probabilities.
    This needs to be optimized to a value between 0.25 and 0.5 to balance false negatives 
    (dominant at 0.5) and false positives (dominant at low values).
    '''
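    # Worked example: a voxel scoring 0.7 on each of the two probability
    # maps ends up at 0.7 * 0.7 = 0.49 and is rejected at 0.5, while a
    # 0.25 threshold keeps voxels where both probabilities exceed ~0.5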

    # Threshold binary mask : DVRS at 0.25
    DVRS_mask = Node(Threshold(), name="DVRS_mask")
    DVRS_mask.inputs.thresh = 0.25
    DVRS_mask.inputs.direction = "below"
    wf.connect(DVRS, "out_file", DVRS_mask, "in_file")

    # Threshold binary mask : 025
    WMH1_025 = Node(Threshold(), name="WMH1_025")
    WMH1_025.inputs.thresh = 0.25
    WMH1_025.inputs.direction = "below"
    wf.connect(WMH_mul, "out_file", WMH1_025, "in_file")

    #===========================================================================
    # WMH2_025 = Node(Threshold(), name="WMH2_025")
    # WMH2_025.inputs.thresh = 0.25
    # WMH2_025.inputs.direction = "below"
    # wf.connect(WMH2_mul,"out_file", WMH2_025, "in_file")
    #
    # WMH3_025 = Node(Threshold(), name="WMH3_025")
    # WMH3_025.inputs.thresh = 0.25
    # WMH3_025.inputs.direction = "below"
    # wf.connect(WMH3_mul,"out_file", WMH3_025, "in_file")
    #===========================================================================

    # Threshold binary mask : 050
    WMH1_050 = Node(Threshold(), name="WMH1_050")
    WMH1_050.inputs.thresh = 0.50
    WMH1_050.inputs.direction = "below"
    wf.connect(WMH_mul, "out_file", WMH1_050, "in_file")

    #===========================================================================
    # WMH2_050 = Node(Threshold(), name="WMH2_050")
    # WMH2_050.inputs.thresh = 0.50
    # WMH2_050.inputs.direction = "below"
    # wf.connect(WMH2_mul,"out_file", WMH2_050, "in_file")
    #
    # WMH3_050 = Node(Threshold(), name="WMH3_050")
    # WMH3_050.inputs.thresh = 0.50
    # WMH3_050.inputs.direction = "below"
    # wf.connect(WMH3_mul,"out_file", WMH3_050, "in_file")
    #===========================================================================

    # Threshold binary mask : 075
    WMH1_075 = Node(Threshold(), name="WMH1_075")
    WMH1_075.inputs.thresh = 0.75
    WMH1_075.inputs.direction = "below"
    wf.connect(WMH_mul, "out_file", WMH1_075, "in_file")

    #===========================================================================
    # WMH2_075 = Node(Threshold(), name="WMH2_075")
    # WMH2_075.inputs.thresh = 0.75
    # WMH2_075.inputs.direction = "below"
    # wf.connect(WMH2_mul,"out_file", WMH2_075, "in_file")
    #
    # WMH3_075 = Node(Threshold(), name="WMH3_075")
    # WMH3_075.inputs.thresh = 0.75
    # WMH3_075.inputs.direction = "below"
    # wf.connect(WMH3_mul,"out_file", WMH3_075, "in_file")
    #===========================================================================

    ## Outputs

    DVRS_Output = Node(IdentityInterface(fields=[
        'mask', 'region', 'lesion_size', 'lesion_proba', 'boundary', 'label',
        'score'
    ]),
                       name='DVRS_Output')
    wf.connect(DVRS_mask, 'out_file', DVRS_Output, 'mask')

    WMH_output = Node(IdentityInterface(fields=[
        'mask1025', 'mask1050', 'mask1075', 'mask2025', 'mask2050', 'mask2075',
        'mask3025', 'mask3050', 'mask3075'
    ]),
                      name='WMH_output')
    wf.connect(WMH1_025, 'out_file', WMH_output, 'mask1025')
    #wf.connect(WMH2_025,'out_file',WMH_output,'mask2025')
    #wf.connect(WMH3_025,'out_file',WMH_output,'mask3025')
    wf.connect(WMH1_050, 'out_file', WMH_output, 'mask1050')
    #wf.connect(WMH2_050,'out_file',WMH_output,'mask2050')
    #wf.connect(WMH3_050,'out_file',WMH_output,'mask3050')
    wf.connect(WMH1_075, 'out_file', WMH_output, 'mask1075')
    #wf.connect(WMH2_075,'out_file',WMH_output,'mask2075')
    #wf.connect(WMH3_075,'out_file',WMH_output,'mask3075')

    return wf
Exemple #24
def firstlevel_wf(subject_id, sink_directory, name='ds008_R2_frstlvl_wf'):

    frstlvl_wf = Workflow(name='frstlvl_wf')

    info = dict(task_mri_files=[['subject_id', 'stopsignal']],
                motion_noise_files=[['subject_id', 'filter_regressor']])

    # Create a Function node to define stimulus onsets, etc... for each subject
    subject_info = Node(Function(input_names=['subject_id'],
                                 output_names=['output'],
                                 function=subjectinfo),
                        name='subject_info')
    subject_info.inputs.ignore_exception = False
    subject_info.inputs.subject_id = subject_id

    # Create another Function node to define the contrasts for the experiment
    getcontrasts = Node(Function(input_names=['subject_id'],
                                 output_names=['contrasts'],
                                 function=get_contrasts),
                        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    getcontrasts.inputs.subject_id = subject_id

    # Create a Function node to substitute names of files created during pipeline
    getsubs = Node(Function(input_names=['subject_id', 'cons', 'info'],
                            output_names=['subs'],
                            function=get_subs),
                   name='getsubs')
    getsubs.inputs.ignore_exception = False
    getsubs.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getsubs, 'info')
    frstlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    # Create a datasource node to get the task_mri and motion-noise files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=info.keys()),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    #datasource.inputs.base_directory = os.path.abspath('/scratch/PSB6351_2017/ds008_R2.0.0/preproc/')
    #datasource.inputs.field_template = dict(task_mri_files='%s/func/realigned/*%s*.nii.gz',
    #                                        motion_noise_files='%s/noise/%s*.txt')
    datasource.inputs.base_directory = os.path.abspath(
        '/scratch/PSB6351_2017/students/salo/data/preproc/')
    datasource.inputs.field_template = dict(
        task_mri_files=
        '%s/preproc/func/smoothed/corr_*_task-%s_*_bold_bet_smooth_mask.nii.gz',
        motion_noise_files='%s/preproc/noise/%s*.txt')
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True
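    # For a hypothetical subject_id 'sub-01', the templates above expand
    # (via the `info` template_args) to:
    #   task_mri_files     -> sub-01/preproc/func/smoothed/corr_*_task-stopsignal_*_bold_bet_smooth_mask.nii.gz
    #   motion_noise_files -> sub-01/preproc/noise/filter_regressor*.txt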

    # Create a Function node to modify the motion and noise files to be single regressors
    motionnoise = Node(Function(input_names=['subjinfo', 'files'],
                                output_names=['subjinfo'],
                                function=motion_noise),
                       name='motionnoise')
    motionnoise.inputs.ignore_exception = False
    frstlvl_wf.connect(subject_info, 'output', motionnoise, 'subjinfo')
    frstlvl_wf.connect(datasource, 'motion_noise_files', motionnoise, 'files')

    # Create a specify model node
    specify_model = Node(SpecifyModel(), name='specify_model')
    specify_model.inputs.high_pass_filter_cutoff = 128.
    specify_model.inputs.ignore_exception = False
    specify_model.inputs.input_units = 'secs'
    specify_model.inputs.time_repetition = 2.
    frstlvl_wf.connect(datasource, 'task_mri_files', specify_model,
                       'functional_runs')
    frstlvl_wf.connect(motionnoise, 'subjinfo', specify_model, 'subject_info')

    # Create an InputSpec node for the modelfit node
    modelfit_inputspec = Node(IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ],
                                                mandatory_inputs=True),
                              name='modelfit_inputspec')
    modelfit_inputspec.inputs.bases = {'dgamma': {'derivs': False}}
    modelfit_inputspec.inputs.film_threshold = 0.0
    modelfit_inputspec.inputs.interscan_interval = 2.0
    modelfit_inputspec.inputs.model_serial_correlations = True
    frstlvl_wf.connect(datasource, 'task_mri_files', modelfit_inputspec,
                       'functional_data')
    frstlvl_wf.connect(getcontrasts, 'contrasts', modelfit_inputspec,
                       'contrasts')
    frstlvl_wf.connect(specify_model, 'session_info', modelfit_inputspec,
                       'session_info')

    # Create a level1 design node
    level1_design = Node(Level1Design(), name='level1_design')
    level1_design.inputs.ignore_exception = False
    frstlvl_wf.connect(modelfit_inputspec, 'interscan_interval', level1_design,
                       'interscan_interval')
    frstlvl_wf.connect(modelfit_inputspec, 'session_info', level1_design,
                       'session_info')
    frstlvl_wf.connect(modelfit_inputspec, 'contrasts', level1_design,
                       'contrasts')
    frstlvl_wf.connect(modelfit_inputspec, 'bases', level1_design, 'bases')
    frstlvl_wf.connect(modelfit_inputspec, 'model_serial_correlations',
                       level1_design, 'model_serial_correlations')

    # Create a MapNode to generate a model for each run
    generate_model = MapNode(FEATModel(),
                             iterfield=['fsf_file', 'ev_files'],
                             name='generate_model')
    generate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    generate_model.inputs.ignore_exception = False
    generate_model.inputs.output_type = 'NIFTI_GZ'
    generate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(level1_design, 'fsf_files', generate_model, 'fsf_file')
    frstlvl_wf.connect(level1_design, 'ev_files', generate_model, 'ev_files')

    # Create a MapNode to estimate the model using FILMGLS
    estimate_model = MapNode(FILMGLS(),
                             iterfield=['design_file', 'in_file', 'tcon_file'],
                             name='estimate_model')
    frstlvl_wf.connect(generate_model, 'design_file', estimate_model,
                       'design_file')
    frstlvl_wf.connect(generate_model, 'con_file', estimate_model, 'tcon_file')
    frstlvl_wf.connect(modelfit_inputspec, 'functional_data', estimate_model,
                       'in_file')

    # Create a merge node to merge the contrasts - necessary for fsl 5.0.7 and greater
    merge_contrasts = MapNode(Merge(2),
                              iterfield=['in1'],
                              name='merge_contrasts')
    frstlvl_wf.connect(estimate_model, 'zstats', merge_contrasts, 'in1')

    # Create a MapNode to transform the z2pval
    z2pval = MapNode(ImageMaths(), iterfield=['in_file'], name='z2pval')
    z2pval.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    z2pval.inputs.ignore_exception = False
    z2pval.inputs.op_string = '-ztop'
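    # '-ztop' converts a z-statistic image into an uncorrected p-value image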
    z2pval.inputs.output_type = 'NIFTI_GZ'
    z2pval.inputs.suffix = '_pval'
    z2pval.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(merge_contrasts, ('out', pop_lambda), z2pval, 'in_file')
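    # pop_lambda is defined elsewhere in the original script; since
    # merge_contrasts emits a list per contrast, it presumably selects the
    # first element before the z-to-p conversion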

    # Create an outputspec node
    modelfit_outputspec = Node(IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates',
        'zstats', 'design_image', 'design_file', 'design_cov', 'sigmasquareds'
    ],
                                                 mandatory_inputs=True),
                               name='modelfit_outputspec')
    frstlvl_wf.connect(estimate_model, 'copes', modelfit_outputspec, 'copes')
    frstlvl_wf.connect(estimate_model, 'varcopes', modelfit_outputspec,
                       'varcopes')
    frstlvl_wf.connect(merge_contrasts, 'out', modelfit_outputspec, 'zstats')
    frstlvl_wf.connect(z2pval, 'out_file', modelfit_outputspec, 'pfiles')
    frstlvl_wf.connect(generate_model, 'design_image', modelfit_outputspec,
                       'design_image')
    frstlvl_wf.connect(generate_model, 'design_file', modelfit_outputspec,
                       'design_file')
    frstlvl_wf.connect(generate_model, 'design_cov', modelfit_outputspec,
                       'design_cov')
    frstlvl_wf.connect(estimate_model, 'param_estimates', modelfit_outputspec,
                       'parameter_estimates')
    frstlvl_wf.connect(estimate_model, 'dof_file', modelfit_outputspec,
                       'dof_file')
    frstlvl_wf.connect(estimate_model, 'sigmasquareds', modelfit_outputspec,
                       'sigmasquareds')

    # Create a datasink node
    sinkd = Node(DataSink(), name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    frstlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')
    frstlvl_wf.connect(modelfit_outputspec, 'parameter_estimates', sinkd,
                       'modelfit.estimates')
    frstlvl_wf.connect(modelfit_outputspec, 'sigmasquareds', sinkd,
                       'modelfit.estimates.@sigsq')
    frstlvl_wf.connect(modelfit_outputspec, 'dof_file', sinkd, 'modelfit.dofs')
    frstlvl_wf.connect(modelfit_outputspec, 'copes', sinkd,
                       'modelfit.contrasts.@copes')
    frstlvl_wf.connect(modelfit_outputspec, 'varcopes', sinkd,
                       'modelfit.contrasts.@varcopes')
    frstlvl_wf.connect(modelfit_outputspec, 'zstats', sinkd,
                       'modelfit.contrasts.@zstats')
    frstlvl_wf.connect(modelfit_outputspec, 'design_image', sinkd,
                       'modelfit.design')
    frstlvl_wf.connect(modelfit_outputspec, 'design_cov', sinkd,
                       'modelfit.design.@cov')
    frstlvl_wf.connect(modelfit_outputspec, 'design_file', sinkd,
                       'modelfit.design.@matrix')
    frstlvl_wf.connect(modelfit_outputspec, 'pfiles', sinkd,
                       'modelfit.contrasts.@pstats')

    return frstlvl_wf
def create_reconall(config):
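    # `config` is a dict; keys read in this function include 'subject_id',
    # 'subjects_dir' and 'longitudinal', plus 'timepoints' and
    # 'long_template' for longitudinal runs (the AutoRecon sub-workflows
    # may read further keys)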
    ar1_wf = create_AutoRecon1(config)
    ar2_wf, ar2_lh, ar2_rh = create_AutoRecon2(config)
    ar3_wf = create_AutoRecon3(config)

    # Connect workflows
    reconall = pe.Workflow(name="recon-all")
    if config['longitudinal']:
        # grab files from the initial single session run
        # infields/outfields belong to the DataGrabber interface, not the Node
        grab_inittp_files = pe.Node(DataGrabber(
            infields=['subject_id'],
            outfields=['inputvols', 'iscales', 'ltas']),
                                    name="Grab_Initial_Files")
        grab_inittp_files.inputs.template = '*'
        grab_inittp_files.inputs.base_directory = config['subjects_dir']
        grab_inittp_files.inputs.field_template = dict(
            inputvols='%s/mri/orig/0*.mgz',
            iscales='%s/mri/orig/0*-iscale.txt',
            ltas='%s/mri/orig/0*.lta')

        grab_inittp_files.inputs.template_args = dict(
            inputvols=[['subject_id']],
            iscales=[['subject_id']],
            ltas=[['subject_id']])

        reconall.connect([(grab_inittp_files, ar1_wf,
                           [('inputvols', 'AutoRecon1_Inputs.in_T1s'),
                            ('iscales', 'AutoRecon1_Inputs.iscales'),
                            ('ltas', 'AutoRecon1_Inputs.ltas')])])

        merge_norms = pe.Node(Merge(len(config['timepoints'])),
                              name="Merge_Norms")
        merge_segs = pe.Node(Merge(len(config['timepoints'])),
                             name="Merge_Segmentations")
        merge_segs_noCC = pe.Node(Merge(len(config['timepoints'])),
                                  name="Merge_Segmentations_noCC")
        merge_template_ltas = pe.Node(Merge(len(config['timepoints'])),
                                      name="Merge_Template_ltas")

        for i, tp in enumerate(config['timepoints']):
            # datasource timepoint files
            tp_data_source = pe.Node(FreeSurferSource(),
                                     name="{0}_DataSource".format(tp))
            tp_data_source.inputs.subject_id = tp
            tp_data_source.inputs.subjects_dir = config['subjects_dir']

            tp_data_grabber = pe.Node(
                DataGrabber(
                    infields=['tp', 'long_template'],
                    outfields=['subj_to_template_lta', 'seg_noCC',
                               'seg_presurf']),
                name="{0}_DataGrabber".format(tp))
            tp_data_grabber.inputs.template = '*'
            tp_data_grabber.inputs.base_directory = config['subjects_dir']
            tp_data_grabber.inputs.field_template = dict(
                subj_to_template_lta='%s/mri/transforms/%s_to_%s.lta',
                seg_noCC='%s/mri/aseg.auto_noCCseg.mgz',
                seg_presurf='%s/mri/aseg.presurf.mgz',
            )

            tp_data_grabber.inputs.template_args = dict(
                subj_to_template_lta=[['long_template', 'tp',
                                       'long_template']],
                seg_noCC=[['tp']],
                seg_presurf=[['tp']])

            # Merge interface inputs are 1-indexed (in1..inN)
            reconall.connect([(tp_data_source, merge_norms,
                               [('norm', 'in{0}'.format(i + 1))]),
                              (tp_data_grabber, merge_segs,
                               [('seg_presurf', 'in{0}'.format(i + 1))]),
                              (tp_data_grabber, merge_segs_noCC,
                               [('seg_noCC', 'in{0}'.format(i + 1))]),
                              (tp_data_grabber, merge_template_ltas,
                               [('subj_to_template_lta',
                                 'in{0}'.format(i + 1))])])

            if tp == config['subject_id']:
                reconall.connect([
                    (tp_data_source, ar2_wf, [('wm',
                                               'AutoRecon2_Inputs.init_wm')]),
                    (tp_data_grabber, ar2_wf,
                     [('subj_to_template_lta',
                       'AutoRecon2_Inputs.subj_to_template_lta')]),
                    (tp_data_grabber, ar1_wf,
                     [('subj_to_template_lta',
                       'AutoRecon1_Inputs.subj_to_template_lta')])
                ])

        reconall.connect([
            (merge_norms, ar2_wf, [('out', 'AutoRecon2_Inputs.alltps_norms')]),
            (merge_segs, ar2_wf, [('out', 'AutoRecon2_Inputs.alltps_segs')]),
            (merge_template_ltas, ar2_wf,
             [('out', 'AutoRecon2_Inputs.alltps_to_template_ltas')]),
            (merge_segs_noCC, ar2_wf, [('out',
                                        'AutoRecon2_Inputs.alltps_segs_noCC')])
        ])

        # datasource files from the template run
        ds_template_files = pe.Node(FreeSurferSource(),
                                    name="Datasource_Template_Files")
        ds_template_files.inputs.subject_id = config['subject_id']
        ds_template_files.inputs.subjects_dir = config['subjects_dir']

        reconall.connect([(ds_template_files, ar1_wf, [
            ('brainmask', 'AutoRecon1_Inputs.template_brainmask')
        ]),
                          (ds_template_files, ar2_wf,
                           [('aseg', 'AutoRecon2_Inputs.template_aseg')])])

        # grab files from template run
        grab_template_files = pe.Node(
            DataGrabber(),
            name="Grab_Template_Files",
            infields=['subject_id', 'long_template'],
            outfields=[
                'template_talairach_xfm', 'template_talairach_lta',
                'template_talairach_m3z', 'template_label_intensities',
                'template_lh_white', 'template_rh_white', 'template_lh_pial',
                'template_rh_pial'
            ])
        grab_template_files.inputs.template = '*'
        grab_template_files.inputs.base_directory = config['subjects_dir']
        grab_template_files.inputs.subject_id = config['subject_id']
        grab_template_files.inputs.long_template = config['long_template']
        grab_template_files.inputs.field_template = dict(
            template_talairach_xfm='%s/mri/transforms/talairach.xfm',
            template_talairach_lta='%s/mri/transforms/talairach.lta',
            template_talairach_m3z='%s/mri/transforms/talairach.m3z',
            template_label_intensities=
            '%s/mri/aseg.auto_noCCseg.label_intensities.txt',
            template_lh_white='%s/surf/lh.white',
            template_rh_white='%s/surf/rh.white',
            template_lh_pial='%s/surf/lh.pial',
            template_rh_pial='%s/surf/rh.pial')

        grab_template_files.inputs.template_args = dict(
            template_talairach_xfm=[['long_template']],
            template_talairach_lta=[['long_template']],
            template_talairach_m3z=[['long_template']],
            template_label_intensities=[['long_template']],
            template_lh_white=[['long_template']],
            template_rh_white=[['long_template']],
            template_lh_pial=[['long_template']],
            template_rh_pial=[['long_template']])
        reconall.connect([
            (grab_template_files, ar1_wf,
             [('template_talairach_xfm',
               'AutoRecon1_Inputs.template_talairach_xfm')]),
            (grab_template_files, ar2_wf, [
                ('template_talairach_lta',
                 'AutoRecon2_Inputs.template_talairach_lta'),
                ('template_talairach_m3z',
                 'AutoRecon2_Inputs.template_talairach_m3z'),
                ('template_label_intensities',
                 'AutoRecon2_Inputs.template_label_intensities'),
                ('template_lh_white', 'AutoRecon2_Inputs.template_lh_white'),
                ('template_rh_white', 'AutoRecon2_Inputs.template_rh_white'),
                ('template_lh_pial', 'AutoRecon2_Inputs.template_lh_pial'),
                ('template_rh_pial', 'AutoRecon2_Inputs.template_rh_pial'),
            ])
        ])
        # end longitudinal data collection

    # connect autorecon 1 - 3
    reconall.connect([
        (ar1_wf, ar3_wf, [
            ('AutoRecon1_Inputs.subject_id', 'AutoRecon3_Inputs.subject_id'),
            ('AutoRecon1_Inputs.subjects_dir',
             'AutoRecon3_Inputs.subjects_dir'),
            ('Copy_Brainmask.out_file', 'AutoRecon3_Inputs.brainmask'),
            ('Copy_Transform.out_file', 'AutoRecon3_Inputs.transform'),
            ('Add_Transform_to_Header.out_file', 'AutoRecon3_Inputs.orig_mgz'),
            ('Robust_Template.out_file', 'AutoRecon3_Inputs.rawavg'),
        ]),
        (ar1_wf, ar2_wf, [
            ('Copy_Brainmask.out_file', 'AutoRecon2_Inputs.brainmask'),
            ('Copy_Transform.out_file', 'AutoRecon2_Inputs.transform'),
            ('Add_Transform_to_Header.out_file', 'AutoRecon2_Inputs.orig'),
            ('AutoRecon1_Inputs.subject_id', 'AutoRecon2_Inputs.subject_id'),
            ('AutoRecon1_Inputs.subjects_dir',
             'AutoRecon2_Inputs.subjects_dir'),
        ]),
        (ar2_lh, ar3_wf, [
            ('inflate2.out_file', 'AutoRecon3_Inputs.lh_inflated'),
            ('Smooth2.surface', 'AutoRecon3_Inputs.lh_smoothwm'),
            ('Make_Surfaces.out_white', 'AutoRecon3_Inputs.lh_white'),
            ('Make_Surfaces.out_cortex', 'AutoRecon3_Inputs.lh_cortex_label'),
            ('Make_Surfaces.out_area', 'AutoRecon3_Inputs.lh_area'),
            ('Make_Surfaces.out_curv', 'AutoRecon3_Inputs.lh_curv'),
            ('inflate2.out_sulc', 'AutoRecon3_Inputs.lh_sulc'),
            ('Extract_Main_Component.out_file',
             'AutoRecon3_Inputs.lh_orig_nofix'),
            ('Remove_Intersection.out_file', 'AutoRecon3_Inputs.lh_orig'),
            ('Curvature1.out_mean', 'AutoRecon3_Inputs.lh_white_H'),
            ('Curvature1.out_gauss', 'AutoRecon3_Inputs.lh_white_K'),
        ]),
        (ar2_rh, ar3_wf, [
            ('inflate2.out_file', 'AutoRecon3_Inputs.rh_inflated'),
            ('Smooth2.surface', 'AutoRecon3_Inputs.rh_smoothwm'),
            ('Make_Surfaces.out_white', 'AutoRecon3_Inputs.rh_white'),
            ('Make_Surfaces.out_cortex', 'AutoRecon3_Inputs.rh_cortex_label'),
            ('Make_Surfaces.out_area', 'AutoRecon3_Inputs.rh_area'),
            ('Make_Surfaces.out_curv', 'AutoRecon3_Inputs.rh_curv'),
            ('inflate2.out_sulc', 'AutoRecon3_Inputs.rh_sulc'),
            ('Extract_Main_Component.out_file',
             'AutoRecon3_Inputs.rh_orig_nofix'),
            ('Remove_Intersection.out_file', 'AutoRecon3_Inputs.rh_orig'),
            ('Curvature1.out_mean', 'AutoRecon3_Inputs.rh_white_H'),
            ('Curvature1.out_gauss', 'AutoRecon3_Inputs.rh_white_K'),
        ]),
        (ar2_wf, ar3_wf, [
            ('Copy_CCSegmentation.out_file', 'AutoRecon3_Inputs.aseg_presurf'),
            ('Mask_Brain_Final_Surface.out_file',
             'AutoRecon3_Inputs.brain_finalsurfs'),
            ('MRI_Pretess.out_file', 'AutoRecon3_Inputs.wm'),
            ('Fill.out_file', 'AutoRecon3_Inputs.filled'),
            ('CA_Normalize.out_file', 'AutoRecon3_Inputs.norm'),
        ]),
    ])

    return reconall
Exemple #26
sbc1_workflow.base_dir = workflow_dir
sbc1_workflow.write_graph(graph2use='flat')
#sbc1_workflow.run('MultiProc', plugin_args={'n_procs': proc_cores})

# In[ ]:

infosource2 = Node(IdentityInterface(fields=['roi']), name='infosource2')
infosource2.iterables = ('roi', rois)
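# Setting `iterables` makes nipype run the downstream subgraph once per
# entry in `rois`, substituting each roi name in turn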

# Data grabber : select the cope image for each ROI seed
templates = {'roi': 'glm_seed_copes/%s_*/cope.nii'}

datagrabber = Node(DataGrabber(infields=['roi'],
                               outfields=['roi'],
                               sort_filelist=True,
                               base_directory=output_dir,
                               template='glm_seed_copes/%s_*/cope.nii',
                               field_template=templates,
                               template_args=dict(roi=[['roi']])),
                   name='datagrabber')
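# When both are given, `field_template` entries override the generic
# `template` for their outfields, so the duplication above is harmless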

# In[ ]:

## Level 2

# merge param estimates across all subjects per seed
merge = Node(Merge(dimension='t'), name='merge')

# FSL randomise for higher level analysis
highermodel = Node(Randomise(tfce=True,
                             raw_stats_imgs=True),
                   name='highermodel')  # call truncated in source; closing and node name assumed
def firstlevel_wf(subject_id, sink_directory, name='wmaze_frstlvl_wf'):
    frstlvl_wf = Workflow(name='frstlvl_wf')

    info = dict(
        task_mri_files=[['subject_id',
                         'wmaze']],  #dictionary used in datasource
        motion_noise_files=[['subject_id']])

    #function node to call subjectinfo function with name, onset, duration, and amplitude info
    subject_info = Node(Function(input_names=['subject_id'],
                                 output_names=['output'],
                                 function=subjectinfo),
                        name='subject_info')
    subject_info.inputs.ignore_exception = False
    subject_info.inputs.subject_id = subject_id

    #function node to define contrasts
    getcontrasts = Node(Function(input_names=['subject_id', 'info'],
                                 output_names=['contrasts'],
                                 function=get_contrasts),
                        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    getcontrasts.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getcontrasts, 'info')

    #function node to substitute names of folders and files created during pipeline
    getsubs = Node(
        Function(
            input_names=['subject_id', 'cons', 'info'],
            output_names=['subs'],
            # Calls the function 'get_subs'
            function=get_subs),
        name='getsubs')
    getsubs.inputs.ignore_exception = False
    getsubs.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getsubs, 'info')
    frstlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    #datasource node to get task_mri and motion-noise files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=info.keys()),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/preproc/')
    datasource.inputs.field_template = dict(
        task_mri_files=
        '%s/func/smoothed_fullspectrum/_maskfunc2*/*%s*.nii.gz',  #functional files
        motion_noise_files='%s/noise/filter_regressor??.txt'
    )  #filter regressor noise files
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    #MapNode (ExtractROI) to remove the last three volumes from the functional data
    fslroi_epi = MapNode(
        ExtractROI(t_min=0,
                   t_size=197),  #start from the first volume, keep 197 (drops the last 3)
        iterfield=['in_file'],
        name='fslroi_epi')
    fslroi_epi.inputs.output_type = 'NIFTI_GZ'
    fslroi_epi.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(datasource, 'task_mri_files', fslroi_epi, 'in_file')

    #function node to modify the motion and noise files to be single regressors
    motionnoise = Node(Function(input_names=['subjinfo', 'files'],
                                output_names=['subjinfo'],
                                function=motion_noise),
                       name='motionnoise')
    motionnoise.inputs.ignore_exception = False
    frstlvl_wf.connect(subject_info, 'output', motionnoise, 'subjinfo')
    frstlvl_wf.connect(datasource, 'motion_noise_files', motionnoise, 'files')

    #node to create model specifications compatible with spm/fsl designers (requires subjectinfo to be received in the form of a Bunch)
    specify_model = Node(SpecifyModel(), name='specify_model')
    specify_model.inputs.high_pass_filter_cutoff = -1.0  #high-pass filter cutoff in seconds
    specify_model.inputs.ignore_exception = False
    specify_model.inputs.input_units = 'secs'  #input units in either 'secs' or 'scans'
    specify_model.inputs.time_repetition = 2.0  #TR
    frstlvl_wf.connect(
        fslroi_epi, 'roi_file', specify_model,
        'functional_runs')  #edited data files for model -- list of 4D files
    #list of event description files in 3 column format corresponding to onsets, durations, and amplitudes
    frstlvl_wf.connect(motionnoise, 'subjinfo', specify_model, 'subject_info')

    #node for basic interface class generating identity mappings
    modelfit_inputspec = Node(IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ],
                                                mandatory_inputs=True),
                              name='modelfit_inputspec')
    modelfit_inputspec.inputs.bases = {'dgamma': {'derivs': False}}
    modelfit_inputspec.inputs.film_threshold = 0.0
    modelfit_inputspec.inputs.interscan_interval = 2.0
    modelfit_inputspec.inputs.model_serial_correlations = True
    frstlvl_wf.connect(fslroi_epi, 'roi_file', modelfit_inputspec,
                       'functional_data')
    frstlvl_wf.connect(getcontrasts, 'contrasts', modelfit_inputspec,
                       'contrasts')
    frstlvl_wf.connect(specify_model, 'session_info', modelfit_inputspec,
                       'session_info')

    #MapNode for the first-level FSL design matrix, specifying contrasts and motion/noise regressors
    level1_design = MapNode(Level1Design(),
                            iterfield=['contrasts', 'session_info'],
                            name='level1_design')
    level1_design.inputs.ignore_exception = False
    frstlvl_wf.connect(modelfit_inputspec, 'interscan_interval', level1_design,
                       'interscan_interval')
    frstlvl_wf.connect(modelfit_inputspec, 'session_info', level1_design,
                       'session_info')
    frstlvl_wf.connect(modelfit_inputspec, 'contrasts', level1_design,
                       'contrasts')
    frstlvl_wf.connect(modelfit_inputspec, 'bases', level1_design, 'bases')
    frstlvl_wf.connect(modelfit_inputspec, 'model_serial_correlations',
                       level1_design, 'model_serial_correlations')

    #MapNode to generate a design.mat file for each run
    generate_model = MapNode(FEATModel(),
                             iterfield=['fsf_file', 'ev_files'],
                             name='generate_model')
    generate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    generate_model.inputs.ignore_exception = False
    generate_model.inputs.output_type = 'NIFTI_GZ'
    generate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(level1_design, 'fsf_files', generate_model, 'fsf_file')
    frstlvl_wf.connect(level1_design, 'ev_files', generate_model, 'ev_files')

    #MapNode to estimate the model using FILMGLS -- fits the design matrix to the voxel timeseries
    estimate_model = MapNode(FILMGLS(),
                             iterfield=['design_file', 'in_file', 'tcon_file'],
                             name='estimate_model')
    estimate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    estimate_model.inputs.ignore_exception = False
    estimate_model.inputs.mask_size = 5  #Susan-smooth mask size
    estimate_model.inputs.output_type = 'NIFTI_GZ'
    estimate_model.inputs.results_dir = 'results'
    estimate_model.inputs.smooth_autocorr = True  #smooth auto-correlation estimates
    estimate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(modelfit_inputspec, 'film_threshold', estimate_model,
                       'threshold')
    frstlvl_wf.connect(modelfit_inputspec, 'functional_data', estimate_model,
                       'in_file')
    frstlvl_wf.connect(
        generate_model, 'design_file', estimate_model,
        'design_file')  #mat file containing ascii matrix for design
    frstlvl_wf.connect(generate_model, 'con_file', estimate_model,
                       'tcon_file')  #contrast file containing contrast vectors

    #merge node to merge the contrasts - necessary for fsl 5.0.7 and greater
    merge_contrasts = MapNode(Merge(2),
                              iterfield=['in1'],
                              name='merge_contrasts')
    frstlvl_wf.connect(estimate_model, 'zstats', merge_contrasts, 'in1')

    #MapNode to transform the z2pval
    z2pval = MapNode(ImageMaths(), iterfield=['in_file'], name='z2pval')
    z2pval.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    z2pval.inputs.ignore_exception = False
    z2pval.inputs.op_string = '-ztop'  #defines the operation used
    z2pval.inputs.output_type = 'NIFTI_GZ'
    z2pval.inputs.suffix = '_pval'
    z2pval.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(merge_contrasts, ('out', pop_lambda), z2pval, 'in_file')

    #outputspec node using IdentityInterface() to receive information from estimate_model, merge_contrasts, z2pval, and generate_model
    modelfit_outputspec = Node(IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates',
        'zstats', 'design_image', 'design_file', 'design_cov', 'sigmasquareds'
    ],
                                                 mandatory_inputs=True),
                               name='modelfit_outputspec')
    frstlvl_wf.connect(estimate_model, 'copes', modelfit_outputspec,
                       'copes')  #lvl1 cope files
    frstlvl_wf.connect(estimate_model, 'varcopes', modelfit_outputspec,
                       'varcopes')  #lvl1 varcope files
    frstlvl_wf.connect(merge_contrasts, 'out', modelfit_outputspec,
                       'zstats')  #zstats across runs
    frstlvl_wf.connect(z2pval, 'out_file', modelfit_outputspec, 'pfiles')
    frstlvl_wf.connect(
        generate_model, 'design_image', modelfit_outputspec,
        'design_image')  #graphical representation of design matrix
    frstlvl_wf.connect(
        generate_model, 'design_file', modelfit_outputspec,
        'design_file')  #mat file containing ascii matrix for design
    frstlvl_wf.connect(
        generate_model, 'design_cov', modelfit_outputspec,
        'design_cov')  #graphical representation of design covariance
    frstlvl_wf.connect(estimate_model, 'param_estimates', modelfit_outputspec,
                       'parameter_estimates'
                       )  #parameter estimates for columns of design matrix
    frstlvl_wf.connect(estimate_model, 'dof_file', modelfit_outputspec,
                       'dof_file')  #degrees of freedom
    frstlvl_wf.connect(estimate_model, 'sigmasquareds', modelfit_outputspec,
                       'sigmasquareds')  #summary of residuals

    #datasink node to save output from multiple points in the pipeline
    sinkd = MapNode(DataSink(),
                    iterfield=[
                        'substitutions', 'modelfit.contrasts.@copes',
                        'modelfit.contrasts.@varcopes', 'modelfit.estimates',
                        'modelfit.contrasts.@zstats'
                    ],
                    name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    frstlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')
    frstlvl_wf.connect(modelfit_outputspec, 'parameter_estimates', sinkd,
                       'modelfit.estimates')
    frstlvl_wf.connect(modelfit_outputspec, 'sigmasquareds', sinkd,
                       'modelfit.estimates.@sigsq')
    frstlvl_wf.connect(modelfit_outputspec, 'dof_file', sinkd, 'modelfit.dofs')
    frstlvl_wf.connect(modelfit_outputspec, 'copes', sinkd,
                       'modelfit.contrasts.@copes')
    frstlvl_wf.connect(modelfit_outputspec, 'varcopes', sinkd,
                       'modelfit.contrasts.@varcopes')
    frstlvl_wf.connect(modelfit_outputspec, 'zstats', sinkd,
                       'modelfit.contrasts.@zstats')
    frstlvl_wf.connect(modelfit_outputspec, 'design_image', sinkd,
                       'modelfit.design')
    frstlvl_wf.connect(modelfit_outputspec, 'design_cov', sinkd,
                       'modelfit.design.@cov')
    frstlvl_wf.connect(modelfit_outputspec, 'design_file', sinkd,
                       'modelfit.design.@matrix')
    frstlvl_wf.connect(modelfit_outputspec, 'pfiles', sinkd,
                       'modelfit.contrasts.@pstats')

    return frstlvl_wf
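
# A minimal usage sketch (hypothetical: the enclosing function's name is not
# shown in this excerpt and is assumed here):
# frstlvl = firstlevel_wf(subject_id='4000', sink_directory='/path/to/sink')
# frstlvl.run(plugin='MultiProc', plugin_args={'n_procs': 4})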
Exemple #28
# Imports needed by this snippet; subjdir, all_seeds, nuisance_masks and TR
# are assumed to be defined earlier in the original script.
from nipype.pipeline.engine import Workflow, Node, MapNode
from nipype.interfaces.io import DataGrabber
from nipype.interfaces.utility import IdentityInterface, Merge as utilMerge
from nipype.interfaces.fsl import Merge, ImageMeants

# For distributing subject paths
infosource = Node(IdentityInterface(fields=['subject_path', 'seed']),
                  name="infosource")
infosource.iterables = [('subject_path', subjdir), ('seed', all_seeds)]

info = dict(func=[[
    'subject_path', '/processedfmri_TRCNnSFmDI/images/swua_filteredf*.nii'
]],
            motion=[[
                'subject_path',
                '/processedfmri_TRCNnSFmDI/motion_params_filtered.txt'
            ]])

selectfiles = Node(DataGrabber(infields=['subject_path'],
                               outfields=['func', 'motion'],
                               base_directory='/',
                               template='%s/%s',
                               template_args=info,
                               sort_filelist=True),
                   name='selectfiles')

# For merging seed and nuisance mask paths and then distributing them downstream
seed_plus_nuisance = Node(utilMerge(2), name='seed_plus_nuisance')
seed_plus_nuisance.inputs.in2 = nuisance_masks

# 1. Obtain timeseries for seed and nuisance variables
# 1a. Merge all 3D functional images into a single 4D image
merge = Node(Merge(dimension='t', output_type='NIFTI', tr=TR), name='merge')

# 1b. Take mean of all voxels in each roi at each timepoint
ts = MapNode(ImageMeants(), name='ts', iterfield=['mask'])
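
# A minimal wiring sketch (the workflow name and connections below are
# assumptions inferred from the node definitions above, not part of the
# original snippet):
wf = Workflow(name='seed_timeseries')
wf.connect([
    (infosource, selectfiles, [('subject_path', 'subject_path')]),
    (infosource, seed_plus_nuisance, [('seed', 'in1')]),
    (selectfiles, merge, [('func', 'in_files')]),
    (merge, ts, [('merged_file', 'in_file')]),
    (seed_plus_nuisance, ts, [('out', 'mask')]),
])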
Exemple #29
import os

from nipype.pipeline.engine import Workflow, Node
from nipype.interfaces.utility import IdentityInterface
from nipype.interfaces.freesurfer import ReconAll
from nipype.interfaces.io import DataGrabber

# CURRENT PROJECT DATA DIRECTORY
data_dir = '/home/data/madlab/data/mri/emuR01'

# CURRENT PROJECT SUBJECT IDS
sids = ['4000', '4001']

info = dict(T1=[['subject_id']])

infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = ('subject_id', sids)

# Create a datasource node to get the T1 file
datasource = Node(DataGrabber(infields=['subject_id'],
                              outfields=list(info.keys())),
                  name='datasource')
datasource.inputs.template = '%s/%s'
datasource.inputs.base_directory = os.path.abspath(data_dir)
datasource.inputs.field_template = dict(T1='%s/s1/anatomy/T1_002.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True

reconall_node = Node(ReconAll(), name='reconall_node')
reconall_node.inputs.openmp = 2
reconall_node.inputs.args = '-hippocampal-subfields-T1'
reconall_node.inputs.subjects_dir = '/home/data/madlab/surfaces/emuR01'
reconall_node.plugin_args = {
    'sbatch_args': ('-p investor --qos pq_madlab -n 2'),
    'overwrite': True
}
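
# A minimal wiring/run sketch (the working directory and run plugin are
# assumptions): distribute subject IDs, grab each T1, and submit ReconAll.
wf = Workflow(name='reconall_wf')
wf.base_dir = '/scratch/madlab/reconall_wf'  # hypothetical working directory
wf.connect(infosource, 'subject_id', datasource, 'subject_id')
wf.connect(infosource, 'subject_id', reconall_node, 'subject_id')
wf.connect(datasource, 'T1', reconall_node, 'T1_files')
wf.run(plugin='SLURM', plugin_args={'sbatch_args': '-p investor --qos pq_madlab'})
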
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface, Merge, Function
from nipype.interfaces.io import DataGrabber
from nipype.interfaces.freesurfer import (
    Sphere, Register, Jacobian, Paint, MRIsCALabel, MakeSurfaces, MRIsCalc,
    VolumeMask, ParcellationStats, Contrast, Aparc2Aseg,
    RelabelHypointensities, Apas2Aseg, SegStatsReconAll, MRISPreprocReconAll,
    SurfaceSmooth)
# Import path for the Brodmann-area helper workflow is assumed; it shipped
# alongside the nipype FreeSurfer recon-all workflows.
from nipype.workflows.smri.freesurfer.ba_maps import create_ba_maps_wf


def create_AutoRecon3(name="AutoRecon3",
                      qcache=False,
                      plugin_args=None,
                      th3=True,
                      exvivo=True,
                      entorhinal=True,
                      fsvernum=5.3):

    # AutoRecon3
    # Workflow
    ar3_wf = pe.Workflow(name=name)

    # Input Node
    inputspec = pe.Node(IdentityInterface(fields=[
        'lh_inflated', 'rh_inflated', 'lh_smoothwm', 'rh_smoothwm', 'lh_white',
        'rh_white', 'lh_white_H', 'rh_white_H', 'lh_white_K', 'rh_white_K',
        'lh_cortex_label', 'rh_cortex_label', 'lh_orig', 'rh_orig', 'lh_sulc',
        'rh_sulc', 'lh_area', 'rh_area', 'lh_curv', 'rh_curv', 'lh_orig_nofix',
        'rh_orig_nofix', 'aseg_presurf', 'brain_finalsurfs', 'wm', 'filled',
        'brainmask', 'transform', 'orig_mgz', 'rawavg', 'norm', 'lh_atlas',
        'rh_atlas', 'lh_classifier1', 'rh_classifier1', 'lh_classifier2',
        'rh_classifier2', 'lh_classifier3', 'rh_classifier3', 'lookup_table',
        'wm_lookup_table', 'src_subject_id', 'src_subject_dir', 'color_table',
        'num_threads'
    ]),
                        name='inputspec')

    ar3_lh_wf1 = pe.Workflow(name="AutoRecon3_Left_1")
    ar3_rh_wf1 = pe.Workflow(name="AutoRecon3_Right_1")
    for hemisphere, hemi_wf in [('lh', ar3_lh_wf1), ('rh', ar3_rh_wf1)]:
        hemi_inputspec1 = pe.Node(IdentityInterface(fields=[
            'inflated', 'smoothwm', 'white', 'cortex_label', 'orig',
            'aseg_presurf', 'brain_finalsurfs', 'wm', 'filled', 'sphere',
            'sulc', 'area', 'curv', 'classifier', 'atlas', 'num_threads'
        ]),
                                  name="inputspec")

        # Spherical Inflation

        # Inflates the orig surface into a sphere while minimizing metric distortion.
        # This step is necessary in order to register the surface to the spherical
        # atlas (also known as the spherical morph). Calls mris_sphere. Creates
        # surf/?h.sphere. The -autorecon3 stage begins here.

        ar3_sphere = pe.Node(Sphere(), name="Spherical_Inflation")
        ar3_sphere.inputs.seed = 1234
        ar3_sphere.inputs.out_file = '{0}.sphere'.format(hemisphere)
        if plugin_args:
            ar3_sphere.plugin_args = plugin_args
        hemi_wf.connect([(hemi_inputspec1,
                          ar3_sphere, [('inflated', 'in_file'),
                                       ('smoothwm', 'in_smoothwm'),
                                       ('num_threads', 'num_threads')])])

        # Ipsilateral Surface Registration (Spherical Morph)

        # Registers the orig surface to the spherical atlas through surf/?h.sphere.
        # The surfaces are first coarsely registered by aligning the large scale
        # folding patterns found in ?h.sulc and then fine tuned using the small-scale
        # patterns as in ?h.curv. Calls mris_register. Creates surf/?h.sphere.reg.

        ar3_surfreg = pe.Node(Register(), name="Surface_Registration")
        ar3_surfreg.inputs.out_file = '{0}.sphere.reg'.format(hemisphere)
        ar3_surfreg.inputs.curv = True
        hemi_wf.connect([(ar3_sphere, ar3_surfreg, [('out_file', 'in_surf')]),
                         (hemi_inputspec1, ar3_surfreg,
                          [('smoothwm', 'in_smoothwm'), ('sulc', 'in_sulc'),
                           ('atlas', 'target')])])

        # Jacobian

        # Computes how much the white surface was distorted in order to register to
        # the spherical atlas during the -surfreg step.

        ar3_jacobian = pe.Node(Jacobian(), name="Jacobian")
        ar3_jacobian.inputs.out_file = '{0}.jacobian_white'.format(hemisphere)
        hemi_wf.connect([
            (hemi_inputspec1, ar3_jacobian, [('white', 'in_origsurf')]),
            (ar3_surfreg, ar3_jacobian, [('out_file', 'in_mappedsurf')])
        ])

        # Average Curvature

        # Resamples the average curvature from the atlas to that of the subject.
        # Allows the user to display activity on the surface of an individual
        # with the folding pattern (i.e., anatomy) of a group.

        ar3_paint = pe.Node(Paint(), name="Average_Curvature")
        ar3_paint.inputs.averages = 5
        ar3_paint.inputs.template_param = 6
        ar3_paint.inputs.out_file = "{0}.avg_curv".format(hemisphere)
        hemi_wf.connect([(ar3_surfreg, ar3_paint, [('out_file', 'in_surf')]),
                         (hemi_inputspec1, ar3_paint, [('atlas', 'template')])
                         ])

        # Cortical Parcellation

        # Assigns a neuroanatomical label to each location on the cortical
        # surface. Incorporates both geometric information derived from the
        # cortical model (sulcus and curvature), and neuroanatomical convention.

        ar3_parcellation = pe.Node(MRIsCALabel(), "Cortical_Parcellation")
        ar3_parcellation.inputs.seed = 1234
        ar3_parcellation.inputs.hemisphere = hemisphere
        ar3_parcellation.inputs.copy_inputs = True
        ar3_parcellation.inputs.out_file = "{0}.aparc.annot".format(hemisphere)
        if plugin_args:
            ar3_parcellation.plugin_args = plugin_args
        hemi_wf.connect([(hemi_inputspec1, ar3_parcellation, [
            ('smoothwm', 'smoothwm'), ('cortex_label', 'label'),
            ('aseg_presurf', 'aseg'), ('classifier', 'classifier'),
            ('curv', 'curv'), ('sulc', 'sulc'), ('num_threads', 'num_threads')
        ]), (ar3_surfreg, ar3_parcellation, [('out_file', 'canonsurf')])])

        # Pial Surface

        ar3_pial = pe.Node(MakeSurfaces(), name="Make_Pial_Surface")
        ar3_pial.inputs.mgz = True
        ar3_pial.inputs.hemisphere = hemisphere
        ar3_pial.inputs.copy_inputs = True

        if fsvernum < 6:
            ar3_pial.inputs.white = 'NOWRITE'
            hemi_wf.connect(hemi_inputspec1, 'white', ar3_pial, 'in_white')
        else:
            ar3_pial.inputs.no_white = True
            hemi_wf.connect([(hemi_inputspec1,
                              ar3_pial, [('white', 'orig_pial'),
                                         ('white', 'orig_white')])])

        hemi_wf.connect([
            (hemi_inputspec1, ar3_pial, [('wm', 'in_wm'), ('orig', 'in_orig'),
                                         ('filled', 'in_filled'),
                                         ('brain_finalsurfs', 'in_T1'),
                                         ('aseg_presurf', 'in_aseg')]),
            (ar3_parcellation, ar3_pial, [('out_file', 'in_label')])
        ])

        # Surface Volume
        """
        Creates the ?h.volume file by first creating the ?h.area.mid file
        (adding ?h.area(.white) to ?h.area.pial, then dividing by two). Then
        ?h.volume is created by multiplying ?h.area.mid with ?h.thickness.
        """

        ar3_add = pe.Node(MRIsCalc(), name="Add_Pial_Area")
        ar3_add.inputs.action = "add"
        ar3_add.inputs.out_file = '{0}.area.mid'.format(hemisphere)
        hemi_wf.connect([
            (ar3_pial, ar3_add, [('out_area', 'in_file2')]),
            (hemi_inputspec1, ar3_add, [('area', 'in_file1')]),
        ])

        ar3_divide = pe.Node(MRIsCalc(), name="Mid_Pial")
        ar3_divide.inputs.action = "div"
        ar3_divide.inputs.in_int = 2
        ar3_divide.inputs.out_file = '{0}.area.mid'.format(hemisphere)
        hemi_wf.connect([
            (ar3_add, ar3_divide, [('out_file', 'in_file1')]),
        ])

        ar3_volume = pe.Node(MRIsCalc(), name="Calculate_Volume")
        ar3_volume.inputs.action = "mul"
        ar3_volume.inputs.out_file = '{0}.volume'.format(hemisphere)
        hemi_wf.connect([
            (ar3_divide, ar3_volume, [('out_file', 'in_file1')]),
            (ar3_pial, ar3_volume, [('out_thickness', 'in_file2')]),
        ])

        # Connect the inputs
        ar3_wf.connect([(inputspec, hemi_wf, [
            ('{0}_inflated'.format(hemisphere), 'inputspec.inflated'),
            ('{0}_smoothwm'.format(hemisphere), 'inputspec.smoothwm'),
            ('{0}_white'.format(hemisphere), 'inputspec.white'),
            ('{0}_cortex_label'.format(hemisphere), 'inputspec.cortex_label'),
            ('{0}_orig'.format(hemisphere), 'inputspec.orig'),
            ('{0}_sulc'.format(hemisphere), 'inputspec.sulc'),
            ('{0}_area'.format(hemisphere), 'inputspec.area'),
            ('{0}_curv'.format(hemisphere), 'inputspec.curv'),
            ('aseg_presurf', 'inputspec.aseg_presurf'),
            ('brain_finalsurfs', 'inputspec.brain_finalsurfs'),
            ('wm', 'inputspec.wm'), ('filled', 'inputspec.filled'),
            ('{0}_atlas'.format(hemisphere), 'inputspec.atlas'),
            ('{0}_classifier1'.format(hemisphere), 'inputspec.classifier'),
            ('num_threads', 'inputspec.num_threads')
        ])])

        # Workflow1 Outputs
        hemi_outputs1 = [
            'sphere', 'sphere_reg', 'jacobian_white', 'avg_curv',
            'aparc_annot', 'area_pial', 'curv_pial', 'pial', 'thickness_pial',
            'area_mid', 'volume'
        ]
        hemi_outputspec1 = pe.Node(IdentityInterface(fields=hemi_outputs1),
                                   name="outputspec")
        hemi_wf.connect([
            (ar3_pial, hemi_outputspec1, [('out_pial', 'pial'),
                                          ('out_curv', 'curv_pial'),
                                          ('out_area', 'area_pial'),
                                          ('out_thickness', 'thickness_pial')
                                          ]),
            (ar3_divide, hemi_outputspec1, [('out_file', 'area_mid')]),
            (ar3_volume, hemi_outputspec1, [('out_file', 'volume')]),
            (ar3_parcellation, hemi_outputspec1, [('out_file', 'aparc_annot')
                                                  ]),
            (ar3_jacobian, hemi_outputspec1, [('out_file', 'jacobian_white')]),
            (ar3_paint, hemi_outputspec1, [('out_file', 'avg_curv')]),
            (ar3_surfreg, hemi_outputspec1, [('out_file', 'sphere_reg')]),
            (ar3_sphere, hemi_outputspec1, [('out_file', 'sphere')])
        ])

    # Cortical Ribbon Mask
    """
    Creates binary volume masks of the cortical ribbon,
    i.e., each voxel is either 1 or 0 depending on whether it falls within the ribbon.
    """
    volume_mask = pe.Node(VolumeMask(), name="Mask_Ribbon")
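    # Label values follow the FreeSurfer aseg convention:
    # 2/41 = left/right cerebral white matter, 3/42 = left/right cortical ribbon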
    volume_mask.inputs.left_whitelabel = 2
    volume_mask.inputs.left_ribbonlabel = 3
    volume_mask.inputs.right_whitelabel = 41
    volume_mask.inputs.right_ribbonlabel = 42
    volume_mask.inputs.save_ribbon = True
    volume_mask.inputs.copy_inputs = True

    ar3_wf.connect([
        (inputspec, volume_mask, [('lh_white', 'lh_white'),
                                  ('rh_white', 'rh_white')]),
        (ar3_lh_wf1, volume_mask, [('outputspec.pial', 'lh_pial')]),
        (ar3_rh_wf1, volume_mask, [('outputspec.pial', 'rh_pial')]),
    ])

    if fsvernum >= 6:
        ar3_wf.connect([(inputspec, volume_mask, [('aseg_presurf', 'in_aseg')])
                        ])
    else:
        ar3_wf.connect([(inputspec, volume_mask, [('aseg_presurf', 'aseg')])])

    ar3_lh_wf2 = pe.Workflow(name="AutoRecon3_Left_2")
    ar3_rh_wf2 = pe.Workflow(name="AutoRecon3_Right_2")

    for hemisphere, hemiwf2 in [('lh', ar3_lh_wf2), ('rh', ar3_rh_wf2)]:
        if hemisphere == 'lh':
            hemiwf1 = ar3_lh_wf1
        else:
            hemiwf1 = ar3_rh_wf1

        hemi_inputs2 = [
            'wm',
            'lh_white',
            'rh_white',
            'transform',
            'brainmask',
            'aseg_presurf',
            'cortex_label',
            'lh_pial',
            'rh_pial',
            'thickness',
            'aparc_annot',
            'ribbon',
            'smoothwm',
            'sphere_reg',
            'orig_mgz',
            'rawavg',
            'curv',
            'sulc',
            'classifier2',
            'classifier3',
        ]

        hemi_inputspec2 = pe.Node(IdentityInterface(fields=hemi_inputs2),
                                  name="inputspec")

        # Parcellation Statistics
        """
        Runs mris_anatomical_stats to create a summary table of cortical parcellation statistics for each structure, including
        structure name
        number of vertices
        total surface area (mm^2)
        total gray matter volume (mm^3)
        average cortical thickness (mm)
        standard error of cortical thickness (mm)
        integrated rectified mean curvature
        integrated rectified Gaussian curvature
        folding index
        intrinsic curvature index.
        """
        parcellation_stats_white = pe.Node(
            ParcellationStats(),
            name="Parcellation_Stats_{0}_White".format(hemisphere))
        parcellation_stats_white.inputs.mgz = True
        parcellation_stats_white.inputs.th3 = th3
        parcellation_stats_white.inputs.tabular_output = True
        parcellation_stats_white.inputs.surface = 'white'
        parcellation_stats_white.inputs.hemisphere = hemisphere
        parcellation_stats_white.inputs.out_color = 'aparc.annot.ctab'
        parcellation_stats_white.inputs.out_table = '{0}.aparc.stats'.format(
            hemisphere)
        parcellation_stats_white.inputs.copy_inputs = True

        hemiwf2.connect([
            (hemi_inputspec2, parcellation_stats_white, [
                ('wm', 'wm'),
                ('lh_white', 'lh_white'),
                ('rh_white', 'rh_white'),
                ('transform', 'transform'),
                ('brainmask', 'brainmask'),
                ('aseg_presurf', 'aseg'),
                ('cortex_label', 'in_cortex'),
                ('cortex_label', 'cortex_label'),
                ('lh_pial', 'lh_pial'),
                ('rh_pial', 'rh_pial'),
                ('thickness', 'thickness'),
                ('aparc_annot', 'in_annotation'),
                ('ribbon', 'ribbon'),
            ]),
        ])

        parcellation_stats_pial = pe.Node(
            ParcellationStats(),
            name="Parcellation_Stats_{0}_Pial".format(hemisphere))
        parcellation_stats_pial.inputs.mgz = True
        parcellation_stats_pial.inputs.th3 = th3
        parcellation_stats_pial.inputs.tabular_output = True
        parcellation_stats_pial.inputs.surface = 'pial'
        parcellation_stats_pial.inputs.hemisphere = hemisphere
        parcellation_stats_pial.inputs.copy_inputs = True
        parcellation_stats_pial.inputs.out_color = 'aparc.annot.ctab'
        parcellation_stats_pial.inputs.out_table = '{0}.aparc.pial.stats'.format(
            hemisphere)

        hemiwf2.connect([
            (hemi_inputspec2, parcellation_stats_pial, [
                ('wm', 'wm'),
                ('lh_white', 'lh_white'),
                ('rh_white', 'rh_white'),
                ('transform', 'transform'),
                ('brainmask', 'brainmask'),
                ('aseg_presurf', 'aseg'),
                ('cortex_label', 'cortex_label'),
                ('cortex_label', 'in_cortex'),
                ('lh_pial', 'lh_pial'),
                ('rh_pial', 'rh_pial'),
                ('thickness', 'thickness'),
                ('aparc_annot', 'in_annotation'),
                ('ribbon', 'ribbon'),
            ]),
        ])

        # Cortical Parcellation 2
        cortical_parcellation_2 = pe.Node(
            MRIsCALabel(),
            name="Cortical_Parcellation_{0}_2".format(hemisphere))
        cortical_parcellation_2.inputs.out_file = '{0}.aparc.a2009s.annot'.format(
            hemisphere)
        cortical_parcellation_2.inputs.seed = 1234
        cortical_parcellation_2.inputs.copy_inputs = True
        cortical_parcellation_2.inputs.hemisphere = hemisphere

        hemiwf2.connect([(hemi_inputspec2, cortical_parcellation_2,
                          [('smoothwm', 'smoothwm'), ('aseg_presurf', 'aseg'),
                           ('cortex_label', 'label'),
                           ('sphere_reg', 'canonsurf'), ('curv', 'curv'),
                           ('sulc', 'sulc'), ('classifier2', 'classifier')])])

        # Parcellation Statistics 2
        parcellation_stats_white_2 = parcellation_stats_white.clone(
            name="Parcellation_Statistics_{0}_2".format(hemisphere))
        parcellation_stats_white_2.inputs.hemisphere = hemisphere
        parcellation_stats_white_2.inputs.out_color = 'aparc.annot.a2009s.ctab'
        parcellation_stats_white_2.inputs.out_table = '{0}.aparc.a2009s.stats'.format(
            hemisphere)
        hemiwf2.connect([(hemi_inputspec2, parcellation_stats_white_2, [
            ('wm', 'wm'),
            ('lh_white', 'lh_white'),
            ('rh_white', 'rh_white'),
            ('transform', 'transform'),
            ('brainmask', 'brainmask'),
            ('aseg_presurf', 'aseg'),
            ('cortex_label', 'cortex_label'),
            ('cortex_label', 'in_cortex'),
            ('lh_pial', 'lh_pial'),
            ('rh_pial', 'rh_pial'),
            ('thickness', 'thickness'),
            ('ribbon', 'ribbon'),
        ]),
                         (cortical_parcellation_2, parcellation_stats_white_2,
                          [('out_file', 'in_annotation')])])

        # Cortical Parcellation 3
        cortical_parcellation_3 = pe.Node(
            MRIsCALabel(),
            name="Cortical_Parcellation_{0}_3".format(hemisphere))
        cortical_parcellation_3.inputs.out_file = '{0}.aparc.DKTatlas40.annot'.format(
            hemisphere)
        cortical_parcellation_3.inputs.hemisphere = hemisphere
        cortical_parcellation_3.inputs.seed = 1234
        cortical_parcellation_3.inputs.copy_inputs = True
        hemiwf2.connect([(hemi_inputspec2, cortical_parcellation_3,
                          [('smoothwm', 'smoothwm'), ('aseg_presurf', 'aseg'),
                           ('cortex_label', 'label'),
                           ('sphere_reg', 'canonsurf'), ('curv', 'curv'),
                           ('sulc', 'sulc'), ('classifier3', 'classifier')])])

        # Parcellation Statistics 3
        parcellation_stats_white_3 = parcellation_stats_white.clone(
            name="Parcellation_Statistics_{0}_3".format(hemisphere))
        parcellation_stats_white_3.inputs.out_color = 'aparc.annot.DKTatlas40.ctab'
        parcellation_stats_white_3.inputs.out_table = '{0}.aparc.DKTatlas40.stats'.format(
            hemisphere)
        parcellation_stats_white_3.inputs.hemisphere = hemisphere

        hemiwf2.connect([(hemi_inputspec2, parcellation_stats_white_3, [
            ('wm', 'wm'),
            ('lh_white', 'lh_white'),
            ('rh_white', 'rh_white'),
            ('transform', 'transform'),
            ('brainmask', 'brainmask'),
            ('aseg_presurf', 'aseg'),
            ('cortex_label', 'cortex_label'),
            ('cortex_label', 'in_cortex'),
            ('lh_pial', 'lh_pial'),
            ('rh_pial', 'rh_pial'),
            ('thickness', 'thickness'),
            ('ribbon', 'ribbon'),
        ]),
                         (cortical_parcellation_3, parcellation_stats_white_3,
                          [('out_file', 'in_annotation')])])

        # WM/GM Contrast
        contrast = pe.Node(Contrast(),
                           name="WM_GM_Contrast_{0}".format(hemisphere))
        contrast.inputs.hemisphere = hemisphere
        contrast.inputs.copy_inputs = True

        hemiwf2.connect([
            (hemi_inputspec2, contrast, [
                ('orig_mgz', 'orig'),
                ('rawavg', 'rawavg'),
                ('{0}_white'.format(hemisphere), 'white'),
                ('cortex_label', 'cortex'),
                ('aparc_annot', 'annotation'),
                ('thickness', 'thickness'),
            ]),
        ])

        hemi_outputs2 = [
            'aparc_annot_ctab',
            'aparc_stats',
            'aparc_pial_stats',
            'aparc_a2009s_annot',
            'aparc_a2009s_annot_ctab',
            'aparc_a2009s_annot_stats',
            'aparc_DKTatlas40_annot',
            'aparc_DKTatlas40_annot_ctab',
            'aparc_DKTatlas40_annot_stats',
            'wg_pct_mgh',
            'wg_pct_stats',
            'pctsurfcon_log',
        ]
        hemi_outputspec2 = pe.Node(IdentityInterface(fields=hemi_outputs2),
                                   name="outputspec")

        hemiwf2.connect([
            (contrast, hemi_outputspec2, [('out_contrast', 'wg_pct_mgh'),
                                          ('out_stats', 'wg_pct_stats'),
                                          ('out_log', 'pctsurfcon_log')]),
            (parcellation_stats_white_3, hemi_outputspec2,
             [('out_color', 'aparc_DKTatlas40_annot_ctab'),
              ('out_table', 'aparc_DKTatlas40_annot_stats')]),
            (cortical_parcellation_3, hemi_outputspec2,
             [('out_file', 'aparc_DKTatlas40_annot')]),
            (parcellation_stats_white_2, hemi_outputspec2,
             [('out_color', 'aparc_a2009s_annot_ctab'),
              ('out_table', 'aparc_a2009s_annot_stats')]),
            (cortical_parcellation_2, hemi_outputspec2,
             [('out_file', 'aparc_a2009s_annot')]),
            (parcellation_stats_white,
             hemi_outputspec2, [('out_color', 'aparc_annot_ctab'),
                                ('out_table', 'aparc_stats')]),
            (parcellation_stats_pial, hemi_outputspec2,
             [('out_table', 'aparc_pial_stats')]),
        ])
        # connect inputs to hemisphere2 workflow
        ar3_wf.connect([
            (inputspec, hemiwf2, [
                ('wm', 'inputspec.wm'),
                ('lh_white', 'inputspec.lh_white'),
                ('rh_white', 'inputspec.rh_white'),
                ('transform', 'inputspec.transform'),
                ('brainmask', 'inputspec.brainmask'),
                ('aseg_presurf', 'inputspec.aseg_presurf'),
                ('{0}_cortex_label'.format(hemisphere),
                 'inputspec.cortex_label'),
                ('{0}_smoothwm'.format(hemisphere), 'inputspec.smoothwm'),
                ('orig_mgz', 'inputspec.orig_mgz'),
                ('rawavg', 'inputspec.rawavg'),
                ('{0}_curv'.format(hemisphere), 'inputspec.curv'),
                ('{0}_sulc'.format(hemisphere), 'inputspec.sulc'),
                ('{0}_classifier2'.format(hemisphere),
                 'inputspec.classifier2'),
                ('{0}_classifier3'.format(hemisphere),
                 'inputspec.classifier3'),
            ]),
            (ar3_lh_wf1, hemiwf2, [('outputspec.pial', 'inputspec.lh_pial')]),
            (ar3_rh_wf1, hemiwf2, [('outputspec.pial', 'inputspec.rh_pial')]),
            (hemiwf1, hemiwf2,
             [('outputspec.thickness_pial', 'inputspec.thickness'),
              ('outputspec.aparc_annot', 'inputspec.aparc_annot'),
              ('outputspec.sphere_reg', 'inputspec.sphere_reg')]),
            (volume_mask, hemiwf2, [('out_ribbon', 'inputspec.ribbon')]),
        ])
        # End hemisphere2 workflow

    # APARC to ASEG
    # Adds information from the ribbon into the aseg.mgz (volume parcellation).
    aparc_2_aseg = pe.Node(Aparc2Aseg(), name="Aparc2Aseg")
    aparc_2_aseg.inputs.volmask = True
    aparc_2_aseg.inputs.copy_inputs = True
    aparc_2_aseg.inputs.out_file = "aparc+aseg.mgz"
    ar3_wf.connect([(inputspec, aparc_2_aseg, [
        ('lh_white', 'lh_white'),
        ('rh_white', 'rh_white'),
    ]),
                    (ar3_lh_wf1, aparc_2_aseg, [
                        ('outputspec.pial', 'lh_pial'),
                        ('outputspec.aparc_annot', 'lh_annotation'),
                    ]),
                    (ar3_rh_wf1, aparc_2_aseg, [
                        ('outputspec.pial', 'rh_pial'),
                        ('outputspec.aparc_annot', 'rh_annotation'),
                    ]),
                    (volume_mask, aparc_2_aseg, [
                        ('rh_ribbon', 'rh_ribbon'),
                        ('lh_ribbon', 'lh_ribbon'),
                        ('out_ribbon', 'ribbon'),
                    ])])
    if fsvernum < 6:
        ar3_wf.connect([(inputspec, aparc_2_aseg, [('aseg_presurf', 'aseg')])])
    else:
        # Relabel Hypointensities
        relabel_hypos = pe.Node(RelabelHypointensities(),
                                name="Relabel_Hypointensities")
        relabel_hypos.inputs.out_file = 'aseg.presurf.hypos.mgz'
        ar3_wf.connect([(inputspec, relabel_hypos, [('aseg_presurf', 'aseg'),
                                                    ('lh_white', 'lh_white'),
                                                    ('rh_white', 'rh_white')])
                        ])
        ar3_wf.connect([(relabel_hypos, aparc_2_aseg, [('out_file', 'aseg')])])

    aparc_2_aseg_2009 = pe.Node(Aparc2Aseg(), name="Aparc2Aseg_2009")
    aparc_2_aseg_2009.inputs.volmask = True
    aparc_2_aseg_2009.inputs.a2009s = True
    aparc_2_aseg_2009.inputs.copy_inputs = True
    aparc_2_aseg_2009.inputs.out_file = "aparc.a2009s+aseg.mgz"
    ar3_wf.connect([(inputspec, aparc_2_aseg_2009, [
        ('lh_white', 'lh_white'),
        ('rh_white', 'rh_white'),
    ]), (ar3_lh_wf1, aparc_2_aseg_2009, [
        ('outputspec.pial', 'lh_pial'),
    ]),
                    (ar3_lh_wf2, aparc_2_aseg_2009,
                     [('outputspec.aparc_a2009s_annot', 'lh_annotation')]),
                    (ar3_rh_wf2, aparc_2_aseg_2009,
                     [('outputspec.aparc_a2009s_annot', 'rh_annotation')]),
                    (ar3_rh_wf1, aparc_2_aseg_2009, [
                        ('outputspec.pial', 'rh_pial'),
                    ]),
                    (volume_mask, aparc_2_aseg_2009,
                     [('rh_ribbon', 'rh_ribbon'), ('lh_ribbon', 'lh_ribbon'),
                      ('out_ribbon', 'ribbon')])])

    if fsvernum >= 6:
        apas_2_aseg = pe.Node(Apas2Aseg(), name="Apas_2_Aseg")
        ar3_wf.connect([(aparc_2_aseg, apas_2_aseg, [('out_file', 'in_file')]),
                        (relabel_hypos, aparc_2_aseg_2009, [('out_file',
                                                             'aseg')])])
    else:
        # aseg.mgz gets edited in place, so we'll copy and pass it to the
        # outputspec once aparc_2_aseg has completed
        def out_aseg(in_aparcaseg, in_aseg, out_file):
            import shutil
            import os
            out_file = os.path.abspath(out_file)
            shutil.copy(in_aseg, out_file)
            return out_file

        apas_2_aseg = pe.Node(Function(['in_aparcaseg', 'in_aseg', 'out_file'],
                                       ['out_file'], out_aseg),
                              name="Aseg")
        ar3_wf.connect([
            (aparc_2_aseg, apas_2_aseg, [('out_file', 'in_aparcaseg')]),
            (inputspec, apas_2_aseg, [('aseg_presurf', 'in_aseg')]),
            (inputspec, aparc_2_aseg_2009, [('aseg_presurf', 'aseg')])
        ])

    apas_2_aseg.inputs.out_file = "aseg.mgz"

    # Segmentation Stats
    """
    Computes statistics on the segmented subcortical structures found in
    mri/aseg.mgz. Writes output to file stats/aseg.stats.
    """

    segstats = pe.Node(SegStatsReconAll(), name="Segmentation_Statistics")
    segstats.inputs.empty = True
    segstats.inputs.brain_vol = 'brain-vol-from-seg'
    segstats.inputs.exclude_ctx_gm_wm = True
    segstats.inputs.supratent = True
    segstats.inputs.subcort_gm = True
    segstats.inputs.etiv = True
    segstats.inputs.wm_vol_from_surf = True
    segstats.inputs.cortex_vol_from_surf = True
    segstats.inputs.total_gray = True
    segstats.inputs.euler = True
    segstats.inputs.exclude_id = 0
    segstats.inputs.intensity_units = "MR"
    segstats.inputs.summary_file = 'aseg.stats'
    segstats.inputs.copy_inputs = True

    ar3_wf.connect([
        (apas_2_aseg, segstats, [('out_file', 'segmentation_file')]),
        (inputspec, segstats, [
            ('lh_white', 'lh_white'),
            ('rh_white', 'rh_white'),
            ('transform', 'transform'),
            ('norm', 'in_intensity'),
            ('norm', 'partial_volume_file'),
            ('brainmask', 'brainmask_file'),
            ('lh_orig_nofix', 'lh_orig_nofix'),
            ('rh_orig_nofix', 'rh_orig_nofix'),
            ('lookup_table', 'color_table_file'),
        ]),
        (volume_mask, segstats, [('out_ribbon', 'ribbon')]),
        (ar3_lh_wf1, segstats, [
            ('outputspec.pial', 'lh_pial'),
        ]),
        (ar3_rh_wf1, segstats, [
            ('outputspec.pial', 'rh_pial'),
        ]),
    ])

    if fsvernum >= 6:
        ar3_wf.connect(inputspec, 'aseg_presurf', segstats, 'presurf_seg')
    else:
        ar3_wf.connect(inputspec, 'aseg_presurf', segstats, 'aseg')

    # White Matter Parcellation

    # Adds WM parcellation info into the aseg and computes statistics.

    wm_parcellation = pe.Node(Aparc2Aseg(), name="WM_Parcellation")
    wm_parcellation.inputs.volmask = True
    wm_parcellation.inputs.label_wm = True
    wm_parcellation.inputs.hypo_wm = True
    wm_parcellation.inputs.rip_unknown = True
    wm_parcellation.inputs.copy_inputs = True
    wm_parcellation.inputs.out_file = "wmparc.mgz"

    ar3_wf.connect([(inputspec, wm_parcellation, [
        ('lh_white', 'lh_white'),
        ('rh_white', 'rh_white'),
    ]),
                    (ar3_lh_wf1, wm_parcellation, [
                        ('outputspec.pial', 'lh_pial'),
                        ('outputspec.aparc_annot', 'lh_annotation'),
                    ]),
                    (ar3_rh_wf1, wm_parcellation, [
                        ('outputspec.pial', 'rh_pial'),
                        ('outputspec.aparc_annot', 'rh_annotation'),
                    ]),
                    (volume_mask, wm_parcellation, [
                        ('rh_ribbon', 'rh_ribbon'),
                        ('lh_ribbon', 'lh_ribbon'),
                        ('out_ribbon', 'ribbon'),
                    ]), (apas_2_aseg, wm_parcellation, [('out_file', 'aseg')]),
                    (aparc_2_aseg, wm_parcellation, [('out_file', 'ctxseg')])])

    if fsvernum < 6:
        ar3_wf.connect([(inputspec, wm_parcellation, [('filled', 'filled')])])

    # White Matter Segmentation Stats

    wm_segstats = pe.Node(SegStatsReconAll(),
                          name="WM_Segmentation_Statistics")
    wm_segstats.inputs.intensity_units = "MR"
    wm_segstats.inputs.wm_vol_from_surf = True
    wm_segstats.inputs.etiv = True
    wm_segstats.inputs.copy_inputs = True
    wm_segstats.inputs.exclude_id = 0
    wm_segstats.inputs.summary_file = "wmparc.stats"

    ar3_wf.connect([
        (wm_parcellation, wm_segstats, [('out_file', 'segmentation_file')]),
        (inputspec, wm_segstats, [
            ('lh_white', 'lh_white'),
            ('rh_white', 'rh_white'),
            ('transform', 'transform'),
            ('norm', 'in_intensity'),
            ('norm', 'partial_volume_file'),
            ('brainmask', 'brainmask_file'),
            ('lh_orig_nofix', 'lh_orig_nofix'),
            ('rh_orig_nofix', 'rh_orig_nofix'),
            ('wm_lookup_table', 'color_table_file'),
        ]),
        (volume_mask, wm_segstats, [('out_ribbon', 'ribbon')]),
        (ar3_lh_wf1, wm_segstats, [
            ('outputspec.pial', 'lh_pial'),
        ]),
        (ar3_rh_wf1, wm_segstats, [
            ('outputspec.pial', 'rh_pial'),
        ]),
    ])

    if fsvernum >= 6:
        ar3_wf.connect(inputspec, 'aseg_presurf', wm_segstats, 'presurf_seg')
    else:
        ar3_wf.connect(inputspec, 'aseg_presurf', wm_segstats, 'aseg')

    # add brodman area maps to the workflow
    ba_WF, ba_outputs = create_ba_maps_wf(th3=th3,
                                          exvivo=exvivo,
                                          entorhinal=entorhinal)

    ar3_wf.connect([
        (ar3_lh_wf1, ba_WF, [
            ('outputspec.sphere_reg', 'inputspec.lh_sphere_reg'),
            ('outputspec.thickness_pial', 'inputspec.lh_thickness'),
            ('outputspec.pial', 'inputspec.lh_pial'),
        ]),
        (ar3_rh_wf1, ba_WF, [
            ('outputspec.sphere_reg', 'inputspec.rh_sphere_reg'),
            ('outputspec.thickness_pial', 'inputspec.rh_thickness'),
            ('outputspec.pial', 'inputspec.rh_pial'),
        ]),
        (inputspec, ba_WF, [
            ('lh_white', 'inputspec.lh_white'),
            ('rh_white', 'inputspec.rh_white'),
            ('transform', 'inputspec.transform'),
            ('aseg_presurf', 'inputspec.aseg'),
            ('brainmask', 'inputspec.brainmask'),
            ('wm', 'inputspec.wm'),
            ('lh_orig', 'inputspec.lh_orig'),
            ('rh_orig', 'inputspec.rh_orig'),
            ('lh_cortex_label', 'inputspec.lh_cortex_label'),
            ('rh_cortex_label', 'inputspec.rh_cortex_label'),
            ('src_subject_dir', 'inputspec.src_subject_dir'),
            ('src_subject_id', 'inputspec.src_subject_id'),
            ('color_table', 'inputspec.color_table'),
        ]), (volume_mask, ba_WF, [('out_ribbon', 'inputspec.ribbon')])
    ])

    if qcache:
        source_inputs = ['lh_sphere_reg', 'rh_sphere_reg']
        source_subject = pe.Node(DataGrabber(outfields=source_inputs),
                                 name="source_subject")
        source_subject.inputs.template = '*'
        source_subject.inputs.sort_filelist = False
        source_subject.inputs.field_template = dict(
            lh_sphere_reg='surf/lh.sphere.reg',
            rh_sphere_reg='surf/rh.sphere.reg')

        qcache_wf = pe.Workflow("QCache")

        measurements = [
            'thickness', 'area', 'area.pial', 'volume', 'curv', 'sulc',
            'white.K', 'white.H', 'jacobian_white', 'w-g.pct.mgh'
        ]

        qcache_inputs = list()
        for source_file in source_inputs:
            qcache_inputs.append('source_' + source_file)
        qcache_config = dict()
        qcache_outputs = list()
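        # NOTE: 'config' (a dict with a 'src_subject_id' key, e.g.
        # 'fsaverage') is assumed to be defined in the enclosing module; it is
        # read at graph-construction time below.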
        for hemisphere in ['lh', 'rh']:
            qcache_config[hemisphere] = dict()
            for meas_name in measurements:
                qcache_config[hemisphere][meas_name] = dict()

                if meas_name == 'thickness':
                    meas_file = hemisphere + '_' + meas_name + '_pial'
                else:
                    meas_file = hemisphere + '_' + meas_name.replace(
                        '.', '_').replace('-', '')
                qcache_inputs.append(meas_file)

                preproc_name = "Preproc_{0}".format(meas_file)
                preproc_out = '{0}.{1}.{2}.mgh'.format(
                    hemisphere, meas_name, config['src_subject_id'])
                preproc_out_name = preproc_out.replace('.', '_')
                qcache_config[hemisphere][meas_name]['preproc'] = dict(
                    infile=meas_file,
                    name=preproc_name,
                    out=preproc_out,
                    out_name=preproc_out_name)
                qcache_outputs.append(preproc_out_name)

                qcache_config[hemisphere][meas_name]['smooth'] = dict()
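                # FWHM levels 0, 5, 10, 15, 20, 25 mm (FreeSurfer's qcache convention)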
                for value in range(0, 26, 5):
                    smooth_name = "Smooth_{0}_{1}".format(meas_file, value)
                    smooth_out = "{0}.{1}.fwhm{2}.{3}.mgh".format(
                        hemisphere, meas_name, value, config['src_subject_id'])
                    smooth_out_name = smooth_out.replace('.', '_')
                    qcache_config[hemisphere][meas_name]['smooth'][
                        value] = dict(name=smooth_name,
                                      out=smooth_out,
                                      out_name=smooth_out_name)
                    qcache_outputs.append(smooth_out_name)

            qcache_inputs.append(hemisphere + '_sphere_reg')

        qcache_inputspec = pe.Node(IdentityInterface(fields=qcache_inputs),
                                   name="inputspec")

        qcache_outputspec = pe.Node(IdentityInterface(fields=qcache_outputs),
                                    name="outputspec")

        for hemi in qcache_config:
            for meas_config in qcache_config[hemi].values():
                preprocess = pe.Node(MRISPreprocReconAll(),
                                     name=meas_config['preproc']['name'])
                target_id = config['src_subject_id']
                preprocess.inputs.out_file = meas_config['preproc']['out']
                preprocess.inputs.target = target_id
                preprocess.inputs.hemi = hemi
                preprocess.inputs.copy_inputs = True

                qcache_merge = pe.Node(Merge(2),
                                       name="Merge{0}".format(
                                           meas_config['preproc']['name']))

                qcache_wf.connect([
                    (qcache_inputspec, qcache_merge, [('lh_sphere_reg', 'in1'),
                                                      ('rh_sphere_reg', 'in2')
                                                      ]),
                    (qcache_inputspec, preprocess,
                     [(meas_config['preproc']['infile'], 'surf_measure_file'),
                      ('source_lh_sphere_reg', 'lh_surfreg_target'),
                      ('source_rh_sphere_reg', 'rh_surfreg_target')]),
                    (qcache_merge, preprocess, [('out', 'surfreg_files')]),
                    (preprocess, qcache_outputspec,
                     [('out_file', meas_config['preproc']['out_name'])]),
                ])

                for value, val_config in meas_config['smooth'].items():
                    surf2surf = pe.Node(SurfaceSmooth(),
                                        name=val_config['name'])
                    surf2surf.inputs.fwhm = value
                    surf2surf.inputs.cortex = True
                    surf2surf.inputs.subject_id = target_id
                    surf2surf.inputs.hemi = hemi
                    surf2surf.inputs.out_file = val_config['out']
                    qcache_wf.connect([
                        (preprocess, surf2surf, [('out_file', 'in_file')]),
                        (surf2surf, qcache_outputspec,
                         [('out_file', val_config['out_name'])])
                    ])

        # connect qcache inputs
        ar3_wf.connect([
            (inputspec, qcache_wf, [('lh_curv', 'inputspec.lh_curv'),
                                    ('rh_curv', 'inputspec.rh_curv'),
                                    ('lh_sulc', 'inputspec.lh_sulc'),
                                    ('rh_sulc', 'inputspec.rh_sulc'),
                                    ('lh_white_K', 'inputspec.lh_white_K'),
                                    ('rh_white_K', 'inputspec.rh_white_K'),
                                    ('lh_white_H', 'inputspec.lh_white_H'),
                                    ('rh_white_H', 'inputspec.rh_white_H'),
                                    ('lh_area', 'inputspec.lh_area'),
                                    ('rh_area', 'inputspec.rh_area')]),
            (ar3_lh_wf1, qcache_wf,
             [('outputspec.thickness_pial', 'inputspec.lh_thickness_pial'),
              ('outputspec.area_pial', 'inputspec.lh_area_pial'),
              ('outputspec.volume', 'inputspec.lh_volume'),
              ('outputspec.jacobian_white', 'inputspec.lh_jacobian_white'),
              ('outputspec.sphere_reg', 'inputspec.lh_sphere_reg')]),
            (ar3_lh_wf2, qcache_wf, [('outputspec.wg_pct_mgh',
                                      'inputspec.lh_wg_pct_mgh')]),
            (ar3_rh_wf1, qcache_wf,
             [('outputspec.thickness_pial', 'inputspec.rh_thickness_pial'),
              ('outputspec.area_pial', 'inputspec.rh_area_pial'),
              ('outputspec.volume', 'inputspec.rh_volume'),
              ('outputspec.jacobian_white', 'inputspec.rh_jacobian_white'),
              ('outputspec.sphere_reg', 'inputspec.rh_sphere_reg')]),
            (ar3_rh_wf2, qcache_wf, [('outputspec.wg_pct_mgh',
                                      'inputspec.rh_wg_pct_mgh')]),
        ])
        for source_file in source_inputs:
            ar3_wf.connect([(inputspec, source_subject, [('source_subject_dir',
                                                          'base_directory')]),
                            (source_subject, qcache_wf,
                             [(source_file, 'inputspec.source_' + source_file)
                              ])])
        # end qcache workflow

    # Add outputs to outputspec
    ar3_outputs = [
        'aseg', 'wmparc', 'wmparc_stats', 'aseg_stats', 'aparc_a2009s_aseg',
        'aparc_aseg', 'aseg_presurf_hypos', 'ribbon', 'rh_ribbon', 'lh_ribbon'
    ]
    for output in hemi_outputs1 + hemi_outputs2:
        for hemi in ('lh_', 'rh_'):
            ar3_outputs.append(hemi + output)
    if qcache:
        ar3_outputs.extend(qcache_outputs)

    ar3_outputs.extend(ba_outputs)

    outputspec = pe.Node(IdentityInterface(fields=ar3_outputs),
                         name="outputspec")

    ar3_wf.connect([
        (apas_2_aseg, outputspec, [('out_file', 'aseg')]),
        (wm_parcellation, outputspec, [('out_file', 'wmparc')]),
        (wm_segstats, outputspec, [('summary_file', 'wmparc_stats')]),
        (segstats, outputspec, [('summary_file', 'aseg_stats')]),
        (aparc_2_aseg_2009, outputspec, [('out_file', 'aparc_a2009s_aseg')]),
        (aparc_2_aseg, outputspec, [('out_file', 'aparc_aseg')]),
        (volume_mask, outputspec, [('out_ribbon', 'ribbon'),
                                   ('lh_ribbon', 'lh_ribbon'),
                                   ('rh_ribbon', 'rh_ribbon')])
    ])
    if fsvernum >= 6:
        ar3_wf.connect([(relabel_hypos, outputspec, [('out_file',
                                                      'aseg_presurf_hypos')])])

    for i, outputs in enumerate([hemi_outputs1, hemi_outputs2]):
        if i == 0:
            lhwf = ar3_lh_wf1
            rhwf = ar3_rh_wf1
        else:
            lhwf = ar3_lh_wf2
            rhwf = ar3_rh_wf2
        for output in outputs:
            ar3_wf.connect([
                (lhwf, outputspec, [('outputspec.' + output, 'lh_' + output)]),
                (rhwf, outputspec, [('outputspec.' + output, 'rh_' + output)])
            ])

    for output in ba_outputs:
        ar3_wf.connect([(ba_WF, outputspec, [('outputspec.' + output, output)])
                        ])

    if qcache:
        for output in qcache_outputs:
            ar3_wf.connect([(qcache_wf, outputspec, [('outputspec.' + output,
                                                      output)])])

    return ar3_wf, ar3_outputs
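
# A minimal usage sketch (hypothetical settings): build AutoRecon3 for
# FreeSurfer 6; the inputspec fields must be connected or set before running.
ar3_wf, ar3_outputs = create_AutoRecon3(qcache=False, fsvernum=6.0)
ar3_wf.write_graph(graph2use='colored', format='svg')  # inspect the graph first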