subject_id_infosource = pe.Node(
        util.IdentityInterface(fields=['subject_id']),
        name="subject_id_infosource")
    subject_id_infosource.iterables = ("subject_id", subjects)

    fwhm_infosource = pe.Node(util.IdentityInterface(fields=['fwhm']),
                              name="fwhm_infosource")
    fwhm_infosource.iterables = ("fwhm", [5])

    bandpass_infosource = pe.Node(util.IdentityInterface(fields=['bandpass']),
                                  name="bandpass_infosource")
    bandpass_infosource.iterables = ("bandpass",
                                     ["highpass_freq_0.01_lowpass_freq_0.1"])

    datagrabber = pe.Node(
        nio.DataGrabber(infields=['subject_id'],
                        outfields=['epi_mask', 'func2anat_transform']),
        name="datagrabber")
    datagrabber.inputs.base_directory = os.path.join(resultsdir, 'volumes')
    datagrabber.inputs.template = '%s/_subject_id_%s/%s*/*.%s'
    datagrabber.inputs.template_args['func2anat_transform'] = [[
        'func2anat_transform', 'subject_id', '', 'mat'
    ]]
    datagrabber.inputs.template_args['epi_mask'] = [[
        'epi_mask', 'subject_id', '', 'nii'
    ]]
    datagrabber.inputs.sort_filelist = True
    wf.connect(subject_id_infosource, "subject_id", datagrabber, "subject_id")
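
    # A worked example of how the template resolves, assuming a hypothetical
    # subject id 'sub001': the four %s slots in '%s/_subject_id_%s/%s*/*.%s'
    # are filled from the corresponding template_args entry, so 'epi_mask'
    # globs
    #     epi_mask/_subject_id_sub001/*/*.nii
    # and 'func2anat_transform' globs
    #     func2anat_transform/_subject_id_sub001/*/*.mat
    # both relative to base_directory (resultsdir/volumes).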

    timeseries_datagrabber = pe.Node(nio.DataGrabber(
        infields=['subject_id', 'fwhm', 'bandpass'],
        outfields=['preprocessed_epi']),
Example #2
modelling.connect(level1estimate, 'spm_mat_file', contrastestimate,
                  'spm_mat_file')
modelling.connect(level1estimate, 'beta_images', contrastestimate,
                  'beta_images')
modelling.connect(level1estimate, 'residual_image', contrastestimate,
                  'residual_image')

main_workflow = pe.Workflow(name="main_workflow")
main_workflow.base_dir = "smoothing_comparison_workflow"
main_workflow.connect(preprocessing, "realign.realignment_parameters",
                      modelling, "specify_model.realignment_parameters")
main_workflow.connect(preprocessing, "select_smoothed_files.out", modelling,
                      "specify_model.functional_runs")
main_workflow.connect(preprocessing, "compute_mask.brain_mask", modelling,
                      "level1design.mask_image")

datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                               outfields=['func', 'struct']),
                     name='datasource')
datasource.inputs.base_directory = os.path.abspath('data')
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info = dict(
    func=[['subject_id', ['f3', 'f5', 'f7', 'f10']]],
    struct=[['subject_id', 'struct']])
datasource.inputs.subject_id = 's1'
datasource.inputs.sort_filelist = True

main_workflow.connect(datasource, 'func', preprocessing, 'realign.in_files')
main_workflow.connect(datasource, 'struct', preprocessing,
                      'recon_all.T1_files')

datasink = pe.Node(interface=nio.DataSink(), name="datasink")
datasink.inputs.base_directory = os.path.abspath(
Example #3
it should repeat the analysis on each of the items in the
``subject_list``. In the current example, the entire first level
preprocessing and estimation will be repeated for each subject
contained in subject_list.
"""

infosource.iterables = ('subject_id', subject_list)
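
# For illustration (hypothetical values): with subject_list = ['s1', 's2'],
# every node downstream of infosource is run once per subject, and the
# per-subject results are kept apart in working-directory folders named after
# the parameterization (e.g. _subject_id_s1), the same naming convention the
# DataGrabber template in the first example globs against.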
"""
Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data.  The
:class:`nipype.pipeline.engine.Node` class wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""

datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                               outfields=list(info.keys())),
                     name='datasource')

datasource.inputs.template = "%s/%s"

# This needs to point to the fdt folder you can find after extracting
# http://www.fmrib.ox.ac.uk/fslcourse/fsl_course_data2.tar.gz
datasource.inputs.base_directory = os.path.abspath('fsl_course_data/fdt/')

datasource.inputs.field_template = dict(dwi='%s/%s.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""
Setup for Diffusion Tensor Computation
--------------------------------------
Here we will create a generic workflow for DTI computation
Example #4
###############################################################################
# Then we create a node to pass input filenames to DataGrabber from nipype

subject_ids = ['sub-0003']  # 'sub-0004', 'sub-0006'
infosource = create_iterator(['subject_id', 'freq_band_name'],
                             [subject_ids, freq_band_names])

###############################################################################
# and a node to grab data. The template_args in this node are filled from
# the values iterated by the infosource node

template_path = '*%s_task-rest_run-01_meg_0_60_raw_filt_dsamp_ica_ROI_ts.npy'

datasource = pe.Node(
    interface=nio.DataGrabber(infields=['subject_id'], outfields=['ts_file']),
    name='datasource')

datasource.inputs.base_directory = data_path
datasource.inputs.template = template_path

datasource.inputs.template_args = dict(ts_file=[['subject_id']])
datasource.inputs.sort_filelist = True
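
# For the subject listed above ('sub-0003') the single %s in template_path
# resolves to
#     *sub-0003_task-rest_run-01_meg_0_60_raw_filt_dsamp_ica_ROI_ts.npy
# which is globbed relative to data_path.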

###############################################################################
# We then reuse the pipeline from the previous example :ref:`conmat_to_graph pipeline <conmat_to_graph>`

from ephypype.pipelines import create_pipeline_time_series_to_spectral_connectivity # noqa

spectral_workflow = create_pipeline_time_series_to_spectral_connectivity(
    data_path, con_method=con_method,
Example #5
func_sessions = data_nii["func_sessions"]
conf_interval_prob = data_nii["conf_interval_prob"]

infosource = pe.Node(
    interface=IdentityInterface(fields=['subject_id', 'session']),
    name="infosource")

infosource.iterables = [('subject_id', subject_ids),
                        ('session', func_sessions)]

###############################################################################
# and a node to grab data. The template_args in this node are filled from
# the values iterated by the infosource node

datasource = pe.Node(interface=nio.DataGrabber(
    infields=['subject_id', 'session'],
    outfields=['img_file', 'gm_anat_file', 'wm_anat_file', 'csf_anat_file',
               'rp_file']),
                     name='datasource')

datasource.inputs.base_directory = data_path
datasource.inputs.template = '%ssub-%s%s%s%s'
datasource.inputs.template_args = dict(
    img_file=[["wr", 'subject_id', "_task-", 'session', "_bold.nii"]],
    gm_anat_file=[["rwc1", 'subject_id', "", '', "_T1w.nii"]],
    wm_anat_file=[["rwc2", 'subject_id', "", '', "_T1w.nii"]],
    csf_anat_file=[["rwc3", 'subject_id', "", '', "_T1w.nii"]],
    rp_file=[["rp_", 'subject_id', "_task-", 'session', "_bold.txt"]],
)

datasource.inputs.sort_filelist = True
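
# How the five %s slots resolve for a hypothetical subject '01' and session
# 'rest': img_file becomes 'wrsub-01_task-rest_bold.nii', gm_anat_file becomes
# 'rwc1sub-01_T1w.nii' and rp_file becomes 'rp_sub-01_task-rest_bold.txt',
# all searched for under data_path.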
Example #6
contained in subject_list.
"""

infosource.iterables = ('subject_id', subject_list)
"""
Preprocessing pipeline nodes
----------------------------

Now we create a :class:`nipype.interfaces.io.DataGrabber` object and
fill in the information from above about the layout of our data.  The
:class:`nipype.pipeline.engine.Node` class wraps the interface object
and provides additional housekeeping and pipeline specific
functionality.
"""

datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                               outfields=['func', 'struct']),
                     name='datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True
"""Use :class:`nipype.interfaces.spm.Realign` for motion correction
and register all images to the mean image.
"""

realign = pe.Node(interface=spm.Realign(), name="realign")
realign.inputs.register_to_mean = True
"""Use :class:`nipype.algorithms.rapidart` to determine which of the
images in the functional series are outliers based on deviations in
intensity or movement.
"""
Example #7
def test_datagrabber():
    dg = nio.DataGrabber()
    assert dg.inputs.template == Undefined
    assert dg.inputs.base_directory == Undefined
    assert dg.inputs.template_args == {"outfiles": []}
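

# A minimal, hypothetical configuration following the pattern used throughout
# these examples (the directory, template and field names are illustrative):
import os

import nipype.interfaces.io as nio

dg2 = nio.DataGrabber(infields=["subject_id"], outfields=["func"])
dg2.inputs.base_directory = os.getcwd()  # must be an existing directory
dg2.inputs.template = "%s/func/*.nii.gz"  # one %s per entry in template_args
dg2.inputs.template_args = {"func": [["subject_id"]]}
dg2.inputs.sort_filelist = True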
Example #8
    def build_input_node(self):
        """Build and connect an input node to the pipelines.
        """
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        import nipype.interfaces.io as nio

        from clinica.utils.stream import cprint
        from clinica.utils.dwi import check_dwi_volume

        cprint('Found %s image(s) in BIDS dataset' % len(self.subjects))
        for i in range(len(self.subjects)):
            # cprint('------- SUBJECT %s SESSION %s -------'
            #        % (self.subjects[i], self.sessions[i]))

            # Check b-val file and compute the nb of b0 from file:
            bval_file = self.bids_layout.get(
                return_type='file',
                type='dwi',
                extensions=['bval'],
                session=self.sessions[i].replace('ses-', ''),
                subject=self.subjects[i].replace('sub-', ''))
            if len(bval_file) != 1:
                raise IOError('Expected to find 1 bval file for subject ' +
                              self.subjects[i] + ' and session ' +
                              self.sessions[i] + ' but found ' +
                              str(len(bval_file)) + ' bval instead.')

            # Check b-vec file:
            bvec_file = self.bids_layout.get(
                return_type='file',
                type='dwi',
                extensions=['bvec'],
                session=self.sessions[i].replace('ses-', ''),
                subject=self.subjects[i].replace('sub-', ''))
            if len(bvec_file) != 1:
                raise IOError('Expected to find 1 bvec file for subject ' +
                              self.subjects[i] + ' and session ' +
                              self.sessions[i] + ' but found ' +
                              str(len(bvec_file)) + ' bvec instead.')

            # Check DWI file:
            dwi_file = self.bids_layout.get(
                return_type='file',
                type='dwi',
                extensions=['.nii|.nii.gz'],
                session=self.sessions[i].replace('ses-', ''),
                subject=self.subjects[i].replace('sub-', ''))
            if len(dwi_file) != 1:
                raise IOError('Expected to find 1 dwi file for subject ' +
                              self.subjects[i] + ' and session ' +
                              self.sessions[i] + ' but found ' +
                              str(len(dwi_file)) + ' dwi instead.')

            # Check that the number of DWI, b-vecs & b-val are the same:
            check_dwi_volume(in_dwi=dwi_file[0],
                             in_bvec=bvec_file[0],
                             in_bval=bval_file[0])

            # Check T1w file:
            t1_file = self.bids_layout.get(
                return_type='file',
                type='T1w',
                extensions=['.nii|.nii.gz'],
                session=self.sessions[i].replace('ses-', ''),
                subject=self.subjects[i].replace('sub-', ''))
            if len(t1_file) != 1:
                raise IOError('Expected to find 1 T1w file for subject ' +
                              self.subjects[i] + ' and session ' +
                              self.sessions[i] + ' but found ' +
                              str(len(t1_file)) + ' T1w instead.')

        # Iterables:
        iterables_node = npe.Node(name="LoadingCLIArguments",
                                  interface=nutil.IdentityInterface(
                                      fields=['subject_id', 'session_id'],
                                      mandatory_inputs=True))
        iterables_node.iterables = [('subject_id', self.subjects),
                                    ('session_id', self.sessions)]
        iterables_node.synchronize = True
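
        # With synchronize=True the two iterables are paired element-wise
        # rather than crossed: for hypothetical lists
        #   subjects = ['sub-01', 'sub-02'], sessions = ['ses-M00', 'ses-M00']
        # the downstream nodes run for (sub-01, ses-M00) and (sub-02, ses-M00)
        # only, so each subject stays matched with its own session.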

        # T1 DataGrabber
        t1_bids_reader = npe.Node(nio.DataGrabber(infields=[
            'subject_id', 'session', 'subject_repeat', 'session_repeat'
        ],
                                                  outfields=['out_files']),
                                  name='t1_bids_reader')
        t1_bids_reader.inputs.base_directory = self.bids_directory
        t1_bids_reader.inputs.template = '%s/%s/anat/%s_%s_*T1w.nii*'
        t1_bids_reader.inputs.sort_filelist = False
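
        # For a hypothetical subject 'sub-01' and session 'ses-M00' the
        # template '%s/%s/anat/%s_%s_*T1w.nii*' resolves to
        #     sub-01/ses-M00/anat/sub-01_ses-M00_*T1w.nii*
        # which is why subject_id and session_id are connected twice in the
        # self.connect block below (once for the directory slots and once for
        # the *_repeat filename slots).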

        # DWI DataGrabber
        dwi_bids_reader = npe.Node(nio.DataGrabber(infields=[
            'subject_id', 'session', 'subject_repeat', 'session_repeat'
        ],
                                                   outfields=['out_files']),
                                   name='dwi_bids_reader')
        dwi_bids_reader.inputs.base_directory = self.bids_directory
        dwi_bids_reader.inputs.template = '%s/%s/dwi/%s_%s_*dwi.nii*'
        dwi_bids_reader.inputs.sort_filelist = False

        # Bval DataGrabber
        bval_bids_reader = npe.Node(nio.DataGrabber(infields=[
            'subject_id', 'session', 'subject_repeat', 'session_repeat'
        ],
                                                    outfields=['out_files']),
                                    name='bval_bids_reader')
        bval_bids_reader.inputs.base_directory = self.bids_directory
        bval_bids_reader.inputs.template = '%s/%s/dwi/%s_%s_*dwi.bval'
        bval_bids_reader.inputs.sort_filelist = False

        # Bvec dataGrabber
        bvec_bids_reader = npe.Node(nio.DataGrabber(infields=[
            'subject_id', 'session', 'subject_repeat', 'session_repeat'
        ],
                                                    outfields=['out_files']),
                                    name='bvec_bids_reader')
        bvec_bids_reader.inputs.base_directory = self.bids_directory
        bvec_bids_reader.inputs.template = '%s/%s/dwi/%s_%s_*dwi.bvec'
        bvec_bids_reader.inputs.sort_filelist = False

        self.connect([
            # Iterables:
            (
                iterables_node,
                t1_bids_reader,
                [
                    ('subject_id', 'subject_id'),  # noqa
                    ('session_id', 'session'),  # noqa
                    ('subject_id', 'subject_repeat'),  # noqa
                    ('session_id', 'session_repeat')
                ]),  # noqa
            (
                iterables_node,
                dwi_bids_reader,
                [
                    ('subject_id', 'subject_id'),  # noqa
                    ('session_id', 'session'),  # noqa
                    ('subject_id', 'subject_repeat'),  # noqa
                    ('session_id', 'session_repeat')
                ]),  # noqa
            (
                iterables_node,
                bval_bids_reader,
                [
                    ('subject_id', 'subject_id'),  # noqa
                    ('session_id', 'session'),  # noqa
                    ('subject_id', 'subject_repeat'),  # noqa
                    ('session_id', 'session_repeat')
                ]),  # noqa
            (
                iterables_node,
                bvec_bids_reader,
                [
                    ('subject_id', 'subject_id'),  # noqa
                    ('session_id', 'session'),  # noqa
                    ('subject_id', 'subject_repeat'),  # noqa
                    ('session_id', 'session_repeat')
                ]),  # noqa
            # Inputnode:
            (t1_bids_reader, self.input_node, [('out_files', 'T1w')]),  # noqa
            (dwi_bids_reader, self.input_node, [('out_files', 'dwi')]),  # noqa
            (bval_bids_reader, self.input_node, [('out_files', 'bval')
                                                 ]),  # noqa
            (bvec_bids_reader, self.input_node, [('out_files', 'bvec')]
             )  # noqa
        ])
Example #9
def generate_single_session_template_WF(projectid,
                                        subjectid,
                                        sessionid,
                                        onlyT1,
                                        master_config,
                                        phase,
                                        interpMode,
                                        pipeline_name,
                                        doDenoise=True):
    """
    Run autoworkup on a single sessionid

    This is the main function to call when processing a data set with T1 & T2
    data.  ExperimentBaseDirectoryPrefix is the base of the directory to place results, T1Images & T2Images
    are the lists of images to be used in the auto-workup. atlas_fname_wpath is
    the path and filename of the atlas to use.
    """

    #if  not 'landmark' in master_config['components'] or not 'auxlmk' in master_config['components'] or not 'tissue_classify' in master_config['components']:
    #    print "Baseline DataSink requires 'AUXLMK' and/or 'TISSUE_CLASSIFY'!!!"
    #    raise NotImplementedError
    # master_config['components'].append('auxlmk')
    # master_config['components'].append('tissue_classify')

    assert phase in [
        'atlas-based-reference', 'subject-based-reference'
    ], "Unknown phase! Valid entries: 'atlas-based-reference', 'subject-based-reference'"

    if 'tissue_classify' in master_config['components']:
        assert ('landmark' in master_config['components']
                ), "tissue_classify Requires landmark step!"
    if 'landmark' in master_config['components']:
        assert 'denoise' in master_config[
            'components'], "landmark Requires denoise step!"

    from workflows.atlasNode import MakeAtlasNode

    baw201 = pe.Workflow(name=pipeline_name)

    inputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'atlasLandmarkFilename', 'atlasWeightFilename', 'LLSModel',
        'inputTemplateModel', 'template_t1', 'atlasDefinition', 'T1s', 'T2s',
        'PDs', 'FLs', 'OTHERs', 'hncma_atlas', 'template_rightHemisphere',
        'template_leftHemisphere', 'template_WMPM2_labels',
        'template_nac_labels', 'template_ventricles'
    ]),
                         run_without_submitting=True,
                         name='inputspec')

    outputsSpec = pe.Node(
        interface=IdentityInterface(fields=[
            't1_average',
            't2_average',
            'pd_average',
            'fl_average',
            'posteriorImages',
            'outputLabels',
            'outputHeadLabels',
            'atlasToSubjectTransform',
            'atlasToSubjectInverseTransform',
            'atlasToSubjectRegistrationState',
            'BCD_ACPC_T1_CROPPED',
            'outputLandmarksInACPCAlignedSpace',
            'outputLandmarksInInputSpace',
            'output_tx',
            'LMIatlasToSubject_tx',
            'writeBranded2DImage',
            'brainStemMask',
            'UpdatedPosteriorsList'  # Longitudinal
        ]),
        run_without_submitting=True,
        name='outputspec')

    dsName = "{0}_ds_{1}".format(phase, sessionid)
    DataSink = pe.Node(name=dsName, interface=nio.DataSink())
    DataSink.overwrite = master_config['ds_overwrite']
    DataSink.inputs.container = '{0}/{1}/{2}'.format(projectid, subjectid,
                                                     sessionid)
    DataSink.inputs.base_directory = master_config['resultdir']

    atlas_static_directory = master_config['atlascache']
    if master_config['workflow_phase'] == 'atlas-based-reference':
        atlas_warped_directory = master_config['atlascache']
        atlasABCNode_XML = MakeAtlasNode(atlas_warped_directory,
                                         'BABCXMLAtlas_{0}'.format(sessionid),
                                         ['W_BRAINSABCSupport'])
        baw201.connect(atlasABCNode_XML, 'ExtendedAtlasDefinition_xml',
                       inputsSpec, 'atlasDefinition')

        atlasABCNode_W = MakeAtlasNode(
            atlas_warped_directory, 'BABCAtlas_W{0}'.format(sessionid),
            ['W_BRAINSABCSupport', 'W_LabelMapsSupport'])
        baw201.connect([(atlasABCNode_W, inputsSpec, [
            ('hncma_atlas', 'hncma_atlas'),
            ('template_leftHemisphere', 'template_leftHemisphere'),
            ('template_rightHemisphere', 'template_rightHemisphere'),
            ('template_WMPM2_labels', 'template_WMPM2_labels'),
            ('template_nac_labels', 'template_nac_labels'),
            ('template_ventricles', 'template_ventricles')
        ])])
        ## These landmarks are only relevant for the atlas-based-reference case
        atlasBCDNode_W = MakeAtlasNode(atlas_warped_directory,
                                       'BBCDAtlas_W{0}'.format(sessionid),
                                       ['W_BCDSupport'])
        baw201.connect([
            (atlasBCDNode_W, inputsSpec, [
                ('template_t1', 'template_t1'),
                ('template_landmarks_50Lmks_fcsv', 'atlasLandmarkFilename'),
            ]),
        ])
        ## Needed for both segmentation and template building prep
        atlasBCUTNode_W = MakeAtlasNode(atlas_warped_directory,
                                        'BBCUTAtlas_W{0}'.format(sessionid),
                                        ['W_BRAINSCutSupport'])

    elif master_config['workflow_phase'] == 'subject-based-reference':
        print(master_config['previousresult'])
        atlas_warped_directory = os.path.join(master_config['previousresult'],
                                              subjectid, 'Atlas')

        template_DG = pe.Node(interface=nio.DataGrabber(
            infields=['subject'],
            outfields=[
                'outAtlasXMLFullPath', 'hncma_atlas',
                'template_leftHemisphere', 'template_rightHemisphere',
                'template_WMPM2_labels', 'template_nac_labels',
                'template_ventricles', 'template_t1',
                'template_landmarks_50Lmks_fcsv'
            ]),
                              name='Template_DG')
        template_DG.inputs.base_directory = master_config['previousresult']
        template_DG.inputs.subject = subjectid
        template_DG.inputs.field_template = {
            'outAtlasXMLFullPath': '%s/Atlas/AtlasDefinition_%s.xml',
            'hncma_atlas': '%s/Atlas/AVG_hncma_atlas.nii.gz',
            'template_leftHemisphere':
            '%s/Atlas/AVG_template_leftHemisphere.nii.gz',
            'template_rightHemisphere':
            '%s/Atlas/AVG_template_rightHemisphere.nii.gz',
            'template_WMPM2_labels':
            '%s/Atlas/AVG_template_WMPM2_labels.nii.gz',
            'template_nac_labels': '%s/Atlas/AVG_template_nac_labels.nii.gz',
            'template_ventricles': '%s/Atlas/AVG_template_ventricles.nii.gz',
            'template_t1': '%s/Atlas/AVG_T1.nii.gz',
            'template_landmarks_50Lmks_fcsv': '%s/Atlas/AVG_LMKS.fcsv',
        }
        template_DG.inputs.template_args = {
            'outAtlasXMLFullPath': [['subject', 'subject']],
            'hncma_atlas': [['subject']],
            'template_leftHemisphere': [['subject']],
            'template_rightHemisphere': [['subject']],
            'template_WMPM2_labels': [['subject']],
            'template_nac_labels': [['subject']],
            'template_ventricles': [['subject']],
            'template_t1': [['subject']],
            'template_landmarks_50Lmks_fcsv': [['subject']]
        }
        template_DG.inputs.template = '*'
        template_DG.inputs.sort_filelist = True
        template_DG.inputs.raise_on_empty = True

        baw201.connect(template_DG, 'outAtlasXMLFullPath', inputsSpec,
                       'atlasDefinition')
        baw201.connect([(
            template_DG,
            inputsSpec,
            [
                ## Already connected ('template_t1','template_t1'),
                ('hncma_atlas', 'hncma_atlas'),
                ('template_leftHemisphere', 'template_leftHemisphere'),
                ('template_rightHemisphere', 'template_rightHemisphere'),
                ('template_WMPM2_labels', 'template_WMPM2_labels'),
                ('template_nac_labels', 'template_nac_labels'),
                ('template_ventricles', 'template_ventricles')
            ])])
        ## These landmarks are only relevant for the atlas-based-reference case
        baw201.connect([
            (template_DG, inputsSpec, [
                ('template_t1', 'template_t1'),
                ('template_landmarks_50Lmks_fcsv', 'atlasLandmarkFilename'),
            ]),
        ])

    else:
        assert 0 == 1, "Invalid workflow type specified for singleSession"

    atlasBCDNode_S = MakeAtlasNode(atlas_static_directory,
                                   'BBCDAtlas_S{0}'.format(sessionid),
                                   ['S_BCDSupport'])
    baw201.connect([
        (atlasBCDNode_S, inputsSpec,
         [('template_weights_50Lmks_wts', 'atlasWeightFilename'),
          ('LLSModel_50Lmks_h5', 'LLSModel'),
          ('T1_50Lmks_mdl', 'inputTemplateModel')]),
    ])

    if doDenoise:
        print("\ndenoise image filter\n")
        makeDenoiseInImageList = pe.Node(Function(
            function=MakeOutFileList,
            input_names=[
                'T1List', 'T2List', 'PDList', 'FLList', 'OtherList', 'postfix',
                'PrimaryT1'
            ],
            output_names=['inImageList', 'outImageList', 'imageTypeList']),
                                         run_without_submitting=True,
                                         name="99_makeDenoiseInImageList")
        baw201.connect(inputsSpec, 'T1s', makeDenoiseInImageList, 'T1List')
        baw201.connect(inputsSpec, 'T2s', makeDenoiseInImageList, 'T2List')
        baw201.connect(inputsSpec, 'PDs', makeDenoiseInImageList, 'PDList')
        makeDenoiseInImageList.inputs.FLList = []  # an emptyList HACK
        makeDenoiseInImageList.inputs.PrimaryT1 = None  # an emptyList HACK
        makeDenoiseInImageList.inputs.postfix = "_UNM_denoised.nii.gz"
        # HACK baw201.connect( inputsSpec, 'FLList', makeDenoiseInImageList, 'FLList' )
        baw201.connect(inputsSpec, 'OTHERs', makeDenoiseInImageList,
                       'OtherList')

        print("\nDenoise:\n")
        DenoiseInputImgs = pe.MapNode(
            interface=UnbiasedNonLocalMeans(),
            name='denoiseInputImgs',
            iterfield=['inputVolume', 'outputVolume'])
        DenoiseInputImgs.inputs.rc = [1, 1, 1]
        DenoiseInputImgs.inputs.rs = [4, 4, 4]
        DenoiseInputImgs.plugin_args = {
            'qsub_args': modify_qsub_args(master_config['queue'], .2, 1, 1),
            'overwrite': True
        }
        baw201.connect([(makeDenoiseInImageList, DenoiseInputImgs,
                         [('inImageList', 'inputVolume')]),
                        (makeDenoiseInImageList, DenoiseInputImgs,
                         [('outImageList', 'outputVolume')])])
        print("\nMerge all T1 and T2 List\n")
        makePreprocessingOutList = pe.Node(Function(
            function=GenerateSeparateImageTypeList,
            input_names=['inFileList', 'inTypeList'],
            output_names=['T1s', 'T2s', 'PDs', 'FLs', 'OtherList']),
                                           run_without_submitting=True,
                                           name="99_makePreprocessingOutList")
        baw201.connect(DenoiseInputImgs, 'outputVolume',
                       makePreprocessingOutList, 'inFileList')
        baw201.connect(makeDenoiseInImageList, 'imageTypeList',
                       makePreprocessingOutList, 'inTypeList')

    else:
        makePreprocessingOutList = inputsSpec

    if 'landmark' in master_config['components']:
        DoReverseMapping = False  # Set to true for debugging outputs
        if 'auxlmk' in master_config['components']:
            DoReverseMapping = True
        myLocalLMIWF = CreateLandmarkInitializeWorkflow(
            "LandmarkInitialize", interpMode, DoReverseMapping)

        baw201.connect([
            (makePreprocessingOutList, myLocalLMIWF,
             [(('T1s', get_list_element, 0), 'inputspec.inputVolume')]),
            (inputsSpec, myLocalLMIWF,
             [('atlasLandmarkFilename', 'inputspec.atlasLandmarkFilename'),
              ('atlasWeightFilename', 'inputspec.atlasWeightFilename'),
              ('LLSModel', 'inputspec.LLSModel'),
              ('inputTemplateModel', 'inputspec.inputTemplateModel'),
              ('template_t1', 'inputspec.atlasVolume')]),
            (myLocalLMIWF, outputsSpec,
             [('outputspec.outputResampledCroppedVolume',
               'BCD_ACPC_T1_CROPPED'),
              ('outputspec.outputLandmarksInACPCAlignedSpace',
               'outputLandmarksInACPCAlignedSpace'),
              ('outputspec.outputLandmarksInInputSpace',
               'outputLandmarksInInputSpace'),
              ('outputspec.outputTransform', 'output_tx'),
              ('outputspec.atlasToSubjectTransform', 'LMIatlasToSubject_tx'),
              ('outputspec.writeBranded2DImage', 'writeBranded2DImage')])
        ])
        baw201.connect([(
            outputsSpec,
            DataSink,  # TODO: change to myLocalLMIWF -> DataSink
            [
                ('outputLandmarksInACPCAlignedSpace',
                 'ACPCAlign.@outputLandmarks_ACPC'),
                ('writeBranded2DImage', 'ACPCAlign.@writeBranded2DImage'),
                ('BCD_ACPC_T1_CROPPED', 'ACPCAlign.@BCD_ACPC_T1_CROPPED'),
                ('outputLandmarksInInputSpace',
                 'ACPCAlign.@outputLandmarks_Input'),
                ('output_tx', 'ACPCAlign.@output_tx'),
                ('LMIatlasToSubject_tx', 'ACPCAlign.@LMIatlasToSubject_tx'),
            ])])

    if 'tissue_classify' in master_config['components']:
        useRegistrationMask = master_config['use_registration_masking']

        myLocalTCWF = CreateTissueClassifyWorkflow("TissueClassify",
                                                   master_config, interpMode,
                                                   useRegistrationMask)
        baw201.connect([
            (makePreprocessingOutList, myLocalTCWF, [('T1s',
                                                      'inputspec.T1List')]),
            (makePreprocessingOutList, myLocalTCWF, [('T2s',
                                                      'inputspec.T2List')]),
            (inputsSpec, myLocalTCWF,
             [('atlasDefinition', 'inputspec.atlasDefinition'),
              ('template_t1', 'inputspec.atlasVolume'),
              (('T1s', getAllT1sLength), 'inputspec.T1_count'),
              ('PDs', 'inputspec.PDList'), ('FLs', 'inputspec.FLList'),
              ('OTHERs', 'inputspec.OtherList')]),
            (myLocalLMIWF, myLocalTCWF,
             [('outputspec.outputResampledCroppedVolume',
               'inputspec.PrimaryT1'),
              ('outputspec.atlasToSubjectTransform',
               'inputspec.atlasToSubjectInitialTransform')]),
            (myLocalTCWF, outputsSpec,
             [('outputspec.t1_average', 't1_average'),
              ('outputspec.t2_average', 't2_average'),
              ('outputspec.pd_average', 'pd_average'),
              ('outputspec.fl_average', 'fl_average'),
              ('outputspec.posteriorImages', 'posteriorImages'),
              ('outputspec.outputLabels', 'outputLabels'),
              ('outputspec.outputHeadLabels', 'outputHeadLabels'),
              ('outputspec.atlasToSubjectTransform',
               'atlasToSubjectTransform'),
              ('outputspec.atlasToSubjectInverseTransform',
               'atlasToSubjectInverseTransform'),
              ('outputspec.atlasToSubjectRegistrationState',
               'atlasToSubjectRegistrationState')]),
        ])

        baw201.connect([(
            outputsSpec,
            DataSink,  # TODO: change to myLocalTCWF -> DataSink
            [(('t1_average', convertToList), 'TissueClassify.@t1'),
             (('t2_average', convertToList), 'TissueClassify.@t2'),
             (('pd_average', convertToList), 'TissueClassify.@pd'),
             (('fl_average', convertToList), 'TissueClassify.@fl')])])

        currentFixWMPartitioningName = "_".join(
            ['FixWMPartitioning',
             str(subjectid),
             str(sessionid)])
        FixWMNode = pe.Node(interface=Function(
            function=FixWMPartitioning,
            input_names=['brainMask', 'PosteriorsList'],
            output_names=[
                'UpdatedPosteriorsList', 'MatchingFGCodeList',
                'MatchingLabelList', 'nonAirRegionMask'
            ]),
                            name=currentFixWMPartitioningName)

        baw201.connect([
            (myLocalTCWF, FixWMNode, [('outputspec.outputLabels', 'brainMask'),
                                      (('outputspec.posteriorImages',
                                        flattenDict), 'PosteriorsList')]),
            (FixWMNode, outputsSpec, [('UpdatedPosteriorsList',
                                       'UpdatedPosteriorsList')]),
        ])

        currentBRAINSCreateLabelMapName = 'BRAINSCreateLabelMapFromProbabilityMaps_' + str(
            subjectid) + "_" + str(sessionid)
        BRAINSCreateLabelMapNode = pe.Node(
            interface=BRAINSCreateLabelMapFromProbabilityMaps(),
            name=currentBRAINSCreateLabelMapName)

        ## TODO:  Fix the file names
        BRAINSCreateLabelMapNode.inputs.dirtyLabelVolume = 'fixed_headlabels_seg.nii.gz'
        BRAINSCreateLabelMapNode.inputs.cleanLabelVolume = 'fixed_brainlabels_seg.nii.gz'

        baw201.connect([
            (FixWMNode, BRAINSCreateLabelMapNode,
             [('UpdatedPosteriorsList', 'inputProbabilityVolume'),
              ('MatchingFGCodeList', 'foregroundPriors'),
              ('MatchingLabelList', 'priorLabelCodes'),
              ('nonAirRegionMask', 'nonAirRegionMask')]),
            (
                BRAINSCreateLabelMapNode,
                DataSink,
                [  # brainstem code below replaces this ('cleanLabelVolume', 'TissueClassify.@outputLabels'),
                    ('dirtyLabelVolume', 'TissueClassify.@outputHeadLabels')
                ]),
            (myLocalTCWF, DataSink,
             [('outputspec.atlasToSubjectTransform',
               'TissueClassify.@atlas2session_tx'),
              ('outputspec.atlasToSubjectInverseTransform',
               'TissueClassify.@atlas2sessionInverse_tx')]),
            (FixWMNode, DataSink, [('UpdatedPosteriorsList',
                                    'TissueClassify.@posteriors')]),
        ])

        currentAccumulateLikeTissuePosteriorsName = 'AccumulateLikeTissuePosteriors_' + str(
            subjectid) + "_" + str(sessionid)
        AccumulateLikeTissuePosteriorsNode = pe.Node(
            interface=Function(
                function=AccumulateLikeTissuePosteriors,
                input_names=['posteriorImages'],
                output_names=['AccumulatePriorsList',
                              'AccumulatePriorsNames']),
            name=currentAccumulateLikeTissuePosteriorsName)

        baw201.connect([
            (FixWMNode, AccumulateLikeTissuePosteriorsNode,
             [('UpdatedPosteriorsList', 'posteriorImages')]),
            (AccumulateLikeTissuePosteriorsNode, DataSink,
             [('AccumulatePriorsList',
               'ACCUMULATED_POSTERIORS.@AccumulateLikeTissuePosteriorsOutputDir'
               )])
        ])
        """
        brain stem adds on feature
        inputs:
            - landmark (fcsv) file
            - fixed brainlabels seg.nii.gz
        output:
            - complete_brainlabels_seg.nii.gz Segmentation
        """
        myLocalBrainStemWF = CreateBrainstemWorkflow(
            "BrainStem", master_config['queue'],
            "complete_brainlabels_seg.nii.gz")

        baw201.connect([(myLocalLMIWF, myLocalBrainStemWF,
                         [('outputspec.outputLandmarksInACPCAlignedSpace',
                           'inputspec.inputLandmarkFilename')]),
                        (BRAINSCreateLabelMapNode, myLocalBrainStemWF,
                         [('cleanLabelVolume',
                           'inputspec.inputTissueLabelFilename')])])

        baw201.connect(myLocalBrainStemWF,
                       'outputspec.ouputTissuelLabelFilename', DataSink,
                       'TissueClassify.@complete_brainlabels_seg')

    ###########################
    do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(
        master_config)
    if do_BRAINSCut_Segmentation:
        from workflows.segmentation import segmentation
        from workflows.WorkupT1T2BRAINSCut import GenerateWFName

        sname = 'segmentation'
        segWF = segmentation(projectid,
                             subjectid,
                             sessionid,
                             master_config,
                             onlyT1,
                             pipeline_name=sname)

        baw201.connect([(inputsSpec, segWF, [('template_t1',
                                              'inputspec.template_t1')])])
        atlasBCUTNode_W = pe.Node(interface=nio.DataGrabber(
            infields=['subject'],
            outfields=[
                "l_accumben_ProbabilityMap", "r_accumben_ProbabilityMap",
                "l_caudate_ProbabilityMap", "r_caudate_ProbabilityMap",
                "l_globus_ProbabilityMap", "r_globus_ProbabilityMap",
                "l_hippocampus_ProbabilityMap", "r_hippocampus_ProbabilityMap",
                "l_putamen_ProbabilityMap", "r_putamen_ProbabilityMap",
                "l_thalamus_ProbabilityMap", "r_thalamus_ProbabilityMap",
                "phi", "rho", "theta"
            ]),
                                  name='PerSubject_atlasBCUTNode_W')
        atlasBCUTNode_W.inputs.base_directory = master_config['previousresult']
        atlasBCUTNode_W.inputs.subject = subjectid
        atlasBCUTNode_W.inputs.field_template = {
            'l_accumben_ProbabilityMap':
            '%s/Atlas/AVG_l_accumben_ProbabilityMap.nii.gz',
            'r_accumben_ProbabilityMap':
            '%s/Atlas/AVG_r_accumben_ProbabilityMap.nii.gz',
            'l_caudate_ProbabilityMap':
            '%s/Atlas/AVG_l_caudate_ProbabilityMap.nii.gz',
            'r_caudate_ProbabilityMap':
            '%s/Atlas/AVG_r_caudate_ProbabilityMap.nii.gz',
            'l_globus_ProbabilityMap':
            '%s/Atlas/AVG_l_globus_ProbabilityMap.nii.gz',
            'r_globus_ProbabilityMap':
            '%s/Atlas/AVG_r_globus_ProbabilityMap.nii.gz',
            'l_hippocampus_ProbabilityMap':
            '%s/Atlas/AVG_l_hippocampus_ProbabilityMap.nii.gz',
            'r_hippocampus_ProbabilityMap':
            '%s/Atlas/AVG_r_hippocampus_ProbabilityMap.nii.gz',
            'l_putamen_ProbabilityMap':
            '%s/Atlas/AVG_l_putamen_ProbabilityMap.nii.gz',
            'r_putamen_ProbabilityMap':
            '%s/Atlas/AVG_r_putamen_ProbabilityMap.nii.gz',
            'l_thalamus_ProbabilityMap':
            '%s/Atlas/AVG_l_thalamus_ProbabilityMap.nii.gz',
            'r_thalamus_ProbabilityMap':
            '%s/Atlas/AVG_r_thalamus_ProbabilityMap.nii.gz',
            'phi': '%s/Atlas/AVG_phi.nii.gz',
            'rho': '%s/Atlas/AVG_rho.nii.gz',
            'theta': '%s/Atlas/AVG_theta.nii.gz'
        }
        atlasBCUTNode_W.inputs.template_args = {
            'l_accumben_ProbabilityMap': [['subject']],
            'r_accumben_ProbabilityMap': [['subject']],
            'l_caudate_ProbabilityMap': [['subject']],
            'r_caudate_ProbabilityMap': [['subject']],
            'l_globus_ProbabilityMap': [['subject']],
            'r_globus_ProbabilityMap': [['subject']],
            'l_hippocampus_ProbabilityMap': [['subject']],
            'r_hippocampus_ProbabilityMap': [['subject']],
            'l_putamen_ProbabilityMap': [['subject']],
            'r_putamen_ProbabilityMap': [['subject']],
            'l_thalamus_ProbabilityMap': [['subject']],
            'r_thalamus_ProbabilityMap': [['subject']],
            'phi': [['subject']],
            'rho': [['subject']],
            'theta': [['subject']]
        }
        atlasBCUTNode_W.inputs.template = '*'
        atlasBCUTNode_W.inputs.sort_filelist = True
        atlasBCUTNode_W.inputs.raise_on_empty = True

        baw201.connect([(atlasBCUTNode_W, segWF, [
            ('rho', 'inputspec.rho'), ('phi', 'inputspec.phi'),
            ('theta', 'inputspec.theta'),
            ('l_caudate_ProbabilityMap', 'inputspec.l_caudate_ProbabilityMap'),
            ('r_caudate_ProbabilityMap', 'inputspec.r_caudate_ProbabilityMap'),
            ('l_hippocampus_ProbabilityMap',
             'inputspec.l_hippocampus_ProbabilityMap'),
            ('r_hippocampus_ProbabilityMap',
             'inputspec.r_hippocampus_ProbabilityMap'),
            ('l_putamen_ProbabilityMap', 'inputspec.l_putamen_ProbabilityMap'),
            ('r_putamen_ProbabilityMap', 'inputspec.r_putamen_ProbabilityMap'),
            ('l_thalamus_ProbabilityMap',
             'inputspec.l_thalamus_ProbabilityMap'),
            ('r_thalamus_ProbabilityMap',
             'inputspec.r_thalamus_ProbabilityMap'),
            ('l_accumben_ProbabilityMap',
             'inputspec.l_accumben_ProbabilityMap'),
            ('r_accumben_ProbabilityMap',
             'inputspec.r_accumben_ProbabilityMap'),
            ('l_globus_ProbabilityMap', 'inputspec.l_globus_ProbabilityMap'),
            ('r_globus_ProbabilityMap', 'inputspec.r_globus_ProbabilityMap')
        ])])

        atlasBCUTNode_S = MakeAtlasNode(atlas_static_directory,
                                        'BBCUTAtlas_S{0}'.format(sessionid),
                                        ['S_BRAINSCutSupport'])
        baw201.connect(atlasBCUTNode_S, 'trainModelFile_txtD0060NT0060_gz',
                       segWF, 'inputspec.trainModelFile_txtD0060NT0060_gz')

        ## baw201_outputspec = baw201.get_node('outputspec')
        baw201.connect([
            (myLocalTCWF, segWF,
             [('outputspec.t1_average', 'inputspec.t1_average'),
              ('outputspec.atlasToSubjectRegistrationState',
               'inputspec.atlasToSubjectRegistrationState'),
              ('outputspec.outputLabels', 'inputspec.inputLabels'),
              ('outputspec.posteriorImages', 'inputspec.posteriorImages'),
              ('outputspec.outputHeadLabels', 'inputspec.inputHeadLabels')]),
            (myLocalLMIWF, segWF, [('outputspec.atlasToSubjectTransform',
                                    'inputspec.LMIatlasToSubject_tx')]),
            (FixWMNode, segWF, [('UpdatedPosteriorsList',
                                 'inputspec.UpdatedPosteriorsList')]),
        ])
        if not onlyT1:
            baw201.connect([(myLocalTCWF, segWF, [('outputspec.t2_average',
                                                   'inputspec.t2_average')])])

    if 'warp_atlas_to_subject' in master_config['components']:
        ##
        ##~/src/NEP-build/bin/BRAINSResample
        # --warpTransform AtlasToSubjectPreBABC_Composite.h5
        #  --inputVolume  /Shared/sinapse/CACHE/x20141001_KIDTEST_base_CACHE/Atlas/hncma-atlas.nii.gz
        #  --referenceVolume  /Shared/sinapse/CACHE/x20141001_KIDTEST_base_CACHE/singleSession_KID1_KT1/LandmarkInitialize/BROIAuto_cropped/Cropped_BCD_ACPC_Aligned.nii.gz
        # !--outputVolume hncma.nii.gz
        # !--interpolationMode NearestNeighbor
        # !--pixelType short
        ##
        ##

        ## TODO : SHOULD USE BRAINSCut transform that was refined even further!

        BResample = dict()
        AtlasLabelMapsToResample = [
            'hncma_atlas',
            'template_WMPM2_labels',
            'template_nac_labels',
        ]

        for atlasImage in AtlasLabelMapsToResample:
            BResample[atlasImage] = pe.Node(interface=BRAINSResample(),
                                            name="BRAINSResample_" +
                                            atlasImage)
            BResample[atlasImage].plugin_args = {
                'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1),
                'overwrite': True
            }
            BResample[atlasImage].inputs.pixelType = 'short'
            BResample[atlasImage].inputs.interpolationMode = 'NearestNeighbor'
            BResample[atlasImage].inputs.outputVolume = atlasImage + ".nii.gz"

            baw201.connect(myLocalTCWF, 'outputspec.t1_average',
                           BResample[atlasImage], 'referenceVolume')
            baw201.connect(inputsSpec, atlasImage, BResample[atlasImage],
                           'inputVolume')
            baw201.connect(myLocalTCWF, 'outputspec.atlasToSubjectTransform',
                           BResample[atlasImage], 'warpTransform')
            baw201.connect(BResample[atlasImage], 'outputVolume', DataSink,
                           'WarpedAtlas2Subject.@' + atlasImage)

        AtlasBinaryMapsToResample = [
            'template_rightHemisphere', 'template_leftHemisphere',
            'template_ventricles'
        ]

        for atlasImage in AtlasBinaryMapsToResample:
            BResample[atlasImage] = pe.Node(interface=BRAINSResample(),
                                            name="BRAINSResample_" +
                                            atlasImage)
            BResample[atlasImage].plugin_args = {
                'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1),
                'overwrite': True
            }
            BResample[atlasImage].inputs.pixelType = 'binary'
            BResample[
                atlasImage].inputs.interpolationMode = 'Linear'  ## Conversion to distance map, so use linear to resample distance map
            BResample[atlasImage].inputs.outputVolume = atlasImage + ".nii.gz"

            baw201.connect(myLocalTCWF, 'outputspec.t1_average',
                           BResample[atlasImage], 'referenceVolume')
            baw201.connect(inputsSpec, atlasImage, BResample[atlasImage],
                           'inputVolume')
            baw201.connect(myLocalTCWF, 'outputspec.atlasToSubjectTransform',
                           BResample[atlasImage], 'warpTransform')
            baw201.connect(BResample[atlasImage], 'outputVolume', DataSink,
                           'WarpedAtlas2Subject.@' + atlasImage)

        BRAINSCutAtlasImages = [
            'rho', 'phi', 'theta', 'l_caudate_ProbabilityMap',
            'r_caudate_ProbabilityMap', 'l_hippocampus_ProbabilityMap',
            'r_hippocampus_ProbabilityMap', 'l_putamen_ProbabilityMap',
            'r_putamen_ProbabilityMap', 'l_thalamus_ProbabilityMap',
            'r_thalamus_ProbabilityMap', 'l_accumben_ProbabilityMap',
            'r_accumben_ProbabilityMap', 'l_globus_ProbabilityMap',
            'r_globus_ProbabilityMap'
        ]
        for atlasImage in BRAINSCutAtlasImages:
            BResample[atlasImage] = pe.Node(interface=BRAINSResample(),
                                            name="BCUTBRAINSResample_" +
                                            atlasImage)
            BResample[atlasImage].plugin_args = {
                'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1),
                'overwrite': True
            }
            BResample[atlasImage].inputs.pixelType = 'float'
            BResample[
                atlasImage].inputs.interpolationMode = 'Linear'  ## Conversion to distance map, so use linear to resample distance map
            BResample[atlasImage].inputs.outputVolume = atlasImage + ".nii.gz"

            baw201.connect(myLocalTCWF, 'outputspec.t1_average',
                           BResample[atlasImage], 'referenceVolume')
            baw201.connect(atlasBCUTNode_W, atlasImage, BResample[atlasImage],
                           'inputVolume')
            baw201.connect(myLocalTCWF, 'outputspec.atlasToSubjectTransform',
                           BResample[atlasImage], 'warpTransform')
            baw201.connect(BResample[atlasImage], 'outputVolume', DataSink,
                           'WarpedAtlas2Subject.@' + atlasImage)

        WhiteMatterHemisphereNode = pe.Node(interface=Function(
            function=CreateLeftRightWMHemispheres,
            input_names=[
                'BRAINLABELSFile', 'HDCMARegisteredVentricleMaskFN',
                'LeftHemisphereMaskName', 'RightHemisphereMaskName',
                'WM_LeftHemisphereFileName', 'WM_RightHemisphereFileName'
            ],
            output_names=[
                'WM_LeftHemisphereFileName', 'WM_RightHemisphereFileName'
            ]),
                                            name="WhiteMatterHemisphere")
        WhiteMatterHemisphereNode.inputs.WM_LeftHemisphereFileName = "left_hemisphere_wm.nii.gz"
        WhiteMatterHemisphereNode.inputs.WM_RightHemisphereFileName = "right_hemisphere_wm.nii.gz"

        baw201.connect(myLocalBrainStemWF,
                       'outputspec.ouputTissuelLabelFilename',
                       WhiteMatterHemisphereNode, 'BRAINLABELSFile')
        baw201.connect(BResample['hncma_atlas'], 'outputVolume',
                       WhiteMatterHemisphereNode,
                       'HDCMARegisteredVentricleMaskFN')
        baw201.connect(BResample['template_leftHemisphere'], 'outputVolume',
                       WhiteMatterHemisphereNode, 'LeftHemisphereMaskName')
        baw201.connect(BResample['template_rightHemisphere'], 'outputVolume',
                       WhiteMatterHemisphereNode, 'RightHemisphereMaskName')

        baw201.connect(WhiteMatterHemisphereNode, 'WM_LeftHemisphereFileName',
                       DataSink, 'WarpedAtlas2Subject.@LeftHemisphereWM')
        baw201.connect(WhiteMatterHemisphereNode, 'WM_RightHemisphereFileName',
                       DataSink, 'WarpedAtlas2Subject.@RightHemisphereWM')

    if 'malf_2012_neuro' in master_config[
            'components']:  ## HACK Do MALF labeling
        good_subjects = [
            '1001', '1004', '1005', '1011', '1012', '1018', '1019', '1102',
            '1103', '1104', '1120', '1129', '1009', '1010', '1013', '1014',
            '1036', '1109', '1117', '1122'
        ]

        ## HACK FOR NOW SHOULD BE MORE ELEGANT FROM THE .config file
        BASE_DATA_GRABBER_DIR = '/Shared/johnsonhj/HDNI/Neuromorphometrics/20141116_Neuromorphometrics_base_Results/Neuromorphometrics/2012Subscription'

        myLocalMALF = CreateMALFWorkflow("MALF", master_config, good_subjects,
                                         BASE_DATA_GRABBER_DIR)
        baw201.connect(myLocalTCWF, 'outputspec.t1_average', myLocalMALF,
                       'inputspec.subj_t1_image')
        baw201.connect(myLocalLMIWF,
                       'outputspec.outputLandmarksInACPCAlignedSpace',
                       myLocalMALF, 'inputspec.subj_lmks')
        baw201.connect(atlasBCDNode_S, 'template_weights_50Lmks_wts',
                       myLocalMALF, 'inputspec.atlasWeightFilename')
        baw201.connect(myLocalMALF, 'outputspec.MALF_neuro2012_labelmap',
                       DataSink, 'TissueClassify.@MALF_neuro2012_labelmap')

    return baw201
Example #10

'''
==============
META NODES
==============
'''
# MASTER node
masterpipeline = pe.Workflow(name="MasterWorkflow")
masterpipeline.base_dir = workingdir + 'MFX'
masterpipeline.config = {"execution": {"crashdump_dir": crashRecordsDir}}

# 2nd level dataGrabber
contrast_ids = range(0, len(contrasts))
l2source = pe.Node(nio.DataGrabber(
    infields=['con'],
    outfields=['copes', 'varcopes', 'matrix', 'field', 'fieldcoeff']),
                   name="l2source")

l2source.inputs.base_directory = withinSubjectResults_dir
l2source.inputs.template = '*'
l2source.inputs.field_template = dict(
    copes='%s/copes/%s/contrast%d/cope1.nii.gz',
    varcopes='%s/varcopes/%s/contrast%d/varcope1.nii.gz',
    matrix='%s/registration/struct2mni/MATRIX/%s/*.mat',
    field='%s/registration/struct2mni/FIELD/%s/*.nii.gz',
    fieldcoeff='%s/registration/struct2mni/FIELDCOEFF/%s/*.nii.gz')
l2source.inputs.template_args = dict(
    copes=[[subject_list, subject_list, 'con']],
    varcopes=[[subject_list, subject_list, 'con']],
    matrix=[[subject_list, subject_list]],
Example #11
def analyze_openfmri_dataset(data_dir,
                             subject=None,
                             model_id=None,
                             task_id=None,
                             output_dir=None,
                             subj_prefix='*',
                             hpcutoff=120.,
                             use_derivatives=True,
                             fwhm=6.0,
                             subjects_dir=None,
                             target=None):
    """Analyzes an open fmri dataset

    Parameters
    ----------

    data_dir : str
        Path to the base data directory

    work_dir : str
        Nipype working directory (defaults to cwd)
    """
    """
    Load nipype workflows
    """

    preproc = create_featreg_preproc(whichvol='first')
    modelfit = create_modelfit_workflow()
    fixed_fx = create_fixed_effects_flow()
    if subjects_dir:
        registration = create_fs_reg_workflow()
    else:
        registration = create_reg_workflow()
    """
    Remove the plotting connection so that plot iterables don't propagate
    to the model stage
    """

    preproc.disconnect(preproc.get_node('plot_motion'), 'out_file',
                       preproc.get_node('outputspec'), 'motion_plots')
    """
    Set up openfmri data specific components
    """

    subjects = sorted([
        path.split(os.path.sep)[-1]
        for path in glob(os.path.join(data_dir, subj_prefix))
    ])

    infosource = pe.Node(
        niu.IdentityInterface(fields=['subject_id', 'model_id', 'task_id']),
        name='infosource')
    if not subject:
        infosource.iterables = [('subject_id', subjects),
                                ('model_id', [model_id]), ('task_id', task_id)]
    else:
        infosource.iterables = [
            ('subject_id',
             [subjects[subjects.index(subj)] for subj in subject]),
            ('model_id', [model_id]), ('task_id', task_id)
        ]

    subjinfo = pe.Node(niu.Function(
        input_names=['subject_id', 'base_dir', 'task_id', 'model_id'],
        output_names=['run_id', 'conds', 'TR'],
        function=get_subjectinfo),
                       name='subjectinfo')
    subjinfo.inputs.base_dir = data_dir
    """
    Return data components as anat, bold and behav
    """

    contrast_file = os.path.join(data_dir, 'models', 'model%03d' % model_id,
                                 'task_contrasts.txt')
    has_contrast = os.path.exists(contrast_file)
    if has_contrast:
        datasource = pe.Node(nio.DataGrabber(
            infields=['subject_id', 'run_id', 'task_id', 'model_id'],
            outfields=['anat', 'bold', 'behav', 'contrasts']),
                             name='datasource')
    else:
        datasource = pe.Node(nio.DataGrabber(
            infields=['subject_id', 'run_id', 'task_id', 'model_id'],
            outfields=['anat', 'bold', 'behav']),
                             name='datasource')
    datasource.inputs.base_directory = data_dir
    datasource.inputs.template = '*'

    if has_contrast:
        datasource.inputs.field_template = {
            'anat': '%s/anatomy/T1_001.nii.gz',
            'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
            'behav': ('%s/model/model%03d/onsets/task%03d_'
                      'run%03d/cond*.txt'),
            'contrasts': ('models/model%03d/'
                          'task_contrasts.txt')
        }
        datasource.inputs.template_args = {
            'anat': [['subject_id']],
            'bold': [['subject_id', 'task_id']],
            'behav': [['subject_id', 'model_id', 'task_id', 'run_id']],
            'contrasts': [['model_id']]
        }
    else:
        datasource.inputs.field_template = {
            'anat': '%s/anatomy/T1_001.nii.gz',
            'bold': '%s/BOLD/task%03d_r*/bold.nii.gz',
            'behav': ('%s/model/model%03d/onsets/task%03d_'
                      'run%03d/cond*.txt')
        }
        datasource.inputs.template_args = {
            'anat': [['subject_id']],
            'bold': [['subject_id', 'task_id']],
            'behav': [['subject_id', 'model_id', 'task_id', 'run_id']]
        }

    datasource.inputs.sort_filelist = True
    """
    Create meta workflow
    """

    wf = pe.Workflow(name='openfmri')
    wf.connect(infosource, 'subject_id', subjinfo, 'subject_id')
    wf.connect(infosource, 'model_id', subjinfo, 'model_id')
    wf.connect(infosource, 'task_id', subjinfo, 'task_id')
    wf.connect(infosource, 'subject_id', datasource, 'subject_id')
    wf.connect(infosource, 'model_id', datasource, 'model_id')
    wf.connect(infosource, 'task_id', datasource, 'task_id')
    wf.connect(subjinfo, 'run_id', datasource, 'run_id')
    wf.connect([
        (datasource, preproc, [('bold', 'inputspec.func')]),
    ])

    def get_highpass(TR, hpcutoff):
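        # convert the cutoff (in seconds) to the volume-based value expected
        # by the FSL highpass filter: cutoff / (2 * TR)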
        return hpcutoff / (2. * TR)

    gethighpass = pe.Node(niu.Function(input_names=['TR', 'hpcutoff'],
                                       output_names=['highpass'],
                                       function=get_highpass),
                          name='gethighpass')
    wf.connect(subjinfo, 'TR', gethighpass, 'TR')
    wf.connect(gethighpass, 'highpass', preproc, 'inputspec.highpass')
    """
    Setup a basic set of contrasts, a t-test per condition
    """

    def get_contrasts(contrast_file, task_id, conds):
        import numpy as np
        import os
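        # each row of the contrast file is expected to look like:
        #   task001 <contrast_name> <w1> <w2> ...   (one weight per condition)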
        contrast_def = []
        if os.path.exists(contrast_file):
            with open(contrast_file, 'rt') as fp:
                contrast_def.extend([
                    np.array(row.split()) for row in fp.readlines()
                    if row.strip()
                ])
        contrasts = []
        for row in contrast_def:
            if row[0] != 'task%03d' % task_id:
                continue
            con = [
                row[1], 'T', ['cond%03d' % (i + 1) for i in range(len(conds))],
                row[2:].astype(float).tolist()
            ]
            contrasts.append(con)
        # add auto contrasts for each column
        for i, cond in enumerate(conds):
            con = [cond, 'T', ['cond%03d' % (i + 1)], [1]]
            contrasts.append(con)
        return contrasts

    contrastgen = pe.Node(niu.Function(
        input_names=['contrast_file', 'task_id', 'conds'],
        output_names=['contrasts'],
        function=get_contrasts),
                          name='contrastgen')

    art = pe.MapNode(
        interface=ra.ArtifactDetect(use_differences=[True, False],
                                    use_norm=True,
                                    norm_threshold=1,
                                    zintensity_threshold=3,
                                    parameter_source='FSL',
                                    mask_type='file'),
        iterfield=['realigned_files', 'realignment_parameters', 'mask_file'],
        name="art")

    modelspec = pe.Node(interface=model.SpecifyModel(), name="modelspec")
    modelspec.inputs.input_units = 'secs'

    def check_behav_list(behav, run_id, conds):
        import numpy as np
        num_conds = len(conds)
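        # reshape the flat list of onset (cond*.txt) files into one sublist
        # of per-condition files per run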
        if isinstance(behav, (str, bytes)):
            behav = [behav]
        behav_array = np.array(behav).flatten()
        num_elements = behav_array.shape[0]
        return behav_array.reshape(int(num_elements / num_conds),
                                   num_conds).tolist()

    reshape_behav = pe.Node(niu.Function(
        input_names=['behav', 'run_id', 'conds'],
        output_names=['behav'],
        function=check_behav_list),
                            name='reshape_behav')

    wf.connect(subjinfo, 'TR', modelspec, 'time_repetition')
    wf.connect(datasource, 'behav', reshape_behav, 'behav')
    wf.connect(subjinfo, 'run_id', reshape_behav, 'run_id')
    wf.connect(subjinfo, 'conds', reshape_behav, 'conds')
    wf.connect(reshape_behav, 'behav', modelspec, 'event_files')

    wf.connect(subjinfo, 'TR', modelfit, 'inputspec.interscan_interval')
    wf.connect(subjinfo, 'conds', contrastgen, 'conds')
    if has_contrast:
        wf.connect(datasource, 'contrasts', contrastgen, 'contrast_file')
    else:
        contrastgen.inputs.contrast_file = ''
    wf.connect(infosource, 'task_id', contrastgen, 'task_id')
    wf.connect(contrastgen, 'contrasts', modelfit, 'inputspec.contrasts')

    wf.connect([(preproc, art,
                 [('outputspec.motion_parameters', 'realignment_parameters'),
                  ('outputspec.realigned_files', 'realigned_files'),
                  ('outputspec.mask', 'mask_file')]),
                (preproc, modelspec,
                 [('outputspec.highpassed_files', 'functional_runs'),
                  ('outputspec.motion_parameters', 'realignment_parameters')]),
                (art, modelspec, [('outlier_files', 'outlier_files')]),
                (modelspec, modelfit, [('session_info',
                                        'inputspec.session_info')]),
                (preproc, modelfit, [('outputspec.highpassed_files',
                                      'inputspec.functional_data')])])

    # Compute TSNR on realigned data, regressing out polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(preproc, "outputspec.realigned_files", tsnr, "in_file")

    # Compute the median image across runs
    calc_median = Node(CalculateMedian(), name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
    """
    Reorder the copes so that they combine across runs
    """

    def sort_copes(copes, varcopes, contrasts):
        import numpy as np
        if not isinstance(copes, list):
            copes = [copes]
            varcopes = [varcopes]
        num_copes = len(contrasts)
        n_runs = len(copes)
        all_copes = np.array(copes).flatten()
        all_varcopes = np.array(varcopes).flatten()
        outcopes = all_copes.reshape(int(len(all_copes) / num_copes),
                                     num_copes).T.tolist()
        outvarcopes = all_varcopes.reshape(int(len(all_varcopes) / num_copes),
                                           num_copes).T.tolist()
        return outcopes, outvarcopes, n_runs

    cope_sorter = pe.Node(niu.Function(
        input_names=['copes', 'varcopes', 'contrasts'],
        output_names=['copes', 'varcopes', 'n_runs'],
        function=sort_copes),
                          name='cope_sorter')

    pickfirst = lambda x: x[0]

    wf.connect(contrastgen, 'contrasts', cope_sorter, 'contrasts')
    wf.connect([(preproc, fixed_fx, [(('outputspec.mask', pickfirst),
                                      'flameo.mask_file')]),
                (modelfit, cope_sorter, [('outputspec.copes', 'copes')]),
                (modelfit, cope_sorter, [('outputspec.varcopes', 'varcopes')]),
                (cope_sorter, fixed_fx, [('copes', 'inputspec.copes'),
                                         ('varcopes', 'inputspec.varcopes'),
                                         ('n_runs', 'l2model.num_copes')]),
                (modelfit, fixed_fx, [
                    ('outputspec.dof_file', 'inputspec.dof_files'),
                ])])

    wf.connect(calc_median, 'median_file', registration,
               'inputspec.mean_image')
    if subjects_dir:
        wf.connect(infosource, 'subject_id', registration,
                   'inputspec.subject_id')
        registration.inputs.inputspec.subjects_dir = subjects_dir
        registration.inputs.inputspec.target_image = fsl.Info.standard_image(
            'MNI152_T1_2mm_brain.nii.gz')
        if target:
            registration.inputs.inputspec.target_image = target
    else:
        wf.connect(datasource, 'anat', registration,
                   'inputspec.anatomical_image')
        registration.inputs.inputspec.target_image = fsl.Info.standard_image(
            'MNI152_T1_2mm.nii.gz')
        registration.inputs.inputspec.target_image_brain = fsl.Info.standard_image(
            'MNI152_T1_2mm_brain.nii.gz')
        registration.inputs.inputspec.config_file = 'T1_2_MNI152_2mm'

    def merge_files(copes, varcopes, zstats):
        out_files = []
        splits = []
        out_files.extend(copes)
        splits.append(len(copes))
        out_files.extend(varcopes)
        splits.append(len(varcopes))
        out_files.extend(zstats)
        splits.append(len(zstats))
        return out_files, splits

    mergefunc = pe.Node(niu.Function(
        input_names=['copes', 'varcopes', 'zstats'],
        output_names=['out_files', 'splits'],
        function=merge_files),
                        name='merge_files')
    wf.connect([(fixed_fx.get_node('outputspec'), mergefunc, [
        ('copes', 'copes'),
        ('varcopes', 'varcopes'),
        ('zstats', 'zstats'),
    ])])
    wf.connect(mergefunc, 'out_files', registration, 'inputspec.source_files')

    def split_files(in_files, splits):
        copes = in_files[:splits[0]]
        varcopes = in_files[splits[0]:(splits[0] + splits[1])]
        zstats = in_files[(splits[0] + splits[1]):]
        return copes, varcopes, zstats

    splitfunc = pe.Node(niu.Function(
        input_names=['in_files', 'splits'],
        output_names=['copes', 'varcopes', 'zstats'],
        function=split_files),
                        name='split_files')
    wf.connect(mergefunc, 'splits', splitfunc, 'splits')
    wf.connect(registration, 'outputspec.transformed_files', splitfunc,
               'in_files')

    if subjects_dir:
        get_roi_mean = pe.MapNode(fs.SegStats(default_color_table=True),
                                  iterfield=['in_file'],
                                  name='get_aparc_means')
        get_roi_mean.inputs.avgwf_txt_file = True
        wf.connect(fixed_fx.get_node('outputspec'), 'copes', get_roi_mean,
                   'in_file')
        wf.connect(registration, 'outputspec.aparc', get_roi_mean,
                   'segmentation_file')

        get_roi_tsnr = pe.MapNode(fs.SegStats(default_color_table=True),
                                  iterfield=['in_file'],
                                  name='get_aparc_tsnr')
        get_roi_tsnr.inputs.avgwf_txt_file = True
        wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file')
        wf.connect(registration, 'outputspec.aparc', get_roi_tsnr,
                   'segmentation_file')
    """
    Connect to a datasink
    """

    def get_subs(subject_id, conds, run_id, model_id, task_id):
        subs = [('_subject_id_%s_' % subject_id, '')]
        subs.append(('_model_id_%d' % model_id, 'model%03d' % model_id))
        subs.append(('task_id_%d/' % task_id, '/task%03d_' % task_id))
        subs.append(
            ('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_warp', 'mean'))
        subs.append(('bold_dtype_mcf_mask_smooth_mask_gms_tempfilt_mean_flirt',
                     'affine'))

        for i in range(len(conds)):
            subs.append(('_flameo%d/cope1.' % i, 'cope%02d.' % (i + 1)))
            subs.append(('_flameo%d/varcope1.' % i, 'varcope%02d.' % (i + 1)))
            subs.append(('_flameo%d/zstat1.' % i, 'zstat%02d.' % (i + 1)))
            subs.append(('_flameo%d/tstat1.' % i, 'tstat%02d.' % (i + 1)))
            subs.append(('_flameo%d/res4d.' % i, 'res4d%02d.' % (i + 1)))
            subs.append(('_warpall%d/cope1_warp.' % i, 'cope%02d.' % (i + 1)))
            subs.append(('_warpall%d/varcope1_warp.' % (len(conds) + i),
                         'varcope%02d.' % (i + 1)))
            subs.append(('_warpall%d/zstat1_warp.' % (2 * len(conds) + i),
                         'zstat%02d.' % (i + 1)))
            subs.append(('_warpall%d/cope1_trans.' % i, 'cope%02d.' % (i + 1)))
            subs.append(('_warpall%d/varcope1_trans.' % (len(conds) + i),
                         'varcope%02d.' % (i + 1)))
            subs.append(('_warpall%d/zstat1_trans.' % (2 * len(conds) + i),
                         'zstat%02d.' % (i + 1)))
            subs.append(('__get_aparc_means%d/' % i, '/cope%02d_' % (i + 1)))

        for i, run_num in enumerate(run_id):
            subs.append(('__get_aparc_tsnr%d/' % i, '/run%02d_' % run_num))
            subs.append(('__art%d/' % i, '/run%02d_' % run_num))
            subs.append(('__dilatemask%d/' % i, '/run%02d_' % run_num))
            subs.append(('__realign%d/' % i, '/run%02d_' % run_num))
            subs.append(('__modelgen%d/' % i, '/run%02d_' % run_num))
        subs.append(('/model%03d/task%03d/' % (model_id, task_id), '/'))
        subs.append(('/model%03d/task%03d_' % (model_id, task_id), '/'))
        subs.append(('_bold_dtype_mcf_bet_thresh_dil', '_mask'))
        subs.append(('_output_warped_image', '_anat2target'))
        subs.append(('median_flirt_brain_mask', 'median_brain_mask'))
        subs.append(('median_bbreg_brain_mask', 'median_brain_mask'))
        return subs

    subsgen = pe.Node(niu.Function(
        input_names=['subject_id', 'conds', 'run_id', 'model_id', 'task_id'],
        output_names=['substitutions'],
        function=get_subs),
                      name='subsgen')
    wf.connect(subjinfo, 'run_id', subsgen, 'run_id')

    datasink = pe.Node(interface=nio.DataSink(), name="datasink")
    wf.connect(infosource, 'subject_id', datasink, 'container')
    wf.connect(infosource, 'subject_id', subsgen, 'subject_id')
    wf.connect(infosource, 'model_id', subsgen, 'model_id')
    wf.connect(infosource, 'task_id', subsgen, 'task_id')
    wf.connect(contrastgen, 'contrasts', subsgen, 'conds')
    wf.connect(subsgen, 'substitutions', datasink, 'substitutions')
    wf.connect([(fixed_fx.get_node('outputspec'), datasink,
                 [('res4d', 'res4d'), ('copes', 'copes'),
                  ('varcopes', 'varcopes'), ('zstats', 'zstats'),
                  ('tstats', 'tstats')])])
    wf.connect([(modelfit.get_node('modelgen'), datasink, [
        ('design_cov', 'qa.model'),
        ('design_image', 'qa.model.@matrix_image'),
        ('design_file', 'qa.model.@matrix'),
    ])])
    wf.connect([(preproc, datasink,
                 [('outputspec.motion_parameters', 'qa.motion'),
                  ('outputspec.motion_plots', 'qa.motion.plots'),
                  ('outputspec.mask', 'qa.mask')])])
    wf.connect(registration, 'outputspec.mean2anat_mask', datasink,
               'qa.mask.mean2anat')
    wf.connect(art, 'norm_files', datasink, 'qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.anat2target', datasink,
               'qa.anat2target')
    wf.connect(tsnr, 'tsnr_file', datasink, 'qa.tsnr.@map')
    if subjects_dir:
        wf.connect(registration, 'outputspec.min_cost_file', datasink,
                   'qa.mincost')
        wf.connect([(get_roi_tsnr, datasink, [('avgwf_txt_file', 'qa.tsnr'),
                                              ('summary_file',
                                               'qa.tsnr.@summary')])])
        wf.connect([(get_roi_mean, datasink, [('avgwf_txt_file', 'copes.roi'),
                                              ('summary_file',
                                               'copes.roi.@summary')])])
    wf.connect([(splitfunc, datasink, [
        ('copes', 'copes.mni'),
        ('varcopes', 'varcopes.mni'),
        ('zstats', 'zstats.mni'),
    ])])
    wf.connect(calc_median, 'median_file', datasink, 'mean')
    wf.connect(registration, 'outputspec.transformed_mean', datasink,
               'mean.mni')
    wf.connect(registration, 'outputspec.func2anat_transform', datasink,
               'xfm.mean2anat')
    wf.connect(registration, 'outputspec.anat2target_transform', datasink,
               'xfm.anat2target')
    """
    Set processing parameters
    """

    preproc.inputs.inputspec.fwhm = fwhm
    gethighpass.inputs.hpcutoff = hpcutoff
    modelspec.inputs.high_pass_filter_cutoff = hpcutoff
    modelfit.inputs.inputspec.bases = {'dgamma': {'derivs': use_derivatives}}
    modelfit.inputs.inputspec.model_serial_correlations = True
    modelfit.inputs.inputspec.film_threshold = 1000

    datasink.inputs.base_directory = output_dir
    return wf
Example #12
def MakeAtlasNode(atlasDirectory, name, atlasParts):
    """ Make an atlas node that contains the elements requested in the atlasParts section
        This will allow more fine grained data grabbers to be used, thereby allowing enhanced
        compartmentalization of algorithmic components.

        (S_) Static files that are relevant for any atlas
        (W_) Files that require warping to a subject-specific atlas

        KEY:
          [S|W]_BRAINSABCSupport
          [S|W]_BRAINSCutSupport
          [S|W]_BCDSupport
          [S|W]_LabelMapsSupport
          [S|W]_ExtraSupport
    """

    import nipype.interfaces.io as nio  # Data i/o
    import nipype.pipeline.engine as pe  # pypeline engine
    import os

    valid_choices = [
        'S_BRAINSABCSupport', 'S_BRAINSCutSupport', 'S_BCDSupport',
        'S_LabelMapsSupport', 'S_ExtraSupport', 'W_BRAINSABCSupport',
        'W_BRAINSCutSupport', 'W_BCDSupport', 'W_LabelMapsSupport',
        'W_ExtraSupport'
    ]
    for ap in atlasParts:
        assert ap in valid_choices, "ERROR: Invalid choice: {0} not in {1}".format(
            ap, valid_choices)

    # Generate by running a file system list "ls -1 $AtlasDir *.nii.gz *.xml *.fcsv *.wgts"
    # atlas_file_names=atlas_file_list.split(' ')
    atlas_file_names = list()
    if 'S_BRAINSABCSupport' in atlasParts:
        atlas_file_names.extend(["ExtendedAtlasDefinition.xml.in"])
    if 'W_BRAINSABCSupport' in atlasParts:
        atlas_file_names.extend(["ExtendedAtlasDefinition.xml"])
    if 'S_BRAINSCutSupport' in atlasParts:
        atlas_file_names.extend(
            ["modelFiles/trainModelFile.txtD0060NT0060.gz"])
    if 'W_BRAINSCutSupport' in atlasParts:
        atlas_file_names.extend([
            "hncma-atlas.nii.gz",
            "template_t1.nii.gz",
            "probabilityMaps/l_accumben_ProbabilityMap.nii.gz",
            "probabilityMaps/r_accumben_ProbabilityMap.nii.gz",
            "probabilityMaps/l_caudate_ProbabilityMap.nii.gz",
            "probabilityMaps/r_caudate_ProbabilityMap.nii.gz",
            "probabilityMaps/l_globus_ProbabilityMap.nii.gz",
            "probabilityMaps/r_globus_ProbabilityMap.nii.gz",
            "probabilityMaps/l_hippocampus_ProbabilityMap.nii.gz",
            "probabilityMaps/r_hippocampus_ProbabilityMap.nii.gz",
            "probabilityMaps/l_putamen_ProbabilityMap.nii.gz",
            "probabilityMaps/r_putamen_ProbabilityMap.nii.gz",
            "probabilityMaps/l_thalamus_ProbabilityMap.nii.gz",
            "probabilityMaps/r_thalamus_ProbabilityMap.nii.gz",
            "spatialImages/phi.nii.gz",
            "spatialImages/rho.nii.gz",
            "spatialImages/theta.nii.gz",
        ])
    if 'S_BCDSupport' in atlasParts:
        atlas_file_names.extend([
            "20141004_BCD/LLSModel_50Lmks.h5", "20141004_BCD/T1_50Lmks.mdl",
            "20141004_BCD/template_weights_50Lmks.wts"
        ])
    if 'W_BCDSupport' in atlasParts:
        atlas_file_names.extend([
            "template_t1.nii.gz",
            "20141004_BCD/template_landmarks_50Lmks.fcsv",
        ])
    if 'W_LabelMapsSupport' in atlasParts:
        atlas_file_names.extend([
            "hncma-atlas.nii.gz", "hncma-atlas-lut-mod2.ctbl",
            "template_rightHemisphere.nii.gz",
            "template_leftHemisphere.nii.gz", "template_WMPM2_labels.nii.gz",
            "template_WMPM2_labels.txt", "template_nac_labels.nii.gz",
            "template_nac_labels.txt", "template_ventricles.nii.gz"
        ])
    if 'W_ExtraSupport' in atlasParts:
        atlas_file_names.extend([
            "tempNOTVBBOX.nii.gz", "template_ABC_labels.nii.gz",
            "avg_t1.nii.gz", "avg_t2.nii.gz", "template_brain.nii.gz",
            "template_cerebellum.nii.gz", "template_class.nii.gz",
            "template_headregion.nii.gz", "template_t1.nii.gz",
            "template_t2.nii.gz", "template_t1_clipped.nii.gz",
            "template_t2_clipped.nii.gz"
        ])
    atlas_file_names = list(set(atlas_file_names))  # Make a unique listing
    # Remove filename extensions for images, but replace . with _ for other file types
    atlas_file_keys = [
        os.path.basename(fn).replace('.nii.gz',
                                     '').replace('.', '_').replace('-', '_')
        for fn in atlas_file_names
    ]
    atlas_outputs_filename_match = dict(
        list(zip(atlas_file_keys, atlas_file_names)))

    node = pe.Node(interface=nio.DataGrabber(force_output=False,
                                             outfields=atlas_file_keys),
                   run_without_submitting=True,
                   name=name)
    node.inputs.base_directory = atlasDirectory
    node.inputs.sort_filelist = False
    # node.inputs.raise_on_empty = True
    node.inputs.template = '*'
    ## Filenames are resolved relative to the atlasDirectory set as base_directory
    atlas_search_paths = ['{0}'.format(fn) for fn in atlas_file_names]
    node.inputs.field_template = dict(
        list(zip(atlas_file_keys, atlas_search_paths)))
    ## No substitution arguments are needed; each field template is a literal path
    atlas_template_args_match = [
        [[]] for i in atlas_file_keys
    ]  # build a list of proper length with repeated entries
    node.inputs.template_args = dict(
        list(zip(atlas_file_keys, atlas_template_args_match)))
    # print "+" * 100
    # print node.inputs
    # print "-" * 100
    return node
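

# A minimal usage sketch, not from the original source: the atlas directory
# below is a placeholder and must point at a real BRAINSTools atlas tree.
BAtlas = MakeAtlasNode('/path/to/Atlas/Atlas_20131115', 'BAtlas',
                       ['S_BCDSupport', 'W_BCDSupport'])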
Example #13
File: t1.py  Project: bpinsard/misc
def t1_pipeline(name='t1_preproc'):
    inputnode = pe.Node(utility.IdentityInterface(fields=['t1_dicom_dir']),
                        name='inputspec')
    outputnode = pe.Node(utility.IdentityInterface(
        fields=['mask', 'corrected_t1', 'corrected_t1_brain']),
                         name='outputspec')

    n_t1_dicom_files = pe.Node(nio.DataGrabber(sort_filelist=True, ),
                               name='t1_dicom_files')

    n_to3d_t1 = pe.Node(afni.To3D(filetype='anat',
                                  environ=dict(AFNI_DICOM_RESCALE='YES')),
                        name='to3d_t1')

    n_reorient_t1 = pe.Node(afni.Resample(orientation='RPI'),
                            name='reorient_t1')

    n_autobox_t1 = pe.Node(afni.Autobox(padding=5), name='autobox_t1')

    n_zcut_t1 = pe.Node(afni.ZCutUp(outputtype='NIFTI'), name='zcut_t1')

    n_newsegment_t1 = pe.Node(spm.NewSegment(
        write_deformation_fields=[True, True],
        channel_info=(0.0001, 60, (True, True))),
                              name='newsegment_t1')

    n_seg2mask = pe.Node(fsl.MultiImageMaths(
        output_type='NIFTI',
        op_string=' -add %s -add %s -thr 0.8 -bin -eroF -dilF -dilF'),
                         name='seg2mask')

    n_mask_brain = pe.Node(interface=fsl.ImageMaths(op_string='-mul',
                                                    suffix='_brain',
                                                    output_type='NIFTI'),
                           name='mask_brain')

    w = pe.Workflow(name=name)

    def zmax2keep(z):
        return '%d %d' % (max(0, z - 174), z)

    w.connect([
        (inputnode, n_t1_dicom_files, [('t1_dicom_dir', 'base_directory')]),
        (n_t1_dicom_files, n_to3d_t1,
         [(('outfiles', sort_t1_files), 'in_files'),
          (('outfiles', t1_filename, 'nii.gz'), 'out_file')]),
        (n_to3d_t1, n_reorient_t1, [('out_file', 'in_file')]),
        (n_reorient_t1, n_autobox_t1, [('out_file', 'in_file')]),
        (n_reorient_t1, n_zcut_t1, [('out_file', 'in_file')]),
        (n_autobox_t1, n_zcut_t1, [(('z_max', zmax2keep), 'keep')]),
        (n_zcut_t1, n_newsegment_t1, [('out_file', 'channel_files')]),
        (n_newsegment_t1, n_seg2mask, [(('native_class_images', getitem_rec, 0,
                                         0), 'in_file'),
                                       (('native_class_images', getitem_rec,
                                         slice(1, 3), 0), 'operand_files')]),
        (n_zcut_t1, n_seg2mask, [(('out_file', fname_presuffix_basename, '',
                                   '_mask', '.'), 'out_file')]),
        (n_newsegment_t1, n_mask_brain, [('bias_corrected_images', 'in_file')
                                         ]),
        (n_seg2mask, n_mask_brain, [('out_file', 'in_file2')]),
        (n_seg2mask, outputnode, [('out_file', 'mask')]),
        (n_newsegment_t1, outputnode, [('bias_corrected_images',
                                        'corrected_t1')])
    ])
    return w
Example #14
def get_wf():

    wf = pe.Workflow(name="main_workflow")
    wf.base_dir = os.path.join(workingdir, "similarity_pipeline")
    wf.config['execution']['crashdump_dir'] = wf.base_dir + "/crash_files"

    ##Infosource##
    subject_id_infosource = pe.Node(
        util.IdentityInterface(fields=['subject_id']),
        name="subject_id_infosource")
    subject_id_infosource.iterables = ('subject_id', subjects)

    #session_infosource = pe.Node(util.IdentityInterface(fields=['session']), name="session_infosource")
    #session_infosource.iterables = ('session', sessions)

    fs_infosource = pe.Node(util.IdentityInterface(fields=['fs']),
                            name="fs_infosource")
    fs_infosource.iterables = ('fs', fsaverage)

    hemi_infosource = pe.Node(util.IdentityInterface(fields=['hemi']),
                              name="hemi_infosource")
    hemi_infosource.iterables = ('hemi', hemispheres)

    sim_infosource = pe.Node(util.IdentityInterface(fields=['sim']),
                             name="sim_infosource")
    sim_infosource.iterables = ('sim', similarity_types)

    ##Datagrabber##
    datagrabber = pe.Node(nio.DataGrabber(
        infields=['subject_id', 'hemi', 'fs'],
        outfields=['sxfm', 'volumedata', 'regfile', 'parcfile']),
                          name="datagrabber")
    datagrabber.inputs.base_directory = '/'
    datagrabber.inputs.template = '*'
    datagrabber.inputs.field_template = similarity_dg_template
    datagrabber.inputs.template_args = similarity_dg_args
    datagrabber.inputs.sort_filelist = True

    wf.connect(subject_id_infosource, 'subject_id', datagrabber, 'subject_id')
    #wf.connect(session_infosource, 'session', datagrabber, 'session')
    wf.connect(fs_infosource, 'fs', datagrabber, 'fs')
    wf.connect(hemi_infosource, 'hemi', datagrabber, 'hemi')

    ##mask surface##
    Smask = pe.Node(MaskSurface(), name='surface_mask')
    Smask.inputs.sourcelabels = surface_sourcelabels
    Smask.inputs.targetlabels = surface_targetlabels
    Smask.inputs.freesurferdir = freesurferdir
    wf.connect(hemi_infosource, 'hemi', Smask, 'hemi')
    wf.connect(fs_infosource, 'fs', Smask, 'fs')
    wf.connect(datagrabber, 'sxfm', Smask, 'sxfmout')

    ##mask volume##
    Vmask = pe.Node(MaskVolume(), name='volume_mask')
    Vmask.inputs.vol_source = volume_sourcelabels
    Vmask.inputs.vol_target = volume_targetlabels
    wf.connect(datagrabber, 'volumedata', Vmask, 'preprocessedfile')
    wf.connect(datagrabber, 'regfile', Vmask, 'regfile')
    wf.connect(datagrabber, 'parcfile', Vmask, 'parcfile')

    ##concatenate data & run similarity##
    concat = pe.JoinNode(Concat(),
                         joinsource="hemi_infosource",
                         joinfield=["surface_input"],
                         unique=True,
                         name='concat')
    wf.connect(Vmask, 'volume_input_mask', concat, 'volume_input')
    wf.connect(Vmask, 'volume_target_mask', concat, 'volume_target_mask')
    wf.connect(Smask, 'surface_data', concat, 'surface_input')
    wf.connect(Smask, 'surface_mask', concat, 'surface_mask')
    wf.connect(sim_infosource, 'sim', concat, 'sim_type')

    ##Datasink##
    ds = pe.Node(nio.DataSink(), name="datasink")
    ds.inputs.base_directory = similaritydir
    wf.connect(concat, 'simmatrix', ds, 'similarity')
    wf.connect(concat, 'maskindex', ds, 'maskindex')
    wf.connect(concat, 'targetmask', ds, 'targetmask')
    wf.write_graph()
    return wf
Example #15
def create_pipeline_SS_TV(bids_dir, work_dir, out_dir, subjects, sessions,
                          mag_match_pattern, phase_match_pattern,
                          keep_unnecessary_outputs, FAST_bias_iters,
                          FAST_bias_lowpass, FAST_num_classes, BET_frac,
                          freq_weights__snr_window_sz, truncate_echo,
                          SS_TV_lagrange_parameter, B0_dir,
                          scnd_diff_reliability_thresh_trim,
                          scnd_diff_reliability_thresh_noise):
    layout = BIDSLayout(bids_dir)

    #can we do this more elegantly?
    first_echo_files = []
    for subject in subjects:
        if layout.get_sessions(subject=subject) == []:
            if sessions == ['.*']:
                first_echo_files = first_echo_files + layout.get(
                    subject=subject,
                    modality='anat',
                    extensions='.*part-phase.*echo-0*1.*.nii.*',
                )
            else:
                print(
                    "Warning: Session filter applied, but subject " + subject +
                    " has no bids session information. This subject has been ignored."
                )
        else:
            for session in sessions:
                first_echo_files = first_echo_files + layout.get(
                    subject=subject,
                    session=session,
                    modality='anat',
                    extensions='.*part-phase.*echo-0*1.*.nii.*',
                )
    anat_folders = []
    for img in first_echo_files:
        full_dirname = os.path.dirname(img.filename)
        remove_base_dir = full_dirname.replace(bids_dir, '')
        remove_leading_slash = remove_base_dir.lstrip(os.sep)
        anat_folders.append(remove_leading_slash)
    anat_folders = sorted(set(anat_folders))

    #IdentityInterface is useful for passing subject directory structure to datasink
    infosource = pe.Node(niu.IdentityInterface(fields=['subject_id']),
                         name="infosource")
    infosource.iterables = ('subject_id', anat_folders)

    ### NODES AND PARAMETERS
    datasource = pe.Node(nio.DataGrabber(
        infields=['subject_id'],
        outfields=['phase_images', 'mag_images', 'phase_jsons', 'mag_jsons']),
                         name='datasource')
    datasource.inputs.field_template = dict(
        phase_images='%s/' + phase_match_pattern + '.nii*',
        phase_jsons='%s/' + phase_match_pattern + '.json',
        mag_images='%s/' + mag_match_pattern + '.nii*',
        mag_jsons='%s/' + mag_match_pattern + '.json',
    )
    datasource.inputs.sort_filelist = True
    datasource.inputs.template = "*"
    datasource.inputs.base_directory = bids_dir

    #this node must change depending on the scanner vendor
    susc_phase_preprocess = pe.Node(SiemensPhasePreprocess(),
                                    name='susc_phase_preprocess')

    avg_and_freq_estimate_weights = pe.Node(
        GetAvgAndWeightsFromMag(), name='avg_and_freq_estimate_weights')
    avg_and_freq_estimate_weights.inputs.snr_window_sz = freq_weights__snr_window_sz
    avg_and_freq_estimate_weights.inputs.avg_out_filename = "avg.nii.gz"
    avg_and_freq_estimate_weights.inputs.weight_out_filename = "weights.nii.gz"
    """
    #spm worked better for varian 7T data
    #if using spm, these parameters are needed
    bias_regularization=.001
    sampling_distance=2.0
    bias_fwhm=30
    
    nonuniformityCorrect_spm=pe.Node(spm.preprocess.Segment(),name='nonuniformityCorrect_spm')
    nonuniformityCorrect_spm.inputs.bias_regularization=bias_regularization
    nonuniformityCorrect_spm.inputs.sampling_distance=sampling_distance
    nonuniformityCorrect_spm.inputs.bias_fwhm=bias_fwhm
    nonuniformityCorrect_spm.inputs.save_bias_corrected=True
    """

    nonuniformity_correct_fsl = pe.Node(fsl.FAST(),
                                        name='nonuniformity_correct_fsl')
    nonuniformity_correct_fsl.inputs.img_type = 2  #1 for t1, 2 for t2
    nonuniformity_correct_fsl.inputs.bias_iters = FAST_bias_iters  #higher for larger nonuniformity
    nonuniformity_correct_fsl.inputs.bias_lowpass = FAST_bias_lowpass  #spm uses 30
    nonuniformity_correct_fsl.inputs.number_classes = FAST_num_classes  #spm uses 5
    nonuniformity_correct_fsl.inputs.output_biasfield = True
    nonuniformity_correct_fsl.inputs.output_biascorrected = True
    nonuniformity_correct_fsl.interface.estimated_memory_gb = 10

    brain_extract = pe.Node(fsl.BET(), name='brain_extract')
    brain_extract.inputs.frac = BET_frac
    brain_extract.inputs.mask = True
    brain_extract.inputs.robust = True

    freq_est = pe.Node(EstimateFrequncyFromWrappedPhase(), 'freq_est')
    freq_est.inputs.truncate_echo = truncate_echo
    freq_est.inputs.freq_filename = "freq_est.nii.gz"
    freq_est.interface.estimated_memory_gb = 4

    R2Star = pe.Node(CalcR2Star_cmd(), 'R2Star')
    R2Star.inputs.R2star = 'R2star.nii.gz'
    R2Star.inputs.neg_mask = 'negMask.nii.gz'
    R2Star.inputs.nan_mask = 'nanMask.nii.gz'
    #R2Star.interface.estimated_memory_gb = 5

    trim_mask = pe.Node(TrimMaskUsingReliability(), name='trim_mask')
    trim_mask.inputs.erosion_sz = 15.0  #in mm
    trim_mask.inputs.threshold = scnd_diff_reliability_thresh_trim
    trim_mask.inputs.trimmed_mask_filename = "trim_mask.nii.gz"
    trim_mask.inputs.reliability_filename = "unreliableMap.nii.gz"
    trim_mask.interface.estimated_memory_gb = 25

    unreliable_fieldmap_voxels = pe.Node(CalculatReliabilityMask(),
                                         name='unreliable_fieldmap_voxels')
    unreliable_fieldmap_voxels.inputs.threshold = scnd_diff_reliability_thresh_noise
    unreliable_fieldmap_voxels.inputs.reliability_mask_filename = "unreliableMask.nii.gz"
    unreliable_fieldmap_voxels.inputs.reliability_filename = "unreliableMap.nii.gz"

    CF_value = pe.Node(GetCFFromJson, name='CFValue')

    susceptibility = pe.Node(SS_TV_mcr(), name='susceptibility')
    susceptibility.inputs.quit_matlab = ''  #use this line when using mcr, comment when using matlab
    susceptibility.inputs.alpha = SS_TV_lagrange_parameter
    susceptibility.inputs.B0_dir = B0_dir
    susceptibility.inputs.susceptibility_filename = 'susceptibilityMap.nii.gz'
    susceptibility.interface.estimated_memory_gb = 10

    fieldmap_reorient = pe.Node(fsl.Reorient2Std(), name='fieldmap_reorient')
    QSM_reorient = pe.Node(fsl.Reorient2Std(), name='QSM_reorient')
    QSM_brain_mask_reorient = pe.Node(fsl.Reorient2Std(),
                                      name='QSM_brain_mask_reorient')
    QSM_noise_mask_reorient = pe.Node(fsl.Reorient2Std(),
                                      name='QSM_noise_mask_reorient')
    R2star_reorient = pe.Node(fsl.Reorient2Std(), name='R2star_reorient')
    R2star_fit_reorient = pe.Node(fsl.Reorient2Std(),
                                  name='R2star_fit_reorient')
    R2star_neg_mask_reorient = pe.Node(fsl.Reorient2Std(),
                                       name='R2star_neg_mask_reorient')

    datasink = pe.Node(nio.DataSink(), name="datasink")
    datasink.inputs.base_directory = out_dir + '/qsm_sstv/'
    datasink.inputs.parameterization = False

    rename_infosource = pe.Node(replace_slash, "rename_infosource")
    rename_fieldmap = pe.Node(
        niu.Rename(format_string="%(subject_id)s-fieldmap", keep_ext=True),
        "rename_fieldmap")
    rename_QSM = pe.Node(
        niu.Rename(format_string="%(subject_id)s-QSM", keep_ext=True),
        "rename_QSM")
    rename_QSM_brain_mask = pe.Node(
        niu.Rename(format_string="%(subject_id)s-QSM_brainMask",
                   keep_ext=True), "rename_QSM_brain_mask")
    rename_QSM_noise_mask = pe.Node(
        niu.Rename(format_string="%(subject_id)s-QSM_noiseMask",
                   keep_ext=True), "rename_QSM_noise_mask")

    rename_R2star = pe.Node(
        niu.Rename(format_string="%(subject_id)s-R2star", keep_ext=True),
        "rename_R2star")
    rename_R2star_fit = pe.Node(
        niu.Rename(format_string="%(subject_id)s-R2star_fit", keep_ext=True),
        "rename_R2star_fit")
    rename_R2star_neg_mask = pe.Node(
        niu.Rename(format_string="%(subject_id)s-R2star_negMask",
                   keep_ext=True), "rename_R2star_neg_mask")

    ### PIPELINE CONNECTION
    pipelineDir = work_dir
    wf = pe.Workflow(name="SS_TV")
    wf.base_dir = pipelineDir
    wf.config['execution'][
        'remove_unnecessary_outputs'] = False  #useful for debugging
    wf.connect([
        (infosource, datasource, [('subject_id', 'subject_id')]),
        (datasource, avg_and_freq_estimate_weights, [('mag_images', 'mag')]),
        (datasource, susc_phase_preprocess, [('phase_images', 'infiles')]),
        #spm requires matlab
        #(avg_and_freq_estimate_weights, nonuniformityCorrect_spm, [('avgOutFilename', 'data')]),
        #(nonuniformityCorrect_spm, brain_extract, [('bias_corrected_image', 'in_file')]),
        (avg_and_freq_estimate_weights, nonuniformity_correct_fsl,
         [('avg_out_filename', 'in_files')]),
        (nonuniformity_correct_fsl, brain_extract, [('restored_image',
                                                     'in_file')]),
        (susc_phase_preprocess, freq_est, [('outfiles', 'phase')]),
        (datasource, freq_est, [('phase_jsons', 'json')]),
        (brain_extract, freq_est, [('mask_file', 'mask')]),
        (avg_and_freq_estimate_weights, freq_est, [('weight_out_filename',
                                                    'weight')]),
        (freq_est, trim_mask, [('freq_filename', 'phase')]),
        (datasource, R2Star, [('mag_images', 'mag')]),
        (susc_phase_preprocess, R2Star, [('outfiles', 'phase')]),
        (freq_est, R2Star, [('freq_filename', 'freq_loc')]),
        (trim_mask, R2Star, [('trimmed_mask_filename', 'mask')]),
        (datasource, R2Star, [('mag_jsons', 'json')]),
        (brain_extract, trim_mask, [('mask_file', 'mask')]),
        (freq_est, unreliable_fieldmap_voxels, [('freq_filename', 'phase')]),
        (brain_extract, unreliable_fieldmap_voxels, [('mask_file', 'mask')]),
        (freq_est, susceptibility, [('freq_filename', 'freq_loc')]),
        (datasource, CF_value, [('mag_jsons', 'filename')]),
        (unreliable_fieldmap_voxels, susceptibility,
         [('reliability_mask_filename', 'reliability_mask_loc')]),
        (trim_mask, susceptibility, [('trimmed_mask_filename', 'mask_loc')]),
        (CF_value, susceptibility, [('cf', 'CF')]),
        (freq_est, fieldmap_reorient, [('freq_filename', 'in_file')]),
        (susceptibility, QSM_reorient, [('susceptibility_filename', 'in_file')
                                        ]),
        (trim_mask, QSM_brain_mask_reorient, [('trimmed_mask_filename',
                                               'in_file')]),
        (unreliable_fieldmap_voxels, QSM_noise_mask_reorient,
         [('reliability_mask_filename', 'in_file')]),
        (R2Star, R2star_reorient, [('R2star', 'in_file')]),
        (R2Star, R2star_fit_reorient, [('R2star_fit', 'in_file')]),
        (R2Star, R2star_neg_mask_reorient, [('neg_mask', 'in_file')]),

        #rename files and data sink
        (infosource, rename_infosource, [('subject_id', 'filename')]),
        #fieldmap
        (rename_infosource, rename_fieldmap, [('renamed', 'subject_id')]),
        (fieldmap_reorient, rename_fieldmap, [('out_file', 'in_file')]),
        (rename_fieldmap, datasink, [('out_file', '@')]),
        #qsm
        (rename_infosource, rename_QSM, [('renamed', 'subject_id')]),
        (QSM_reorient, rename_QSM, [('out_file', 'in_file')]),
        (rename_QSM, datasink, [('out_file', '@.@qsm')]),
        #qsm brain mask
        (rename_infosource, rename_QSM_brain_mask, [('renamed', 'subject_id')]
         ),
        (QSM_brain_mask_reorient, rename_QSM_brain_mask, [('out_file',
                                                           'in_file')]),
        (rename_QSM_brain_mask, datasink, [('out_file', '@.@qsm_brain')]),
        #qsm noisey voxels in fieldmap
        (rename_infosource, rename_QSM_noise_mask, [('renamed', 'subject_id')]
         ),
        (QSM_noise_mask_reorient, rename_QSM_noise_mask, [('out_file',
                                                           'in_file')]),
        (rename_QSM_noise_mask, datasink, [('out_file', '@.@qsm_noise')]),
        #r2star
        (rename_infosource, rename_R2star, [('renamed', 'subject_id')]),
        (R2star_reorient, rename_R2star, [('out_file', 'in_file')]),
        (rename_R2star, datasink, [('out_file', '@.@r2star')]),
        #r2star fit map
        (rename_infosource, rename_R2star_fit, [('renamed', 'subject_id')]),
        (R2star_fit_reorient, rename_R2star_fit, [('out_file', 'in_file')]),
        (rename_R2star_fit, datasink, [('out_file', '@.@r2starfit')]),
        #r2star negative values that were set to 0
        (rename_infosource, rename_R2star_neg_mask, [('renamed', 'subject_id')]
         ),
        (R2star_neg_mask_reorient, rename_R2star_neg_mask, [('out_file',
                                                             'in_file')]),
        (rename_R2star_neg_mask, datasink, [('out_file', '@.@r2starneg')]),
        (infosource, datasink, [('subject_id', 'container')]),
    ])
    return wf
Example #16
preproc.inputs.inputspec.struct = os.path.abspath('data/s1/struct.nii')
datasink = pe.Node(interface=nio.DataSink(), name='sinker')
preprocess = pe.Workflow(name='preprocout')
preprocess.base_dir = os.path.abspath('.')
preprocess.connect([(preproc, datasink, [('meanfunc2.out_file', 'meanfunc'),
                                         ('maskfunc3.out_file', 'funcruns')])])
preprocess.run()
"""
DataGrabber
-----------

DataGrabber is (surprise, surprise) an interface for collecting files from the hard drive. It is very
flexible and supports almost any file organisation of your data you can imagine.
"""

datasource1 = nio.DataGrabber()
datasource1.inputs.template = 'data/s1/f3.nii'
datasource1.inputs.sort_filelist = True
results = datasource1.run()
print(results.outputs)

datasource2 = nio.DataGrabber()
datasource2.inputs.template = 'data/s*/f*.nii'
datasource2.inputs.sort_filelist = True
results = datasource2.run()
print(results.outputs)

datasource3 = nio.DataGrabber(infields=['run'])
datasource3.inputs.template = 'data/s1/f%d.nii'
datasource3.inputs.sort_filelist = True
datasource3.inputs.run = [3, 7]
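"""
The grabbers above fill the single default output field. ``infields`` and
``outfields`` can also be combined, with per-field ``template_args``, so that
one grabber returns several named outputs. The sketch below reuses the small
``data/s1`` layout from this example.
"""

datasource4 = nio.DataGrabber(infields=['subject_id'],
                              outfields=['func', 'struct'])
datasource4.inputs.template = 'data/%s/%s.nii'
datasource4.inputs.template_args = dict(func=[['subject_id', 'f3']],
                                        struct=[['subject_id', 'struct']])
datasource4.inputs.subject_id = 's1'
datasource4.inputs.sort_filelist = True
results = datasource4.run()
print(results.outputs)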
Example #17

def getfieldList(subject_list):
    field_list = []
    index = 0
    for subject_id in subject_list:
        field_list.append(
            os.path.join(tbssDir, 'tbssproc/tbss_all/tbss2/fnirt/mapflow',
                         '_fnirt' + str(index),
                         subject_id + '_FA_prep_fieldwarp'))
        index = index + 1
    return field_list


tbss_source = pe.Node(
    interface=nio.DataGrabber(outfields=['file_list', 'field_list']),
    name='tbss_source')
tbss_source.inputs.base_directory = os.path.abspath('/')
tbss_source.inputs.template = '%s.nii.gz'
tbss_source.inputs.template_args = dict(
    file_list=[[get_nonFAList(subject_list)]],
    field_list=[[getfieldList(subject_list)]])
'''TBSS analysis

'''
tbss_nonFA = tbss_nonFA.create_tbss_non_FA(name='tbss_' + nonFA)
tbss_nonFA.inputs.inputnode.target = fsl.Info.standard_image(
    "FMRIB58_FA_1mm.nii.gz")
tbss_nonFA.inputs.inputnode.skeleton_thresh = skeleton_thr
tbss_nonFA.inputs.inputnode.merged_file = 'all_' + nonFA + '.nii.gz'
tbss_nonFA.inputs.inputnode.mean_FA_mask = os.path.join(
Example #18
    def build_input_node(self):
        """Build and connect an input node to the pipelines.
        """

        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        import nipype.interfaces.io as nio
        import clinica.pipelines.pet_volume.pet_volume_utils as utils
        from os.path import join, split, realpath

        iterables_fwhm = self._fwhm
        if not self._apply_pvc:
            iterables_fwhm = [[]] * len(self.subjects)

        iterables_node = npe.Node(
            name="LoadingCLIArguments",
            interface=nutil.IdentityInterface(
                fields=['subject_id', 'session_id', 'fwhm'],
                mandatory_inputs=True))
        iterables_node.iterables = [('subject_id', self.subjects),
                                    ('session_id', self.sessions),
                                    ('fwhm', iterables_fwhm)]

        iterables_node.synchronize = True

        # PET DataGrabber
        # ===============
        pet_bids_reader = npe.Node(nio.DataGrabber(infields=[
            'subject_id', 'session', 'subject_repeat', 'session_repeat'
        ],
                                                   outfields=['out_files']),
                                   name='pet_bids_reader')
        pet_bids_reader.inputs.base_directory = self.bids_directory
        pet_bids_reader.inputs.template = '%s/%s/pet/%s_%s_task-rest_acq-' + self.parameters[
            'pet_type'] + '_pet.nii*'
        pet_bids_reader.inputs.sort_filelist = False

        # Native T1 DataGrabber
        # ======================
        t1_bids_reader = npe.Node(nio.DataGrabber(infields=[
            'subject_id', 'session', 'subject_repeat', 'session_repeat'
        ],
                                                  outfields=['out_files']),
                                  name='t1_bids_reader')
        t1_bids_reader.inputs.base_directory = self.bids_directory
        t1_bids_reader.inputs.template = '%s/%s/anat/%s_%s_T1w.nii*'
        t1_bids_reader.inputs.sort_filelist = False

        # Flow Fields DataGrabber
        # ========================
        flowfields_caps_reader = npe.Node(nio.DataGrabber(
            infields=[
                'subject_id', 'session', 'subject_repeat', 'session_repeat'
            ],
            outfields=['out_files']),
                                          name='flowfields_caps_reader')
        flowfields_caps_reader.inputs.base_directory = join(
            self.caps_directory, 'subjects')
        flowfields_caps_reader.inputs.template = '%s/%s/t1/spm/dartel/group-' + self._group_id + '/%s_%s_T1w_target-' \
                                                 + self._group_id + '_transformation-forward_deformation.nii*'
        flowfields_caps_reader.inputs.sort_filelist = False

        # Dartel Template DataGrabber
        # ============================
        template_caps_reader = npe.Node(
            nio.DataGrabber(outfields=['out_files']),
            name="template_caps_reader")
        template_caps_reader.inputs.base_directory = self.caps_directory
        template_caps_reader.inputs.template = 'groups/group-' + self._group_id + '/t1/group-' + self._group_id \
                                               + '_template.nii*'
        template_caps_reader.inputs.sort_filelist = False

        # Reference Mask DataGrabber
        # ===========================
        reference_mask = npe.Node(nio.DataGrabber(outfields=['out_files']),
                                  name='reference_mask')
        reference_mask.inputs.base_directory = join(
            split(realpath(__file__))[0], '../../resources/masks')
        reference_mask.inputs.sort_filelist = False
        # TODO ADD DIFFERENT PET TYPES TO PROCESS
        if self.parameters['pet_type'] == 'fdg':
            reference_mask.inputs.template = 'region-pons_eroded-6mm_mask.nii*'
            self._suvr_region = 'pons'
        elif self.parameters['pet_type'] == 'av45':
            reference_mask.inputs.template = 'region-cerebellumPons_eroded-6mm_mask.nii*'
            self._suvr_region = 'cerebellumPons'
        else:
            raise NotImplementedError(
                'Unknown type of PET image. We currently accept as input only "fdg" or "av45"'
                + ' as values.')

        # Tissues DataGrabber
        # ====================
        tissue_names = {
            1: 'graymatter',
            2: 'whitematter',
            3: 'csf',
            4: 'bone',
            5: 'softtissue',
            6: 'background'
        }

        tissues_caps_reader = npe.Node(nio.DataGrabber(infields=[
            'subject_id', 'session', 'subject_repeat', 'session_repeat',
            'tissues'
        ],
                                                       outfields=['out_files'
                                                                  ]),
                                       name='tissues_caps_reader')
        tissues_caps_reader.inputs.base_directory = join(
            self.caps_directory, 'subjects')
        tissues_caps_reader.inputs.template = '%s/%s/t1/spm/segmentation/normalized_space/%s_%s_T1w_segm-%s_space-Ixi549Space_modulated-off_probability.nii*'
        tissues_caps_reader.inputs.tissues = [
            tissue_names[t] for t in self.parameters['mask_tissues']
        ]
        tissues_caps_reader.inputs.sort_filelist = False

        n_tissues = len(self.parameters['mask_tissues'])

        self.connect([
            (iterables_node,
             pet_bids_reader, [('subject_id', 'subject_id'),
                               ('session_id', 'session'),
                               ('subject_id', 'subject_repeat'),
                               ('session_id', 'session_repeat')]),
            (iterables_node, t1_bids_reader, [('subject_id', 'subject_id'),
                                              ('session_id', 'session'),
                                              ('subject_id', 'subject_repeat'),
                                              ('session_id', 'session_repeat')
                                              ]),
            (iterables_node, tissues_caps_reader,
             [(('subject_id', utils.expand_into_list, n_tissues),
               'subject_id'),
              (('session_id', utils.expand_into_list, n_tissues), 'session'),
              (('subject_id', utils.expand_into_list, n_tissues),
               'subject_repeat'),
              (('session_id', utils.expand_into_list, n_tissues),
               'session_repeat')]),
            (iterables_node,
             flowfields_caps_reader, [('subject_id', 'subject_id'),
                                      ('session_id', 'session'),
                                      ('subject_id', 'subject_repeat'),
                                      ('session_id', 'session_repeat')]),
            (pet_bids_reader, self.input_node, [('out_files', 'pet_image')]),
            (t1_bids_reader, self.input_node, [('out_files', 't1_image_native')
                                               ]),
            (tissues_caps_reader, self.input_node, [('out_files',
                                                     'mask_tissues')]),
            (flowfields_caps_reader, self.input_node, [('out_files',
                                                        'flow_fields')]),
            (template_caps_reader, self.input_node, [('out_files',
                                                      'dartel_template')]),
            (reference_mask, self.input_node, [('out_files', 'reference_mask')
                                               ]),
            (iterables_node, self.input_node, [('fwhm', 'fwhm')])
        ])
        if self._apply_pvc:
            pvc_tissues_caps_reader = npe.Node(nio.DataGrabber(
                infields=[
                    'subject_id', 'session', 'subject_repeat',
                    'session_repeat', 'tissues'
                ],
                outfields=['out_files']),
                                               name='pvc_tissues_caps_reader')
            pvc_tissues_caps_reader.inputs.base_directory = join(
                self.caps_directory, 'subjects')
            pvc_tissues_caps_reader.inputs.template = '%s/%s/t1/spm/segmentation/native_space/%s_%s_T1w_segm-%s_probability.nii*'
            pvc_tissues_caps_reader.inputs.tissues = [
                tissue_names[t] for t in self.parameters['pvc_mask_tissues']
            ]
            pvc_tissues_caps_reader.inputs.sort_filelist = False

            n_pvc_tissues = len(self.parameters['pvc_mask_tissues'])

            self.connect([(iterables_node, pvc_tissues_caps_reader,
                           [(('subject_id', utils.expand_into_list,
                              n_pvc_tissues), 'subject_id'),
                            (('session_id', utils.expand_into_list,
                              n_pvc_tissues), 'session'),
                            (('subject_id', utils.expand_into_list,
                              n_pvc_tissues), 'subject_repeat'),
                            (('session_id', utils.expand_into_list,
                              n_pvc_tissues), 'session_repeat')]),
                          (pvc_tissues_caps_reader, self.input_node,
                           [('out_files', 'pvc_mask_tissues')])])
        else:
            self.input_node.inputs.pvc_mask_tissues = []
Example #19
#bring in subject and study specific data and dirs
from variables import working_dir, freesurfer_dir, subjects_M, subjects_NM

nii_base_dir = "/scr/alaska1/steele/BSL_IHI/T1w/"

if __name__ == '__main__':
    wf = pe.Workflow(name="main_workflow")
    wf.base_dir = os.path.join(working_dir, "preprocessing")
    wf.config['execution']['crashdump_dir'] = wf.base_dir + "/crash_files"
    
    subjects_infosource = pe.Node(util.IdentityInterface(fields=['subject_id']), name="subject_infosource")
    subjects = ['M/' + subject for subject in subjects_M] + ['NM/' + subject for subject in subjects_NM]
    subjects_infosource.iterables = ('subject_id', subjects)
    
    #this one is for bob
    datagrabber = pe.Node(nio.DataGrabber(infields=['subject_id'], outfields=['t1w']), 
                          name="datagrabber")
    datagrabber.inputs.base_directory = nii_base_dir
    datagrabber.inputs.template = '%s/%s'
    datagrabber.inputs.template_args['t1w'] = [['subject_id', '*.nii']]
    datagrabber.inputs.sort_filelist = True

    wf.connect(subjects_infosource, "subject_id", datagrabber, "subject_id")
    
    recon_all = pe.Node(ReconAll(), name="recon_all")
    recon_all.plugin_args={'submit_specs': 'request_memory = 2500'}
    #recon_all.inputs.subjects_dir = "/scr/adenauer1/freesurfer"

    # link datagrabber stuff to workflow
    wf.connect(datagrabber, "t1w", recon_all, "T1_files")
    
Example #20
src_reconstruction_pipeline_name = 'source_dsamp_full_reconstruction_' + \
    inv_method + '_' + parc.replace('.', '')

main_workflow = pe.Workflow(name=src_reconstruction_pipeline_name)
main_workflow.base_dir = data_path

# We create a node to pass input filenames to DataGrabber from nipype
infosource = create_iterator(['subject_id'], [subject_ids])

# and a node to grab data. The template_args in this node iterate upon
# the values in the infosource node
ica_dir = op.join(data_path, 'preprocessing_dsamp_workflow',
                  'preproc_meeg_dsamp_pipeline')  # noqa

datasource = pe.Node(
    interface=nio.DataGrabber(infields=['subject_id'],
                              outfields=['raw_file', 'trans_file']),  # noqa
    name='datasource')

datasource.inputs.base_directory = ica_dir
datasource.inputs.template = '*'
datasource.inputs.field_template = dict(
    raw_file="_session_id_*_subject_id_%s/ica/run_*_sss_filt_dsamp_ica.fif",  # noqa
    trans_file='../../%s/MEG/%s%s.fif')

datasource.inputs.template_args = dict(
    raw_file=[['subject_id']],
    trans_file=[['subject_id', 'subject_id', "-trans"]])

datasource.inputs.sort_filelist = True
Example #21
###############################################################################
# Then we create a node to pass input filenames to DataGrabber from nipype
infosource = pe.Node(interface=IdentityInterface(fields=['freq_band_name']),
                     name="infosource")

infosource.iterables = [('freq_band_name', freq_band_names)]

###############################################################################
# and a node to grab data. The template_args in this node iterate upon
# the values in the infosource node

# template_path = '*%s/conmat_0_coh.npy'
# template_args = [['freq_band_name']]
# datasource = create_datagrabber(data_path, template_path, template_args)

datasource = pe.Node(interface=nio.DataGrabber(infields=['freq_band_name'],
                                               outfields=['conmat_file']),
                     name='datasource')
datasource.inputs.base_directory = data_path
datasource.inputs.template = ("%s/conmat_0_coh.npy")
datasource.inputs.template_args = dict(conmat_file=[['freq_band_name']])

datasource.inputs.sort_filelist = True

###############################################################################
# This parameter corresponds to the percentage of the strongest connections
# retained for the analyses. con_den = 1.0 means a fully connected graph (all
# edges are present)

import json  # noqa
import pprint  # noqa
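
# A hedged sketch (the filename and key below are assumptions, not from the
# original source) of reading the connection-density parameter from a JSON
# parameter file:
with open('params.json', 'rt') as f:
    params = json.load(f)
pprint.pprint(params)
con_den = params.get('con_den', 1.0)  # 1.0 keeps the graph fully connected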
Example #22
hcp_thal_wf = pe.Workflow(name='hcp_thal_wf')
hcp_thal_wf.base_dir = work_directory


#does this one need to be changed??
info = dict(fs_brain=[['subject_id', 'T1w_acpc_dc']],
            mni_brain=[['subject_id', 'T1w_restore_brain']],
            dmri_brain=[['subject_id','T1w_acpc_dc_restore_1.25']],
            aparcaseg=[['subject_id','aparc+aseg']],
            acpc2standard_warp=[['subject_id', 'acpc_dc2standard']])

subj_iterable = pe.Node(IdentityInterface(fields=['subject_id'], mandatory_inputs=True), name='subj_iterable')
subj_iterable.iterables = ('subject_id', sids)

# create a datasource node to get the standard space brain and warp to standard space
datasource = pe.Node(nio.DataGrabber(infields=['subject_id'],
                                     outfields=list(info.keys())),
                     name='datasource')
datasource.inputs.base_directory = os.path.abspath('/home/data/hcp')
datasource.inputs.field_template = dict(fs_brain='%s/T1w/%s.nii.gz',
                                        mni_brain='%s/MNINonLinear/%s.nii.gz', 
                                        dmri_brain='%s/T1w/%s.nii.gz',
                                        aparcaseg='%s/T1w/%s.nii.gz',
                                        acpc2standard_warp='%s/MNINonLinear/xfms/%s.nii.gz')
datasource.inputs.template = '%s/%s'
datasource.inputs.sort_filelist = True
datasource.inputs.template_args = info
hcp_thal_wf.connect(subj_iterable, 'subject_id', datasource, 'subject_id')

# Create a flirt node to calculate the dmri_brain to fs_brain xfm
dmri2fs_xfm = pe.Node(fsl.FLIRT(), name = 'dmri2fs_xfm')
dmri2fs_xfm.inputs.out_matrix_file = 'dmri_2_fs_xfm.mat'
Ejemplo n.º 23
0
    wf = pe.Workflow(name="post_hoc_seeds")
    wf.base_dir = workingdir
    wf.config['execution']['crashdump_dir'] = wf.base_dir + "/crash_files"

    ds = pe.Node(nio.DataSink(), name="datasink")
    ds.run_without_submitting = True
    ds.inputs.base_directory = resultsdir

    subjects_infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_id']),
        name="subjects_infosource")
    subjects_infosource.iterables = ("subject_id", subjects)

    datasource = pe.Node(nio.DataGrabber(
        infields=['subject_id'],
        outfields=['EPI_bandpassed', "EPI_full_spectrum"]),
                         name="datasource")
    datasource.inputs.base_directory = "/scr/kalifornien1/mindwandering/results/"
    datasource.inputs.template = '%s/smri/warped_image/fwhm_6.0/*_afni_%s_wtsimt.nii.gz'
    datasource.inputs.template_args['EPI_bandpassed'] = [[
        'subject_id', "bandpassed"
    ]]
    datasource.inputs.template_args['EPI_full_spectrum'] = [[
        'subject_id', "fullspectrum"
    ]]
    datasource.inputs.sort_filelist = True
    wf.connect(subjects_infosource, "subject_id", datasource, "subject_id")

    post_hoc_seed_infosource = pe.Node(
        util.IdentityInterface(fields=["seed_name"]),
Ejemplo n.º 24
0
def crossValidationWorkUp(
    crossValidationConfigurationFilename,
    baseDir,
    runOption,
    PythonBinDir,
    BRAINSToolsSrcDir,
    BRAINSToolsBuildDir,
):
    print(
        """****************************
          crossValidationWorkUp
          """
    )
    from collections import (
        OrderedDict,
    )  # Need OrderedDict internally to ensure consistent ordering
    from nipype import config

    config.enable_debug_mode()

    import crossValidation as this
    import ConfigurationParser

    myConfigurationMap = ConfigurationParser.ConfigurationSectionMap(
        crossValidationConfigurationFilename
    )

    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import ast

    print(
        """ before
           createeachvalidationunitnd
           """
    )
    createConfigurationFiles = pe.Node(
        name="createConfigurationFiles",
        interface=Function(
            input_names=[
                "inputConfigurationFilename",
                "outputConfigurationFilenamePrefix",
            ],
            output_names=["outputConfigFilenameDict"],
            function=this.createConfigurationFileForCrossValidationUnitTest,
        ),
    )

    preprocessing = pe.Workflow(name="Preprocessing")
    preprocessing.base_dir = baseDir + "/PreprocessingDir"

    createConfigurationFiles.inputs.inputConfigurationFilename = (
        crossValidationConfigurationFilename
    )
    createConfigurationFiles.inputs.outputConfigurationFilenamePrefix = (
        "createConfigurationFiles"
    )

    extractConfigurationFileListND = pe.Node(
        name="extractConfigurationFileListND",
        interface=Function(
            input_names=["configurationFiledict"],
            output_names=["configurationFileList"],
            function=this.extractConfigFile,
        ),
    )
    preprocessing.connect(
        createConfigurationFiles,
        "outputConfigFilenameDict",
        extractConfigurationFileListND,
        "configurationFiledict",
    )

    preprocessing.run()

    # ------------------------------------------------------------------------------------
    # Data grabber for outputs
    #
    import nipype.interfaces.io as nio

    dg = nio.DataGrabber()
    dg.inputs.base_directory = (
        baseDir + "/PreprocessingDir/Preprocessing/createConfigurationFiles/"
    )
    dg.inputs.template = "*config"
    mainConfigFiles = dg.run()
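    # Note: this DataGrabber is run directly (outside any workflow) so that the
    # list of generated *config files is available right away; it is used below
    # to seed the iterables of probabilityMapGeneratorND.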

    print((mainConfigFiles.outputs.outfiles))

    # ------------------------------------------------------------------------------------
    workflow = pe.Workflow(name="crossValidationWF")
    workflow.base_dir = baseDir

    # ------------------------------------------------------------------------------------
    # Generate Probability Map
    #
    Options = myConfigurationMap["Options"]
    roiDict = Options["roiBooleanCreator".lower()]

    # -------------------------------- probMapFilenameGenerator is a dummy node
    # to create the proper probability file location for nipype
    #
    print(
        """************************
          probMapFilenameGenerator
          """
    )

    probMapFilenameGenerator = pe.Node(
        name="probMapFilenameGenerator",
        interface=Function(
            input_names=["roiList"],
            output_names=["probabilityMapFilename"],
            function=this.getProbabilityMapFilename,
        ),
    )
    print(roiDict)
    probMapFilenameGenerator.inputs.roiList = list(roiDict.keys())
    print(
        """************************
          probabilityMapGeneratorND
          """
    )

    #
    # --------------------------------  start from generate probability
    #
    probabilityMapGeneratorND = pe.Node(
        name="probabilityMapGeneratorND",
        interface=Function(
            input_names=[
                "configurationFilename",
                "probabilityMapDict",
                "gaussianSigma",
                "outputXmlFilename",
            ],
            output_names=[
                "probabilityMapDict",
                "outputXmlFilename",
                "outputConfigurationFilename",
            ],
            function=ConfigurationParser.BRAINSCutGenerateProbabilityMap,
        ),
    )

    probabilityMapGeneratorND.inputs.outputXmlFilename = "netConfiguration.xml"

    gaussianSigmaParam = ast.literal_eval(Options["gaussianSigma".lower()])
    print(gaussianSigmaParam)
    probabilityMapGeneratorND.iterables = (
        "configurationFilename",
        mainConfigFiles.outputs.outfiles,
    )
    probabilityMapGeneratorND.inputs.gaussianSigma = gaussianSigmaParam

    workflow.connect(
        probMapFilenameGenerator,
        "probabilityMapFilename",
        probabilityMapGeneratorND,
        "probabilityMapDict",
    )

    #
    # --------------------------------  create vectors for each ROI
    #
    print(
        """************************
          configFileND
          """
    )
    configFileND = pe.Node(
        name="configFileND",
        interface=Function(
            input_names=["originalFilename", "editedFilenamePrefix"],
            output_names=["editedFilenames"],
            function=ConfigurationParser.ConfigurationFileEditor,
        ),
    )

    configFileND.inputs.editedFilenamePrefix = "ROI"
    workflow.connect(
        probabilityMapGeneratorND,
        "outputConfigurationFilename",
        configFileND,
        "originalFilename",
    )

    vectorCreatorND = pe.MapNode(
        name="vectorCreatorND",
        interface=Function(
            input_names=[
                "configurationFilename",
                "probabilityMapDict",
                "normalization",
                "outputXmlFilename",
                "outputVectorFilename",
            ],
            output_names=[
                "outputVectorFilename",
                "outputVectorHdrFilename",
                "outputNormalization",
                "outputXmlFilename",
            ],
            function=ConfigurationParser.BRAINSCutCreateVector,
        ),
        iterfield=["configurationFilename"],
    )
    vectorCreatorND.inputs.outputVectorFilename = "oneROIVectorFile.txt"
    vectorCreatorND.inputs.outputXmlFilename = "oneROICreateVectorNetConfiguration.xml"
    normalizationOption = Options["normalization".lower()]
    print(
        (
            """Normalization Option: {str}
           """.format(
                str=normalizationOption
            )
        )
    )
    vectorCreatorND.iterables = ("normalization", normalizationOption)
    #
    # --------------------------------  workflow connections
    #
    workflow.connect(
        configFileND, "editedFilenames", vectorCreatorND, "configurationFilename"
    )
    workflow.connect(
        probabilityMapGeneratorND,
        "probabilityMapDict",
        vectorCreatorND,
        "probabilityMapDict",
    )

    #
    # --------------------------------  balance and combine each ROI vectors
    #
    print(
        """************************
          balanceND
          """
    )
    balanceND = pe.Node(
        name="balanceND",
        interface=Function(
            input_names=["inputVectorFilenames"],
            output_names=["outputVectorFilenames", "outputVectorHdrFilenames"],
            function=ConfigurationParser.BalanceInputVectors,
        ),
    )
    workflow.connect(
        vectorCreatorND, "outputVectorFilename", balanceND, "inputVectorFilenames"
    )

    combineND = pe.Node(
        name="combineND",
        interface=Function(
            input_names=["inputVectorFilenames", "outputVectorFilename"],
            output_names=["outputVectorFilename", "outputVectorHdrFilename"],
            function=ConfigurationParser.CombineInputVectors,
        ),
    )
    workflow.connect(
        balaceND, "outputVectorFilenames", combineND, "inputVectorFilenames"
    )

    combineND.inputs.outputVectorFilename = "allCombinedVector.txtANN"
    #
    # --------------------------------  train
    #
    print(
        """************************
          trainND
          """
    )
    trainND = pe.Node(
        name="trainND",
        interface=Function(
            input_names=[
                "configurationFilename",
                "inputVectorFilename",
                "outputModelFilenamePrefix",
                "outputXmlFilename",
                "methodParameter",
            ],
            output_names=["outputTrainedModelFilename", "outputMethodParameter"],
            function=ConfigurationParser.BRAINSCutTrainModel,
        ),
    )
    # methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
    methodFromConfigFile = Options["modelParameter".lower()]
    trainND.iterables = ("methodParameter", methodFromConfigFile)

    trainND.inputs.outputXmlFilename = "trainNetConfiguration.xml"
    trainND.inputs.outputModelFilenamePrefix = "trainModelFile.txt"

    workflow.connect(
        probabilityMapGeneratorND,
        "outputConfigurationFilename",
        trainND,
        "configurationFilename",
    )
    workflow.connect(combineND, "outputVectorFilename", trainND, "inputVectorFilename")
    #
    # --------------------------------  apply
    #
    applyND = pe.Node(
        name="applyND",
        interface=Function(
            input_names=[
                "configurationFilename",
                "probabilityMapDict",
                "normalization",
                "inputModelFilename",
                "methodParameter",
                "outputXmlFilename",
            ],
            output_names=["outputLabelDict"],
            function=ConfigurationParser.BRAINSCutApplyModel,
        ),
    )
    # methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
    applyND.inputs.outputXmlFilename = "applyConfiguration.xml"
    workflow.connect(
        probabilityMapGeneratorND,
        "outputConfigurationFilename",
        applyND,
        "configurationFilename",
    )
    workflow.connect(vectorCreatorND, "outputNormalization", applyND, "normalization")
    workflow.connect(
        probabilityMapGeneratorND, "probabilityMapDict", applyND, "probabilityMapDict"
    )
    workflow.connect(
        trainND, "outputTrainedModelFilename", applyND, "inputModelFilename"
    )
    workflow.connect(trainND, "outputMethodParameter", applyND, "methodParameter")

    #####################################################################################
    # Data Sink
    #
    import os

    LabelsDS = pe.Node(nio.DataSink(), name="LabelDS")
    LabelsDS.inputs.base_directory = os.path.join(baseDir, "Result")
    LabelsDS.inputs.regexp_substitutions = [
        ("/_", "/"),
        ("configurationFilename.*_Test", "Test"),
        ("_configuration.config/normalization_", "/"),
        ("methodParameter_--method", ""),
        ("RandomForest", "RF/"),
        (".--randomTreeDepth", "TreeDepth"),
        (".--numberOfTrees", "_TreeNumber"),
        (
            "ANNContinuousPrediction(?P<roi>.+)(?P<session>\d\d\d\d\d).nii.gz",
            r"\g<session>_\g<roi>_ANNContinuous.nii.gz",
        ),
    ]
    # ANNContinuousPredictionl_accumben77478
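    # Hedged illustration (not part of the workflow): the last substitution above
    # rewrites a file such as "ANNContinuousPredictionl_accumben77478.nii.gz"
    # into "77478_l_accumben_ANNContinuous.nii.gz", e.g.
    #   import re
    #   re.sub(r"ANNContinuousPrediction(?P<roi>.+)(?P<session>\d\d\d\d\d).nii.gz",
    #          r"\g<session>_\g<roi>_ANNContinuous.nii.gz",
    #          "ANNContinuousPredictionl_accumben77478.nii.gz")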

    workflow.connect(
        [(applyND, LabelsDS, [(("outputLabelDict", getDictionaryValues), "Labels")])]
    )
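    # The (("outputLabelDict", getDictionaryValues), "Labels") tuple uses
    # nipype's connect-with-function syntax: getDictionaryValues (presumably
    # defined elsewhere in this module) is applied to outputLabelDict before
    # the result is sent to the DataSink's "Labels" folder.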

    #####################################################################################
    # analysis
    #

    #####################################################################################
    # Running
    #
    if runOption == "cluster":
        ############################################
        # Platform specific information
        #     Prepend the python search paths
        pythonPath = (
            BRAINSToolsSrcDir
            + "/BRAINSCut/BRAINSFeatureCreators/RobustStatisticComputations:"
            + BRAINSToolsSrcDir
            + "/AutoWorkup/:"
            + BRAINSToolsSrcDir
            + "/AutoWorkup/BRAINSTools/:"
            + BRAINSToolsBuildDir
            + "/SimpleITK-build/bin/"
            + BRAINSToolsBuildDir
            + "/SimpleITK-build/lib:"
            + PythonBinDir
        )
        binPath = BRAINSToolsBuildDir + "/bin:" + BRAINSToolsBuildDir + "/lib"

        import sys

        PYTHON_AUX_PATHS = pythonPath
        PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(":")
        PYTHON_AUX_PATHS.extend(sys.path)
        sys.path = PYTHON_AUX_PATHS
        # print sys.path
        import SimpleITK as sitk

        #     Prepend the shell environment search paths
        PROGRAM_PATHS = binPath
        PROGRAM_PATHS = PROGRAM_PATHS.split(":")
        import os

        PROGRAM_PATHS.extend(os.environ["PATH"].split(":"))
        os.environ["PATH"] = ":".join(PROGRAM_PATHS)

        Cluster_Script = get_global_sge_script(PYTHON_AUX_PATHS, PROGRAM_PATHS, {})
        workflow.run(
            plugin="SGE",
            plugin_args=OrderedDict(
                template=Cluster_Script,
                qsub_args="-S /bin/bash -pe smp 4-8 -o /dev/null ",
            ),
        )
    else:
        print(
            """************************
              run
              """
        )
        try:
            workflow.write_graph(graph2use="flat")
        except:
            pass
        workflow.run()
Ejemplo n.º 25
0
def test_datagrabber():
    dg = nio.DataGrabber()
    yield assert_equal, dg.inputs.template, Undefined
    yield assert_equal, dg.inputs.base_directory, Undefined
    yield assert_equal, dg.inputs.template_args, {'outfiles': []}
Ejemplo n.º 26
0
def pbX_wf(subject_id, sink_directory, name='hcp_pbX'):

    hcp_pbX_wf = pe.Workflow(name='hcp_pbX_wf')

    #making all the keys for the dictionary
    info = dict(merged_thsamples=[['subject_id', 'merged_th']],
                merged_phsamples=[['subject_id', 'merged_ph']],
                merged_fsamples=[['subject_id', 'merged_f']],
                dmri_brain=[['subject_id', 'T1w_acpc_dc_restore_1.25']],
                fs_brain=[['subject_id', 'T1w_acpc_dc']],
                aparcaseg=[['subject_id', 'aparc+aseg']],
                mask=[['subject_id', 'nodif_brain_mask']])

    # Create a datasource node to get the dwi, bvecs, and bvals
    #This uses the dictionary created above and inputs the keys from the dictionary
    datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                   outfields=list(
                                                       info.keys())),
                         name='datasource')
    datasource.inputs.template = '%s/%s'
    datasource.inputs.subject_id = subject_id
    datasource.inputs.base_directory = os.path.abspath('/home/data/hcp')
    datasource.inputs.field_template = dict(
        merged_thsamples=
        '/home/data/madlab/data/mri/hcp/bedpostX/%s/hcpbpX/thsamples/%s*.nii.gz',
        merged_phsamples=
        '/home/data/madlab/data/mri/hcp/bedpostX/%s/hcpbpX/phsamples/%s*.nii.gz',
        merged_fsamples=
        '/home/data/madlab/data/mri/hcp/bedpostX/%s/hcpbpX/fsamples/%s*.nii.gz',
        dmri_brain='/home/data/hcp/%s/T1w/%s.nii.gz',
        fs_brain='/home/data/hcp/%s/T1w/%s.nii.gz',
        aparcaseg='/home/data/hcp/%s/T1w/%s.nii.gz',
        mask='/home/data/hcp/%s/T1w/Diffusion/%s.nii.gz')
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True

    # Create a flirt node to calculate the dmri_brain to fs_brain xfm
    #Basically creating a conversion from DWI space to Freesurfer space
    dmri2fs_xfm = pe.Node(fsl.FLIRT(), name='dmri2fs_xfm')
    dmri2fs_xfm.inputs.out_matrix_file = 'dmri_2_fs_xfm.mat'
    hcp_pbX_wf.connect(datasource, 'dmri_brain', dmri2fs_xfm, 'in_file')
    hcp_pbX_wf.connect(datasource, 'fs_brain', dmri2fs_xfm, 'reference')

    # Create a convertxfm node to create inverse xfm of dmri2fs affine
    # Basically creating a conversion from freesurfer space to DWI space
    invt_dmri2fs = pe.Node(fsl.ConvertXFM(), name='invt_dmri2fs')
    invt_dmri2fs.inputs.invert_xfm = True
    invt_dmri2fs.inputs.out_file = 'fs_2_dmri_xfm.mat'
    hcp_pbX_wf.connect(dmri2fs_xfm, 'out_matrix_file', invt_dmri2fs, 'in_file')

    # Extract thalamus seed masks from aparc+aseg.nii.gz file
    # Here 10 is the left thalamus, and 49 is the right thalamus
    thal_seed_mask = pe.MapNode(fs.Binarize(),
                                iterfield=['match', 'binary_file'],
                                name='thal_seed_mask')
    #thal_seed_mask.inputs.subject_dir = 'aparcaseg'
    thal_seed_mask.inputs.match = [[10], [49]]
    thal_seed_mask.inputs.binary_file = ['lft_thal.nii.gz', 'rt_thal.nii.gz']
    hcp_pbX_wf.connect(datasource, 'aparcaseg', thal_seed_mask, 'in_file')

    #Next we need to avoid the ventricles by creating an -avoid_mask
    #There are no left and right 3rd and 4th ventricle, so we are making one mask
    avoid_mask = pe.Node(
        fs.Binarize(),
        #out_type='nii.gz',
        name='avoid_mask')
    #avoid_mask.inputs.subject_dir = 'aparcaseg'
    avoid_mask.inputs.match = [
        4, 14, 15, 43, 72
    ]  #lft_lat_ven, 3rd_ven, 4th_ven, rgt_lat_ven, 5th_ven
    avoid_mask.inputs.binary_file = 'ventricles.nii.gz'
    hcp_pbX_wf.connect(datasource, 'aparcaseg', avoid_mask, 'in_file')

    # Extract cortical target masks from aparc+aseg.nii.gz file
    # The ".match" is the freesurfer label and the binary_file is the label/name
    ctx_targ_mask = pe.MapNode(fs.Binarize(),
                               iterfield=['match', 'binary_file'],
                               name='ctx_targ_mask')
    #ctx_targ_mask.inputs.subject_dir = 'aparcaseg'
    ctx_targ_mask.inputs.match = [[1024], [1022],
                                  [1003, 1028, 1027, 1012, 1019, 1020, 1032],
                                  [1031, 1029, 1008],
                                  [1009, 1015, 1033, 1035, 1034, 1030], [1011],
                                  [1017], [1002], [1014], [1026], [1028],
                                  [1023, 1025, 1010], [1005, 1013,
                                                       1021], [1007], [1006],
                                  [1016], [17], [18], [26], [2024], [2022],
                                  [2003, 2028, 2027, 2012, 2019, 2020, 2032],
                                  [2031, 2029, 2008],
                                  [2009, 2015, 2033, 2035, 2034, 2030], [2011],
                                  [2017], [2002], [2014], [2026], [2028],
                                  [2023, 2025, 2010], [2005, 2013, 2021],
                                  [2007], [2006], [2016], [53], [54], [58]]
    ctx_targ_mask.inputs.binary_file = [
        'ctx_lh_precentral.nii.gz', 'ctx_lh_postcentral.nii.gz',
        'ctx_lh_latfront.nii.gz', 'ctx_lh_parietal.nii.gz',
        'ctx_lh_temporal.nii.gz', 'ctx_lh_occipital.nii.gz',
        'ctx_lh_paracentral.nii.gz', 'ctx_lh_caudantcing.nii.gz',
        'ctx_lh_medorbfront.nii.gz', 'ctx_lh_rostantcing.nii.gz',
        'ctx_lh_superfront.nii.gz', 'ctx_lh_medpost.nii.gz',
        'ctx_lh_medoccipital.nii.gz', 'ctx_lh_fusiform.nii.gz',
        'ctx_lh_entorhinal.nii.gz', 'ctx_lh_parahippocampal.nii.gz',
        'lh_hpc.nii.gz', 'lh_amy.nii.gz', 'lh_nacc.nii.gz',
        'ctx_rh_precentral.nii.gz', 'ctx_rh_postcentral.nii.gz',
        'ctx_rh_latfront.nii.gz', 'ctx_rh_parietal.nii.gz',
        'ctx_rh_temporal.nii.gz', 'ctx_rh_occipital.nii.gz',
        'ctx_rh_paracentral.nii.gz', 'ctx_rh_caudantcing.nii.gz',
        'ctx_rh_medorbfront.nii.gz', 'ctx_rh_rostantcing.nii.gz',
        'ctx_rh_superfront.nii.gz', 'ctx_rh_medpost.nii.gz',
        'ctx_rh_medoccipital.nii.gz', 'ctx_rh_fusiform.nii.gz',
        'ctx_rh_entorhinal.nii.gz', 'ctx_rh_parahippocampal.nii.gz',
        'rh_hpc.nii.gz', 'rh_amy.nii.gz', 'rh_nacc.nii.gz'
    ]
    hcp_pbX_wf.connect(datasource, 'aparcaseg', ctx_targ_mask, 'in_file')

    # Create a flirt node to apply inverse transform to seeds
    # Basically you convert the masks (seeds) that were in freesurfer space to the DWI space
    seedxfm_fs2dmri = pe.MapNode(fsl.FLIRT(),
                                 iterfield=['in_file'],
                                 name='seedxfm_fs2dmri')
    seedxfm_fs2dmri.inputs.apply_xfm = True
    seedxfm_fs2dmri.inputs.interp = 'nearestneighbour'
    hcp_pbX_wf.connect(thal_seed_mask, 'binary_file', seedxfm_fs2dmri,
                       'in_file')
    hcp_pbX_wf.connect(datasource, 'dmri_brain', seedxfm_fs2dmri, 'reference')
    hcp_pbX_wf.connect(invt_dmri2fs, 'out_file', seedxfm_fs2dmri,
                       'in_matrix_file')

    # Create a flirt node to apply inverse transform to targets
    # You do the same as the previous node, but to the target masks
    targxfm_fs2dmri = pe.MapNode(fsl.FLIRT(),
                                 iterfield=['in_file'],
                                 name='targxfm_fs2dmri')
    targxfm_fs2dmri.inputs.apply_xfm = True
    targxfm_fs2dmri.inputs.interp = 'nearestneighbour'
    hcp_pbX_wf.connect(ctx_targ_mask, 'binary_file', targxfm_fs2dmri,
                       'in_file')
    hcp_pbX_wf.connect(datasource, 'dmri_brain', targxfm_fs2dmri, 'reference')
    hcp_pbX_wf.connect(invt_dmri2fs, 'out_file', targxfm_fs2dmri,
                       'in_matrix_file')

    #Apply the inverse transform for the avoid masks from freesurfer to DWI space
    avoidmaskxfm_fs2dmri = pe.Node(fsl.FLIRT(), name='avoidmaskxfm_fs2dmri')
    avoidmaskxfm_fs2dmri.inputs.apply_xfm = True
    avoidmaskxfm_fs2dmri.inputs.interp = 'nearestneighbour'
    hcp_pbX_wf.connect(avoid_mask, 'binary_file', avoidmaskxfm_fs2dmri,
                       'in_file')
    hcp_pbX_wf.connect(datasource, 'dmri_brain', avoidmaskxfm_fs2dmri,
                       'reference')
    hcp_pbX_wf.connect(invt_dmri2fs, 'out_file', avoidmaskxfm_fs2dmri,
                       'in_matrix_file')

    # (commented out) Node to build the target mask lists for probtrackx2
    #make_targ_lists = pe.Node(util.Function(input_names=['in_files'],
    #                                        output_names='out_list',
    #                                        function=create_two_lists),
    #                          name='make_targ_lists')
    #hcp_pbX_wf.connect(targxfm_fs2dmri, 'out_file', make_targ_lists, 'in_files')

    #PROBTRACKX NODE
    pbx2 = pe.MapNode(
        fsl.ProbTrackX2(),
        iterfield=['seed',
                   'target_masks'],  #Should I have included avoid_mp here?
        name='pbx2')
    pbx2.inputs.c_thresh = 0.2
    pbx2.inputs.n_steps = 2000
    pbx2.inputs.step_length = 0.5
    pbx2.inputs.n_samples = 25000
    pbx2.inputs.opd = True
    pbx2.inputs.os2t = True
    pbx2.inputs.loop_check = True
    #pbx2.plugin_args = {'bsub_args': '-q PQ_madlab'}  # old (LSF) way; new SLURM way below
    pbx2.plugin_args = {
        'sbatch_args':
        ('-p IB_40C_1.5T --qos pq_madlab --account iacc_madlab -N 1 -n 6')
    }
    hcp_pbX_wf.connect(datasource, 'merged_thsamples', pbx2, 'thsamples')
    hcp_pbX_wf.connect(datasource, 'merged_phsamples', pbx2, 'phsamples')
    hcp_pbX_wf.connect(datasource, 'merged_fsamples', pbx2, 'fsamples')
    hcp_pbX_wf.connect(seedxfm_fs2dmri, 'out_file', pbx2, 'seed')
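    # Note: the ('out_file', hemispherize) tuple below is nipype's
    # connect-with-function syntax: the helper hemispherize (presumably defined
    # elsewhere in this script) is applied to out_file before the result is
    # passed to pbx2.target_masks.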
    hcp_pbX_wf.connect(targxfm_fs2dmri, ('out_file', hemispherize), pbx2,
                       'target_masks')
    #hcp_pbX_wf.connect(make_targ_lists, 'out_list', pbx2, 'target_masks')
    hcp_pbX_wf.connect(avoidmaskxfm_fs2dmri, 'out_file', pbx2, 'avoid_mp')
    hcp_pbX_wf.connect(datasource, 'mask', pbx2, 'mask')

    # Create a findthebiggest node to do hard segmentation between
    # seeds and targets
    # Basically, this segments the seed region based on the probtrackX outputs when classification targets are used.
    findthebiggest = pe.MapNode(fsl.FindTheBiggest(),
                                iterfield=['in_files'],
                                name='findthebiggest')
    hcp_pbX_wf.connect(pbx2, 'targets', findthebiggest, 'in_files')

    # Create a datasink node to save outputs.
    datasink = pe.Node(interface=nio.DataSink(), name='datasink')
    datasink.inputs.base_directory = os.path.abspath(sink_directory)
    datasink.inputs.container = subject_id + '/' + 'thal_seed'
    hcp_pbX_wf.connect(pbx2, 'log', datasink, 'hcpprobX.log')
    hcp_pbX_wf.connect(pbx2, 'fdt_paths', datasink, 'hcpprobX.fdt')
    hcp_pbX_wf.connect(pbx2, 'way_total', datasink, 'hcpprobX.waytotal')
    hcp_pbX_wf.connect(pbx2, 'targets', datasink, 'hcpprobX.targets')
    hcp_pbX_wf.connect(findthebiggest, 'out_file', datasink,
                       'hcpprobX.fbiggest.@biggestsegmentation')
    #hcp_pbX_wf.connect(thal_seed_mask, 'binary_file', datasink, 'hcpprobX.thal_mask')
    hcp_pbX_wf.connect(seedxfm_fs2dmri, 'out_file', datasink,
                       'hcpprobX.seed_masks')
    #from seed_xsfm(out_file) to datasink "seed_files"
    #do we need this - > emu_pbX_wf.connect(datasource, 'ref_b0', datasink, 'emuprobX.b0')
    #do we need this - > emu_pbX_wf.connect(thal_seed_mask, 'binary_file', datasink, 'emuprobX.thal_mask')

    return hcp_pbX_wf
Ejemplo n.º 27
0
def create_main_workflow_FS_segmentation():

    # check environment variables
    if not os.environ.get('FREESURFER_HOME'):
        raise RuntimeError('FREESURFER_HOME environment variable not set')

    if not os.environ.get('MNE_ROOT'):
        raise RuntimeError('MNE_ROOT environment variable not set')

    if not op.exists(sbj_dir):
        os.mkdir(sbj_dir)

    os.environ["SUBJECTS_DIR"] = "/home/karim/Documents/Blindsight/FSF"

    print('SUBJECTS_DIR %s ' % os.environ["SUBJECTS_DIR"])

    # (1) iterate over subjects to define paths with templates -> Infosource
    #     and DataGrabber
    #     Node: SubjectData - we use IdentityInterface to create our own node,
    #     to specify the list of subjects the pipeline should be executed on
    infosource = pe.Node(interface=IdentityInterface(fields=['subject_id']),
                         name="infosource")
    infosource.iterables = ('subject_id', subjects_list)

    # Grab data
    #   the template can be filled by other inputs
    #   Here we define an input field for datagrabber called subject_id.
    #   This is then used to set the template (see %s in the template).

    # we can look for DICOM files or .nii ones
    if is_nii:
        datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                       outfields=['struct']),
                             name='datasource')
        datasource.inputs.template = '%s.nii'  # '%s*.nii.gz'
        datasource.inputs.template_args = dict(struct=[['subject_id']])
    else:
        datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                       outfields=['dcm_file']),
                             name='datasource')
        datasource.inputs.template = '%s*/*.dcm'  # '%s*/*/*T1*1mm/*.dcm'
        datasource.inputs.template_args = dict(dcm_file=[['subject_id']])

    datasource.inputs.base_directory = MRI_path  # dir where the MRI files are
    datasource.inputs.sort_filelist = True

    # get the path of the first dicom file
    def get_first_file(dcm_files):
        return dcm_files[0]

    # return the path of the struct filename in the MRI sbj dir that will be
    # the  input of MRI convert routine
    def get_MRI_sbj_dir(dcm_file):
        from nipype.utils.filemanip import split_filename as split_f
        import os.path as op

        MRI_sbj_dir, basename, ext = split_f(dcm_file)
        struct_filename = op.join(MRI_sbj_dir, 'struct.nii.gz')
        return struct_filename

    get_firstfile = pe.Node(interface=Function(input_names=['dcm_files'],
                                               output_names=['dcm_file'],
                                               function=get_first_file),
                            name='get_firstfile')

    get_MRI_sbjdir = pe.Node(interface=Function(
        input_names=['dcm_file'],
        output_names=['struct_filename'],
        function=get_MRI_sbj_dir),
                             name='get_MRI_sbjdir')

    # MRI_convert Node
    # We use it if we don't have a .nii.gz file
    # The output of mriconvert is the input of recon-all
    mri_convert = pe.Node(interface=MRIConvert(),
                          infields=['in_file'],
                          outfields=['out_file'],
                          name='mri_convert')

    # (2) ReconAll Node to generate surfaces and parcellations of structural
    #     data from anatomical images of a subject.
    recon_all = pe.Node(interface=ReconAll(),
                        infields=['T1_files'],
                        name='recon_all')
    recon_all.inputs.subjects_dir = sbj_dir
    recon_all.inputs.directive = 'all'

    # reconall_workflow will be a node of the main workflow
    reconall_workflow = pe.Workflow(name=FS_WF_name)

    reconall_workflow.base_dir = MRI_path

    reconall_workflow.connect(infosource, 'subject_id', recon_all,
                              'subject_id')

    reconall_workflow.connect(infosource, 'subject_id', datasource,
                              'subject_id')

    if is_nii:
        reconall_workflow.connect(datasource, 'struct', recon_all, 'T1_files')
    else:
        reconall_workflow.connect(datasource, 'dcm_file', get_firstfile,
                                  'dcm_files')
        reconall_workflow.connect(get_firstfile, 'dcm_file', get_MRI_sbjdir,
                                  'dcm_file')

        reconall_workflow.connect(get_firstfile, 'dcm_file', mri_convert,
                                  'in_file')
        reconall_workflow.connect(get_MRI_sbjdir, 'struct_filename',
                                  mri_convert, 'out_file')

        reconall_workflow.connect(mri_convert, 'out_file', recon_all,
                                  'T1_files')

    # (3) BEM generation by the watershed algo of MNE C
    main_workflow = pe.Workflow(name=MAIN_WF_name)
    main_workflow.base_dir = sbj_dir

    # I mode: WatershedBEM Interface of nipype
    bem_generation = pe.Node(
        interface=WatershedBEM(),
        infields=['subject_id', 'subjects_dir', 'atlas_mode'],
        outfields=['mesh_files'],
        name='bem_generation')
    bem_generation.inputs.subjects_dir = sbj_dir
    bem_generation.inputs.atlas_mode = True

    main_workflow.connect(reconall_workflow, 'recon_all.subject_id',
                          bem_generation, 'subject_id')

    # II mode: make_watershed_bem of MNE Python package
    def mne_watershed_bem(sbj_dir, sbj_id):

        from mne.bem import make_watershed_bem

        print('call make_watershed_bem')
        make_watershed_bem(sbj_id, sbj_dir, overwrite=True)

    call_mne_watershed_bem = pe.Node(interface=Function(
        input_names=['sbj_dir', 'sbj_id'],
        output_names=['sbj_id'],
        function=mne_watershed_bem),
                                     name='call_mne_watershed_bem')

    # copy the generated meshes from bem/watershed to bem/ and change the names
    # according to MNE
    def copy_surfaces(sbj_id, mesh_files):
        import os
        import os.path as op
        from smri_params import sbj_dir
        from mne.report import Report

        report = Report()

        surf_names = [
            'brain_surface', 'inner_skull_surface', 'outer_skull_surface',
            'outer_skin_surface'
        ]
        new_surf_names = [
            'brain.surf', 'inner_skull.surf', 'outer_skull.surf',
            'outer_skin.surf'
        ]

        bem_dir = op.join(sbj_dir, sbj_id, 'bem')
        surface_dir = op.join(sbj_dir, sbj_id, 'bem/watershed')

        for i in range(len(surf_names)):
            os.system('cp %s %s' %
                      (op.join(surface_dir, sbj_id + '_' + surf_names[i]),
                       op.join(bem_dir, new_surf_names[i])))
            #op.join(bem_dir,sbj_id + '-' + new_surf_names[i])))

        report.add_bem_to_section(subject=sbj_id, subjects_dir=sbj_dir)
        report_filename = op.join(bem_dir, "BEM_report.html")
        print('*** REPORT file %s written ***' % report_filename)
        print(report_filename)
        report.save(report_filename, open_browser=False, overwrite=True)

        return sbj_id

    copy_bem_surf = pe.Node(interface=Function(
        input_names=['sbj_id', 'mesh_files'],
        output_names=['sbj_id'],
        function=copy_surfaces),
                            name='copy_bem_surf')

    main_workflow.connect(infosource, 'subject_id', copy_bem_surf, 'sbj_id')
    main_workflow.connect(bem_generation, 'mesh_files', copy_bem_surf,
                          'mesh_files')

    return main_workflow
Ejemplo n.º 28
0
subject_list = np.array(subject_list)

# Specify a subset of subjects through local indexing variable or use all:
if 'used_subj_idx' in locals():
    used_subj = subject_list[used_subj_idx]
else:
    used_subj = subject_list
''' IdentityInterface: The beginning of all workflows: '''
infosource = pe.Node(
    IdentityInterface(fields=['subject_id', 'visit', 'subtype']),
    name="infosource")
infosource.iterables = [('subject_id', used_subj), ('visit', visit_list)]
infosource.inputs.subtype = 'Resting_State'
''' Datagrabber node: '''
datag = pe.Node(nio.DataGrabber(infields=['subject_id', 'visit', 'subtype']),
                name='datag')
datag.inputs.base_directory = cooked_dir
datag.inputs.template = ('{0}/visit_{1}/{2}/'
                         '{0}_visit_{1}_{2}_S??????.nii.gz')
datag.inputs.sort_filelist = True
'''Load Resting State WorkFlow: '''
restingflow = rsfmri.create_resting_preproc(name='restingflow')
restingflow.inputs.inputspec.num_noise_components = 6
# In FSL, sigmas are given in volumes, not seconds. These are therefore the
# TR-dependent sigmas in volumes corresponding to TR-independent cutoffs in seconds:
restingflow.inputs.inputspec.highpass_sigma = 200 / (2 * TR)  # 0.01 Hz
restingflow.inputs.inputspec.lowpass_sigma = 12.5 / (2 * TR)  # 0.16 Hz
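# A hedged helper (not part of the original script) that reproduces the
# convention used in the two lines above, i.e. sigma_volumes = 1 / (cutoff_hz * TR);
# the function name and the example values are assumptions for illustration only.
def cutoff_hz_to_sigma_vols(cutoff_hz, tr):
    """Convert a temporal cutoff frequency (Hz) to an FSL-style sigma in volumes."""
    return 1.0 / (cutoff_hz * tr)

# e.g. cutoff_hz_to_sigma_vols(0.01, TR) == 200 / (2 * TR)   (highpass above)
#      cutoff_hz_to_sigma_vols(0.16, TR) == 12.5 / (2 * TR)  (lowpass above)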
'''Define the datasink: '''
datasink = pe.Node(nio.DataSink(base_directory=cooked_dir), name="datasink")
datasink.inputs.parameterization = False
Ejemplo n.º 29
0
def create_rs_qc(subjectlist):
    # main workflow for extended qc of diffusion/rsfmri data
    # fsl output type
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # some hard coded things
    fd_thres = 0.2
    tr = 2

    # Specify the location of the preprocessed data
    data_dir = "/data/pt_life/LIFE_fu/wd_preprocessing/hcp_prep_workflow/resting/"
    working_dir = "/data/pt_life/LIFE_fu/wd_preprocessing/"  #MODIFY
    freesurfer_dir = "/data/pt_life_freesurfer/freesurfer_all"

    qc = Workflow(name="qc")
    qc.base_dir = working_dir + '/'
    qc.config['execution']['crashdump_dir'] = qc.base_dir + "/crash_files"
    qc.config['execution']['hash_method'] = 'content'
    #first get all data needed
    identitynode = Node(util.IdentityInterface(fields=['subject']),
                        name='identitynode')
    identitynode.iterables = ('subject', subjectlist)

    info = dict(func=[[
        'transform_timeseries/', '_subject_', 'subj', '/merge/rest2anat.nii.gz'
    ]],
                dvars=[[
                    'transform_timeseries/', '_subject_', 'subj',
                    '/dvars/rest2anat_dvars.tsv'
                ]],
                motpars=[[
                    '/motion_correction/', '_subject_', 'subj',
                    '/mcflirt/rest_realigned.nii.gz.par'
                ]],
                brainmask=[[
                    'transform_timeseries/', '_subject_', 'subj',
                    '/resample_brain/T1_brain_mask_lowres.nii.gz'
                ]])

    ds_rs = Node(interface=nio.DataGrabber(
        infields=['subj'], outfields=['func', 'dvars', 'motpars',
                                      'brainmask']),
                 name='ds_rs')
    ds_rs.inputs.base_directory = data_dir
    ds_rs.inputs.template = '%s%s%s%s'
    ds_rs.inputs.template_args = info
    ds_rs.inputs.sort_filelist = True

    def juggle_subj(input_id):
        import pandas as pd
        from datetime import datetime as dt
        import os
        import random, string

        sic_pseudo = pd.read_csv(
            "/data/gh_gr_agingandobesity_share/life_shared/Data/Preprocessed/derivatives/pseudo_mrt_20201214.csv"
        )
        tmp = sic_pseudo.loc[sic_pseudo.sic == input_id, 'pseudonym']
        pseudo = tmp.values[0] + "_fu"
        return pseudo

    rename = Node(util.Function(input_names=['input_id'],
                                output_names=['output_id'],
                                function=juggle_subj),
                  name="rename")

    get_fs = Node(nio.FreeSurferSource(), name="get_fs")
    get_fs.inputs.subjects_dir = freesurfer_dir

    get_correct_aseg = Node(util.Function(input_names=['in_list'],
                                          output_names=['out_aseg'],
                                          function=get_aseg),
                            name="get_correct_aseg")

    convert = Node(fs.MRIConvert(), name="convert")
    convert.inputs.out_type = "niigz"

    downsample = Node(afni.Resample(resample_mode='NN',
                                    outputtype='NIFTI_GZ',
                                    out_file='aparcaseg_lowres.nii.gz'),
                      name='downsample')

    calc_fd_official = Node(FramewiseDisplacement(parameter_source='FSL'),
                            name='calc_fd_official')

    calc_fd = Node(util.Function(
        input_names=['realignment_parameters_file', 'parameter_source'],
        output_names=['FD_power', 'fn'],
        function=calc_frame_displacement),
                   name="calc_fd")
    calc_fd.inputs.parameter_source = 'FSL'

    outliers = Node(afni.OutlierCount(fraction=True, out_file='outliers.out'),
                    name='outliers',
                    mem_gb=1 * 2.5)

    bigplot = Node(util.Function(input_names=[
        'func', 'seg', 'tr', 'fd_thres', 'outliers', 'dvars', 'fd', 'subj',
        'outfile'
    ],
                                 output_names=['fn', 'dataframe'],
                                 function=make_the_plot),
                   name="bigplot")
    bigplot.inputs.tr = tr
    bigplot.inputs.fd_thres = fd_thres
    bigplot.inputs.outfile = "summary_fmriplot.png"

    fftplot = Node(util.Function(input_names=['fn_pd', 'tr'],
                                 output_names=['fn'],
                                 function=plot_fft),
                   name="fftplot")
    fftplot.inputs.tr = tr

    datasink = Node(name="datasink", interface=nio.DataSink())
    datasink.inputs.base_directory = "/data/pt_life_restingstate_followup/Results/QA"
    datasink.inputs.substitutions = [('_subject_', '')]

    qc.connect([
        (identitynode, rename, [('subject', 'input_id')]),
        (rename, get_fs, [('output_id', 'subject_id')]),
        (identitynode, ds_rs, [('subject', 'subj')]),
        (identitynode, bigplot, [('subject', 'subj')]),
        (get_fs, get_correct_aseg, [('aparc_aseg', 'in_list')]),
        (get_correct_aseg, convert, [('out_aseg', 'in_file')]),
        (convert, downsample, [('out_file', 'in_file')]),
        (ds_rs, downsample, [('func', 'master')]),
        (downsample, bigplot, [('out_file', 'seg')]),
        (ds_rs, calc_fd, [('motpars', 'realignment_parameters_file')]),
        (ds_rs, calc_fd_official, [('motpars', 'in_file')]),
        (ds_rs, bigplot, [('func', 'func')]),
        (ds_rs, bigplot, [('dvars', 'dvars')]),
        (calc_fd, bigplot, [('fn', 'fd')]),  #FD_power
        (ds_rs, outliers, [('func', 'in_file')]),
        (ds_rs, outliers, [('brainmask', 'mask')]),
        (outliers, bigplot, [('out_file', 'outliers')]),
        (bigplot, datasink, [('fn', 'detailedQA.@bigplot')]),
        (bigplot, fftplot, [('dataframe', 'fn_pd')]),
        (bigplot, datasink, [('dataframe', 'detailedQA.metrics.@dataframe')]),
        (fftplot, datasink, [('fn', 'detailedQA.@fftplot')]),
        (calc_fd, datasink, [('fn', 'detailedQA.metrics.@fd')]),
        (calc_fd_official, datasink, [('out_file',
                                       'detailedQA.metrics.@fd_official')])
    ])

    qc.run(plugin="MultiProc", plugin_args={"n_procs": 16, "non_daemon": True})

    return qc
Ejemplo n.º 30
0
### Define basic workflow here
dog_preproc_wf = pe.Workflow(name="dog_preproc_wf")
dog_preproc_wf.base_dir = working_dir

## DEFINE iterables for this workflow.. in this case I iterate through subjects
subject_infosource = pe.Node(
    interface=util.IdentityInterface(fields=["subject_id"]),
    name="subject_infosource")
subject_infosource.iterables = ("subject_id", SUBJECT_LIST)

## Building input list which should have the axial and sagittal images
## and also pointing to the auto-generated BET mask... I am double checking to make sure
## we didn't cut off too much tissue

dogscan_datasource = pe.Node(nio.DataGrabber(
    infields=['subject_id'], outfields=['axl_t2', 'sag_t2', 'axial_bet_mask']),
                             name="dog_axl_sag_datasource")

dogscan_datasource.inputs.base_directory = base_subject_directory
dogscan_datasource.inputs.sort_filelist = True
dogscan_datasource.inputs.template = '*'
dogscan_datasource.inputs.field_template = {
    'axl_t2': '%s/*/*axl-t2*.nii*',
    'sag_t2': '%s/*/sag-t2*.nii*',
    'axial_bet_mask': '%s/*/axial_mask/*.hdr'
}
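# Note: field_template entries override the generic template ('*') on a
# per-field basis; each %s placeholder is filled from the matching entry in
# template_args below (here, the subject_id for every field).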

dogscan_datasource.inputs.template_args = {
    'axl_t2': [['subject_id']],
    'sag_t2': [['subject_id']],
    'axial_bet_mask': [['subject_id']]