Example 1
from nipype.interfaces.utility import Function


def joinstrings(n_args=2):
    """Return a nipype Function interface that joins up to `n_args` parts
    and leaves the result in `out`.

    Parameters
    ----------
    n_args: int
        Number of `argX` parameters the function accepts.
        Example: if `n_args` == 2, the node will have `arg1` and `arg2` inputs.

    Returns
    -------
    fi: nipype.interfaces.utility.Function
    """
    arg_names = ['arg{}'.format(n) for n in range(1, n_args + 1)]

    func = '''def func({0}): import os; return os.path.join({0})'''.format(
        ', '.join(arg_names))
    fi = Function(input_names=arg_names, output_names=['out'])
    fi.inputs.function_str = func
    return fi
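A minimal usage sketch (not from the original source), assuming nipype is installed; the node name and path parts are illustrative:

import nipype.pipeline.engine as pe

# Wrap the generated interface in a Node and feed it path parts.
join_node = pe.Node(joinstrings(3), name='join_path')
join_node.inputs.arg1 = '/data'    # illustrative values
join_node.inputs.arg2 = 'sub-01'
join_node.inputs.arg3 = 'anat'
result = join_node.run()           # result.outputs.out -> '/data/sub-01/anat'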
Example 2
def create_melodic_workflow(name='melodic', template=None, varnorm=True):

    input_node = pe.Node(IdentityInterface(fields=['in_file']),
                         name='inputspec')

    output_node = pe.Node(IdentityInterface(fields=['out_dir']),
                          name='outputspec')

    if template is None:
        template = op.join(op.dirname(op.dirname(op.abspath(__file__))),
                           'data', 'fsf_templates', 'melodic_template.fsf')

    melodic4fix_node = pe.MapNode(interface=Melodic4fix,
                                  iterfield=['in_file', 'out_dir'],
                                  name='melodic4fix')

    # Don't know if this works. Could also set these defaults inside the
    # melodic4fix node definition...
    melodic4fix_node.inputs.template = template
    melodic4fix_node.inputs.varnorm = varnorm

    rename_ica = pe.MapNode(Function(input_names=['in_file'],
                                     output_names=['out_file'],
                                     function=extract_task),
                            name='rename_ica',
                            iterfield=['in_file'])

    mel4fix_workflow = pe.Workflow(name=name)

    mel4fix_workflow.connect(input_node, 'in_file', melodic4fix_node,
                             'in_file')

    mel4fix_workflow.connect(input_node, 'in_file', rename_ica, 'in_file')

    mel4fix_workflow.connect(rename_ica, 'out_file', melodic4fix_node,
                             'out_dir')

    mel4fix_workflow.connect(melodic4fix_node, 'out_dir', output_node,
                             'out_dir')

    return mel4fix_workflow
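A hedged sketch of driving this workflow, assuming Melodic4fix and extract_task are importable from the surrounding project; the paths are placeholders:

melodic_wf = create_melodic_workflow(name='melodic')
melodic_wf.base_dir = '/tmp/work'  # placeholder working directory
melodic_wf.inputs.inputspec.in_file = ['/data/sub-01/func.nii.gz']  # placeholder
melodic_wf.run()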
Example 3

def prepWorkflow(skipCount, outputType, name="prep"):
    preprocessing = pipe.Workflow(name=name)

    # Nodes
    # inputnode = pipe.Node(interface=IdentityInterface(fields=in_fields), name='inputs')
    format_out = ['modality', 'numberOfSlices', 'numberOfFiles', 'repetitionTime', 'sliceOrder']
    formatFMRINode = pipe.Node(interface=Function(function=utilities.formatFMRI,
                                                  input_names=['dicomDirectory'],
                                                  output_names=format_out),
                               name='formatFMRINode')

    to_3D_str = afninodes.to3dstrnode('strCreate')
    to_3D = afninodes.to3dnode('to_3D')
    refit = afninodes.refitnode('refit')
    despike = afninodes.despikenode(outputType, skipCount, 'despike')
    volreg = afninodes.volregnode(outputType, 'volreg')
    zeropad = afninodes.zeropadnode('zeropad')
    merge = afninodes.mergenode(outputType, 'merge')
    automask = afninodes.automasknode(outputType, 'automask')
    calc = afninodes.multiplynode(outputType, 'calc')

    def strToIntMinusOne(string):
        return int(string) - 1

    preprocessing.connect([(formatFMRINode, to_3D_str, [('numberOfSlices', 'slices'),
                                                        ('numberOfFiles', 'volumes'),
                                                        ('repetitionTime', 'repTime'),
                                                        ('sliceOrder', 'order')]),
                           (to_3D_str, to_3D,          [('funcparams', 'funcparams')]),
                           (formatFMRINode, despike,   [(('numberOfFiles', strToIntMinusOne), 'end')]),
                           (to_3D, refit,              [('out_file', 'in_file')]),    # 1a
                           (refit, despike,            [('out_file', 'in_file')]),    # 2
                           (despike, volreg,           [('out_file', 'in_file')]),    # 3
                           (volreg, zeropad,           [('out_file', 'in_file')]),    # 4
                           (zeropad, merge,            [('out_file', 'in_files')]),   # 5
                           (merge, automask,           [('out_file', 'in_file')]),    # 6
                           (merge, calc,               [('out_file', 'in_file_a')]),  # 7
                           (automask, calc,            [('out_file', 'in_file_b')]),  # 8
                           ])
    return preprocessing
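The `(('numberOfFiles', strToIntMinusOne), 'end')` tuple above uses nipype's connection-modifier form: a `('field', callable)` pair applies the callable to the value in transit. A self-contained sketch of the same pattern (node names are illustrative):

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function, IdentityInterface

def minus_one(value):
    return int(value) - 1

def echo(end):
    return end

src = pe.Node(IdentityInterface(fields=['count']), name='src')
src.inputs.count = '10'
dst = pe.Node(Function(input_names=['end'], output_names=['out'], function=echo),
              name='dst')

wf = pe.Workflow(name='modifier_demo')
# minus_one runs on the value as it flows from 'count' to 'end'.
wf.connect([(src, dst, [(('count', minus_one), 'end')])])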
Example 4
def write_reconall_log_summary(caps_dir, subjects_visits_tsv):
    """
        This func is to write the recon_all.log summary for all the subjects,
        the first step quality check

    Args: caps_dir: CAPS directory subjects_visits_tsv: tsv contains all the
    particiapnt_id and session_id

    Returns:

    """
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import pandas as pd
    from clinica.pipelines.t1_freesurfer_cross_sectional.t1_freesurfer_cross_sectional_utils import log_summary

    # get the list for subject_ids
    subjects_visits = pd.io.parsers.read_csv(subjects_visits_tsv, sep='\t')
    if ((list(subjects_visits.columns.values)[0] != 'participant_id')
            or (list(subjects_visits.columns.values)[1] != 'session_id')):
        raise Exception(
            'Subjects and visits file is not in the correct format.')
    subject_list = list(subjects_visits.participant_id)
    session_list = list(subjects_visits.session_id)
    subject_id = list(subject_list[i] + '_' + session_list[i]
                      for i in range(len(subject_list)))

    lognode = pe.Node(name='lognode',
                      interface=Function(input_names=[
                          'subject_list', 'session_list', 'subject_id',
                          'output_dir'
                      ],
                                         output_names=[],
                                         function=log_summary))
    lognode.inputs.subject_list = subject_list
    lognode.inputs.session_list = session_list
    lognode.inputs.subject_id = subject_id
    lognode.inputs.output_dir = caps_dir

    return lognode
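A hypothetical call, assuming a clinica CAPS layout; both paths are placeholders:

lognode = write_reconall_log_summary('/data/caps', '/data/subjects_sessions.tsv')
lognode.run()  # executes log_summary once over all listed subjects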
Example 5
def CreateBrainstemWorkflow(WFname, CLUSTER_QUEUE, outputFilename):
    """
    this function...
    :param WFname:
    :param CLUSTER_QUEUE:
    :param outputFilename:
    :return: brainstemWF
    """
    brainstemWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(interface=IdentityInterface(
        fields=['inputTissueLabelFilename', 'inputLandmarkFilename']),
                         run_without_submitting=True,
                         name='inputspec')
    outputSpec = pe.Node(
        interface=IdentityInterface(fields=['ouputTissuelLabelFilename']),
        run_without_submitting=True,
        name='outputspec')

    generateBrainStemNode = pe.Node(Function(
        function=brainStem,
        input_names=[
            'tissueLabelFilename', 'landmarkFilename', 'brainStemFilename',
            'ouputTissuelLabelFilename'
        ],
        output_names=['ouputTissuelLabelFilename']),
                                    run_without_submitting=False,
                                    name='brainStem')

    brainstemWF.connect(inputsSpec, 'inputTissueLabelFilename',
                        generateBrainStemNode, 'tissueLabelFilename')
    brainstemWF.connect(inputsSpec, 'inputLandmarkFilename',
                        generateBrainStemNode, 'landmarkFilename')
    generateBrainStemNode.inputs.brainStemFilename = outputFilename + "_brainStem.nii.gz"
    generateBrainStemNode.inputs.ouputTissuelLabelFilename = outputFilename

    brainstemWF.connect(generateBrainStemNode, 'ouputTissuelLabelFilename',
                        outputSpec, 'ouputTissuelLabelFilename')

    return brainstemWF
Example 6
def init_anat_mask_prep_wf(csv_labels, name='anat_prep_mask_wf'):
    '''
    This workflow takes the label output from pydpiper for each subject and
    session and computes WM and CSF masks, along with eroded versions of
    each, according to the label definitions in `csv_labels`.
    '''

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(
        niu.IdentityInterface(fields=["subject_id", "session", 'labels']),
        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['WM_mask', 'CSF_mask', 'eroded_WM_mask', 'eroded_CSF_mask']),
                         name='outputnode')

    compute_anat_masks = pe.Node(Function(
        input_names=['atlas', 'csv_labels', "subject_id", "session"],
        output_names=[
            'WM_mask_file', 'CSF_mask_file', 'eroded_WM_mask_file',
            'eroded_CSF_mask_file'
        ],
        function=compute_masks),
                                 name='compute_anat_masks')
    compute_anat_masks.inputs.csv_labels = csv_labels

    workflow.connect([
        (inputnode, compute_anat_masks, [
            ("labels", "atlas"),
            ("session", "session"),
            ("subject_id", "subject_id"),
        ]),
        (compute_anat_masks, outputnode, [
            ("WM_mask_file", "WM_mask"),
            ("eroded_WM_mask_file", "eroded_WM_mask"),
            ("CSF_mask_file", "CSF_mask"),
            ("eroded_CSF_mask_file", "eroded_CSF_mask"),
        ]),
    ])

    return workflow
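A sketch of instantiating the workflow; the CSV and label-file paths are placeholders:

mask_wf = init_anat_mask_prep_wf('/data/atlas_labels.csv')
mask_wf.inputs.inputnode.subject_id = 'sub-01'
mask_wf.inputs.inputnode.session = 'ses-1'
mask_wf.inputs.inputnode.labels = '/data/sub-01_ses-1_labels.nii.gz'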
Example 7
def infosrc(fif_files):
    '''Create the input nodes.

    Use wildcards to run computations on multiple files; to check yourself,
    it is a good idea to run an ls command first, like this:

    $ ls ./*/*.fif

    $ neuropype input ./*/*.fif

    '''

    from os.path import abspath, split
    from os.path import commonprefix as cprfx
    from nipype.interfaces.utility import IdentityInterface, Function

    fif_files = [abspath(f) for f in fif_files]

    common_prefix = split(cprfx(fif_files))[0] + '/'
    iter_mapping = dict()
    for fif_file in fif_files:
        new_base = fif_file.replace(common_prefix, '')
        new_base = new_base.replace('/', '__')
        new_base = new_base.replace('.', '-')
        iter_mapping[new_base] = fif_file

    infosource = pe.Node(interface=IdentityInterface(fields=['keys']),
                         name='infosource')

    path_node = pe.Node(interface=Function(input_names=['key', 'iter_mapping'],
                                           output_names=['path'],
                                           function=map_path),
                        name='path_node')

    infosource.iterables = [('keys', list(iter_mapping.keys()))]
    path_node.inputs.iter_mapping = iter_mapping
    return infosource, path_node
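A sketch wiring the two returned nodes together so that each iterated key is resolved back to its .fif path by the external map_path function; the file paths are illustrative:

import nipype.pipeline.engine as pe

infosource, path_node = infosrc(['./sub1/rest.fif', './sub2/rest.fif'])
wf = pe.Workflow(name='input_demo')
wf.connect(infosource, 'keys', path_node, 'key')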
Example 8
def write_volumetric_per_subject(caps_dir, subjects_visits_tsv):
    """
        This func is to write the volumetric measurement after recon-all
        pipelines for each subjects in the subjects_visits_tsv

    Args: caps_dir: CAPS directory subjects_visits_tsv: tsv contains all the
    particiapnt_id and session_id

    Returns:

    """
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import pandas as pd
    from clinica.pipelines.t1_freesurfer_cross_sectional.t1_freesurfer_cross_sectional_utils import write_statistics_per_subject

    # get the list for subject_ids
    subjects_visits = pd.io.parsers.read_csv(subjects_visits_tsv, sep='\t')
    if (list(subjects_visits.columns.values)[0] != 'participant_id') or (list(
            subjects_visits.columns.values)[1] != 'session_id'):
        raise Exception(
            'Subjects and visits file is not in the correct format.')
    subject_list = list(subjects_visits.participant_id)
    session_list = list(subjects_visits.session_id)
    subject_id = list(subject_list[i] + '_' + session_list[i]
                      for i in range(len(subject_list)))

    fs_tsv_subject = pe.MapNode(name='volumetric_summary_node',
                                iterfield=['subject_id'],
                                interface=Function(
                                    input_names=['subject_id', 'output_dir'],
                                    output_names=[],
                                    function=write_statistics_per_subject,
                                    imports=['import os', 'import errno']))
    fs_tsv_subject.inputs.subject_id = subject_id
    fs_tsv_subject.inputs.output_dir = caps_dir

    return fs_tsv_subject
Example 9
def test_serial_input(tmpdir):
    import os

    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1

    n1 = MapNode(
        Function(input_names=["in1"], output_names=["out"], function=func1),
        iterfield=["in1"],
        name="n1",
    )
    n1.inputs.in1 = [1, 2, 3]

    w1 = Workflow(name="test")
    w1.base_dir = wd
    w1.add_nodes([n1])
    # set local check
    w1.config["execution"] = {
        "stop_on_first_crash": "true",
        "local_hash_check": "true",
        "crashdump_dir": wd,
        "poll_sleep_duration": 2,
    }

    # test output of num_subnodes method when serial is default (False)
    assert n1.num_subnodes() == len(n1.inputs.in1)

    # test running the workflow on default conditions
    w1.run(plugin="MultiProc")

    # test output of num_subnodes method when serial is True
    n1._serial = True
    assert n1.num_subnodes() == 1

    # test running the workflow on serial conditions
    w1.run(plugin="MultiProc")
Example 10
def test_mapnode_json(tmpdir):
    """Tests that mapnodes don't generate excess jsons
    """
    tmpdir.chdir()
    wd = os.getcwd()
    from nipype import MapNode, Function, Workflow

    def func1(in1):
        return in1 + 1

    n1 = MapNode(Function(input_names=['in1'],
                          output_names=['out'],
                          function=func1),
                 iterfield=['in1'],
                 name='n1')
    n1.inputs.in1 = [1]
    w1 = Workflow(name='test')
    w1.base_dir = wd
    w1.config['execution']['crashdump_dir'] = wd
    w1.add_nodes([n1])
    w1.run()
    n1.inputs.in1 = [2]
    w1.run()
    # should rerun
    n1.inputs.in1 = [1]
    eg = w1.run()

    node = list(eg.nodes())[0]
    outjson = glob(os.path.join(node.output_dir(), '_0x*.json'))
    assert len(outjson) == 1

    # check that multiple json's don't trigger rerun
    with open(os.path.join(node.output_dir(), 'test.json'), 'wt') as fp:
        fp.write('dummy file')
    w1.config['execution'].update(**{'stop_on_first_rerun': True})

    w1.run()
Example 11
def cbf_report_initialize():
    cbf_report = pe.Workflow(name='cbf_report')
    cbf_report.base_dir = methods_dir

    cbf_report_inputnode = pe.Node(interface=util.IdentityInterface(
        fields=['positive_mean_pwi_filename', 't1w']),
                                   name='cbf_report_inputnode')

    cbf_report_node = pe.Node(name='cbf_microgl',
                              interface=Function(
                                  input_names=['mean_pwi_filename'],
                                  output_names=['png_pwi_filename'],
                                  function=cbf_microgl))

    cbf_report_datasink = pe.Node(nio.DataSink(), name='cbf_report_sinker')
    cbf_report_datasink.inputs.base_directory = results_dir

    cbf_report_outputnode = pe.Node(
        interface=util.IdentityInterface(fields=['png_pwi_filename']),
        name='cbf_report_outputnode')

    #
    cbf_report.connect([
        (cbf_report_inputnode, cbf_report_node, [('positive_mean_pwi_filename',
                                                  'mean_pwi_filename')]),

        # Datasink Node
        (cbf_report_node, cbf_report_datasink,
         [('png_pwi_filename', 'results.@png_pwi_filename')]),

        # Output Node
        (cbf_report_node, cbf_report_outputnode, [('png_pwi_filename',
                                                   'png_pwi_filename')]),
    ])

    return cbf_report
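The function reads the module-level globals methods_dir and results_dir, so a caller defined in the same script must set them first; a minimal sketch with placeholder paths:

methods_dir = '/scratch/methods'  # placeholder
results_dir = '/results'          # placeholder

cbf_wf = cbf_report_initialize()
cbf_wf.inputs.cbf_report_inputnode.positive_mean_pwi_filename = '/data/mean_pwi.nii.gz'
cbf_wf.run()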
Example 12
def init_bold_stc_wf(opts, name='bold_stc_wf'):

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['bold_file']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['stc_file']),
                         name='outputnode')

    if opts.apply_STC:
        slice_timing_correction_node = pe.Node(
            Function(
                input_names=['in_file', 'tr', 'tpattern', 'rabies_data_type'],
                output_names=['out_file'],
                function=slice_timing_correction),
            name='slice_timing_correction',
            mem_gb=1.5 * opts.scale_min_memory)
        slice_timing_correction_node.inputs.tr = opts.TR
        slice_timing_correction_node.inputs.tpattern = opts.tpattern
        slice_timing_correction_node.inputs.rabies_data_type = opts.data_type
        slice_timing_correction_node.plugin_args = {
            'qsub_args': f'-pe smp {str(3*opts.min_proc)}',
            'overwrite': True
        }

        workflow.connect([
            (inputnode, slice_timing_correction_node, [('bold_file', 'in_file')]),
            (slice_timing_correction_node, outputnode, [('out_file', 'stc_file')]),
        ])
    else:
        workflow.connect([
            (inputnode, outputnode, [('bold_file', 'stc_file')]),
        ])

    return workflow
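An illustrative invocation with a stand-in options object; the attribute names are the ones read above, while the values are placeholders:

from types import SimpleNamespace

opts = SimpleNamespace(apply_STC=True, TR='1.0s', tpattern='alt-z',
                       data_type='float32', scale_min_memory=1.0, min_proc=1)
stc_wf = init_bold_stc_wf(opts)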
Example 13
def create_all_calcarine_reward_2_h5_workflow(
        analysis_info, name='all_calcarine_reward_nii_2_h5'):
    import os.path as op
    import tempfile
    import nipype.pipeline as pe
    from nipype.interfaces import fsl
    from nipype.interfaces.utility import Function, Merge, IdentityInterface
    from spynoza.nodes.utils import get_scaninfo, dyns_min_1, topup_scan_params, apply_scan_params
    from nipype.interfaces.io import SelectFiles, DataSink

    # Import custom nodes from the spynoza package; assumes that spynoza is installed:
    # pip install git+https://github.com/spinoza-centre/spynoza.git@develop
    from utils.utils import mask_nii_2_hdf5, combine_eye_hdfs_to_nii_hdf

    input_node = pe.Node(
        IdentityInterface(fields=['sub_id', 'preprocessed_data_dir']),
        name='inputspec')

    # i/o node
    datasource_templates = dict(mcf='{sub_id}/mcf/*.nii.gz',
                                psc='{sub_id}/psc/*.nii.gz',
                                tf='{sub_id}/tf/*.nii.gz',
                                GLM='{sub_id}/GLM/*.nii.gz',
                                eye='{sub_id}/eye/h5/*.h5',
                                rois='{sub_id}/roi/*_vol.nii.gz')
    datasource = pe.Node(SelectFiles(datasource_templates,
                                     sort_filelist=True,
                                     raise_on_empty=False),
                         name='datasource')

    hdf5_psc_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                              name='hdf5_psc_masker')
    hdf5_psc_masker.inputs.folder_alias = 'psc'
    hdf5_psc_masker.inputs.hdf5_file = op.join(tempfile.mkdtemp(), 'roi.h5')

    hdf5_tf_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                             name='hdf5_tf_masker')
    hdf5_tf_masker.inputs.folder_alias = 'tf'

    hdf5_mcf_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                              name='hdf5_mcf_masker')
    hdf5_mcf_masker.inputs.folder_alias = 'mcf'

    hdf5_GLM_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                              name='hdf5_GLM_masker')
    hdf5_GLM_masker.inputs.folder_alias = 'GLM'

    eye_hdfs_to_nii_masker = pe.Node(Function(
        input_names=['nii_hdf5_file', 'eye_hdf_filelist', 'new_alias'],
        output_names=['nii_hdf5_file'],
        function=combine_eye_hdfs_to_nii_hdf),
                                     name='eye_hdfs_to_nii_masker')
    eye_hdfs_to_nii_masker.inputs.new_alias = 'eye'

    # node for datasinking
    datasink = pe.Node(DataSink(), name='sinker')
    datasink.inputs.parameterization = False

    all_calcarine_reward_nii_2_h5_workflow = pe.Workflow(name=name)

    all_calcarine_reward_nii_2_h5_workflow.connect(input_node,
                                                   'preprocessed_data_dir',
                                                   datasink, 'base_directory')
    all_calcarine_reward_nii_2_h5_workflow.connect(input_node, 'sub_id',
                                                   datasink, 'container')

    all_calcarine_reward_nii_2_h5_workflow.connect(input_node,
                                                   'preprocessed_data_dir',
                                                   datasource,
                                                   'base_directory')
    all_calcarine_reward_nii_2_h5_workflow.connect(input_node, 'sub_id',
                                                   datasource, 'sub_id')

    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'psc',
                                                   hdf5_psc_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_psc_masker,
                                                   'mask_files')

    # the hdf5_file is created by the psc node, and then passed from masker to masker on into the datasink.
    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_psc_masker,
                                                   'hdf5_file', hdf5_tf_masker,
                                                   'hdf5_file')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'tf',
                                                   hdf5_tf_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_tf_masker,
                                                   'mask_files')

    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_tf_masker, 'hdf5_file',
                                                   hdf5_mcf_masker,
                                                   'hdf5_file')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'mcf',
                                                   hdf5_mcf_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_mcf_masker,
                                                   'mask_files')

    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'GLM',
                                                   hdf5_GLM_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_GLM_masker,
                                                   'mask_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_mcf_masker,
                                                   'hdf5_file',
                                                   hdf5_GLM_masker,
                                                   'hdf5_file')

    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_GLM_masker,
                                                   'hdf5_file',
                                                   eye_hdfs_to_nii_masker,
                                                   'nii_hdf5_file')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'eye',
                                                   eye_hdfs_to_nii_masker,
                                                   'eye_hdf_filelist')

    all_calcarine_reward_nii_2_h5_workflow.connect(eye_hdfs_to_nii_masker,
                                                   'nii_hdf5_file', datasink,
                                                   'h5')

    return all_calcarine_reward_nii_2_h5_workflow
Example 14
def upscale(name='upscale'):
    """Scale the voxel dimensions of the input image by a factor of 10."""

    upscaler = pe.Workflow(name=name)

    """
    Set up a node to define all inputs required for the preprocessing workflow
    """

    inputnode = pe.Node(interface=util.IdentityInterface(fields=['in_file'],
                                                         mandatory_inputs=True),
                        name='inputspec')

    """
    Set up a node to define outputs for the preprocessing workflow
    """

    outputnode = pe.Node(interface=util.IdentityInterface(fields=['out_file'],
                                                          mandatory_inputs=True),
                         name='outputspec')

    """
    Set up a node to read voxel size
    """

    getdim = pe.MapNode(interface=myfsl.utils.ImageInfo(),
                        name='get_dim',
                        iterfield=['in_file'])

    """
    Multiply voxel size by 10
    """

    mult10x = pe.MapNode(name='mult10x',
                         interface=Function(input_names=['in_val'],
                                            output_names=['out_val'],
                                            function=mult10),
                         iterfield=['in_val'])

    mult10y = pe.MapNode(name='mult10y',
                         interface=Function(input_names=['in_val'],
                                            output_names=['out_val'],
                                            function=mult10),
                         iterfield=['in_val'])

    mult10z = pe.MapNode(name='mult10z',
                         interface=Function(input_names=['in_val'],
                                            output_names=['out_val'],
                                            function=mult10),
                         iterfield=['in_val'])

    """
    Set up a node to change voxel size
    """

    changedim = pe.MapNode(myfsl.utils.ChangePixDim(),
                           name='upscale',
                           iterfield=['in_file', 'xdim', 'ydim', 'zdim'])

    upscaler.connect(inputnode, 'in_file', getdim, 'in_file')
    upscaler.connect(inputnode, 'in_file', changedim, 'in_file')

    upscaler.connect(getdim, 'out_pixdim1', mult10x, 'in_val')
    upscaler.connect(getdim, 'out_pixdim2', mult10y, 'in_val')
    upscaler.connect(getdim, 'out_pixdim3', mult10z, 'in_val')

    upscaler.connect(mult10x, 'out_val', changedim, 'xdim')
    upscaler.connect(mult10y, 'out_val', changedim, 'ydim')
    upscaler.connect(mult10z, 'out_val', changedim, 'zdim')
    upscaler.connect(changedim, 'out_file', outputnode, 'out_file')

    return upscaler
Example 15
def ratbet(name='bet', swapscale=True):
    """
    Rat brain extraction (two-pass FSL BET) workflow.

    swapscale: do upscaling and reorientation
    """

    bet = pe.Workflow(name=name)

    """
    Set up a node to define all inputs required for the preprocessing workflow
    """

    inputnode = pe.Node(interface=util.IdentityInterface(fields=['in_file'],
                                                         mandatory_inputs=True),
                        name='inputspec')

    """
    Set up a node to define outputs for the preprocessing workflow
    """

    outputnode = pe.Node(
        interface=util.IdentityInterface(fields=['out_brain',
                                                 'out_brain_mask',
                                                 'out_head'],
                                         mandatory_inputs=True),
        name='outputspec')

    # swapper
    reorient = pe.MapNode(
        interface=fsl.utils.SwapDimensions(new_dims=("RL", "AP", "IS")),
        name='reorient',
        iterfield=['in_file'])

    # upscale
    upscaler = upscale()

    # scale y
    getdim = pe.MapNode(interface=myfsl.utils.ImageInfo(),
                        name='get_dim',
                        iterfield=['in_file'])
    div = pe.MapNode(interface=Function(input_names=['in_val'],
                                        output_names=['out_val'],
                                        function=div2),
                     name='divDim',
                     iterfield=['in_val'])
    scale_y = pe.MapNode(myfsl.utils.ChangePixDim(),
                         name='scale_y',
                         iterfield=['in_file', 'xdim', 'ydim', 'zdim'])

    # bet
    fslbet = pe.MapNode(interface=fsl.BET(frac=0.6, vertical_gradient=0),
                        name='bet',
                        iterfield=['in_file'])
    fslbet2 = pe.MapNode(interface=fsl.BET(frac=0.35, vertical_gradient=0.25,
                                           mask=True),
                         name='bet2',
                         iterfield=['in_file'])  # frac=0.3

    # descale y
    rescale_y = pe.MapNode(myfsl.utils.ChangePixDim(),
                           name='rescale_y',
                           iterfield=['in_file', 'xdim', 'ydim', 'zdim'])
    rescale_y_mask = pe.MapNode(
        myfsl.utils.ChangePixDim(),
        name='rescale_y_mask',
        iterfield=['in_file', 'xdim', 'ydim', 'zdim'])  # TODO: no new node, 'iterable' instead?

    if swapscale:
        bet.connect(inputnode, 'in_file', reorient, 'in_file')
        bet.connect(reorient, 'out_file', upscaler, 'inputspec.in_file')
        bet.connect(upscaler, 'outputspec.out_file', getdim, 'in_file')
        bet.connect(reorient, 'out_file', scale_y, 'in_file')
        bet.connect(getdim, 'out_pixdim1', scale_y, 'xdim')
        bet.connect(getdim, 'out_pixdim2', div, 'in_val')
        bet.connect(div, 'out_val', scale_y, 'ydim')
        bet.connect(getdim, 'out_pixdim3', scale_y, 'zdim')
    else:
        bet.connect(inputnode, 'in_file', getdim, 'in_file')
        bet.connect(inputnode, 'in_file', scale_y, 'in_file')

    bet.connect(scale_y, 'out_file', fslbet, 'in_file')
    bet.connect(fslbet, 'out_file', fslbet2, 'in_file')

    bet.connect(fslbet2, 'out_file', rescale_y, 'in_file')
    bet.connect(getdim, 'out_pixdim1', rescale_y, 'xdim')
    bet.connect(getdim, 'out_pixdim2', rescale_y, 'ydim')
    bet.connect(getdim, 'out_pixdim3', rescale_y, 'zdim')

    bet.connect(fslbet2, 'mask_file', rescale_y_mask, 'in_file')
    bet.connect(getdim, 'out_pixdim1', rescale_y_mask, 'xdim')
    bet.connect(getdim, 'out_pixdim2', rescale_y_mask, 'ydim')
    bet.connect(getdim, 'out_pixdim3', rescale_y_mask, 'zdim')

    bet.connect(rescale_y, 'out_file', outputnode, 'out_brain')
    bet.connect(rescale_y_mask, 'out_file', outputnode, 'out_brain_mask')
    bet.connect(upscaler, 'outputspec.out_file', outputnode, 'out_head')

    return bet
Example 16
def runMainWorkflow(DWI_scan, T2_scan, labelMap_image, BASE_DIR, dataSink_DIR):
    print("Running the workflow ...")

    sessionID = os.path.basename(os.path.dirname(DWI_scan))
    subjectID = os.path.basename(os.path.dirname(os.path.dirname(DWI_scan)))
    siteID = os.path.basename(
        os.path.dirname(os.path.dirname(os.path.dirname(DWI_scan))))

    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
    ####### Workflow ###################
    WFname = 'DWIWorkflow_CACHE_' + sessionID
    DWIWorkflow = pe.Workflow(name=WFname)
    DWIWorkflow.base_dir = BASE_DIR

    inputsSpec = pe.Node(interface=IdentityInterface(
        fields=['T2Volume', 'DWIVolume', 'LabelMapVolume']),
                         name='inputspec')

    inputsSpec.inputs.DWIVolume = DWI_scan
    inputsSpec.inputs.T2Volume = T2_scan
    inputsSpec.inputs.LabelMapVolume = labelMap_image

    outputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'CorrectedDWI', 'CorrectedDWI_in_T2Space', 'tensor_image',
        'DWIBrainMask', 'FAImage', 'MDImage', 'RDImage', 'FrobeniusNormImage',
        'Lambda1Image', 'Lambda2Image', 'Lambda3Image', 'ukfTracks',
        'ukf2ndTracks'
    ]),
                          name='outputsSpec')

    # Step0: remove the skull from the T2 volume
    ExtractBRAINFromHeadNode = pe.Node(interface=Function(
        function=ExtractBRAINFromHead,
        input_names=['RawScan', 'BrainLabels'],
        output_names=['outputVolume']),
                                       name="ExtractBRAINFromHead")

    DWIWorkflow.connect(inputsSpec, 'T2Volume', ExtractBRAINFromHeadNode,
                        'RawScan')
    DWIWorkflow.connect(inputsSpec, 'LabelMapVolume', ExtractBRAINFromHeadNode,
                        'BrainLabels')

    # Step1: extract B0 from DWI volume
    EXTRACT_B0 = pe.Node(interface=extractNrrdVectorIndex(), name="EXTRACT_B0")
    EXTRACT_B0.inputs.vectorIndex = 0
    EXTRACT_B0.inputs.outputVolume = 'B0_Image.nrrd'
    DWIWorkflow.connect(inputsSpec, 'DWIVolume', EXTRACT_B0, 'inputVolume')

    # Step2: Register T2 to B0 space using BRAINSFit
    BFit_T2toB0 = pe.Node(interface=BRAINSFit(), name="BFit_T2toB0")
    BFit_T2toB0.inputs.costMetric = "MMI"
    BFit_T2toB0.inputs.numberOfSamples = 100000
    BFit_T2toB0.inputs.numberOfIterations = [1500]
    BFit_T2toB0.inputs.numberOfHistogramBins = 50
    BFit_T2toB0.inputs.maximumStepLength = 0.2
    BFit_T2toB0.inputs.minimumStepLength = [0.00005]
    BFit_T2toB0.inputs.useRigid = True
    BFit_T2toB0.inputs.useAffine = True
    BFit_T2toB0.inputs.maskInferiorCutOffFromCenter = 65
    BFit_T2toB0.inputs.maskProcessingMode = "ROIAUTO"
    BFit_T2toB0.inputs.ROIAutoDilateSize = 13
    BFit_T2toB0.inputs.backgroundFillValue = 0.0
    BFit_T2toB0.inputs.initializeTransformMode = 'useCenterOfHeadAlign'
    BFit_T2toB0.inputs.strippedOutputTransform = "T2ToB0_RigidTransform.h5"
    BFit_T2toB0.inputs.writeOutputTransformInFloat = True
    DWIWorkflow.connect(EXTRACT_B0, 'outputVolume', BFit_T2toB0, 'fixedVolume')
    DWIWorkflow.connect(ExtractBRAINFromHeadNode, 'outputVolume', BFit_T2toB0,
                        'movingVolume')

    # Step3: Use T_rigid to "resample" T2 and label map images to B0 image space
    MakeResamplerInFilesListNode = pe.Node(Function(
        function=MakeResamplerInFileList,
        input_names=['inputT2', 'inputLabelMap'],
        output_names=['imagesList']),
                                           name="MakeResamplerInFilesListNode")

    DWIWorkflow.connect([(ExtractBRAINFromHeadNode,
                          MakeResamplerInFilesListNode, [('outputVolume',
                                                          'inputT2')]),
                         (inputsSpec, MakeResamplerInFilesListNode,
                          [('LabelMapVolume', 'inputLabelMap')])])

    ResampleToB0Space = pe.MapNode(
        interface=BRAINSResample(),
        name="ResampleToB0Space",
        iterfield=['inputVolume', 'pixelType', 'outputVolume'])
    ResampleToB0Space.inputs.interpolationMode = 'Linear'
    ResampleToB0Space.inputs.outputVolume = [
        'T2toB0.nrrd', 'BRAINMaskToB0.nrrd'
    ]
    ResampleToB0Space.inputs.pixelType = ['ushort', 'binary']
    DWIWorkflow.connect(BFit_T2toB0, 'strippedOutputTransform',
                        ResampleToB0Space, 'warpTransform')
    DWIWorkflow.connect(EXTRACT_B0, 'outputVolume', ResampleToB0Space,
                        'referenceVolume')
    DWIWorkflow.connect(MakeResamplerInFilesListNode, 'imagesList',
                        ResampleToB0Space, 'inputVolume')

    # Step4: Create registration mask from resampled label map image
    CreateRegistrationMask = pe.Node(interface=Function(
        function=CreateAntsRegistrationMask,
        input_names=['brainMask'],
        output_names=['registrationMask']),
                                     name="CreateAntsRegistrationMask")

    DWIWorkflow.connect(ResampleToB0Space, ('outputVolume', pickFromList, 1),
                        CreateRegistrationMask, 'brainMask')

    # Step5: Save direction cosine for the resampled T2 image
    SaveDirectionCosineToMatrixNode = pe.Node(
        interface=Function(function=SaveDirectionCosineToMatrix,
                           input_names=['inputVolume'],
                           output_names=['directionCosine']),
        name="SaveDirectionCosineToMatrix")

    DWIWorkflow.connect(ResampleToB0Space, ('outputVolume', pickFromList, 0),
                        SaveDirectionCosineToMatrixNode, 'inputVolume')

    # Step6: Force DC to ID
    MakeForceDCFilesListNode = pe.Node(Function(
        function=MakeForceDCFilesList,
        input_names=['inputB0', 'inputT2', 'inputLabelMap'],
        output_names=['imagesList']),
                                       name="MakeForceDCFilesListNode")

    DWIWorkflow.connect([(EXTRACT_B0, MakeForceDCFilesListNode,
                          [('outputVolume', 'inputB0')]),
                         (ResampleToB0Space, MakeForceDCFilesListNode,
                          [(('outputVolume', pickFromList, 0), 'inputT2')]),
                         (CreateRegistrationMask, MakeForceDCFilesListNode,
                          [('registrationMask', 'inputLabelMap')])])

    ForceDCtoIDNode = pe.MapNode(interface=Function(
        function=ForceDCtoID,
        input_names=['inputVolume'],
        output_names=['outputVolume']),
                                 name="ForceDCtoID",
                                 iterfield=['inputVolume'])

    DWIWorkflow.connect(MakeForceDCFilesListNode, 'imagesList',
                        ForceDCtoIDNode, 'inputVolume')

    # Step7: Run antsRegistration in one direction
    antsReg_B0ToTransformedT2 = pe.Node(interface=ants.Registration(),
                                        name="antsReg_B0ToTransformedT2")
    antsReg_B0ToTransformedT2.inputs.dimension = 3
    antsReg_B0ToTransformedT2.inputs.transforms = ["SyN"]
    antsReg_B0ToTransformedT2.inputs.transform_parameters = [(0.25, 3.0, 0.0)]
    antsReg_B0ToTransformedT2.inputs.metric = ['MI']
    antsReg_B0ToTransformedT2.inputs.sampling_strategy = [None]
    antsReg_B0ToTransformedT2.inputs.sampling_percentage = [1.0]
    antsReg_B0ToTransformedT2.inputs.metric_weight = [1.0]
    antsReg_B0ToTransformedT2.inputs.radius_or_number_of_bins = [32]
    antsReg_B0ToTransformedT2.inputs.number_of_iterations = [[70, 50, 40]]
    antsReg_B0ToTransformedT2.inputs.convergence_threshold = [1e-6]
    antsReg_B0ToTransformedT2.inputs.convergence_window_size = [10]
    antsReg_B0ToTransformedT2.inputs.use_histogram_matching = [True]
    antsReg_B0ToTransformedT2.inputs.shrink_factors = [[3, 2, 1]]
    antsReg_B0ToTransformedT2.inputs.smoothing_sigmas = [[2, 1, 0]]
    antsReg_B0ToTransformedT2.inputs.sigma_units = ["vox"]
    antsReg_B0ToTransformedT2.inputs.use_estimate_learning_rate_once = [False]
    antsReg_B0ToTransformedT2.inputs.write_composite_transform = True
    antsReg_B0ToTransformedT2.inputs.collapse_output_transforms = False
    antsReg_B0ToTransformedT2.inputs.initialize_transforms_per_stage = False
    antsReg_B0ToTransformedT2.inputs.output_transform_prefix = 'Tsyn'
    antsReg_B0ToTransformedT2.inputs.winsorize_lower_quantile = 0.01
    antsReg_B0ToTransformedT2.inputs.winsorize_upper_quantile = 0.99
    antsReg_B0ToTransformedT2.inputs.float = True
    antsReg_B0ToTransformedT2.inputs.args = '--restrict-deformation 0x1x0'

    DWIWorkflow.connect(ForceDCtoIDNode, ('outputVolume', pickFromList, 1),
                        antsReg_B0ToTransformedT2, 'fixed_image')
    DWIWorkflow.connect(ForceDCtoIDNode, ('outputVolume', pickFromList, 2),
                        antsReg_B0ToTransformedT2, 'fixed_image_mask')
    DWIWorkflow.connect(ForceDCtoIDNode, ('outputVolume', pickFromList, 0),
                        antsReg_B0ToTransformedT2, 'moving_image')

    # Step8: Now all necessary transforms are acquired, so it is time to
    #        transform the input DWI image into T2 image space
    # {DWI} --> ForceDCtoID --> gtractResampleDWIInPlace(using SyN transform)
    # --> Restore DirectionCosine From Saved Matrix --> gtractResampleDWIInPlace(inverse of T_rigid from BFit)
    # --> {CorrectedDWI_in_T2Space}

    DWI_ForceDCtoIDNode = pe.Node(interface=Function(
        function=ForceDCtoID,
        input_names=['inputVolume'],
        output_names=['outputVolume']),
                                  name='DWI_ForceDCtoIDNode')

    DWIWorkflow.connect(inputsSpec, 'DWIVolume', DWI_ForceDCtoIDNode,
                        'inputVolume')

    gtractResampleDWI_SyN = pe.Node(interface=gtractResampleDWIInPlace(),
                                    name="gtractResampleDWI_SyN")

    DWIWorkflow.connect(DWI_ForceDCtoIDNode, 'outputVolume',
                        gtractResampleDWI_SyN, 'inputVolume')
    DWIWorkflow.connect(
        antsReg_B0ToTransformedT2,
        ('composite_transform', pickCompositeTransfromFromList),
        gtractResampleDWI_SyN, 'warpDWITransform')
    DWIWorkflow.connect(ForceDCtoIDNode, ('outputVolume', pickFromList, 1),
                        gtractResampleDWI_SyN,
                        'referenceVolume')  # fixed image of antsRegistration
    gtractResampleDWI_SyN.inputs.outputVolume = 'IDDC_correctedDWI.nrrd'

    RestoreDCFromSavedMatrixNode = pe.Node(interface=Function(
        function=RestoreDCFromSavedMatrix,
        input_names=['inputVolume', 'inputDirectionCosine'],
        output_names=['outputVolume']),
                                           name='RestoreDCFromSavedMatrix')

    DWIWorkflow.connect(gtractResampleDWI_SyN, 'outputVolume',
                        RestoreDCFromSavedMatrixNode, 'inputVolume')
    DWIWorkflow.connect(SaveDirectionCosineToMatrixNode, 'directionCosine',
                        RestoreDCFromSavedMatrixNode, 'inputDirectionCosine')
    DWIWorkflow.connect(RestoreDCFromSavedMatrixNode, 'outputVolume',
                        outputsSpec, 'CorrectedDWI')

    GetRigidTransformInverseNode = pe.Node(interface=Function(
        function=GetRigidTransformInverse,
        input_names=['inputTransform'],
        output_names=['inverseTransform']),
                                           name='GetRigidTransformInverse')

    DWIWorkflow.connect(BFit_T2toB0, 'strippedOutputTransform',
                        GetRigidTransformInverseNode, 'inputTransform')

    gtractResampleDWIInPlace_Trigid = pe.Node(
        interface=gtractResampleDWIInPlace(),
        name="gtractResampleDWIInPlace_Trigid")

    DWIWorkflow.connect(RestoreDCFromSavedMatrixNode, 'outputVolume',
                        gtractResampleDWIInPlace_Trigid, 'inputVolume')
    DWIWorkflow.connect(
        GetRigidTransformInverseNode, 'inverseTransform',
        gtractResampleDWIInPlace_Trigid,
        'inputTransform')  #Inverse of rigid transform from BFit
    gtractResampleDWIInPlace_Trigid.inputs.outputVolume = 'CorrectedDWI_in_T2Space_estimate.nrrd'
    gtractResampleDWIInPlace_Trigid.inputs.outputResampledB0 = 'CorrectedDWI_in_T2Space_estimate_B0.nrrd'

    # Step9: An extra registration step to tune the alignment between the CorrectedDWI_in_T2Space image and the T2 image.
    BFit_TuneRegistration = pe.Node(interface=BRAINSFit(),
                                    name="BFit_TuneRegistration")
    BFit_TuneRegistration.inputs.costMetric = "MMI"
    BFit_TuneRegistration.inputs.numberOfSamples = 100000
    BFit_TuneRegistration.inputs.numberOfIterations = [1500]
    BFit_TuneRegistration.inputs.numberOfHistogramBins = 50
    BFit_TuneRegistration.inputs.maximumStepLength = 0.2
    BFit_TuneRegistration.inputs.minimumStepLength = [0.00005]
    BFit_TuneRegistration.inputs.useRigid = True
    BFit_TuneRegistration.inputs.useAffine = True
    BFit_TuneRegistration.inputs.maskInferiorCutOffFromCenter = 65
    BFit_TuneRegistration.inputs.maskProcessingMode = "ROIAUTO"
    BFit_TuneRegistration.inputs.ROIAutoDilateSize = 13
    BFit_TuneRegistration.inputs.backgroundFillValue = 0.0
    BFit_TuneRegistration.inputs.initializeTransformMode = 'useCenterOfHeadAlign'
    BFit_TuneRegistration.inputs.strippedOutputTransform = "CorrectedB0inT2Space_to_T2_RigidTransform.h5"
    BFit_TuneRegistration.inputs.writeOutputTransformInFloat = True
    DWIWorkflow.connect(ExtractBRAINFromHeadNode, 'outputVolume',
                        BFit_TuneRegistration, 'fixedVolume')  #T2 brain volume
    DWIWorkflow.connect(gtractResampleDWIInPlace_Trigid, 'outputResampledB0',
                        BFit_TuneRegistration,
                        'movingVolume')  # CorrectedB0_in_T2Space

    gtractResampleDWIInPlace_TuneRigidTx = pe.Node(
        interface=gtractResampleDWIInPlace(),
        name="gtractResampleDWIInPlace_TuneRigidTx")
    DWIWorkflow.connect(gtractResampleDWIInPlace_Trigid, 'outputVolume',
                        gtractResampleDWIInPlace_TuneRigidTx, 'inputVolume')
    DWIWorkflow.connect(BFit_TuneRegistration, 'strippedOutputTransform',
                        gtractResampleDWIInPlace_TuneRigidTx, 'inputTransform')
    gtractResampleDWIInPlace_TuneRigidTx.inputs.outputVolume = 'CorrectedDWI_in_T2Space.nrrd'
    gtractResampleDWIInPlace_TuneRigidTx.inputs.outputResampledB0 = 'CorrectedDWI_in_T2Space_B0.nrrd'

    # Finally we pass the outputs of the gtractResampleDWIInPlace_TuneRigidTx to the outputsSpec
    DWIWorkflow.connect(gtractResampleDWIInPlace_TuneRigidTx, 'outputVolume',
                        outputsSpec, 'CorrectedDWI_in_T2Space')

    # Step10: Create brain mask from the input labelmap
    DWIBRAINMASK = pe.Node(interface=BRAINSResample(), name='DWIBRAINMASK')
    DWIBRAINMASK.inputs.interpolationMode = 'Linear'
    DWIBRAINMASK.inputs.outputVolume = 'BrainMaskForDWI.nrrd'
    DWIBRAINMASK.inputs.pixelType = 'binary'
    DWIWorkflow.connect(gtractResampleDWIInPlace_TuneRigidTx,
                        'outputResampledB0', DWIBRAINMASK, 'referenceVolume')
    DWIWorkflow.connect(inputsSpec, 'LabelMapVolume', DWIBRAINMASK,
                        'inputVolume')
    DWIWorkflow.connect(DWIBRAINMASK, 'outputVolume', outputsSpec,
                        'DWIBrainMask')

    # Step11: DTI estimation
    DTIEstim = pe.Node(interface=dtiestim(), name="DTIEstim")
    DTIEstim.inputs.method = 'wls'
    DTIEstim.inputs.tensor_output = 'DTI_Output.nrrd'
    DWIWorkflow.connect(gtractResampleDWIInPlace_TuneRigidTx, 'outputVolume',
                        DTIEstim, 'dwi_image')
    DWIWorkflow.connect(DWIBRAINMASK, 'outputVolume', DTIEstim, 'brain_mask')
    DWIWorkflow.connect(DTIEstim, 'tensor_output', outputsSpec, 'tensor_image')

    # Step12: DTI process
    DTIProcess = pe.Node(interface=dtiprocess(), name='DTIProcess')
    DTIProcess.inputs.fa_output = 'FA.nrrd'
    DTIProcess.inputs.md_output = 'MD.nrrd'
    DTIProcess.inputs.RD_output = 'RD.nrrd'
    DTIProcess.inputs.frobenius_norm_output = 'frobenius_norm_output.nrrd'
    DTIProcess.inputs.lambda1_output = 'lambda1_output.nrrd'
    DTIProcess.inputs.lambda2_output = 'lambda2_output.nrrd'
    DTIProcess.inputs.lambda3_output = 'lambda3_output.nrrd'
    DTIProcess.inputs.scalar_float = True

    DWIWorkflow.connect(DTIEstim, 'tensor_output', DTIProcess, 'dti_image')
    DWIWorkflow.connect(DTIProcess, 'fa_output', outputsSpec, 'FAImage')
    DWIWorkflow.connect(DTIProcess, 'md_output', outputsSpec, 'MDImage')
    DWIWorkflow.connect(DTIProcess, 'RD_output', outputsSpec, 'RDImage')
    DWIWorkflow.connect(DTIProcess, 'frobenius_norm_output', outputsSpec,
                        'FrobeniusNormImage')
    DWIWorkflow.connect(DTIProcess, 'lambda1_output', outputsSpec,
                        'Lambda1Image')
    DWIWorkflow.connect(DTIProcess, 'lambda2_output', outputsSpec,
                        'Lambda2Image')
    DWIWorkflow.connect(DTIProcess, 'lambda3_output', outputsSpec,
                        'Lambda3Image')

    # Step13: UKF Processing
    UKFNode = pe.Node(interface=UKFTractography(), name="UKFRunRecordStates")
    UKFNode.inputs.tracts = "ukfTracts.vtk"
    #UKFNode.inputs.tractsWithSecondTensor = "ukfSecondTensorTracks.vtk"
    UKFNode.inputs.numTensor = '2'
    UKFNode.inputs.freeWater = True  ## default False
    UKFNode.inputs.recordFA = True  ## default False
    UKFNode.inputs.recordTensors = True  ## default False
    #UKFNode.inputs.recordCovariance = True ## default False
    #UKFNode.inputs.recordState = True ## default False
    #UKFNode.inputs.recordFreeWater = True ## default False
    #UKFNode.inputs.recordTrace = True ## default False
    #UKFNode.inputs.recordNMSE = True ## default False

    DWIWorkflow.connect(gtractResampleDWIInPlace_TuneRigidTx, 'outputVolume',
                        UKFNode, 'dwiFile')
    DWIWorkflow.connect(DWIBRAINMASK, 'outputVolume', UKFNode, 'maskFile')
    DWIWorkflow.connect(UKFNode, 'tracts', outputsSpec, 'ukfTracks')
    #DWIWorkflow.connect(UKFNode,'tractsWithSecondTensor',outputsSpec,'ukf2ndTracks')

    ## Write all outputs with DataSink
    DWIDataSink = pe.Node(interface=nio.DataSink(), name='DWIDataSink')
    DWIDataSink.inputs.base_directory = dataSink_DIR
    DWIDataSink.inputs.container = sessionID

    DWIWorkflow.connect(outputsSpec, 'ukfTracks', DWIDataSink,
                        'Outputs.@ukfTracks')
    #DWIWorkflow.connect(outputsSpec, 'ukf2ndTracks', DWIDataSink, 'Outputs.@ukf2ndTracks')
    DWIWorkflow.connect(outputsSpec, 'CorrectedDWI', DWIDataSink,
                        'Outputs.@CorrectedDWI')
    DWIWorkflow.connect(outputsSpec, 'CorrectedDWI_in_T2Space', DWIDataSink,
                        'Outputs.@CorrectedDWI_in_T2Space')
    DWIWorkflow.connect(outputsSpec, 'tensor_image', DWIDataSink,
                        'Outputs.@tensor_image')
    DWIWorkflow.connect(outputsSpec, 'DWIBrainMask', DWIDataSink,
                        'Outputs.@DWIBrainMask')
    DWIWorkflow.connect(outputsSpec, 'FAImage', DWIDataSink,
                        'Outputs.@FAImage')
    DWIWorkflow.connect(outputsSpec, 'MDImage', DWIDataSink,
                        'Outputs.@MDImage')
    DWIWorkflow.connect(outputsSpec, 'RDImage', DWIDataSink,
                        'Outputs.@RDImage')
    DWIWorkflow.connect(outputsSpec, 'FrobeniusNormImage', DWIDataSink,
                        'Outputs.@FrobeniusNormImage')
    DWIWorkflow.connect(outputsSpec, 'Lambda1Image', DWIDataSink,
                        'Outputs.@Lambda1Image')
    DWIWorkflow.connect(outputsSpec, 'Lambda2Image', DWIDataSink,
                        'Outputs.@Lambda2Image')
    DWIWorkflow.connect(outputsSpec, 'Lambda3Image', DWIDataSink,
                        'Outputs.@Lambda3Image')

    DWIWorkflow.write_graph()
    DWIWorkflow.run()
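A hypothetical driver call; the directory layout only needs site/subject/session levels above the DWI file, since the IDs are parsed from the path as shown above:

runMainWorkflow(
    DWI_scan='/data/site01/subj01/ses01/DWI.nrrd',  # placeholder paths
    T2_scan='/data/site01/subj01/ses01/T2.nrrd',
    labelMap_image='/data/site01/subj01/ses01/labels.nrrd',
    BASE_DIR='/scratch/dwi_cache',
    dataSink_DIR='/results',
)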
Example 17
def CreateMeasurementWorkflow(WFname, LABELS_CONFIG_FILE):
    # \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    ###### UTILITY FUNCTIONS #######
    # This function returns a label map that only covers the FOV of the input DWI scan
    def CreateDWILabelMap(T2LabelMapVolume, DWIBrainMask):
        import os
        import SimpleITK as sitk
        T2LabelMapVolume = sitk.ReadImage(
            T2LabelMapVolume,
            sitk.sitkUInt16)  # FreeSurfer labelmap needs uint16
        DWIBrainMask = sitk.ReadImage(DWIBrainMask)
        # 1- Dilate input DWI mask
        dilateFilter = sitk.BinaryDilateImageFilter()
        dilateFilter.SetKernelRadius(1)
        dilated_mask = dilateFilter.Execute(DWIBrainMask)
        # 2- Resample dilated mask to the space of T2LabelMap (1x1x1)
        # Use Linear interpolation + thresholding
        resFilt = sitk.ResampleImageFilter()
        resFilt.SetReferenceImage(T2LabelMapVolume)
        resFilt.SetOutputPixelType(sitk.sitkFloat32)
        resFilt.SetInterpolator(sitk.sitkLinear)
        resampled_dilated_mask = resFilt.Execute(dilated_mask)
        # Thresholding by 0
        threshFilt = sitk.BinaryThresholdImageFilter()
        thresh_resampled_dilated_mask = threshFilt.Execute(
            resampled_dilated_mask, 0.0001, 1.0, 1, 0)
        # 3- Cast the thresholded image to uInt-16
        castFilt = sitk.CastImageFilter()
        castFilt.SetOutputPixelType(sitk.sitkUInt16)
        casted_thresh_resampled_dilated_mask = castFilt.Execute(
            thresh_resampled_dilated_mask)
        # 4- Multiply this binary mask to the T2 labelmap volume
        mulFilt = sitk.MultiplyImageFilter()
        DWILabelMapVolume = mulFilt.Execute(
            casted_thresh_resampled_dilated_mask, T2LabelMapVolume)
        # write the output label map
        outputVolume = os.path.realpath('DWILabelMapVolume.nrrd')
        sitk.WriteImage(DWILabelMapVolume, outputVolume)
        return outputVolume

    def MakeResamplerInFileList(FAImage, MDImage, RDImage, FrobeniusNormImage,
                                Lambda1Image, Lambda2Image, Lambda3Image):
        RISsList = [
            FAImage, MDImage, RDImage, FrobeniusNormImage, Lambda1Image,
            Lambda2Image, Lambda3Image
        ]
        return RISsList

    # This function computes statistics of each input RIS volume over all input labels
    # and writes the results to a CSV file
    def ComputeStatistics(inputVolume, T2LabelMapVolume, DWILabelMapVolume,
                          labelCodesFile):
        import os
        import SimpleITK as sitk

        #### Util Funcs ####
        def createLabelsDictionary(labelCodesFile):
            import csv
            labelsDictionary = {}
            with open(labelCodesFile) as lf:
                reader = csv.reader(lf, delimiter=',')
                for line in reader:
                    if line[0][0] == "#":
                        continue
                    else:
                        labelsDictionary[line[0]] = line[1]
            return labelsDictionary

        def computeVoxelVolume(inputVolume):
            import operator
            from functools import reduce
            return reduce(operator.mul, inputVolume.GetSpacing())

        def ReturnStatisticsList(labelID, voxelVolume, resampledRISVolume,
                                 DWILabelMap, T2LabelMap):
            from past.utils import old_div
            statFilter = sitk.LabelStatisticsImageFilter()
            # RIS stats over input label ID
            statFilter.Execute(resampledRISVolume, DWILabelMap)
            mean = statFilter.GetMean(labelID)
            std = statFilter.GetSigma(labelID)
            maximum = statFilter.GetMaximum(labelID)
            minimum = statFilter.GetMinimum(labelID)
            median = statFilter.GetMedian(labelID)
            effectiveVolume = statFilter.GetCount(labelID) * voxelVolume
            # compute total volume of input label ID in the non-cropped labelmap (T2LabelMap)
            statFilter.Execute(resampledRISVolume, T2LabelMap)
            totalVolume = statFilter.GetCount(labelID) * voxelVolume
            # if effectiveVolume is 0, the label is missing in the DWI scan or it
            # doesn't exist in the current label maps; in both cases the
            # confidence coefficient must be zero.
            if effectiveVolume == 0:
                confidence_coefficient = 0
                maximum = 0
                minimum = 0
            else:
                if totalVolume == 0:
                    raise ValueError(
                        'Label {0} is not found in T2 labels map, but exists in DWI labels map!'
                        .format(labelID))
                confidence_coefficient = old_div(effectiveVolume, totalVolume)
            # Now create the statistics list
            statsList = [
                format(mean, '.4f'),
                format(std, '.4f'),
                format(maximum, '.4f'),
                format(minimum, '.4f'),
                format(median, '.4f'), effectiveVolume, totalVolume,
                format(confidence_coefficient, '.3f')
            ]
            return statsList, totalVolume

        def writeLabelStatistics(filename, statisticsDictionary):
            import csv
            # text mode with newline='' is required by the csv module on Python 3
            with open(filename, 'w', newline='') as lf:
                headerdata = [[
                    '#Label', 'mean', 'std', 'max', 'min', 'median',
                    'effective_volume', 'total_volume', 'confidence_coefficient'
                ]]
                wr = csv.writer(lf, delimiter=',')
                wr.writerows(headerdata)
                for key, value in sorted(statisticsDictionary.items()):
                    wr.writerows([[key] + value])

        #### #### #### ####
        resampledRISVolume = sitk.ReadImage(inputVolume)
        T2LabelMap = sitk.ReadImage(T2LabelMapVolume)
        DWILabelMap = sitk.ReadImage(DWILabelMapVolume)
        labelsDictionary = createLabelsDictionary(labelCodesFile)
        statisticsDictionary = {}
        voxelVolume = computeVoxelVolume(resampledRISVolume)
        for key in labelsDictionary:
            labelID = int(key)
            [statisticsList,
             total_volume] = ReturnStatisticsList(labelID, voxelVolume,
                                                  resampledRISVolume,
                                                  DWILabelMap, T2LabelMap)
            if total_volume != 0:
                statisticsDictionary[labelsDictionary[key]] = statisticsList
        # Create output file name
        inputBaseName = os.path.basename(inputVolume)
        inputName = os.path.splitext(inputBaseName)[0]
        RISName = inputName.split('_', 1)[0]
        CSVStatisticsFile = os.path.realpath(RISName + '_statistics.csv')
        writeLabelStatistics(CSVStatisticsFile, statisticsDictionary)
        return CSVStatisticsFile
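    # Example of the CSV layout ComputeStatistics produces (values illustrative):
    # #Label,mean,std,max,min,median,effective_volume,total_volume,confidence_coefficient
    # caudate_left,0.2312,0.0415,0.4110,0.0917,0.2298,3721.0,3905.0,0.953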

    # This function helps to pick the desired output from the output list
    def pickFromList(inputlist, item):
        return inputlist[item]
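    # In the connect statements below, nipype applies this helper through the
    # tuple form, e.g. ('CSVStatisticsFile', pickFromList, 0): the node's output
    # list is passed as `inputlist` and 0 as `item`.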

    def ResampleRISVolumes(referenceVolume, inputVolume):
        import os
        import SimpleITK as sitk
        refVolume = sitk.ReadImage(referenceVolume)
        RISVolume = sitk.ReadImage(inputVolume)
        # 0- because of numerical precision errors, rotationally invariant
        # scalars can contain small negative values (e.g. -1e-13),
        # so set voxel value x to 0 if -1e-5 < x < 1e-5
        binaryThreshFilt = sitk.BinaryThresholdImageFilter()
        maskNearZero = binaryThreshFilt.Execute(RISVolume, -1e-5, 1e-5, 0, 1)
        RISVolume = RISVolume * sitk.Cast(maskNearZero, sitk.sitkFloat64)
        # Linear interpolation
        resFilt = sitk.ResampleImageFilter()
        resFilt.SetReferenceImage(refVolume)
        resFilt.SetOutputPixelType(RISVolume.GetPixelIDValue())
        resFilt.SetInterpolator(sitk.sitkLinear)
        RIS_resampled = resFilt.Execute(RISVolume)
        '''
        # sqrt voxel-wise + cubic BSpline + square voxel-wise
        # 1- voxel-wise square root of input volume
        sqrtFilt = sitk.SqrtImageFilter()
        RIS_sqrt = sqrtFilt.Execute(RISVolume)
        # 2- resample the square-rooted image using cubic BSpline
        resFilt = sitk.ResampleImageFilter()
        resFilt.SetReferenceImage(refVolume)
        resFilt.SetInterpolator(sitk.sitkBSpline)
        RIS_sqrt_res = resFilt.Execute(RIS_sqrt)
        # 3- square the resampled RIS volume voxel-wise
        squarFilt = sitk.SquareImageFilter()
        RIS_resampled = squarFilt.Execute(RIS_sqrt_res)
        '''
        # Create output file name
        inputBaseName = os.path.basename(inputVolume)
        RISName = os.path.splitext(inputBaseName)[0]
        outputVolume = os.path.realpath(RISName + '_res.nrrd')
        sitk.WriteImage(RIS_resampled, outputVolume)
        assert os.path.isfile(
            outputVolume), "Resampled RIS file is not found: %s" % outputVolume
        return outputVolume

    #################################
    # \/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    MeasurementWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'T2LabelMapVolume', 'DWIBrainMask', 'LabelsConfigFile', 'FAImage',
        'MDImage', 'RDImage', 'FrobeniusNormImage', 'Lambda1Image',
        'Lambda2Image', 'Lambda3Image'
    ]),
                         name='inputsSpec')
    inputsSpec.inputs.LabelsConfigFile = LABELS_CONFIG_FILE

    outputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'FA_stats', 'MD_stats', 'RD_stats', 'FrobeniusNorm_stats',
        'Lambda1_stats', 'Lambda2_stats', 'Lambda3_stats'
    ]),
                          name='outputsSpec')

    # Step1: Create the labelmap volume for DWI scan
    CreateDWILabelMapNode = pe.Node(interface=Function(
        function=CreateDWILabelMap,
        input_names=['T2LabelMapVolume', 'DWIBrainMask'],
        output_names=['DWILabelMapVolume']),
                                    name="CreateDWILabelMap")
    MeasurementWF.connect(inputsSpec, 'T2LabelMapVolume',
                          CreateDWILabelMapNode, 'T2LabelMapVolume')
    MeasurementWF.connect(inputsSpec, 'DWIBrainMask', CreateDWILabelMapNode,
                          'DWIBrainMask')

    # Now we have two labelmap volumes (both on a 1x1x1 voxel lattice):
    # (1) T2LabelMap: used to compute total_volume for each label
    # (2) DWILabelMap: probably cropped, so parts of some labels may be missing;
    #                  used to compute all stats like [mean, std, max, min, median, effective_volume].

    # Step2: Resample each RIS to T2LabelmapVolume voxel lattice
    MakeResamplerInFilesListNode = pe.Node(interface=Function(
        function=MakeResamplerInFileList,
        input_names=[
            'FAImage', 'MDImage', 'RDImage', 'FrobeniusNormImage',
            'Lambda1Image', 'Lambda2Image', 'Lambda3Image'
        ],
        output_names=['RISsList']),
                                           name="MakeResamplerInFilesListNode")
    MeasurementWF.connect([(inputsSpec, MakeResamplerInFilesListNode,
                            [('FAImage', 'FAImage'), ('MDImage', 'MDImage'),
                             ('RDImage', 'RDImage'),
                             ('FrobeniusNormImage', 'FrobeniusNormImage'),
                             ('Lambda1Image', 'Lambda1Image'),
                             ('Lambda2Image', 'Lambda2Image'),
                             ('Lambda3Image', 'Lambda3Image')])])
    # To resample the RIS volumes, the output of resampling should not contain
    # any negative intensity values, because negative values have no meaning in
    # rotationally invariant scalar measures.
    # There are 3 options:
    # 1- Use linear interpolation
    # 2- Use Gaussian interpolation
    # 3- Use cubic BSpline interpolation with some precautions:
    #    "voxel-wise square root of intensity values" +
    #    cubic BSpline interpolation +
    #    "voxel-wise square of intensity values"
    # ResampleRISVolumes currently applies linear interpolation; the BSpline
    # variant is kept as a commented-out alternative inside that function, and a
    # commented-out BRAINSResample-based option follows below.
    ResampleRISsNode = pe.MapNode(interface=Function(
        function=ResampleRISVolumes,
        input_names=['referenceVolume', 'inputVolume'],
        output_names=['outputVolume']),
                                  name="ResampleRISs",
                                  iterfield=['inputVolume'])
    MeasurementWF.connect(inputsSpec, 'T2LabelMapVolume', ResampleRISsNode,
                          'referenceVolume')
    MeasurementWF.connect(MakeResamplerInFilesListNode, 'RISsList',
                          ResampleRISsNode, 'inputVolume')
    '''
    ResampleRISsNode = pe.MapNode(interface=BRAINSResample(), name="ResampleRISs",
                                  iterfield=['inputVolume', 'outputVolume'])
    ResampleRISsNode.inputs.interpolationMode = 'Linear'
    ResampleRISsNode.inputs.pixelType = 'float'
    ResampleRISsNode.inputs.outputVolume = ['FA_res.nrrd','MD_res.nrrd','RD_res.nrrd','frobenius_norm_res.nrrd',
                                            'lambda1_res.nrrd','lambda2_res.nrrd','lambda3_res.nrrd']
    MeasurementWF.connect(inputsSpec,'T2LabelMapVolume',ResampleRISsNode,'referenceVolume')
    MeasurementWF.connect(MakeResamplerInFilesListNode,'RISsList',ResampleRISsNode,'inputVolume')
    '''
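    # A minimal sketch (editor's illustration, not used by the workflow) of why
    # the sqrt -> cubic BSpline -> square composition cannot produce negative
    # voxels: squaring the interpolated square roots maps any BSpline
    # over/undershoot back into the non-negative range. Image size is arbitrary.
    def _sqrt_bspline_square_demo():
        import SimpleITK as sitk
        toy = sitk.GaussianSource(sitk.sitkFloat64, size=[16, 16, 16])
        resFilt = sitk.ResampleImageFilter()
        resFilt.SetReferenceImage(toy)
        resFilt.SetInterpolator(sitk.sitkBSpline)
        resampled = sitk.Square(resFilt.Execute(sitk.Sqrt(toy)))
        minmax = sitk.MinimumMaximumImageFilter()
        minmax.Execute(resampled)
        assert minmax.GetMinimum() >= 0.0  # squares are never negative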
    # Step3: Computes statistics of each resampled RIS over all input labels
    # and writes the results as a CSV file (a csv file for each RIS)
    ComputeStatisticsNode = pe.MapNode(interface=Function(
        function=ComputeStatistics,
        input_names=[
            'inputVolume', 'T2LabelMapVolume', 'DWILabelMapVolume',
            'labelCodesFile'
        ],
        output_names=['CSVStatisticsFile']),
                                       name="ComputeStatistics",
                                       iterfield=['inputVolume'])
    MeasurementWF.connect(ResampleRISsNode, 'outputVolume',
                          ComputeStatisticsNode, 'inputVolume')
    MeasurementWF.connect(inputsSpec, 'T2LabelMapVolume',
                          ComputeStatisticsNode, 'T2LabelMapVolume')
    MeasurementWF.connect(CreateDWILabelMapNode, 'DWILabelMapVolume',
                          ComputeStatisticsNode, 'DWILabelMapVolume')
    MeasurementWF.connect(inputsSpec, 'LabelsConfigFile',
                          ComputeStatisticsNode, 'labelCodesFile')
    MeasurementWF.connect(ComputeStatisticsNode,
                          ('CSVStatisticsFile', pickFromList, 0), outputsSpec,
                          'FA_stats')
    MeasurementWF.connect(ComputeStatisticsNode,
                          ('CSVStatisticsFile', pickFromList, 1), outputsSpec,
                          'MD_stats')
    MeasurementWF.connect(ComputeStatisticsNode,
                          ('CSVStatisticsFile', pickFromList, 2), outputsSpec,
                          'RD_stats')
    MeasurementWF.connect(ComputeStatisticsNode,
                          ('CSVStatisticsFile', pickFromList, 3), outputsSpec,
                          'FrobeniusNorm_stats')
    MeasurementWF.connect(ComputeStatisticsNode,
                          ('CSVStatisticsFile', pickFromList, 4), outputsSpec,
                          'Lambda1_stats')
    MeasurementWF.connect(ComputeStatisticsNode,
                          ('CSVStatisticsFile', pickFromList, 5), outputsSpec,
                          'Lambda2_stats')
    MeasurementWF.connect(ComputeStatisticsNode,
                          ('CSVStatisticsFile', pickFromList, 6), outputsSpec,
                          'Lambda3_stats')

    return MeasurementWF
Example n. 20
0
"""
Created on Mon Aug 29 14:30:53 2016

@author: fbeyer
"""
import os
from nipype.pipeline.engine import Node, Workflow
from nipype.interfaces.utility import Function
import nipype.interfaces.utility as util
import nipype.interfaces.io as nio
import nipype.interfaces.fsl as fsl
from strip_rois import strip_rois_func
from moco import create_moco_pipeline
#from ICA_AROMA2 import create_ica_aroma
from smoothing import create_smoothing_pipeline
import ICA_AROMA_functions as aromafunc

runICA = Function(input_names=["fslDir", "outDir", "inFile", "melDirIn", "mask", "dim", "TR"],
                  output_names=["mdir"],
                  function=aromafunc.runICA)
runICA.inputs.fslDir = '/usr/share/fsl/5.0/bin/'
# portable alternative: os.path.join(os.environ["FSLDIR"], 'bin', '')
runICA.inputs.dim = 0
runICA.inputs.TR = 2
runICA.inputs.melDirIn = ""
runICA.inputs.outDir = "/scr/lessing2/data_fbeyer/FTO_YFAS/WDR/"
runICA.inputs.inFile = "/scr/lessing2/data_fbeyer/FTO_YFAS/Subjects/LI00037838/aroma_inputs/func_preproc_smoothed.nii"
runICA.inputs.mask = "/scr/lessing2/data_fbeyer/FTO_YFAS/Subjects/LI00037838/aroma_inputs/func_brain_mask.nii"

runICA.run()
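
# Sketch (editor's illustration): the same Function interface could also run
# inside a pipeline rather than standalone; node and workflow names below are
# hypothetical.
ica_node = Node(runICA, name="run_ica")
aroma_wf = Workflow(name="aroma_wf")
aroma_wf.base_dir = "/tmp/aroma_workdir"  # hypothetical working directory
aroma_wf.add_nodes([ica_node])
# aroma_wf.run()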
Example n. 21
0
psb6351_wf = pe.Workflow(
    name='psb6351_wf'
)  # First I create a workflow...this will serve as the backbone of the pipeline
psb6351_wf.base_dir = work_dir + f'/psb6351workdir/sub-{sids[0]}'  # I define the working directory where I want preliminary files to be written
psb6351_wf.config['execution'][
    'use_relative_paths'] = True  # I assign an execution variable to use relative paths...TRYING TO USE THIS TO FIX A BUG?

# Create a Function node to substitute names of files created during the pipeline.
# In nipype you create nodes using the pipeline engine that was imported earlier.
# In this case I am specifically creating a function node with an input called func_files;
# it expects an output (what the function returns) called subs. The actual function,
# which was created above, is called get_subs.
# I can assign the input either through the workflow connect syntax or by simply hardcoding it.
# In this case I hard coded it by saying that .inputs.func_files = func_files
getsubs = pe.Node(Function(input_names=['func_files'],
                           output_names=['subs'],
                           function=get_subs),
                  name='getsubs')
getsubs.inputs.func_files = func_files

# Here I am inputting just the first run of functional data.
# I want to use afni's 3dToutcount to find the number of
# outliers at each volume. I will use this information
# later to select the earliest volume with the least number of outliers
# to serve as the base for the motion correction.
id_outliers = pe.Node(afni.OutlierCount(), name='id_outliers')
id_outliers.inputs.in_file = func_files[0]
id_outliers.inputs.automask = True
id_outliers.inputs.legendre = True
id_outliers.inputs.polort = 4
id_outliers.inputs.out_file = 'outlier_file'
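
# Sketch of the selection step described above (editor's illustration; the real
# helper lives elsewhere in this pipeline): read the per-volume outlier counts
# written by 3dToutcount and return the earliest volume with the fewest outliers.
def pick_best_volume(outlier_file):
    with open(outlier_file) as f:
        counts = [float(line) for line in f if line.strip()]
    return counts.index(min(counts))  # index() returns the earliest minimum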
Example n. 22
0
def init_commonspace_wf(name="antsRegistrationTemplateBuilder"):
    # from nipype.workflows.smri.ants import antsRegistrationTemplateBuildSingleIterationWF

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['file_list']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=[
        'PrimaryTemplate', 'PassiveTemplate', 'Transforms',
        'PreRegisterAverage'
    ]),
                         name='outputnode')

    datasource = pe.Node(Function(input_names=['InitialTemplateInputs'],
                                  output_names=[
                                      'InitialTemplateInputs',
                                      'ListOfImagesDictionaries',
                                      'registrationImageTypes',
                                      'interpolationMapping'
                                  ],
                                  function=prep_data),
                         name='datasource')

    # creates an average from the input images as initial target template
    initAvg = pe.Node(interface=ants.AverageImages(), name='initAvg')
    initAvg.inputs.dimension = 3
    initAvg.inputs.normalize = True

    # Define the iterations for template building
    buildTemplateIteration1 = antsRegistrationTemplateBuildSingleIterationWF(
        'iteration01')
    buildTemplateIteration2 = antsRegistrationTemplateBuildSingleIterationWF(
        'iteration02')
    buildTemplateIteration3 = antsRegistrationTemplateBuildSingleIterationWF(
        'iteration03')

    workflow.connect(inputnode, "file_list", datasource,
                     "InitialTemplateInputs")
    workflow.connect(datasource, "InitialTemplateInputs", initAvg, "images")

    workflow.connect(initAvg, 'output_average_image', buildTemplateIteration1,
                     'inputspec.fixed_image')
    workflow.connect(datasource, 'ListOfImagesDictionaries',
                     buildTemplateIteration1,
                     'inputspec.ListOfImagesDictionaries')
    workflow.connect(datasource, 'registrationImageTypes',
                     buildTemplateIteration1,
                     'inputspec.registrationImageTypes')
    workflow.connect(datasource, 'interpolationMapping',
                     buildTemplateIteration1, 'inputspec.interpolationMapping')
    '''
    #the template created from the previous iteration becomes the new target template
    workflow.connect(buildTemplateIteration1, 'outputspec.template',
                     buildTemplateIteration2, 'inputspec.fixed_image')
    workflow.connect(datasource, 'ListOfImagesDictionaries',
                     buildTemplateIteration2, 'inputspec.ListOfImagesDictionaries')
    workflow.connect(datasource, 'registrationImageTypes', buildTemplateIteration2,
                     'inputspec.registrationImageTypes')
    workflow.connect(datasource, 'interpolationMapping', buildTemplateIteration2,
                     'inputspec.interpolationMapping')
    #the template created from the previous iteration becomes the new target template
    workflow.connect(buildTemplateIteration2, 'outputspec.template',
                     buildTemplateIteration3, 'inputspec.fixed_image')
    workflow.connect(datasource, 'ListOfImagesDictionaries',
                     buildTemplateIteration3, 'inputspec.ListOfImagesDictionaries')
    workflow.connect(datasource, 'registrationImageTypes', buildTemplateIteration3,
                     'inputspec.registrationImageTypes')
    workflow.connect(datasource, 'interpolationMapping', buildTemplateIteration3,
                     'inputspec.interpolationMapping')
    '''

    workflow.connect(buildTemplateIteration1, 'outputspec.template',
                     outputnode, 'PrimaryTemplate')
    workflow.connect(buildTemplateIteration1,
                     'outputspec.passive_deformed_templates', outputnode,
                     'PassiveTemplate')
    workflow.connect(buildTemplateIteration1, 'outputspec.transforms_list',
                     outputnode, 'Transforms')
    workflow.connect(initAvg, 'output_average_image', outputnode,
                     'PreRegisterAverage')

    return workflow
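
# Minimal usage sketch (file names are placeholders): feed a list of images to
# the template-building workflow through its inputnode.
commonspace = init_commonspace_wf()
commonspace.inputs.inputnode.file_list = ['sub-01_T1w.nii.gz', 'sub-02_T1w.nii.gz']
# commonspace.run()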
Example n. 23
0
def spm_mrpet_preprocessing(wf_name="spm_mrpet_preproc"):
    """ Run the PET pre-processing workflow against the
    gunzip_pet.in_file files.
    It depends on the anat_preproc_workflow, so if this
    has not been run, this function will run it too.

    # TODO: organize the anat2pet hack/condition somehow:
    If anat2pet:
    - SPM12 Coregister T1 and tissues to PET
    - PVC the PET image in PET space
    - SPM12 Warp PET to MNI
    else:
    - SPM12 Coregister PET to T1
    - PVC the PET image in anatomical space
    - SPM12 Warp PET in anatomical space to MNI through the
    `anat_to_mni_warp`.

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pet_input.in_file: traits.File
        The raw NIFTI_GZ PET image file

    pet_input.anat: traits.File
        Path to the high-contrast anatomical image.
        Reference file of the warp_field, i.e., the
        anatomical image in its native space.

    pet_input.anat_to_mni_warp: traits.File
        The warp field from the transformation of the
        anatomical image to the standard MNI space.

    pet_input.atlas_anat: traits.File
        The atlas file in anatomical space.

    pet_input.tissues: list of traits.File
        List of tissues files from the New Segment process.
        At least the first 3 tissues must be present.

    Nipype outputs
    --------------
    pet_output.pvc_out: existing file
        The results of the PVC process

    pet_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pet_output.coreg_ref: existing file
        The coregistered reference image to PET space.

    pet_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files

    pet_output.pvc_warped: existing file
        Results from PETPVC normalized to MNI.
        The result of every internal pre-processing step
        is normalized to MNI here.

    pet_output.warp_field: existing files
        Spatial normalization parameters .mat files

    pet_output.gm_norm: existing file
        The output of the grey matter intensity
        normalization process.
        This is the last step in the PET signal correction,
        before registration.

    pet_output.atlas_pet: existing file
        Atlas image warped to PET space.
        If the `atlas_file` option is an existing file and
        `normalize_atlas` is True.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields  = ["in_file",
                  "anat",
                  "anat_to_mni_warp",
                  "tissues",]

    out_fields = ["brain_mask",
                  "coreg_others",
                  "coreg_ref",
                  "pvc_warped",
                  "pet_warped", # 'pet_warped' is a dummy entry to keep the fields pattern.
                  "warp_field",
                  "pvc_out",
                  "pvc_mask",
                  "gm_norm",]

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields  += ["atlas_anat"]
        out_fields += ["atlas_pet" ]

    # input
    pet_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                           name="pet_input")

    # workflow to perform partial volume correction
    petpvc    = petpvc_workflow(wf_name="petpvc")

    merge_list = setup_node(Merge(4), name='merge_for_unzip')
    gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    warp_pet = setup_node(spm_normalize(), name="warp_pet")

    tpm_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="tpm_bbox")
    tpm_bbox.inputs.in_file = spm_tpm_priors_path()

    # output
    pet_output = setup_node(IdentityInterface(fields=out_fields), name="pet_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # check how to perform the registration, to decide how to build the pipeline
    anat2pet = get_config_setting('registration.anat2pet', False)
    if anat2pet:
        wf.connect([
                    # inputs
                    (pet_input, petpvc,     [("in_file", "pvc_input.in_file"),
                                             ("anat",    "pvc_input.reference_file"),
                                             ("tissues", "pvc_input.tissues")]),

                    # gunzip some files for SPM Normalize
                    (petpvc,    merge_list, [("pvc_output.pvc_out",    "in1"),
                                             ("pvc_output.brain_mask", "in2"),
                                             ("pvc_output.gm_norm",    "in3")]),
                    (pet_input, merge_list, [("in_file",               "in4")]),

                    (merge_list, gunzipper, [("out", "in_file")]),

                    # warp the PET PVCed to MNI
                    (petpvc,    warp_pet,   [("pvc_output.coreg_ref", "image_to_align")]),
                    (gunzipper, warp_pet,   [("out_file",             "apply_to_files")]),
                    (tpm_bbox,  warp_pet,   [("bbox",                 "write_bounding_box")]),

                    # output
                    (petpvc,    pet_output, [("pvc_output.pvc_out",      "pvc_out"),
                                             ("pvc_output.brain_mask",   "brain_mask"),
                                             ("pvc_output.coreg_ref",    "coreg_ref"),
                                             ("pvc_output.coreg_others", "coreg_others"),
                                             ("pvc_output.gm_norm",      "gm_norm")]),

                    # output
                    (warp_pet,  pet_output, [("normalized_files",  "pvc_warped"),
                                             ("deformation_field", "warp_field")]),
                   ])
    else:  # PET to anatomical space
        collector  = setup_node(Merge(2), name='merge_for_warp')
        apply_warp = setup_node(spm_apply_deformations(), name="warp_pet")

        wf.connect([
                    # inputs
                    (pet_input, petpvc,     [("in_file", "pvc_input.in_file"),
                                             ("anat",    "pvc_input.reference_file"),
                                             ("tissues", "pvc_input.tissues")]),

                    # gunzip some files for SPM Normalize
                    (petpvc,    merge_list, [("pvc_output.pvc_out",    "in1"),
                                             ("pvc_output.brain_mask", "in2"),
                                             ("pvc_output.gm_norm",    "in3")]),
                    (pet_input, merge_list, [("in_file",               "in4")]),

                    (merge_list, gunzipper, [("out",                   "in_file")]),

                    # warp the PET PVCed to MNI
                    (gunzipper,   collector,   [("out_file",             "in1")]),
                    (petpvc,      collector,   [("pvc_output.coreg_ref", "in2")]),

                    (pet_input,   apply_warp,  [("anat_to_mni_warp", "deformation_file")]),
                    (collector,   apply_warp,  [("out",              "apply_to_files")]),
                    (tpm_bbox,    apply_warp,  [("bbox",             "write_bounding_box")]),

                    # output
                    (petpvc,    pet_output, [("pvc_output.pvc_out",      "pvc_out"),
                                             ("pvc_output.brain_mask",   "brain_mask"),
                                             ("pvc_output.petpvc_mask",  "pvc_mask"),
                                             ("pvc_output.coreg_ref",    "coreg_ref"),
                                             ("pvc_output.coreg_others", "coreg_others"),
                                             ("pvc_output.gm_norm",      "gm_norm")]),

                    # output
                    (apply_warp,  pet_output, [("normalized_files",  "pvc_warped"),
                                               ("deformation_field", "warp_field")]),
                   ])


    if do_atlas:
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"), name="coreg_atlas")

        # set the registration interpolation to nearest neighbour.
        coreg_atlas.inputs.write_interp = 0
        wf.connect([
                    (pet_input,   coreg_atlas, [("anat",                 "source")]),
                    (petpvc,      coreg_atlas, [("pvc_output.coreg_ref", "target")]),
                    (pet_input,   coreg_atlas, [("atlas_anat",           "apply_to_files")]),
                    (coreg_atlas, pet_output,  [("coregistered_files",   "atlas_pet")]),
        ])

    return wf
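
# Minimal usage sketch (paths are placeholders): wire the required inputs, then run.
pet_wf = spm_mrpet_preprocessing()
pet_wf.inputs.pet_input.in_file = 'pet.nii.gz'
pet_wf.inputs.pet_input.anat = 't1.nii.gz'
pet_wf.inputs.pet_input.anat_to_mni_warp = 'y_t1_to_mni.nii.gz'
pet_wf.inputs.pet_input.tissues = ['c1t1.nii.gz', 'c2t1.nii.gz', 'c3t1.nii.gz']
# pet_wf.run()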
Example n. 24
0
def generate_single_session_template_WF(projectid, subjectid, sessionid, onlyT1, master_config, phase, interpMode,
                                        pipeline_name, doDenoise=True):
    """
    Run autoworkup on a single sessionid

    This is the main function to call when processing a data set with T1 & T2
    data. ExperimentBaseDirectoryPrefix is the base directory in which to place
    results; T1Images & T2Images are the lists of images to be used in the
    auto-workup. atlas_fname_wpath is the path and filename of the atlas to use.
    """

    #if  not 'landmark' in master_config['components'] or not 'auxlmk' in master_config['components'] or not 'tissue_classify' in master_config['components']:
    #    print "Baseline DataSink requires 'AUXLMK' and/or 'TISSUE_CLASSIFY'!!!"
    #    raise NotImplementedError
    # master_config['components'].append('auxlmk')
    # master_config['components'].append('tissue_classify')

    assert phase in ['atlas-based-reference',
                     'subject-based-reference'], "Unknown phase! Valid entries: 'atlas-based-reference', 'subject-based-reference'"

    if 'tissue_classify' in master_config['components']:
        assert ('landmark' in master_config['components'] ), "tissue_classify Requires landmark step!"
    # NOT TRUE if 'landmark' in master_config['components']:
    #    assert 'denoise' in master_config['components'], "landmark Requires denoise step!"

    if 'malf_2015_wholebrain' in master_config['components']:
        assert ('warp_atlas_to_subject' in master_config['components'] ), "malf_2015_wholebrain requires warp_atlas_to_subject!"

    from workflows.atlasNode import MakeAtlasNode

    baw201 = pe.Workflow(name=pipeline_name)

    inputsSpec = pe.Node(interface=IdentityInterface(fields=['atlasLandmarkFilename', 'atlasWeightFilename',
                                                             'LLSModel', 'inputTemplateModel', 'template_t1',
                                                             'atlasDefinition', 'T1s', 'T2s', 'PDs', 'FLs', 'OTHERs',
                                                             'hncma_atlas',
                                                             'template_rightHemisphere',
                                                             'template_leftHemisphere',
                                                             'template_WMPM2_labels',
                                                             'template_nac_labels',
                                                             'template_ventricles']),
                         run_without_submitting=True, name='inputspec')

    outputsSpec = pe.Node(interface=IdentityInterface(fields=['t1_average', 't2_average', 'pd_average', 'fl_average',
                                                              'posteriorImages', 'outputLabels', 'outputHeadLabels',
                                                              'atlasToSubjectTransform',
                                                              'atlasToSubjectInverseTransform',
                                                              'atlasToSubjectRegistrationState',
                                                              'BCD_ACPC_T1_CROPPED',
                                                              'outputLandmarksInACPCAlignedSpace',
                                                              'outputLandmarksInInputSpace',
                                                              'output_tx', 'LMIatlasToSubject_tx',
                                                              'writeBranded2DImage',
                                                              'brainStemMask',
                                                              'UpdatedPosteriorsList'  # Longitudinal
    ]),
                          run_without_submitting=True, name='outputspec')

    dsName = "{0}_ds_{1}".format(phase, sessionid)
    DataSink = pe.Node(name=dsName, interface=nio.DataSink())
    DataSink.overwrite = master_config['ds_overwrite']
    DataSink.inputs.container = '{0}/{1}/{2}'.format(projectid, subjectid, sessionid)
    DataSink.inputs.base_directory = master_config['resultdir']

    atlas_static_directory = master_config['atlascache']
    if master_config['workflow_phase'] == 'atlas-based-reference':
        atlas_warped_directory = master_config['atlascache']
        atlasABCNode_XML = MakeAtlasNode(atlas_warped_directory, 'BABCXMLAtlas_{0}'.format(sessionid),
                                         ['W_BRAINSABCSupport'])
        baw201.connect(atlasABCNode_XML, 'ExtendedAtlasDefinition_xml', inputsSpec, 'atlasDefinition')

        atlasABCNode_W = MakeAtlasNode(atlas_warped_directory, 'BABCAtlas_W{0}'.format(sessionid),
                                       ['W_BRAINSABCSupport', 'W_LabelMapsSupport'])
        baw201.connect([( atlasABCNode_W, inputsSpec, [
            ('hncma_atlas', 'hncma_atlas'),
            ('template_leftHemisphere', 'template_leftHemisphere'),
            ('template_rightHemisphere', 'template_rightHemisphere'),
            ('template_WMPM2_labels', 'template_WMPM2_labels'),
            ('template_nac_labels', 'template_nac_labels'),
            ('template_ventricles', 'template_ventricles')]
                        )]
        )
        ## These landmarks are only relevant for the atlas-based-reference case
        atlasBCDNode_W = MakeAtlasNode(atlas_warped_directory, 'BBCDAtlas_W{0}'.format(sessionid),
                                       ['W_BCDSupport'])
        baw201.connect([(atlasBCDNode_W, inputsSpec,
                         [('template_t1', 'template_t1'),
                          ('template_landmarks_50Lmks_fcsv', 'atlasLandmarkFilename'),
                         ]),
        ])
        ## Needed for both segmentation and template building prep
        atlasBCUTNode_W = MakeAtlasNode(atlas_warped_directory,
                                        'BBCUTAtlas_W{0}'.format(sessionid), ['W_BRAINSCutSupport'])
    elif master_config['workflow_phase'] == 'subject-based-reference':
        print(master_config['previousresult'])
        atlas_warped_directory = os.path.join(master_config['previousresult'], subjectid, 'Atlas')

        atlasBCUTNode_W = pe.Node(interface=nio.DataGrabber(infields=['subject'],
                                                            outfields=[
                                                                "l_accumben_ProbabilityMap",
                                                                "r_accumben_ProbabilityMap",
                                                                "l_caudate_ProbabilityMap",
                                                                "r_caudate_ProbabilityMap",
                                                                "l_globus_ProbabilityMap",
                                                                "r_globus_ProbabilityMap",
                                                                "l_hippocampus_ProbabilityMap",
                                                                "r_hippocampus_ProbabilityMap",
                                                                "l_putamen_ProbabilityMap",
                                                                "r_putamen_ProbabilityMap",
                                                                "l_thalamus_ProbabilityMap",
                                                                "r_thalamus_ProbabilityMap",
                                                                "phi",
                                                                "rho",
                                                                "theta"
                                                            ]),
                                  name='PerSubject_atlasBCUTNode_W')
        atlasBCUTNode_W.inputs.base_directory = master_config['previousresult']
        atlasBCUTNode_W.inputs.subject = subjectid
        atlasBCUTNode_W.inputs.field_template = {
            'l_accumben_ProbabilityMap': '%s/Atlas/AVG_l_accumben_ProbabilityMap.nii.gz',
            'r_accumben_ProbabilityMap': '%s/Atlas/AVG_r_accumben_ProbabilityMap.nii.gz',
            'l_caudate_ProbabilityMap': '%s/Atlas/AVG_l_caudate_ProbabilityMap.nii.gz',
            'r_caudate_ProbabilityMap': '%s/Atlas/AVG_r_caudate_ProbabilityMap.nii.gz',
            'l_globus_ProbabilityMap': '%s/Atlas/AVG_l_globus_ProbabilityMap.nii.gz',
            'r_globus_ProbabilityMap': '%s/Atlas/AVG_r_globus_ProbabilityMap.nii.gz',
            'l_hippocampus_ProbabilityMap': '%s/Atlas/AVG_l_hippocampus_ProbabilityMap.nii.gz',
            'r_hippocampus_ProbabilityMap': '%s/Atlas/AVG_r_hippocampus_ProbabilityMap.nii.gz',
            'l_putamen_ProbabilityMap': '%s/Atlas/AVG_l_putamen_ProbabilityMap.nii.gz',
            'r_putamen_ProbabilityMap': '%s/Atlas/AVG_r_putamen_ProbabilityMap.nii.gz',
            'l_thalamus_ProbabilityMap': '%s/Atlas/AVG_l_thalamus_ProbabilityMap.nii.gz',
            'r_thalamus_ProbabilityMap': '%s/Atlas/AVG_r_thalamus_ProbabilityMap.nii.gz',
            'phi': '%s/Atlas/AVG_phi.nii.gz',
            'rho': '%s/Atlas/AVG_rho.nii.gz',
            'theta': '%s/Atlas/AVG_theta.nii.gz'
        }
        atlasBCUTNode_W.inputs.template_args = {
            'l_accumben_ProbabilityMap': [['subject']],
            'r_accumben_ProbabilityMap': [['subject']],
            'l_caudate_ProbabilityMap': [['subject']],
            'r_caudate_ProbabilityMap': [['subject']],
            'l_globus_ProbabilityMap': [['subject']],
            'r_globus_ProbabilityMap': [['subject']],
            'l_hippocampus_ProbabilityMap': [['subject']],
            'r_hippocampus_ProbabilityMap': [['subject']],
            'l_putamen_ProbabilityMap': [['subject']],
            'r_putamen_ProbabilityMap': [['subject']],
            'l_thalamus_ProbabilityMap': [['subject']],
            'r_thalamus_ProbabilityMap': [['subject']],
            'phi': [['subject']],
            'rho': [['subject']],
            'theta': [['subject']]
        }
        atlasBCUTNode_W.inputs.template = '*'
        atlasBCUTNode_W.inputs.sort_filelist = True
        atlasBCUTNode_W.inputs.raise_on_empty = True

        template_DG = pe.Node(interface=nio.DataGrabber(infields=['subject'],
                                                        outfields=['outAtlasXMLFullPath',
                                                                   'hncma_atlas',
                                                                   'template_leftHemisphere',
                                                                   'template_rightHemisphere',
                                                                   'template_WMPM2_labels',
                                                                   'template_nac_labels',
                                                                   'template_ventricles',
                                                                   'template_t1',
                                                                   'template_landmarks_50Lmks_fcsv'
                                                        ]),
                              name='Template_DG')
        template_DG.inputs.base_directory = master_config['previousresult']
        template_DG.inputs.subject = subjectid
        template_DG.inputs.field_template = {'outAtlasXMLFullPath': '%s/Atlas/AtlasDefinition_%s.xml',
                                             'hncma_atlas': '%s/Atlas/AVG_hncma_atlas.nii.gz',
                                             'template_leftHemisphere': '%s/Atlas/AVG_template_leftHemisphere.nii.gz',
                                             'template_rightHemisphere': '%s/Atlas/AVG_template_rightHemisphere.nii.gz',
                                             'template_WMPM2_labels': '%s/Atlas/AVG_template_WMPM2_labels.nii.gz',
                                             'template_nac_labels': '%s/Atlas/AVG_template_nac_labels.nii.gz',
                                             'template_ventricles': '%s/Atlas/AVG_template_ventricles.nii.gz',
                                             'template_t1': '%s/Atlas/AVG_T1.nii.gz',
                                             'template_landmarks_50Lmks_fcsv': '%s/Atlas/AVG_LMKS.fcsv',
        }
        template_DG.inputs.template_args = {'outAtlasXMLFullPath': [['subject', 'subject']],
                                            'hncma_atlas': [['subject']],
                                            'template_leftHemisphere': [['subject']],
                                            'template_rightHemisphere': [['subject']],
                                            'template_WMPM2_labels': [['subject']],
                                            'template_nac_labels': [['subject']],
                                            'template_ventricles': [['subject']],
                                            'template_t1': [['subject']],
                                            'template_landmarks_50Lmks_fcsv': [['subject']]
        }
        template_DG.inputs.template = '*'
        template_DG.inputs.sort_filelist = True
        template_DG.inputs.raise_on_empty = True
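        # DataGrabber expands each output field as field_template % template_args
        # under base_directory, so for subject "SUBJ01" the hncma_atlas field
        # above resolves to <previousresult>/SUBJ01/Atlas/AVG_hncma_atlas.nii.gz.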

        baw201.connect(template_DG, 'outAtlasXMLFullPath', inputsSpec, 'atlasDefinition')
        baw201.connect([(template_DG, inputsSpec, [
            ## Already connected ('template_t1','template_t1'),
            ('hncma_atlas', 'hncma_atlas'),
            ('template_leftHemisphere', 'template_leftHemisphere'),
            ('template_rightHemisphere', 'template_rightHemisphere'),
            ('template_WMPM2_labels', 'template_WMPM2_labels'),
            ('template_nac_labels', 'template_nac_labels'),
            ('template_ventricles', 'template_ventricles')]
                        )]
        )
        ## Template T1 and landmark files, taken here from the previous (subject-based) result
        baw201.connect([(template_DG, inputsSpec,
                         [('template_t1', 'template_t1'),
                          ('template_landmarks_50Lmks_fcsv', 'atlasLandmarkFilename'),
                         ]),
        ])

    else:
        raise ValueError("Invalid workflow type specified for singleSession")

    atlasBCDNode_S = MakeAtlasNode(atlas_static_directory, 'BBCDAtlas_S{0}'.format(sessionid),
                                   ['S_BCDSupport'])
    baw201.connect([(atlasBCDNode_S, inputsSpec,
                     [('template_weights_50Lmks_wts', 'atlasWeightFilename'),
                      ('LLSModel_50Lmks_h5', 'LLSModel'),
                      ('T1_50Lmks_mdl', 'inputTemplateModel')
                     ]),
    ])

    if doDenoise:
        print("\ndenoise image filter\n")
        makeDenoiseInImageList = pe.Node(Function(function=MakeOutFileList,
                                                  input_names=['T1List', 'T2List', 'PDList', 'FLList',
                                                               'OtherList', 'postfix', 'PrimaryT1'],
                                                  output_names=['inImageList', 'outImageList', 'imageTypeList']),
                                         run_without_submitting=True, name="99_makeDenoiseInImageList")
        baw201.connect(inputsSpec, 'T1s', makeDenoiseInImageList, 'T1List')
        baw201.connect(inputsSpec, 'T2s', makeDenoiseInImageList, 'T2List')
        baw201.connect(inputsSpec, 'PDs', makeDenoiseInImageList, 'PDList')
        makeDenoiseInImageList.inputs.FLList = []  # an emptyList HACK
        makeDenoiseInImageList.inputs.PrimaryT1 = None  # an emptyList HACK
        makeDenoiseInImageList.inputs.postfix = "_UNM_denoised.nii.gz"
        # HACK baw201.connect( inputsSpec, 'FLList', makeDenoiseInImageList, 'FLList' )
        baw201.connect(inputsSpec, 'OTHERs', makeDenoiseInImageList, 'OtherList')

        print("\nDenoise:\n")
        DenoiseInputImgs = pe.MapNode(interface=UnbiasedNonLocalMeans(),
                                      name='denoiseInputImgs',
                                      iterfield=['inputVolume',
                                                 'outputVolume'])
        DenoiseInputImgs.inputs.rc = [1, 1, 1]
        DenoiseInputImgs.inputs.rs = [4, 4, 4]
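        # rc is the comparison (patch) radius and rs the search radius of the
        # non-local means filter (assumed semantics of the UnbiasedNonLocalMeans
        # CLI parameters).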
        DenoiseInputImgs.plugin_args = {'qsub_args': modify_qsub_args(master_config['queue'], .2, 1, 1),
                                        'overwrite': True}
        baw201.connect([(makeDenoiseInImageList, DenoiseInputImgs, [('inImageList', 'inputVolume')]),
                        (makeDenoiseInImageList, DenoiseInputImgs, [('outImageList', 'outputVolume')])
        ])
        print("\nMerge all T1 and T2 List\n")
        makePreprocessingOutList = pe.Node(Function(function=GenerateSeparateImageTypeList,
                                                    input_names=['inFileList', 'inTypeList'],
                                                    output_names=['T1s', 'T2s', 'PDs', 'FLs', 'OtherList']),
                                           run_without_submitting=True, name="99_makePreprocessingOutList")
        baw201.connect(DenoiseInputImgs, 'outputVolume', makePreprocessingOutList, 'inFileList')
        baw201.connect(makeDenoiseInImageList, 'imageTypeList', makePreprocessingOutList, 'inTypeList')

    else:
        makePreprocessingOutList = inputsSpec

    if 'landmark' in master_config['components']:
        DoReverseMapping = False  # Set to true for debugging outputs
        if 'auxlmk' in master_config['components']:
            DoReverseMapping = True
        myLocalLMIWF = CreateLandmarkInitializeWorkflow("LandmarkInitialize", interpMode, DoReverseMapping)

        baw201.connect([(makePreprocessingOutList, myLocalLMIWF,
                         [(('T1s', get_list_element, 0), 'inputspec.inputVolume' )]),
                        (inputsSpec, myLocalLMIWF,
                         [('atlasLandmarkFilename', 'inputspec.atlasLandmarkFilename'),
                          ('atlasWeightFilename', 'inputspec.atlasWeightFilename'),
                          ('LLSModel', 'inputspec.LLSModel'),
                          ('inputTemplateModel', 'inputspec.inputTemplateModel'),
                          ('template_t1', 'inputspec.atlasVolume')]),
                        (myLocalLMIWF, outputsSpec,
                         [('outputspec.outputResampledCroppedVolume', 'BCD_ACPC_T1_CROPPED'),
                          ('outputspec.outputLandmarksInACPCAlignedSpace',
                           'outputLandmarksInACPCAlignedSpace'),
                          ('outputspec.outputLandmarksInInputSpace',
                           'outputLandmarksInInputSpace'),
                          ('outputspec.outputTransform', 'output_tx'),
                          ('outputspec.atlasToSubjectTransform', 'LMIatlasToSubject_tx'),
                          ('outputspec.writeBranded2DImage', 'writeBranded2DImage')])
        ])
        baw201.connect([(outputsSpec, DataSink,  # TODO: change to myLocalLMIWF -> DataSink
                         [('outputLandmarksInACPCAlignedSpace', 'ACPCAlign.@outputLandmarks_ACPC'),
                          ('writeBranded2DImage', 'ACPCAlign.@writeBranded2DImage'),
                          ('BCD_ACPC_T1_CROPPED', 'ACPCAlign.@BCD_ACPC_T1_CROPPED'),
                          ('outputLandmarksInInputSpace', 'ACPCAlign.@outputLandmarks_Input'),
                          ('output_tx', 'ACPCAlign.@output_tx'),
                          ('LMIatlasToSubject_tx', 'ACPCAlign.@LMIatlasToSubject_tx'), ]
                        )
        ]
        )

    if 'tissue_classify' in master_config['components']:
        useRegistrationMask = master_config['use_registration_masking']

        myLocalTCWF = CreateTissueClassifyWorkflow("TissueClassify", master_config, interpMode,useRegistrationMask)
        baw201.connect([(makePreprocessingOutList, myLocalTCWF, [('T1s', 'inputspec.T1List')]),
                        (makePreprocessingOutList, myLocalTCWF, [('T2s', 'inputspec.T2List')]),
                        (inputsSpec, myLocalTCWF, [('atlasDefinition', 'inputspec.atlasDefinition'),
                                                   ('template_t1', 'inputspec.atlasVolume'),
                                                   (('T1s', getAllT1sLength), 'inputspec.T1_count'),
                                                   ('PDs', 'inputspec.PDList'),
                                                   ('FLs', 'inputspec.FLList'),
                                                   ('OTHERs', 'inputspec.OtherList')
                        ]),
                        (myLocalLMIWF, myLocalTCWF, [('outputspec.outputResampledCroppedVolume', 'inputspec.PrimaryT1'),
                                                     ('outputspec.atlasToSubjectTransform',
                                                      'inputspec.atlasToSubjectInitialTransform')]),
                        (myLocalTCWF, outputsSpec, [('outputspec.t1_average', 't1_average'),
                                                    ('outputspec.t2_average', 't2_average'),
                                                    ('outputspec.pd_average', 'pd_average'),
                                                    ('outputspec.fl_average', 'fl_average'),
                                                    ('outputspec.posteriorImages', 'posteriorImages'),
                                                    ('outputspec.outputLabels', 'outputLabels'),
                                                    ('outputspec.outputHeadLabels', 'outputHeadLabels'),
                                                    ('outputspec.atlasToSubjectTransform', 'atlasToSubjectTransform'),
                                                    ('outputspec.atlasToSubjectInverseTransform',
                                                     'atlasToSubjectInverseTransform'),
                                                    ('outputspec.atlasToSubjectRegistrationState',
                                                     'atlasToSubjectRegistrationState')
                        ]),
        ])

        baw201.connect([(outputsSpec, DataSink,  # TODO: change to myLocalTCWF -> DataSink
                         [(('t1_average', convertToList), 'TissueClassify.@t1'),
                          (('t2_average', convertToList), 'TissueClassify.@t2'),
                          (('pd_average', convertToList), 'TissueClassify.@pd'),
                          (('fl_average', convertToList), 'TissueClassify.@fl')])
        ])

        currentFixWMPartitioningName = "_".join(['FixWMPartitioning', str(subjectid), str(sessionid)])
        FixWMNode = pe.Node(interface=Function(function=FixWMPartitioning,
                                               input_names=['brainMask', 'PosteriorsList'],
                                               output_names=['UpdatedPosteriorsList', 'MatchingFGCodeList',
                                                             'MatchingLabelList', 'nonAirRegionMask']),
                            name=currentFixWMPartitioningName)

        baw201.connect([(myLocalTCWF, FixWMNode, [('outputspec.outputLabels', 'brainMask'),
                                                  (('outputspec.posteriorImages', flattenDict), 'PosteriorsList')]),
                        (FixWMNode, outputsSpec, [('UpdatedPosteriorsList', 'UpdatedPosteriorsList')]),
        ])

        currentBRAINSCreateLabelMapName = 'BRAINSCreateLabelMapFromProbabilityMaps_' + str(subjectid) + "_" + str(
            sessionid)
        BRAINSCreateLabelMapNode = pe.Node(interface=BRAINSCreateLabelMapFromProbabilityMaps(),
                                           name=currentBRAINSCreateLabelMapName)

        ## TODO:  Fix the file names
        BRAINSCreateLabelMapNode.inputs.dirtyLabelVolume = 'fixed_headlabels_seg.nii.gz'
        BRAINSCreateLabelMapNode.inputs.cleanLabelVolume = 'fixed_brainlabels_seg.nii.gz'

        baw201.connect([(FixWMNode, BRAINSCreateLabelMapNode, [('UpdatedPosteriorsList', 'inputProbabilityVolume'),
                                                               ('MatchingFGCodeList', 'foregroundPriors'),
                                                               ('MatchingLabelList', 'priorLabelCodes'),
                                                               ('nonAirRegionMask', 'nonAirRegionMask')]),
                        (BRAINSCreateLabelMapNode, DataSink,
                         [  # brainstem code below replaces this ('cleanLabelVolume', 'TissueClassify.@outputLabels'),
                            ('dirtyLabelVolume', 'TissueClassify.@outputHeadLabels')]),
                        (myLocalTCWF, DataSink, [('outputspec.atlasToSubjectTransform',
                                                  'TissueClassify.@atlas2session_tx'),
                                                 ('outputspec.atlasToSubjectInverseTransform',
                                                  'TissueClassify.@atlas2sessionInverse_tx')]),
                        (FixWMNode, DataSink, [('UpdatedPosteriorsList', 'TissueClassify.@posteriors')]),
        ])

        currentAccumulateLikeTissuePosteriorsName = 'AccumulateLikeTissuePosteriors_' + str(subjectid) + "_" + str(
            sessionid)
        AccumulateLikeTissuePosteriorsNode = pe.Node(interface=Function(function=AccumulateLikeTissuePosteriors,
                                                                        input_names=['posteriorImages'],
                                                                        output_names=['AccumulatePriorsList',
                                                                                      'AccumulatePriorsNames']),
                                                     name=currentAccumulateLikeTissuePosteriorsName)

        baw201.connect([(FixWMNode, AccumulateLikeTissuePosteriorsNode, [('UpdatedPosteriorsList', 'posteriorImages')]),
                        (AccumulateLikeTissuePosteriorsNode, DataSink, [('AccumulatePriorsList',
                                                                         'ACCUMULATED_POSTERIORS.@AccumulateLikeTissuePosteriorsOutputDir')])])

        """
        brain stem adds on feature
        inputs:
            - landmark (fcsv) file
            - fixed brainlabels seg.nii.gz
        output:
            - complete_brainlabels_seg.nii.gz Segmentation
        """
        myLocalBrainStemWF = CreateBrainstemWorkflow("BrainStem",
                                                     master_config['queue'],
                                                     "complete_brainlabels_seg.nii.gz")

        baw201.connect([(myLocalLMIWF, myLocalBrainStemWF, [('outputspec.outputLandmarksInACPCAlignedSpace',
                                                             'inputspec.inputLandmarkFilename')]),
                        (BRAINSCreateLabelMapNode, myLocalBrainStemWF, [('cleanLabelVolume',
                                                                         'inputspec.inputTissueLabelFilename')])
        ])

        baw201.connect(myLocalBrainStemWF, 'outputspec.ouputTissuelLabelFilename', DataSink,
                       'TissueClassify.@complete_brainlabels_seg')


    ###########################
    do_BRAINSCut_Segmentation = DetermineIfSegmentationShouldBeDone(master_config)
    if do_BRAINSCut_Segmentation:
        from workflows.segmentation import segmentation
        from workflows.WorkupT1T2BRAINSCut import GenerateWFName

        sname = 'segmentation'
        segWF = segmentation(projectid, subjectid, sessionid, master_config, onlyT1, pipeline_name=sname)

        baw201.connect([(inputsSpec, segWF,
                         [
                             ('template_t1', 'inputspec.template_t1')
                         ])
        ])
        baw201.connect([(atlasBCUTNode_W, segWF,
                         [
                             ('rho', 'inputspec.rho'),
                             ('phi', 'inputspec.phi'),
                             ('theta', 'inputspec.theta'),
                             ('l_caudate_ProbabilityMap', 'inputspec.l_caudate_ProbabilityMap'),
                             ('r_caudate_ProbabilityMap', 'inputspec.r_caudate_ProbabilityMap'),
                             ('l_hippocampus_ProbabilityMap', 'inputspec.l_hippocampus_ProbabilityMap'),
                             ('r_hippocampus_ProbabilityMap', 'inputspec.r_hippocampus_ProbabilityMap'),
                             ('l_putamen_ProbabilityMap', 'inputspec.l_putamen_ProbabilityMap'),
                             ('r_putamen_ProbabilityMap', 'inputspec.r_putamen_ProbabilityMap'),
                             ('l_thalamus_ProbabilityMap', 'inputspec.l_thalamus_ProbabilityMap'),
                             ('r_thalamus_ProbabilityMap', 'inputspec.r_thalamus_ProbabilityMap'),
                             ('l_accumben_ProbabilityMap', 'inputspec.l_accumben_ProbabilityMap'),
                             ('r_accumben_ProbabilityMap', 'inputspec.r_accumben_ProbabilityMap'),
                             ('l_globus_ProbabilityMap', 'inputspec.l_globus_ProbabilityMap'),
                             ('r_globus_ProbabilityMap', 'inputspec.r_globus_ProbabilityMap')
                         ]
                        )])

        atlasBCUTNode_S = MakeAtlasNode(atlas_static_directory,
                                        'BBCUTAtlas_S{0}'.format(sessionid), ['S_BRAINSCutSupport'])
        baw201.connect(atlasBCUTNode_S, 'trainModelFile_txtD0060NT0060_gz',
                       segWF, 'inputspec.trainModelFile_txtD0060NT0060_gz')

        ## baw201_outputspec = baw201.get_node('outputspec')
        baw201.connect([(myLocalTCWF, segWF, [('outputspec.t1_average', 'inputspec.t1_average'),
                                              ('outputspec.atlasToSubjectRegistrationState',
                                               'inputspec.atlasToSubjectRegistrationState'),
                                              ('outputspec.outputLabels', 'inputspec.inputLabels'),
                                              ('outputspec.posteriorImages', 'inputspec.posteriorImages'),
                                              ('outputspec.outputHeadLabels', 'inputspec.inputHeadLabels')]),
                        (myLocalLMIWF, segWF, [('outputspec.atlasToSubjectTransform',
                                                'inputspec.LMIatlasToSubject_tx')]),
                        (FixWMNode, segWF, [('UpdatedPosteriorsList', 'inputspec.UpdatedPosteriorsList')]),
        ])
        if not onlyT1:
            baw201.connect([(myLocalTCWF, segWF, [('outputspec.t2_average', 'inputspec.t2_average')])])

    if 'warp_atlas_to_subject' in master_config['components']:
        ##
        ##~/src/NEP-build/bin/BRAINSResample
        # --warpTransform AtlasToSubjectPreBABC_Composite.h5
        #  --inputVolume  /Shared/sinapse/CACHE/x20141001_KIDTEST_base_CACHE/Atlas/hncma-atlas.nii.gz
        #  --referenceVolume  /Shared/sinapse/CACHE/x20141001_KIDTEST_base_CACHE/singleSession_KID1_KT1/LandmarkInitialize/BROIAuto_cropped/Cropped_BCD_ACPC_Aligned.nii.gz
        # !--outputVolume hncma.nii.gz
        # !--interpolationMode NearestNeighbor
        # !--pixelType short
        ##
        ##

        ## TODO : SHOULD USE BRAINSCut transform that was refined even further!

        BResample = dict()
        AtlasLabelMapsToResample = [
            'hncma_atlas',
            'template_WMPM2_labels',
            'template_nac_labels',
        ]


        for atlasImage in AtlasLabelMapsToResample:
            BResample[atlasImage] = pe.Node(interface=BRAINSResample(), name="BRAINSResample_" + atlasImage)
            BResample[atlasImage].plugin_args = {'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1),
                                                 'overwrite': True}
            BResample[atlasImage].inputs.pixelType = 'short'
            BResample[atlasImage].inputs.interpolationMode = 'NearestNeighbor'
            BResample[atlasImage].inputs.outputVolume = atlasImage + ".nii.gz"

            baw201.connect(myLocalTCWF, 'outputspec.t1_average', BResample[atlasImage], 'referenceVolume')
            baw201.connect(inputsSpec, atlasImage, BResample[atlasImage], 'inputVolume')
            baw201.connect(myLocalTCWF, 'outputspec.atlasToSubjectTransform',
                           BResample[atlasImage], 'warpTransform')
            baw201.connect(BResample[atlasImage], 'outputVolume', DataSink, 'WarpedAtlas2Subject.@' + atlasImage)

        AtlasBinaryMapsToResample = [
            'template_rightHemisphere',
            'template_leftHemisphere',
            'template_ventricles']

        for atlasImage in AtlasBinaryMapsToResample:
            BResample[atlasImage] = pe.Node(interface=BRAINSResample(), name="BRAINSResample_" + atlasImage)
            BResample[atlasImage].plugin_args = {'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1),
                                                 'overwrite': True}
            BResample[atlasImage].inputs.pixelType = 'binary'
            ## Converted to a distance map, so use linear interpolation when resampling
            BResample[atlasImage].inputs.interpolationMode = 'Linear'
            BResample[atlasImage].inputs.outputVolume = atlasImage + ".nii.gz"

            baw201.connect(myLocalTCWF, 'outputspec.t1_average', BResample[atlasImage], 'referenceVolume')
            baw201.connect(inputsSpec, atlasImage, BResample[atlasImage], 'inputVolume')
            baw201.connect(myLocalTCWF, 'outputspec.atlasToSubjectTransform', BResample[atlasImage], 'warpTransform')
            baw201.connect(BResample[atlasImage], 'outputVolume', DataSink, 'WarpedAtlas2Subject.@' + atlasImage)

        BRAINSCutAtlasImages = [
            'rho',
            'phi',
            'theta',
            'l_caudate_ProbabilityMap',
            'r_caudate_ProbabilityMap',
            'l_hippocampus_ProbabilityMap',
            'r_hippocampus_ProbabilityMap',
            'l_putamen_ProbabilityMap',
            'r_putamen_ProbabilityMap',
            'l_thalamus_ProbabilityMap',
            'r_thalamus_ProbabilityMap',
            'l_accumben_ProbabilityMap',
            'r_accumben_ProbabilityMap',
            'l_globus_ProbabilityMap',
            'r_globus_ProbabilityMap'
        ]
        for atlasImage in BRAINSCutAtlasImages:
            BResample[atlasImage] = pe.Node(interface=BRAINSResample(), name="BCUTBRAINSResample_" + atlasImage)
            BResample[atlasImage].plugin_args = {'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1),
                                                 'overwrite': True}
            BResample[atlasImage].inputs.pixelType = 'float'
            ## Converted to a distance map, so use linear interpolation when resampling
            BResample[atlasImage].inputs.interpolationMode = 'Linear'
            BResample[atlasImage].inputs.outputVolume = atlasImage + ".nii.gz"

            baw201.connect(myLocalTCWF, 'outputspec.t1_average', BResample[atlasImage], 'referenceVolume')
            baw201.connect(atlasBCUTNode_W, atlasImage, BResample[atlasImage], 'inputVolume')
            baw201.connect(myLocalTCWF, 'outputspec.atlasToSubjectTransform', BResample[atlasImage], 'warpTransform')
            baw201.connect(BResample[atlasImage], 'outputVolume', DataSink, 'WarpedAtlas2Subject.@' + atlasImage)


        WhiteMatterHemisphereNode = pe.Node(interface=Function(function=CreateLeftRightWMHemispheres,
                                                                        input_names=['BRAINLABELSFile',
                                                                                     'HDCMARegisteredVentricleMaskFN',
                                                                                     'LeftHemisphereMaskName',
                                                                                     'RightHemisphereMaskName',
                                                                                     'WM_LeftHemisphereFileName',
                                                                                     'WM_RightHemisphereFileName'],
                                                                        output_names=['WM_LeftHemisphereFileName',
                                                                                      'WM_RightHemisphereFileName']),
                                                     name="WhiteMatterHemisphere")
        WhiteMatterHemisphereNode.inputs.WM_LeftHemisphereFileName = "left_hemisphere_wm.nii.gz"
        WhiteMatterHemisphereNode.inputs.WM_RightHemisphereFileName = "right_hemisphere_wm.nii.gz"

        baw201.connect(myLocalBrainStemWF, 'outputspec.ouputTissuelLabelFilename',
                       WhiteMatterHemisphereNode, 'BRAINLABELSFile')
        baw201.connect(BResample['hncma_atlas'], 'outputVolume',
                       WhiteMatterHemisphereNode, 'HDCMARegisteredVentricleMaskFN')
        baw201.connect(BResample['template_leftHemisphere'], 'outputVolume',
                       WhiteMatterHemisphereNode, 'LeftHemisphereMaskName')
        baw201.connect(BResample['template_rightHemisphere'], 'outputVolume',
                       WhiteMatterHemisphereNode, 'RightHemisphereMaskName')

        baw201.connect(WhiteMatterHemisphereNode, 'WM_LeftHemisphereFileName',
                       DataSink, 'WarpedAtlas2Subject.@LeftHemisphereWM')
        baw201.connect(WhiteMatterHemisphereNode, 'WM_RightHemisphereFileName',
                       DataSink, 'WarpedAtlas2Subject.@RightHemisphereWM')

    if 'malf_2015_wholebrain' in master_config['components']:  ## HACK Do MALF labeling
        ## HACK: hard-coded for now; this should be read from the .config file
        BASE_DATA_GRABBER_DIR = '/Shared/johnsonhj/HDNI/ReferenceData/Neuromorphometrics/2012Subscription'

        if onlyT1:
            print("T1 only processing in baseline")
        else:
            print("Multimodal processing in baseline")
        myLocalMALF = CreateMALFWorkflow("MALF", onlyT1, master_config, BASE_DATA_GRABBER_DIR)
        baw201.connect(myLocalTCWF, 'outputspec.t1_average', myLocalMALF, 'inputspec.subj_t1_image')
        baw201.connect(myLocalTCWF, 'outputspec.t2_average', myLocalMALF, 'inputspec.subj_t2_image')
        baw201.connect(myLocalBrainStemWF, 'outputspec.ouputTissuelLabelFilename',
                       myLocalMALF, 'inputspec.subj_fixed_head_labels')

        baw201.connect(BResample['template_leftHemisphere'], 'outputVolume',
                       myLocalMALF, 'inputspec.subj_left_hemisphere')
        baw201.connect(myLocalLMIWF, 'outputspec.outputLandmarksInACPCAlignedSpace',
                       myLocalMALF, 'inputspec.subj_lmks')
        baw201.connect(atlasBCDNode_S, 'template_weights_50Lmks_wts',
                       myLocalMALF, 'inputspec.atlasWeightFilename')

        inputLabelFileMALFnameSpec = pe.Node(interface=IdentityInterface(fields=['labelBaseFilename']),
                                             run_without_submitting=True,
                                             name="inputLabelFileMALFnameSpec")
        baw201.connect(inputLabelFileMALFnameSpec, 'labelBaseFilename',
                       myLocalMALF, 'inputspec.labelBaseFilename')

        baw201.connect(myLocalMALF, 'outputspec.MALF_HDAtlas20_2015_label',
                       DataSink, 'TissueClassify.@MALF_HDAtlas20_2015_label')
        baw201.connect(myLocalMALF, 'outputspec.MALF_HDAtlas20_2015_CSFVBInjected_label',
                       DataSink, 'TissueClassify.@MALF_HDAtlas20_2015_CSFVBInjected_label')
        baw201.connect(myLocalMALF, 'outputspec.MALF_HDAtlas20_2015_fs_standard_label',
                       DataSink, 'TissueClassify.@MALF_HDAtlas20_2015_fs_standard_label')
        baw201.connect(myLocalMALF, 'outputspec.MALF_HDAtlas20_2015_lobar_label',
                       DataSink, 'TissueClassify.@MALF_HDAtlas20_2015_lobar_label')
        baw201.connect(myLocalMALF, 'outputspec.MALF_extended_snapshot',
                       DataSink, 'TissueClassify.@MALF_extended_snapshot')

    return baw201
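
The three resampling loops above repeat the same BRAINSResample node boilerplate, varying only the pixel type, interpolation mode and input source. A minimal factory sketch, assuming the same `pe`, `BRAINSResample` and `modify_qsub_args` names the example already uses, would remove the duplication:

def make_resample_node(image_name, pixel_type, interp, queue, prefix="BRAINSResample_"):
    """Build one configured BRAINSResample node, mirroring the loop bodies above."""
    node = pe.Node(interface=BRAINSResample(), name=prefix + image_name)
    node.plugin_args = {'qsub_args': modify_qsub_args(queue, 1, 1, 1),
                        'overwrite': True}
    node.inputs.pixelType = pixel_type
    node.inputs.interpolationMode = interp
    node.inputs.outputVolume = image_name + ".nii.gz"
    return node

# e.g., the label-map loop then becomes:
# for atlasImage in AtlasLabelMapsToResample:
#     BResample[atlasImage] = make_resample_node(atlasImage, 'short',
#                                                'NearestNeighbor', master_config['queue'])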
Esempio n. 25
0
def spm_mrpet_grouptemplate_preprocessing(wf_name="spm_mrpet_grouptemplate_preproc"):
    """ Run the PET pre-processing workflow against the gunzip_pet.in_file files.
    It depends on the anat_preproc_workflow, so if this has not been run, this function
    will run it too.

    This is identical to the workflow defined in `spm_mrpet_preprocessing`,
    the only difference being that here all subjects are normalized against
    a custom template using the SPM Old Normalize interface.

    It does:
    - SPM12 Coregister T1 and tissues to PET
    - PVC the PET image in PET space
    - SPM12 Warp PET to the given template

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pet_input.in_file: traits.File
        The raw NIFTI_GZ PET image file.

    pet_input.atlas_anat: traits.File
        The atlas file in anatomical space.

    pet_input.anat: traits.File
        Path to the high-contrast anatomical image.
        Reference file of the warp_field, i.e., the anatomical image in its native space.

    pet_input.tissues: list of traits.File
        List of tissues files from the New Segment process. At least the first
        3 tissues must be present.

    pet_input.pet_template: traits.File
        The template file for inter-subject registration reference.

    Nipype outputs
    --------------
    pet_output.pvc_out: existing file
        The results of the PVC process.

    pet_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pet_output.coreg_ref: existing file
        The coregistered reference image to PET space.

    pet_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files.

    pet_output.pet_warped: existing file
        PET image normalized to the group template.

    pet_output.pvc_warped: existing file
        The outputs of the PETPVC workflow normalized to the group template.
        The result of every internal pre-processing step is normalized to the
        group template here.

    pet_output.warp_field: existing files
        Spatial normalization parameters .mat files.

    pet_output.gm_norm: existing file
        The output of the grey matter intensity normalization process.
        This is the last step in the PET signal correction, before registration.

    pet_output.atlas_pet: existing file
        Atlas image warped to PET space.
        If the `atlas_file` option is an existing file and `normalize_atlas` is True.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields  = ["in_file",
                  "anat",
                  "tissues",
                  "pet_template"]

    out_fields = ["brain_mask",
                  "coreg_others",
                  "coreg_ref",
                  "pvc_warped",
                  "pet_warped",
                  "warp_field",
                  "pvc_out",
                  "pvc_mask",
                  "gm_norm",]

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields  += ["atlas_anat"]
        out_fields += ["atlas_pet" ]

    # input
    pet_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                           name="pet_input")

    # workflow to perform partial volume correction
    petpvc = petpvc_workflow(wf_name="petpvc")

    unzip_mrg = setup_node(Merge(4), name='merge_for_unzip')
    gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    # warp each subject to the group template
    gunzip_template = setup_node(Gunzip(), name="gunzip_template")
    gunzip_pet      = setup_node(Gunzip(), name="gunzip_pet")

    warp_mrg = setup_node(Merge(2), name='merge_for_warp')
    warp2template = setup_node(spm.Normalize(jobtype="estwrite", out_prefix="wgrptemplate_"),
                               name="warp2template")

    get_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="get_bbox")

    # output
    pet_output = setup_node(IdentityInterface(fields=out_fields), name="pet_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    wf.connect([
                # inputs
                (pet_input,   petpvc,  [("in_file", "pvc_input.in_file"),
                                        ("anat",    "pvc_input.reference_file"),
                                        ("tissues", "pvc_input.tissues")]),

                # get template bounding box to apply to results
                (pet_input, get_bbox,  [("pet_template", "in_file")]),

                # gunzip some inputs
                (pet_input, gunzip_pet,      [("in_file",      "in_file")]),
                (pet_input, gunzip_template, [("pet_template", "in_file")]),

                # gunzip some files for SPM Normalize
                (petpvc,    unzip_mrg, [("pvc_output.pvc_out",    "in1"),
                                        ("pvc_output.brain_mask", "in2"),
                                        ("pvc_output.gm_norm",    "in3")]),
                (pet_input, unzip_mrg, [("in_file",               "in4")]),

                (unzip_mrg, gunzipper, [("out", "in_file")]),

                (gunzipper, warp_mrg,  [("out_file", "in1")]),

                (warp_mrg, warp2template, [(("out", flatten_list), "apply_to_files")]),

                # prepare the target parameters of the warp to template
                (gunzip_pet,      warp2template, [("out_file", "source")]),
                (gunzip_template, warp2template, [("out_file", "template")]),
                (get_bbox,        warp2template, [("bbox",     "write_bounding_box")]),

                # output
                (warp2template, pet_output, [("normalization_parameters", "warp_field"),
                                             ("normalized_files" ,        "pvc_warped"),
                                             ("normalized_source",        "pet_warped"),
                                            ]),

                # output
                (petpvc,   pet_output, [("pvc_output.pvc_out",      "pvc_out"),
                                        ("pvc_output.brain_mask",   "brain_mask"),
                                        ("pvc_output.coreg_ref",    "coreg_ref"),
                                        ("pvc_output.coreg_others", "coreg_others"),
                                        ("pvc_output.gm_norm",      "gm_norm")]),
                ])

    if do_atlas:
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"), name="coreg_atlas")

        # set the registration interpolation to nearest neighbour.
        coreg_atlas.inputs.write_interp = 0
        wf.connect([
                    (pet_input,   coreg_atlas, [("anat",                 "source")]),
                    (petpvc,      coreg_atlas, [("pvc_output.coreg_ref", "target")]),
                    (pet_input,   coreg_atlas, [("atlas_anat",           "apply_to_files")]),
                    (coreg_atlas, pet_output,  [("coregistered_files",   "atlas_pet")]),

                    # warp the atlas to the template space as well
                    (coreg_atlas, warp_mrg,    [("coregistered_files",   "in2")]),
        ])

    return wf
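
The `get_bbox` node above wraps a `get_bounding_box` helper whose source is not shown in this example. A plausible stand-in (an assumption, not the package's own helper) computes the world-space bounding box that SPM Normalize's `write_bounding_box` input expects, using nibabel:

def get_bounding_box(in_file):
    """Hypothetical sketch: world-space bounding box [[min_xyz], [max_xyz]] of a NIfTI image."""
    import nibabel as nib
    import numpy as np

    img = nib.load(in_file)
    # voxel-space corners of the image grid
    corners = np.array(np.meshgrid(*[[0, d - 1] for d in img.shape[:3]])).reshape(3, -1).T
    # map the corners to world coordinates through the affine
    xyz = nib.affines.apply_affine(img.affine, corners)
    return [xyz.min(axis=0).tolist(), xyz.max(axis=0).tolist()]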
Esempio n. 26
0
    def __init__(self, settings):
        # call base constructor
        super().__init__(settings)

        # define input/output node
        self.set_input(['T1', 'orig', 'brainmask'])
        self.set_output(['T1_skullstrip', 'allineate_freesurfer2anat'])

        # define datasink substitutions
        self.set_subs([('_maskop40', ''), ('_calc_calc_calc_calc_calc', '')])

        # 3dAllineate (FSorig)
        self.allineate_orig = MapNode(afni.Allineate(
            out_matrix='FSorig2MPR.aff12.1D',
            overwrite=True,
            outputtype='NIFTI_GZ'),
                                      iterfield=['in_file', 'reference'],
                                      name='3dallineate_orig')
        # 3dAllineate (FSbrainmask)
        self.allineate_bm = MapNode(
            afni.Allineate(overwrite=True, no_pad=True, outputtype='NIFTI_GZ'),
            iterfield=['in_file', 'reference', 'in_matrix'],
            name='3dallineate_brainmask')

        # skullstrip mprage (afni)
        self.afni_skullstrip = MapNode(afni.SkullStrip(args="-orig_vol",
                                                       outputtype="NIFTI_GZ"),
                                       iterfield=['in_file'],
                                       name='afni_skullstrip')
        # 3dcalc operations for achieving final mask
        self.maskop1 = MapNode(afni.Calc(expr='step(a)',
                                         overwrite=True,
                                         outputtype='NIFTI_GZ'),
                               iterfield=['in_file_a'],
                               name='maskop1')
        self.maskop2 = []
        for n in range(3):
            self.maskop2.append(
                MapNode(afni.Calc(
                    args='-b a+i -c a-i -d a+j -e a-j -f a+k -g a-k',
                    expr='ispositive(a+b+c+d+e+f+g)',
                    overwrite=True,
                    outputtype='NIFTI_GZ'),
                        iterfield=['in_file_a'],
                        name='maskop2_{}'.format(n)))
        # Inline function that builds the (file, attribute) pair expected by
        # 3dRefit's `atrcopy` input, so the IJK_TO_DICOM_REAL attribute can
        # be copied back onto the skull-stripped T1
        self.refit_setup = MapNode(Function(input_names=['noskull_T1'],
                                            output_names=['refit_input'],
                                            function=lambda noskull_T1:
                                            (noskull_T1, 'IJK_TO_DICOM_REAL')),
                                   iterfield=['noskull_T1'],
                                   name='refitsetup')
        # 3dRefit
        self.refit = MapNode(afni.Refit(),
                             iterfield=['in_file', 'atrcopy'],
                             name='3drefit')
        # 3dcalc for uniform intensity
        self.uniform = MapNode(afni.Calc(expr='a*and(b,b)',
                                         overwrite=True,
                                         outputtype='NIFTI_GZ'),
                               iterfield=['in_file_a', 'in_file_b'],
                               name='uniformintensity')

        # skullstrip mprage (fsl)
        self.fsl_skullstrip = MapNode(fsl.BET(),
                                      iterfield=['in_file'],
                                      name='fsl_skullstrip')
        self.maskop3 = MapNode(
            afni.Calc(expr='or(a,b,c)', overwrite=True, outputtype='NIFTI_GZ'),
            iterfield=['in_file_a', 'in_file_b', 'in_file_c'],
            name='maskop3')
        self.maskop4 = MapNode(
            afni.Calc(expr='c*and(a,b)', overwrite=True,
                      outputtype='NIFTI_GZ'),
            iterfield=['in_file_a', 'in_file_b', 'in_file_c'],
            name='maskop4')

        # Convert from list to string input
        self.select0T1 = Node(Function(input_names=['T1_list'],
                                       output_names=['T1_0'],
                                       function=lambda T1_list: T1_list[0]),
                              name='select0T1')

        # apply bias field correction
        self.biasfieldcorrect = Node(ants.N4BiasFieldCorrection(
            num_threads=settings['num_threads'], copy_header=True),
                                     name='biasfieldcorrect')
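
Several nodes in this example (`refit_setup`, `select0T1`) wrap bare lambdas in nipype's `Function` interface. An equivalent pattern with a named function is sketched below; the node name and file names are made up for illustration. Because nipype serializes the wrapped callable's source and re-creates it at run time, the function must be self-contained:

from nipype import Node
from nipype.interfaces.utility import Function

def pick_first(items):
    # must be self-contained: nipype re-creates this from source at run time
    return items[0]

# input_names/output_names must line up with the wrapped callable's signature
select_first = Node(Function(input_names=['items'],
                             output_names=['first'],
                             function=pick_first),
                    name='select_first')
select_first.inputs.items = ['sub-01_T1w.nii.gz', 'sub-02_T1w.nii.gz']
# result = select_first.run(); result.outputs.first == 'sub-01_T1w.nii.gz'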
Esempio n. 27
0
def rest_noise_filter_wf(wf_name='rest_noise_removal'):
    """ Create a resting-state fMRI noise removal node.

    Nipype Inputs
    -------------
    rest_noise_input.in_file

    rest_noise_input.brain_mask

    rest_noise_input.wm_mask

    rest_noise_input.csf_mask

    rest_noise_input.motion_params
        Nipy motion parameters.

    Nipype Outputs
    --------------
    rest_noise_output.tsnr_file
        A tSNR estimation volume file for QA purposes.

    rest_noise_output.motion_corrected
        The fMRI motion corrected image.

    rest_noise_output.nuis_corrected
        The resulting nuisance corrected image.
        This will be the same as 'motion_corrected' if compcor
        is disabled.

    rest_noise_output.motion_regressors
        Motion regressors file.

    rest_noise_output.compcor_regressors
        CompCor regressors file.

    rest_noise_output.art_displacement_files
        One image file containing the voxel-displacement timeseries.

    rest_noise_output.art_intensity_files
        One file containing the global intensity values determined
        from the brainmask.

    rest_noise_output.art_norm_files
        One file containing the composite norm.

    rest_noise_output.art_outlier_files
         One file containing a list of 0-based indices corresponding
         to outlier volumes.

    rest_noise_output.art_plot_files
        One image file containing the detected outliers.

    rest_noise_output.art_statistic_files
        One file containing information about the different types of
        artifacts and, if design info is provided, details of
        stimulus-correlated motion and a listing of artifacts by
        event type.

    Returns
    -------
    wf: nipype Workflow
    """

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    in_fields = [
        "in_file",
        "brain_mask",
        "wm_mask",
        "csf_mask",
        "motion_params",
    ]

    out_fields = [
        "tsnr_file",
        "motion_corrected",
        "nuis_corrected",
        "motion_regressors",
        "compcor_regressors",
        "gsr_regressors",
        "art_displacement_files",
        "art_intensity_files",
        "art_norm_files",
        "art_outlier_files",
        "art_plot_files",
        "art_statistic_files",
    ]

    # input identities
    rest_noise_input = setup_node(IdentityInterface(fields=in_fields,
                                                    mandatory_inputs=True),
                                  name="rest_noise_input")

    # get the settings for filters
    filters = _get_params_for('rest_filter')

    # Compute TSNR on realigned data regressing polynomial up to order 2
    tsnr = setup_node(TSNR(regress_poly=2), name='tsnr')

    # Use :class:`nipype.algorithms.rapidart` to determine which of the
    # images in the functional series are outliers based on deviations in
    # intensity or movement.
    art = setup_node(rapidart_fmri_artifact_detection(),
                     name="detect_artifacts")

    # Compute motion regressors
    motion_regs = setup_node(Function(
        input_names=[
            'motion_params',
            'order',
            'derivatives',
        ],
        output_names=['out_files'],
        function=motion_regressors,
    ),
                             name='motion_regressors')

    # Create a filter to remove motion and art confounds
    motart_pars = setup_node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=create_regressors),
                             name='motart_parameters')

    motion_filter = setup_node(fsl.GLM(out_f_name='F_mcart.nii.gz',
                                       out_pf_name='pF_mcart.nii.gz',
                                       demean=True),
                               name='motion_filter')

    # Noise confound regressors
    compcor_pars = setup_node(Function(
        input_names=[
            'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
        ],
        output_names=['out_files'],
        function=extract_noise_components,
    ),
                              name='compcor_pars')
    #compcor_pars = setup_node(ACompCor(), name='compcor_pars')
    #compcor_pars.inputs.components_file = 'noise_components.txt'

    compcor_filter = setup_node(fsl.GLM(out_f_name='F.nii.gz',
                                        out_pf_name='pF.nii.gz',
                                        demean=True),
                                name='compcor_filter')

    # Global signal regression
    gsr_pars = setup_node(Function(
        input_names=[
            'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
        ],
        output_names=['out_files'],
        function=extract_noise_components,
    ),
                          name='gsr_pars')

    gsr_filter = setup_node(fsl.GLM(out_f_name='F_gsr.nii.gz',
                                    out_pf_name='pF_gsr.nii.gz',
                                    demean=True),
                            name='gsr_filter')

    # output identities
    rest_noise_output = setup_node(IdentityInterface(fields=out_fields,
                                                     mandatory_inputs=True),
                                   name="rest_noise_output")

    # Connect the nodes
    wf.connect([
        # tsnr
        (rest_noise_input, tsnr, [("in_file", "in_file")]),

        # artifact detection
        (rest_noise_input, art, [
            ("in_file", "realigned_files"),
            ("motion_params", "realignment_parameters"),
            ("brain_mask", "mask_file"),
        ]),

        # calculate motion regressors
        (rest_noise_input, motion_regs, [("motion_params", "motion_params")]),

        # create motion and confound regressors parameters file
        (art, motart_pars, [
            ("norm_files", "comp_norm"),
            ("outlier_files", "outliers"),
        ]),
        (motion_regs, motart_pars, [("out_files", "motion_params")]),

        # motion filtering
        (rest_noise_input, motion_filter, [
            ("in_file", "in_file"),
            (("in_file", rename, "_filtermotart"), "out_res_name"),
        ]),
        (motart_pars, motion_filter, [(("out_files", selectindex, [0]),
                                       "design")]),

        # output
        (tsnr, rest_noise_output, [("tsnr_file", "tsnr_file")]),
        (motart_pars, rest_noise_output, [("out_files", "motion_regressors")]),
        (motion_filter, rest_noise_output, [("out_res", "motion_corrected")]),
        (art, rest_noise_output, [
            ("displacement_files", "art_displacement_files"),
            ("intensity_files", "art_intensity_files"),
            ("norm_files", "art_norm_files"),
            ("outlier_files", "art_outlier_files"),
            ("plot_files", "art_plot_files"),
            ("statistic_files", "art_statistic_files"),
        ]),
    ])

    last_filter = motion_filter

    # compcor filter
    if filters['compcor_csf'] or filters['compcor_wm']:
        wf.connect([
            # calculate compcor regressor and parameters file
            (motart_pars, compcor_pars, [
                (("out_files", selectindex, [0]), "extra_regressors"),
            ]),
            (motion_filter, compcor_pars, [
                ("out_res", "realigned_file"),
            ]),

            # the compcor filter
            (motion_filter, compcor_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_cleaned"), "out_res_name"),
            ]),
            (compcor_pars, compcor_filter, [(("out_files", selectindex, [0]),
                                             "design")]),
            #(compcor_pars,     compcor_filter,    [("components_file",  "design")]),
            (rest_noise_input, compcor_filter, [("brain_mask", "mask")]),

            # output
            (compcor_pars, rest_noise_output, [("out_files",
                                                "compcor_regressors")]),
            #(compcor_pars,     rest_noise_output, [("components_file",   "compcor_regressors")]),
        ])
        last_filter = compcor_filter

    # global signal regression
    if filters['gsr']:
        wf.connect([
            # calculate gsr regressors parameters file
            (last_filter, gsr_pars, [("out_res", "realigned_file")]),
            (rest_noise_input, gsr_pars, [("brain_mask", "mask_file")]),

            # the output file name
            (rest_noise_input, gsr_filter, [("brain_mask", "mask")]),
            (last_filter, gsr_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_gsr"), "out_res_name"),
            ]),
            (gsr_pars, gsr_filter, [(("out_files", selectindex, [0]), "design")
                                    ]),

            # output
            (gsr_pars, rest_noise_output, [("out_files", "gsr_regressors")]),
        ])
        last_filter = gsr_filter

    # connect the final nuisance correction output node
    wf.connect([
        (last_filter, rest_noise_output, [("out_res", "nuis_corrected")]),
    ])

    if filters['compcor_csf'] and filters['compcor_wm']:
        mask_merge = setup_node(Merge(2), name="mask_merge")
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, mask_merge, [("wm_mask", "in1")]),
            (rest_noise_input, mask_merge, [("csf_mask", "in2")]),
            (mask_merge, compcor_pars, [("out", "mask_file")]),
        ])

    elif filters['compcor_csf']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("csf_mask", "mask_file")]),
        ])

    elif filters['compcor_wm']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("wm_mask", "mask_file")]),
        ])

    return wf
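
The connect statements above route node outputs through `rename` and `selectindex` helpers imported elsewhere in the package. A hypothetical sketch of `selectindex` (not the package's own code) shows how a list output can be trimmed before it reaches a downstream input:

def selectindex(files, idx):
    """Hypothetical sketch: pick the items at positions `idx` from `files`."""
    import numpy as np
    from nipype.utils.filemanip import ensure_list

    selected = np.array(ensure_list(files))[idx].tolist()
    # unwrap single-item lists so file inputs receive a plain path
    return selected[0] if len(selected) == 1 else selected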
Esempio n. 28
0
                     filename='bold_2_anat_sub-{0}.nii.gz'.format(subj_no))

    # composite_transform = os.path.abspath('bold_2_anat_sub-{0}_0GenericAffine.mat'.format(subj_no))

    # warped_image = os.path.abspath('bold_2_anat_sub-{0}.nii.gz'.format(subj_no))
    composite_transform = 'bold_2_anat_sub-{0}_0GenericAffine.mat'.format(
        subj_no)

    warped_image = 'bold_2_anat_sub-{0}.nii.gz'.format(subj_no)

    return composite_transform, warped_image  # Function nodes must return their declared outputs


coreg = Node(name='coreg',
             interface=Function(
                 input_names=['bold_image'],
                 output_names=['composite_transform', 'warped_image'],
                 function=coreg))

# ========================================================================================================
# In[12]:

# mcflirt -in ${folder} -out ${folder}_mcf  -refvol example_func -plots -mats  -report;

McFlirt = Node(fsl.MCFLIRT(), name='McFlirt')
McFlirt.inputs.save_plots = True
McFlirt.inputs.save_mats = True
McFlirt.inputs.save_rms = True
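# Note: the shell command above also pins a reference volume (-refvol);
# nipype exposes this through MCFLIRT's ref_vol (a volume index) or
# ref_file (a reference image), e.g.:
# McFlirt.inputs.ref_file = 'example_func.nii.gz'  # assumed filename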

# ========================================================================================================
# In[13]:
def folder_maker(path_name, folder_name=None):

    if folder_name is None:
        folder_name = 'bold'

    import os
    # exist_ok also creates featDir when the parent folder already exists
    # but featDir does not (the original only created it together with a
    # brand-new parent folder)
    os.makedirs(os.path.join(path_name, folder_name, 'featDir'), exist_ok=True)

    return path_name + '/' + folder_name + '/'


folderMaker = Node(Function(input_names=['path_name', 'folder_name'],
                            output_names=['folder_path'],
                            function=folder_maker),
                   name='folder_maker')


def fileNameBuilder(path, fname):
    return path + '/' + fname


def featFileNameBuilder(path, fname):
    return path + '.feat/' + fname


def selectFromList(inList, index):
    try:
        return inList[index]
    except TypeError:
        # assumed reconstruction: the listing is truncated here; fall back
        # to the input itself when it is not indexable
        return inList


def nilearn_smoothing(image):
    # NOTE: this header and the nilearn import are reconstructed from the
    # node definition below; the original listing was cut off here
    from nilearn.image import smooth_img
    import numpy as np
    import os

    # anisotropic smoothing kernel (FWHM in mm along x, y and z)
    kernel = [4.3, 4.3, 16]

    smoothed_img = smooth_img(image, kernel)
    smoothed_img.to_filename('smoothed_all.nii.gz')

    smoothed_output = os.path.abspath('smoothed_all.nii.gz')
    return smoothed_output


nilearn_smoothing = Node(name='nilearn_smoothing',
                         interface=Function(input_names=['image'],
                                            output_names=['smoothed_output'],
                                            function=nilearn_smoothing))

#-----------------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------------
# mask only FA values > 0.2 to guarantee it is WM
thresh_FA = Node(fsl.Threshold(), name='thresh_FA')
thresh_FA.inputs.thresh = 0.2

#-----------------------------------------------------------------------------------------------------
#binarize this mask
binarize_FA = Node(fsl.UnaryMaths(), name='binarize_FA')
binarize_FA.inputs.operation = 'bin'
binarize_FA.inputs.output_datatype = 'char'
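
# Roughly the command-line equivalent of the two nodes above (a sketch):
#   fslmaths FA -thr 0.2 FA_thr
#   fslmaths FA_thr -bin FA_mask -odt char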

#-----------------------------------------------------------------------------------------------------