Example #1
    def statistics_pipeline(self, **name_maps):
        pipeline = self.new_pipeline(name='statistics',
                                     name_maps=name_maps,
                                     desc="Calculate statistics")

        merge_visits = pipeline.add(
            'merge_visits',
            Merge(numinputs=1),
            inputs={'in1': ('selected_metric', text_format)},
            joinsource=self.VISIT_ID,
            joinfield=['in1'])

        merge_subjects = pipeline.add('merge_subjects',
                                      Merge(numinputs=1, ravel_inputs=True),
                                      inputs={'in1': (merge_visits, 'out')},
                                      joinsource=self.SUBJECT_ID,
                                      joinfield=['in1'])

        concat = pipeline.add('concat',
                              ConcatFloats(),
                              inputs={'in_files': (merge_subjects, 'out')})

        pipeline.add('extract_metrics',
                     ExtractMetrics(),
                     inputs={'in_list': (concat, 'out_list')},
                     outputs={
                         'average': ('avg', float),
                         'std_dev': ('std', float)
                     })

        return pipeline
Example #2
def warp_segments(name="warp_segments"):
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import IdentityInterface
    wf = pe.Workflow(name=name)
    seg = fs_segment()
    inputspec = pe.Node(IdentityInterface(fields=[
        'subject_id', 'subjects_dir', 'warp_file', 'ants_affine',
        'warped_brain'
    ]),
                        name="inputspec")
    from nipype.interfaces.ants import ApplyTransforms
    ap = pe.MapNode(ApplyTransforms(interpolation='NearestNeighbor'),
                    name="apply_transforms",
                    iterfield=["input_image"])
    wf.connect(inputspec, "subject_id", seg, "inputspec.subject_id")
    wf.connect(inputspec, "subjects_dir", seg, "inputspec.subjects_dir")
    from nipype.interfaces.utility import Merge
    merge = pe.Node(Merge(3), name="merge")
    wf.connect(seg, "outputspec.wm", merge, 'in1')
    wf.connect(seg, "outputspec.gm", merge, "in2")
    wf.connect(seg, "outputspec.csf", merge, "in3")
    wf.connect(merge, "out", ap, "input_image")
    wf.connect(inputspec, "warped_brain", ap, "reference_image")
    merge1 = pe.Node(Merge(2), name="get_transformations")
    wf.connect(inputspec, "warp_file", merge1, "in1")
    wf.connect(inputspec, "ants_affine", merge1, "in2")
    wf.connect(merge1, "out", ap, "transforms")
    outputspec = pe.Node(IdentityInterface(fields=["out_files"]),
                         name='outputspec')
    wf.connect(ap, "output_image", outputspec, "out_files")
    return wf
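Example #2 returns a plain nipype workflow, so it can be driven by setting the inputspec fields directly. A minimal usage sketch, with every path below a hypothetical placeholder:

wf = warp_segments()
wf.inputs.inputspec.subject_id = 'sub-01'                 # hypothetical FreeSurfer subject
wf.inputs.inputspec.subjects_dir = '/data/freesurfer'     # hypothetical SUBJECTS_DIR
wf.inputs.inputspec.warp_file = 'ants_warp.nii.gz'        # hypothetical ANTs warp field
wf.inputs.inputspec.ants_affine = 'ants_affine.mat'       # hypothetical ANTs affine
wf.inputs.inputspec.warped_brain = 'warped_brain.nii.gz'  # reference image for resampling
wf.run()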
Example #3
 def pipeline4(self, **name_maps):
     pipeline = self.new_pipeline(
         'pipeline4',
         desc="",
         citations=[],
         name_maps=name_maps)
     merge1 = pipeline.add(
         'merge1',
         Merge(
             numinputs=1,
             ravel_inputs=True),
         inputs={
             'in1': ('derived_field1', int)},
         joinsource=self.SUBJECT_ID,
         joinfield='in1')
     merge2 = pipeline.add(
         'merge2',
         Merge(
             numinputs=1,
             ravel_inputs=True),
         inputs={
             'in1': (merge1, 'out')},
         joinsource=self.VISIT_ID,
         joinfield='in1')
     pipeline.add(
         'math',
         TestMath(
             op='add',
             as_file=False),
         inputs={
             'x': (merge2, 'out')},
         outputs={
             'derived_field4': ('z', int)})
     return pipeline
Example #4
 def pipeline2(self):
     pipeline = self.pipeline(
         name='pipeline2',
         inputs=[
             FilesetSpec('ones', text_format),
             FilesetSpec('twos', text_format)
         ],
         outputs=[FieldSpec('threes', float),
                  FieldSpec('fours', float)],
         desc=("A pipeline that tests loading of requirements in "
               "map nodes"),
         references=[],
     )
     # Merge the two input filesets into a list
     merge = pipeline.create_node(Merge(2), "merge")
     maths = pipeline.create_map_node(TestMathWithReq(),
                                      "maths",
                                      iterfield='x',
                                      requirements=[(notinstalled1_req,
                                                     notinstalled2_req,
                                                     first_req),
                                                    second_req])
     split = pipeline.create_node(Split(), 'split')
     split.inputs.splits = [1, 1]
     split.inputs.squeeze = True
     maths.inputs.op = 'add'
     maths.inputs.y = 2
     pipeline.connect_input('ones', merge, 'in1')
     pipeline.connect_input('twos', merge, 'in2')
     pipeline.connect(merge, 'out', maths, 'x')
     pipeline.connect(maths, 'z', split, 'inlist')
     pipeline.connect_output('threes', split, 'out1')
     pipeline.connect_output('fours', split, 'out2')
     return pipeline
Example #5
 def pipeline2(self, **name_maps):
     pipeline = self.new_pipeline(
         name='pipeline2',
         desc=("A pipeline that tests loading of requirements in "
               "map nodes"),
         name_maps=name_maps)
     # Merge the two input filesets into a list
     merge = pipeline.add("merge", Merge(2))
     maths = pipeline.add(
         "maths",
         TestMathWithReq(),
         iterfield='x',
         requirements=[first_req.v('0.15.9'),
                       second_req.v('1.0.2')])
     split = pipeline.add('split', Split())
     split.inputs.splits = [1, 1]
     split.inputs.squeeze = True
     maths.inputs.op = 'add'
     maths.inputs.y = 2
     pipeline.connect_input('ones', merge, 'in1', text_format)
     pipeline.connect_input('twos', merge, 'in2', text_format)
     pipeline.connect(merge, 'out', maths, 'x')
     pipeline.connect(maths, 'z', split, 'inlist')
     pipeline.connect_output('threes', split, 'out1', text_format)
     pipeline.connect_output('fours', split, 'out2', text_format)
     return pipeline
Example #6
    def create(self):
        sampleT1s = Node(interface=Select(), name='sampleT1s')
        sampleT2s = Node(interface=Select(), name='sampleT2s')
        sampleLabels = Node(interface=Select(), name='sampleLabels')

        testT1s = Node(interface=Select(), name='testT1s')
        testT2s = Node(interface=Select(), name='testT2s')
        testLabels = Node(interface=Select(), name='testLabels')

        intensityImages = Node(interface=Merge(2), name='intensityImages')

        jointFusion = Node(interface=JointFusion(), name='jointFusion')
        jointFusion.inputs.dimension = 3
        jointFusion.inputs.modalities = 1  #TODO: verify 2 for T1/T2
        jointFusion.inputs.method = 'Joint[0.1, 2]'
        jointFusion.inputs.output_label_image = 'fusion_neuro2012_20.nii.gz'

        outputs = Node(
            interface=IdentityInterface(fields=['output_label_image']),
            run_without_submitting=True,
            name='outputspec')

        self.connect([  # Don't worry about T2s now per Regina
            # (sampleT1s, intensityImages, [('out', 'in1')]),
            # (sampleT2s, intensityImages, [('out', 'in2')]),
            # (intensityImages, jointFusion, [('out', 'warped_intensity_images')]),
            (sampleT1s, jointFusion, [('out', 'warped_intensity_images')]),
            #END: per Regina
            (sampleLabels, jointFusion, [('out', 'warped_label_images')]),
            (jointFusion, outputs, [('output_label_image',
                                     'output_label_image')]),
        ])
Example #7
    def gather_outputs_pipeline(self, **name_maps):

        pipeline = self.new_pipeline(
            name='gather_motion_detection_outputs',
            desc=("Pipeline to gather together all the outputs from "
                  "the motion detection pipeline."),
            citations=[fsl_cite],
            name_maps=name_maps)

        merge_inputs = pipeline.add(
            'merge_inputs',
            Merge(5),
            inputs={
                'in1': ('mean_displacement_plot', png_format),
                'in2': ('motion_par', text_format),
                'in3': ('correction_factors', text_format),
                'in4': ('severe_motion_detection_report', text_format),
                'in5': ('timestamps', directory_format)
            })

        pipeline.add(
            'copy2dir',
            CopyToDir(),
            inputs={'in_files': (merge_inputs, 'out')},
            outputs={'motion_detection_output': ('out_dir', directory_format)})

        return pipeline
Example #8
def make_workflow(name=''):
    w = Workflow(f'prf_to_csv_{name}')

    n_in = Node(IdentityInterface(fields=[
        'atlas',
        'func_to_struct',
        'struct_to_freesurfer',
        'run_1',
        'run_2',
        'subject',
        'session',
    ]),
                name='input')

    n_out = Node(IdentityInterface(fields=[
        'csv_file',
    ]), name='output')

    n_prf = Node(PRF(), 'prf')
    n_prf.inputs.threshold = 10

    n_mean = Node(interface=TStat(), name='mean')
    n_mean.inputs.args = '-mean'
    n_mean.inputs.outputtype = 'NIFTI_GZ'

    n_merge = Node(Merge(len(REGIONS)), 'merge')

    n_csv = {}
    w_roi = {}
    for i, r in enumerate(REGIONS):
        w_roi[r] = make_workflow_roi(r)

        n_csv[r] = Node(PRF2CSV(), 'prf2csv_' + r)
        n_csv[r].inputs.threshold = 0.5
        n_csv[r].inputs.region = r

        w.connect(n_in, 'atlas', w_roi[r], 'input.atlas')
        w.connect(n_in, 'func_to_struct', w_roi[r], 'input.func_to_struct')
        w.connect(n_in, 'struct_to_freesurfer', w_roi[r],
                  'input.struct_to_freesurfer')
        w.connect(n_mean, 'out_file', w_roi[r], 'input.ref')
        w.connect(n_in, 'subject', n_csv[r], 'subject')
        w.connect(n_in, 'session', n_csv[r], 'session')
        for k in ['r2', 'phi', 'rho', 'sigma', 'hrf']:
            w.connect(n_prf, k + '_file', n_csv[r], k + '_file')
        w.connect(w_roi[r], 'output.mask_file', n_csv[r], 'mask_file')
        w.connect(n_csv[r], 'csv_file', n_merge, 'in' + str(i + 1))

    w.connect(n_in, 'run_1', n_prf, 'nii1_file')
    w.connect(n_in, 'run_2', n_prf, 'nii2_file')
    w.connect(n_in, 'subject', n_prf, 'subject')
    w.connect(n_in, 'session', n_prf, 'session')
    w.connect(n_in, 'run_1', n_mean, 'in_file')
    w.connect(n_merge, 'out', n_out, 'csv_file')

    return w
Example #9
def make_workflow_roi(region):
    """
    Benson_ROI_Names = {'V1', 'V2', 'V3', 'hV4', 'VO1', 'VO2', 'LO1', 'LO2', 'TO1', 'TO2', 'V3B', 'V3A'};

    Wang_ROI_Names = [
        'V1v', 'V1d', 'V2v', 'V2d', 'V3v', 'V3d', 'hV4', 'VO1', 'VO2', 'PHC1', 'PHC2',
        'TO2', 'TO1', 'LO2', 'LO1', 'V3B', 'V3A', 'IPS0', 'IPS1', 'IPS2', 'IPS3', 'IPS4' ,
        'IPS5', 'SPL1', 'FEF'];
    """

    w = Workflow(f'roi_{region}')

    n_in = Node(IdentityInterface(fields=[
        'atlas',
        'func_to_struct',
        'struct_to_freesurfer',
        'ref',
    ]),
                name='input')

    n_out = Node(IdentityInterface(fields=[
        'mask_file',
    ]), name='output')

    n_m = Node(Merge(2), 'merge')

    n_v = Node(MathsCommand(), region)
    n_v.inputs.out_file = 'roi.nii.gz'
    n_v.inputs.nan2zeros = True

    if region == 'V1':
        n_v.inputs.args = '-uthr 1 -bin'
    elif region == 'V2':
        n_v.inputs.args = '-thr 2 -uthr 3 -bin'
    elif region == 'V3':
        n_v.inputs.args = '-thr 4 -uthr 5 -bin'
    else:
        raise ValueError(f'Unknown region {region}. It should be V1, V2, V3')

    at = Node(ApplyTransforms(), 'applytransform')
    at.inputs.dimension = 3
    at.inputs.output_image = 'roi_func.nii.gz'
    at.inputs.interpolation = 'Linear'
    at.inputs.default_value = 0
    at.inputs.invert_transform_flags = [True, True]

    w.connect(n_in, 'atlas', n_v, 'in_file')
    w.connect(n_v, 'out_file', at, 'input_image')
    w.connect(n_in, 'ref', at, 'reference_image')
    w.connect(n_in, 'struct_to_freesurfer', n_m, 'in1')
    w.connect(n_in, 'func_to_struct', n_m, 'in2')
    w.connect(n_m, 'out', at, 'transforms')
    w.connect(at, 'output_image', n_out, 'mask_file')

    return w
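Given the thresholds above, only V1, V2 and V3 are accepted region names. A usage sketch:

# Build one ROI sub-workflow per supported region.
w_v1 = make_workflow_roi('V1')  # binarises atlas label 1
w_v2 = make_workflow_roi('V2')  # binarises atlas labels 2-3
w_v3 = make_workflow_roi('V3')  # binarises atlas labels 4-5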
Example #10
    def brain_extraction_pipeline(self, **name_maps):
        """
        Generates a whole brain mask using MRtrix's 'dwi2mask' command

        Parameters
        ----------
        bet_method : str
            Branch switch selecting the masking tool; when set to 'mrtrix',
            MRtrix's 'dwi2mask' is used, otherwise the superclass
            implementation is run
        """

        if self.branch('bet_method', 'mrtrix'):
            pipeline = self.new_pipeline(
                'brain_extraction',
                desc="Generate brain mask from b0 images",
                citations=[mrtrix_cite],
                name_maps=name_maps)

            if self.provided('coreg_ref'):
                series = 'series_coreg'
            else:
                series = 'series_preproc'

            # Create mask node
            masker = pipeline.add(
                'dwi2mask',
                BrainMask(
                    out_file='brain_mask.nii.gz'),
                inputs={
                    'in_file': (series, nifti_gz_format),
                    'grad_fsl': self.fsl_grads(pipeline, coregistered=False)},
                outputs={
                    'brain_mask': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])

            merge = pipeline.add(
                'merge_operands',
                Merge(2),
                inputs={
                    'in1': ('mag_preproc', nifti_gz_format),
                    'in2': (masker, 'out_file')})

            pipeline.add(
                'apply_mask',
                MRCalc(
                    operation='multiply'),
                inputs={
                    'operands': (merge, 'out')},
                outputs={
                    'brain': ('out_file', nifti_gz_format)},
                requirements=[mrtrix_req.v('3.0rc3')])
        else:
            pipeline = super().brain_extraction_pipeline(**name_maps)
        return pipeline
Example #11
 def __init__(self, in1=0, **options):
     from nipype.interfaces.utility import Merge
     n = 1
     for ef in options:
         if 'in' in ef:
             n += 1
     mi = Merge(n)
     mi.inputs.in1 = in1
     for ef in options:
         setattr(mi.inputs, ef, options[ef])
     self.res = mi.run()
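For reference, the wrapped interface behaves as follows when run directly; a minimal sketch of plain nipype Merge usage:

from nipype.interfaces.utility import Merge

# Merge(3) gathers in1..in3 into a single list on its 'out' output.
# With the default axis='vstack', list inputs are extended rather than
# nested, so 'out' here is [1, 2, 3, 4].
mi = Merge(3)
mi.inputs.in1 = 1
mi.inputs.in2 = [2, 3]
mi.inputs.in3 = 4
res = mi.run()
print(res.outputs.out)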
Example #12
 def pipeline1(self, **name_maps):
     pipeline = self.new_pipeline(
         'pipeline1',
         desc="",
         citations=[],
         name_maps=name_maps)
     math1 = pipeline.add(
         'math1',
         TestMath(
             op='add'),
         inputs={
             'x': ('acquired_fileset1', text_format),
             'y': ('acquired_fileset2', text_format)},
         requirements=[
             a_req.v('1.0.1'),
             b_req.v(2)])
     math2 = pipeline.add(
         'math2',
         TestMath(
             op='add'),
         inputs={
             'y': ('acquired_field1', float),
             'x': (math1, 'z')},
         requirements=[
             c_req.v(0.1)])
     # Set up different requirements based on switch
     math3_reqs = [a_req.v(1)]
     if self.branch('extra_req'):
         math3_reqs.append(d_req.v('0.8.6'))
     math3 = pipeline.add(
         'math3',
         TestMath(
             op='mul',
             y=self.parameter('multiplier')),
         inputs={
             'x': (math2, 'z')},
         requirements=[
             b_req.v('2.7.0', '3.0')])
     pipeline.add(
         'merge1',
         Merge(3),
         inputs={
             'in1': (math1, 'z'),
             'in2': (math2, 'z'),
             'in3': (math3, 'z')},
         outputs={
             'derived_field1': ('out', float)},
         requirements=math3_reqs)
     return pipeline
Example #13
def create_report_pipeline(pipeline_name="report"):  #, contrasts):
    inputnode = pe.Node(interface=util.IdentityInterface(fields=[
        'struct', "raw_stat_images", "thresholded_stat_images",
        "ggmm_thresholded_stat_images", "mask", "plot_realign", 'contrasts',
        'task_name'
    ]),
                        name="inputnode")

    raw_stat_visualise = create_visualise_masked_overlay(
        pipeline_name=pipeline_name, name="raw_stat")
    thresholded_stat_visualise = create_visualise_thresholded_overlay(
        pipeline_name=pipeline_name, name="thresholded_stat")

    psmerge_raw = pe.Node(interface=neuroutils.PsMerge(), name="psmerge_raw")
    psmerge_raw.inputs.out_file = "merged.pdf"
    psmerge_th = psmerge_raw.clone(name="psmerge_th")
    psmerge_ggmm_th = psmerge_raw.clone(name="psmerge_ggmm_th")
    psmerge_all = psmerge_raw.clone(name="psmerge_all")
    mergeinputs = pe.Node(interface=Merge(4), name="mergeinputs")

    report = pe.Workflow(name="report")

    report.connect([
        (inputnode,
         raw_stat_visualise, [("struct", "inputnode.background"),
                              ("raw_stat_images", "inputnode.overlays"),
                              ("mask", "inputnode.mask"),
                              ('contrasts', 'inputnode.contrasts'),
                              ('task_name', 'inputnode.task_name')]),
        (inputnode, thresholded_stat_visualise,
         [("struct", "inputnode.background"),
          ("thresholded_stat_images", "inputnode.overlays"),
          ("ggmm_thresholded_stat_images", "inputnode.ggmm_overlays"),
          ('contrasts', 'inputnode.contrasts'),
          ('task_name', 'inputnode.task_name')]),
        (raw_stat_visualise, psmerge_raw, [("plot.plot", "in_files")]),
        (thresholded_stat_visualise, psmerge_th, [("plot.plot", "in_files")]),
        (thresholded_stat_visualise, psmerge_ggmm_th, [("plot_ggmm.plot",
                                                        "in_files")]),
        (inputnode, mergeinputs, [("plot_realign", "in1")]),
        (psmerge_raw, mergeinputs, [("merged_file", "in2")]),
        (psmerge_th, mergeinputs, [("merged_file", "in3")]),
        (psmerge_ggmm_th, mergeinputs, [("merged_file", "in4")]),
        (mergeinputs, psmerge_all, [("out", "in_files")]),
    ])
    return report
Example #14
 def pipeline5(self, **name_maps):
     pipeline = self.new_pipeline('pipeline5',
                                  desc="",
                                  citations=[],
                                  name_maps=name_maps)
     merge = pipeline.add('merge',
                          Merge(numinputs=3),
                          inputs={
                              'in1': ('derived_field2', int),
                              'in2': ('derived_field3', int),
                              'in3': ('derived_field4', int)
                          })
     pipeline.add('math',
                  TestMath(op='add', as_file=False),
                  inputs={'x': (merge, 'out')},
                  outputs={'derived_field5': ('z', float)})
     return pipeline
Example #15
 def pipeline_factory(self, incr, input, output):  # @ReservedAssignment
     pipeline = self.create_pipeline(
         name=output,
         inputs=[DatasetSpec(input, mrtrix_format)],
         outputs=[DatasetSpec(output, mrtrix_format)],
         desc=("A dummy pipeline used to test 'partial-complete' method"),
         version=1,
         citations=[])
     # Nodes
     operands = pipeline.create_node(Merge(2), name='merge')
     mult = pipeline.create_node(MRCalc(),
                                 name="convert1",
                                 requirements=[mrtrix3_req])
     operands.inputs.in2 = incr
     mult.inputs.operation = 'add'
     # Connect inputs
     pipeline.connect_input(input, operands, 'in1')
     # Connect inter-nodes
     pipeline.connect(operands, 'out', mult, 'operands')
     # Connect outputs
     pipeline.connect_output(output, mult, 'out_file')
     return pipeline
Example #16
 def pipeline(self):
     pipeline = self.create_pipeline(
         name='pipeline',
         inputs=[DatasetSpec('ones', nifti_gz_format)],
         outputs=[DatasetSpec('twos', nifti_gz_format)],
         desc=("A pipeline that tests loading of requirements"),
         version=1,
         citations=[],
     )
     # Merge the 'ones' input with itself so MRMath receives a list of files
     input_merge = pipeline.create_node(Merge(2), "input_merge")
     maths = pipeline.create_node(MRMath(),
                                  "maths",
                                  requirements=[(dummy1_req, dummy2_req,
                                                 mrtrix3_req),
                                                dcm2niix1_req])
     pipeline.connect_input('ones', input_merge, 'in1')
     pipeline.connect_input('ones', input_merge, 'in2')
     pipeline.connect(input_merge, 'out', maths, 'in_files')
     maths.inputs.operation = 'sum'
     pipeline.connect_output('twos', maths, 'out_file')
     pipeline.assert_connected()
     return pipeline
Example #17
def create_reg_workflow(name='registration'):
    """Create a FEAT preprocessing workflow together with freesurfer

    Parameters
    ----------

        name : name of workflow (default: 'registration')

    Inputs::

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.anatomical_image : anatomical image to coregister to
        inputspec.target_image : registration target

    Outputs::

        outputspec.func2anat_transform : FLIRT transform
        outputspec.anat2target_transform : FLIRT+FNIRT transform
        outputspec.transformed_files : transformed files in target space
        outputspec.transformed_mean : mean image in target space
    """

    register = Workflow(name=name)

    inputnode = Node(interface=IdentityInterface(fields=[
        'source_files', 'mean_image', 'subject_id', 'subjects_dir',
        'target_image'
    ]),
                     name='inputspec')

    outputnode = Node(interface=IdentityInterface(fields=[
        'func2anat_transform', 'out_reg_file', 'anat2target_transform',
        'transforms', 'transformed_mean', 'segmentation_files', 'anat2target',
        'aparc'
    ]),
                      name='outputspec')

    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(), name='fssource')
    fssource.run_without_submitting = True
    register.connect(inputnode, 'subject_id', fssource, 'subject_id')
    register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')

    convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert")
    register.connect(fssource, 'T1', convert, 'in_file')

    # Coregister the median to the surface
    bbregister = Node(freesurfer.BBRegister(), name='bbregister')
    bbregister.inputs.init = 'fsl'
    bbregister.inputs.contrast_type = 't2'
    bbregister.inputs.out_fsl_file = True
    bbregister.inputs.epi_mask = True
    register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
    register.connect(inputnode, 'mean_image', bbregister, 'source_file')
    register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')
    """
    Estimate the tissue classes from the anatomical image. But use spm's segment
    as FSL appears to be breaking.
    """

    stripper = Node(fsl.BET(), name='stripper')
    register.connect(convert, 'out_file', stripper, 'in_file')
    fast = Node(fsl.FAST(), name='fast')
    register.connect(stripper, 'out_file', fast, 'in_files')
    """
    Binarize the segmentation
    """

    binarize = MapNode(fsl.ImageMaths(op_string='-nan -thr 0.9 -ero -bin'),
                       iterfield=['in_file'],
                       name='binarize')
    register.connect(fast, 'partial_volume_files', binarize, 'in_file')
    """
    Apply inverse transform to take segmentations to functional space
    """

    applyxfm = MapNode(freesurfer.ApplyVolTransform(inverse=True,
                                                    interp='nearest'),
                       iterfield=['target_file'],
                       name='inverse_transform')
    register.connect(inputnode, 'subjects_dir', applyxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', applyxfm, 'reg_file')
    register.connect(binarize, 'out_file', applyxfm, 'target_file')
    register.connect(inputnode, 'mean_image', applyxfm, 'source_file')
    """
    Apply inverse transform to aparc file
    """

    aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
                                                 interp='nearest'),
                    name='aparc_inverse_transform')
    register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
    register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm,
                     'target_file')
    register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
    """
    Convert the BBRegister transformation to ANTS ITK format
    """

    convert2itk = Node(C3dAffineTool(), name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
    register.connect(stripper, 'out_file', convert2itk, 'reference_file')
    """
    Compute registration between the subject's structural and MNI template
    This is currently set to perform a very quick registration. However, the
    registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations
    All parameters are set using the example from:
    #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    """

    reg = Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[
        100, 30, 20
    ]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.float = True
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.num_threads = 4
    reg.plugin_args = {'qsub_args': '-l nodes=1:ppn=4'}
    register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image', reg, 'fixed_image')
    """
    Concatenate the affine and ants transforms into a list
    """

    merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')
    register.connect(convert2itk, 'itk_transform', merge, 'in2')
    register.connect(reg, 'composite_transform', merge, 'in1')
    """
    Transform the mean image. First to anatomical and then to target
    """

    warpmean = Node(ants.ApplyTransforms(), name='warpmean')
    warpmean.inputs.input_image_type = 3
    warpmean.inputs.interpolation = 'Linear'
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.inputs.terminal_output = 'file'
    warpmean.inputs.args = '--float'
    warpmean.inputs.num_threads = 4

    register.connect(inputnode, 'target_image', warpmean, 'reference_image')
    register.connect(inputnode, 'mean_image', warpmean, 'input_image')
    register.connect(merge, 'out', warpmean, 'transforms')
    """
    Assign all the output files
    """

    register.connect(reg, 'warped_image', outputnode, 'anat2target')
    register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
    register.connect(applyxfm, 'transformed_file', outputnode,
                     'segmentation_files')
    register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc')
    register.connect(bbregister, 'out_fsl_file', outputnode,
                     'func2anat_transform')
    register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file')
    register.connect(reg, 'composite_transform', outputnode,
                     'anat2target_transform')
    register.connect(merge, 'out', outputnode, 'transforms')

    return register
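A sketch of how this workflow might be invoked, following the docstring's input list (all file paths hypothetical):

reg = create_reg_workflow()
reg.inputs.inputspec.subject_id = 'sub-01'                        # hypothetical subject
reg.inputs.inputspec.subjects_dir = '/data/freesurfer'            # hypothetical SUBJECTS_DIR
reg.inputs.inputspec.mean_image = 'mean_func.nii.gz'              # hypothetical mean EPI
reg.inputs.inputspec.source_files = ['run1.nii.gz', 'run2.nii.gz']
reg.inputs.inputspec.target_image = 'MNI152_T1_2mm_brain.nii.gz'  # registration target
reg.run(plugin='MultiProc', plugin_args={'n_procs': 4})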
Example #18
def builder(subject_id,
            subId,
            project_dir,
            data_dir,
            output_dir,
            output_final_dir,
            output_interm_dir,
            layout,
            anat=None,
            funcs=None,
            fmaps=None,
            task_name='',
            session=None,
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False,
            write_logs=True):
    """
    Core function that returns a workflow. See wfmaker for more details.

    Args:
        subject_id: name of subject folder for final outputted sub-folder name
        subId: abbreviated name of subject for intermediate outputted sub-folder name
        project_dir: full path to root of project
        data_dir: full path to raw data files
        output_dir: upper level output dir (others will be nested within this)
        output_final_dir: final preprocessed sub-dir name
        output_interm_dir: intermediate preprocess sub-dir name
        layout: BIDS layout instance
    """

    ##################
    ### PATH SETUP ###
    ##################
    if session is not None:
        session = int(session)
        if session < 10:
            session = '0' + str(session)
        else:
            session = str(session)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update the nipype global config because setting workflow.config[...] doesn't seem to work.
    # We can't store a nipype config/rc file in the container anyway, so set the options globally
    # before importing and setting up the workflow, as suggested here:
    # http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file

    # Create subject's intermediate directory before configuring nipype and the workflow because that's where we'll save log files in addition to intermediate files
    if not os.path.exists(os.path.join(output_interm_dir, subId, 'logs')):
        os.makedirs(os.path.join(output_interm_dir, subId, 'logs'))
    log_dir = os.path.join(output_interm_dir, subId, 'logs')
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': write_logs
        },
        'execution': {
            'crashdump_dir': log_dir
        }
    })
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################

    # Turn the functional file list into an iterable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################

    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []

        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])

            # Grab measurements (for some reason pyBIDS doesn't pick up the dcm_meta... fields from the side-car json file, and json.load doesn't either; so instead read the header with nibabel to determine the number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])

            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, ), (0.1, )]
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # Old settings [[3,2,1,0]]*3
    # shrink_factors
    # The coarseness with which to do registration
    # Old settings [[8,4,2,1]] * 3
    # >= 8 may result in problems that cause big chunks of cortex with little fine-grained spatial structure to be moved to other parts of cortex
    # Other settings
    # transform_parameters:
    # how much regularization to do for fitting that transformation
    # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    # Use image intensity distribution to guide registration
    # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
    # convergence_threshold
    # threshold for optimizer
    # convergence_window_size
    # how many samples should optimizer average to compute threshold?
    # sampling_strategy
    # what strategy should ANTs use to initialize the transform. Regular here refers to approximately random sampling around the center of the image mass

    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[8, 4, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    # NEW SETTINGS (need to be adjusted; specifically shrink_factors and smoothing_sigmas need to be the same length)
    # normalization = Node(Registration(), name='normalization')
    # normalization.inputs.float = False
    # normalization.inputs.collapse_output_transforms = True
    # normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    # normalization.inputs.convergence_window_size = [10]
    # normalization.inputs.dimension = 3
    # normalization.inputs.fixed_image = MNItemplate
    # normalization.inputs.initial_moving_transform_com = True
    # normalization.inputs.metric = ['MI', 'MI', 'CC']
    # normalization.inputs.metric_weight = [1.0]*3
    # normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
    #                                              [1000, 500, 250, 100],
    #                                              [100, 70, 50, 20]]
    # normalization.inputs.num_threads = ants_threads
    # normalization.inputs.output_transform_prefix = 'anat2template'
    # normalization.inputs.output_inverse_warped_image = True
    # normalization.inputs.output_warped_image = True
    # normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    # normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    # normalization.inputs.sampling_strategy = ['Regular',
    #                                           'Regular',
    #                                           'None']
    # normalization.inputs.shrink_factors = [[4, 3, 2, 1]]*3
    # normalization.inputs.sigma_units = ['vox']*3
    # normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    # normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    # normalization.inputs.transform_parameters = [(0.1,),
    #                                              (0.1,),
    #                                              (0.1, 3.0, 0.0)]
    # normalization.inputs.use_histogram_matching = True
    # normalization.inputs.winsorize_lower_quantile = 0.005
    # normalization.inputs.winsorize_upper_quantile = 0.995
    # normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2),
                            iterfield=['in2'],
                            name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(),
                            iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = 12
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = 12
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    if session:
        datasink.inputs.base_directory = os.path.join(output_final_dir,
                                                      subject_id)
        datasink.inputs.container = 'ses-' + session
    else:
        datasink.inputs.base_directory = output_final_dir
        datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    if session:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + [
            'ses-' + session
        ] + ['func']
    else:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    # If we have sessions provide the full path to the subject's intermediate directory
    # and only rely on workflow init to create the session container *within* that directory
    # Otherwise just point to the intermediate directory and let the workflow init create the subject container within the intermediate directory
    if session:
        workflow = Workflow(name='ses_' + session)
        workflow.base_dir = os.path.join(output_interm_dir, subId)
    else:
        workflow = Workflow(name=subId)
        workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup, [('encoding_file',
                                                          'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################

    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################

    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])

        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    # realignment parms -> save
    ##########################################

    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')]),
        (realign_fsl, datasink, [('par_file', 'functional.@motionparams')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads != 8:
        print(f"ANTs will use the requested {ants_threads} threads.")
    return workflow
Example no. 19
    def create_pipeline_flow(self, cmp_deriv_subject_directory,
                             nipype_deriv_subject_directory):

        # subject_directory = self.subject_directory

        # datasource.inputs.subject = self.subject

        if self.parcellation_scheme == 'Lausanne2008':
            bids_atlas_label = 'L2008'
        elif self.parcellation_scheme == 'Lausanne2018':
            bids_atlas_label = 'L2018'
        elif self.parcellation_scheme == 'NativeFreesurfer':
            bids_atlas_label = 'Desikan'

        # Data sinker for output
        sinker = pe.Node(nio.DataSink(), name="bold_sinker")
        sinker.inputs.base_directory = cmp_deriv_subject_directory
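        # Note: nio.DataSink applies `substitutions` as an ordered list of
        # (search, replace) string pairs to every path it sinks, so the
        # BIDS-style renames below rely on that ordering.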

        if self.parcellation_scheme == 'NativeFreesurfer':
            sinker.inputs.substitutions = [
                (
                    'eroded_brain_registered.nii.gz', self.subject + '_space-meanBOLD_desc-eroded_label-brain_dseg.nii.gz'),
                ('eroded_csf_registered.nii.gz', self.subject +
                 '_space-meanBOLD_desc-eroded_label-CSF_dseg.nii.gz'),
                ('wm_mask_registered.nii.gz', self.subject +
                 '_space-meanBOLD_label-WM_dseg.nii.gz'),
                ('eroded_wm_registered.nii.gz', self.subject +
                 '_space-meanBOLD_desc-eroded_label-WM_dseg.nii.gz'),
                ('fMRI_despike_st_mcf.nii.gz_mean_reg.nii.gz',
                 self.subject + '_meanBOLD.nii.gz'),
                ('fMRI_despike_st_mcf.nii.gz.par', self.subject + '_motion.par'),
                ('FD.npy', self.subject + '_desc-scrubbing_FD.npy'),
                ('DVARS.npy', self.subject + '_desc-scrubbing_DVARS.npy'),
                ('fMRI_bandpass.nii.gz', self.subject +
                 '_desc-bandpass_task-rest_bold.nii.gz'),

                (self.subject + '_label-' + bids_atlas_label + '_atlas_flirt.nii.gz',
                 self.subject + '_space-meanBOLD_label-' + bids_atlas_label + '_atlas.nii.gz'),
                # (self.subject+'_T1w_parc_freesurferaparc_flirt.nii.gz',self.subject+'_space-meanBOLD_label-Desikan_atlas.nii.gz'),
                ('connectome_freesurferaparc', self.subject +
                 '_label-Desikan_conndata-network_connectivity'),
                ('averageTimeseries_freesurferaparc',
                 self.subject + '_atlas-Desikan_timeseries'),

            ]
        else:
            sinker.inputs.substitutions = [
                ('eroded_brain_registered.nii.gz', self.subject +
                 '_space-meanBOLD_desc-eroded_label-brain_dseg.nii.gz'),
                ('wm_mask_registered.nii.gz',
                 self.subject + '_space-meanBOLD_label-WM_dseg.nii.gz'),
                ('eroded_csf_registered.nii.gz', self.subject +
                 '_space-meanBOLD_desc-eroded_label-CSF_dseg.nii.gz'),
                ('eroded_wm_registered.nii.gz', self.subject +
                 '_space-meanBOLD_desc-eroded_label-WM_dseg.nii.gz'),
                ('fMRI_despike_st_mcf.nii.gz_mean_reg.nii.gz',
                 self.subject + '_meanBOLD.nii.gz'),
                ('fMRI_despike_st_mcf.nii.gz.par',
                 self.subject + '_motion.tsv'),
                ('FD.npy', self.subject + '_desc-scrubbing_FD.npy'),
                ('DVARS.npy', self.subject + '_desc-scrubbing_DVARS.npy'),
                ('fMRI_bandpass.nii.gz',
                 self.subject + '_desc-bandpass_task-rest_bold.nii.gz'),
                (self.subject + '_label-' + bids_atlas_label +
                 '_atlas_flirt.nii.gz',
                 self.subject + '_space-meanBOLD_label-' + bids_atlas_label +
                 '_atlas.nii.gz'),
                (self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale1_atlas_flirt.nii.gz',
                 self.subject + '_space-meanBOLD_label-' + bids_atlas_label +
                 '_desc-scale1_atlas.nii.gz'),
                (self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale2_atlas_flirt.nii.gz',
                 self.subject + '_space-meanBOLD_label-' + bids_atlas_label +
                 '_desc-scale2_atlas.nii.gz'),
                (self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale3_atlas_flirt.nii.gz',
                 self.subject + '_space-meanBOLD_label-' + bids_atlas_label +
                 '_desc-scale3_atlas.nii.gz'),
                (self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale4_atlas_flirt.nii.gz',
                 self.subject + '_space-meanBOLD_label-' + bids_atlas_label +
                 '_desc-scale4_atlas.nii.gz'),
                (self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale5_atlas_flirt.nii.gz',
                 self.subject + '_space-meanBOLD_label-' + bids_atlas_label +
                 '_desc-scale5_atlas.nii.gz'),
                ('connectome_freesurferaparc', self.subject +
                 '_label-Desikan_conndata-network_connectivity'),
                ('connectome_scale1',
                 self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale1_conndata-network_connectivity'),
                ('connectome_scale2',
                 self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale2_conndata-network_connectivity'),
                ('connectome_scale3',
                 self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale3_conndata-network_connectivity'),
                ('connectome_scale4',
                 self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale4_conndata-network_connectivity'),
                ('connectome_scale5',
                 self.subject + '_label-' + bids_atlas_label +
                 '_desc-scale5_conndata-network_connectivity'),
                ('averageTimeseries_scale1', self.subject + '_atlas-' +
                 bids_atlas_label + '_desc-scale1_timeseries'),
                ('averageTimeseries_scale2', self.subject + '_atlas-' +
                 bids_atlas_label + '_desc-scale2_timeseries'),
                ('averageTimeseries_scale3', self.subject + '_atlas-' +
                 bids_atlas_label + '_desc-scale3_timeseries'),
                ('averageTimeseries_scale4', self.subject + '_atlas-' +
                 bids_atlas_label + '_desc-scale4_timeseries'),
                ('averageTimeseries_scale5', self.subject + '_atlas-' +
                 bids_atlas_label + '_desc-scale5_timeseries'),
            ]

        # Data import
        datasource = pe.Node(interface=nio.DataGrabber(outfields=[
            'fMRI', 'T1', 'T2', 'aseg', 'brain', 'brain_mask', 'wm_mask_file',
            'wm_eroded', 'brain_eroded', 'csf_eroded', 'roi_volume_s1',
            'roi_volume_s2', 'roi_volume_s3', 'roi_volume_s4', 'roi_volume_s5',
            'roi_graphml_s1', 'roi_graphml_s2', 'roi_graphml_s3',
            'roi_graphml_s4', 'roi_graphml_s5'
        ]),
                             name='datasource')
        datasource.inputs.base_directory = cmp_deriv_subject_directory
        datasource.inputs.template = '*'
        datasource.inputs.raise_on_empty = False
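        # With raise_on_empty=False, DataGrabber returns an empty result
        # (None) for any outfield whose template matches no file, e.g. the
        # 'irrelevant' placeholders used for the NativeFreesurfer scheme;
        # remove_non_existing_scales filters those out further below.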
        # datasource.inputs.field_template = dict(fMRI='fMRI.nii.gz',T1='T1.nii.gz',T2='T2.nii.gz')

        if self.parcellation_scheme == 'NativeFreesurfer':
            datasource.inputs.field_template = dict(
                fMRI='func/' + self.subject +
                '_task-rest_desc-cmp_bold.nii.gz',
                T1='anat/' + self.subject + '_desc-head_T1w.nii.gz',
                T2='anat/' + self.subject + '_T2w.nii.gz',
                aseg='anat/' + self.subject + '_desc-aseg_dseg.nii.gz',
                brain='anat/' + self.subject + '_desc-brain_T1w.nii.gz',
                brain_mask='anat/' + self.subject + '_desc-brain_mask.nii.gz',
                wm_mask_file='anat/' + self.subject + '_label-WM_dseg.nii.gz',
                wm_eroded='anat/' + self.subject +
                '_label-WM_desc-eroded_dseg.nii.gz',
                brain_eroded='anat/' + self.subject +
                '_label-brain_desc-eroded_dseg.nii.gz',
                csf_eroded='anat/' + self.subject +
                '_label-CSF_desc-eroded_dseg.nii.gz',
                roi_volume_s1='anat/' + self.subject +
                '_label-Desikan_atlas.nii.gz',
                roi_volume_s2='anat/irrelevant.nii.gz',
                roi_volume_s3='anat/irrelevant.nii.gz',
                roi_volume_s4='anat/irrelevant.nii.gz',
                roi_volume_s5='anat/irrelevant.nii.gz',
                roi_graphml_s1='anat/' + self.subject +
                '_label-Desikan_atlas.graphml',
                roi_graphml_s2='anat/irrelevant.graphml',
                roi_graphml_s3='anat/irrelevant.graphml',
                roi_graphml_s4='anat/irrelevant.graphml',
                roi_graphml_s5='anat/irrelevant.graphml')
        else:
            datasource.inputs.field_template = dict(
                fMRI='func/' + self.subject +
                '_task-rest_desc-cmp_bold.nii.gz',
                T1='anat/' + self.subject + '_desc-head_T1w.nii.gz',
                T2='anat/' + self.subject + '_T2w.nii.gz',
                aseg='anat/' + self.subject + '_desc-aseg_dseg.nii.gz',
                brain='anat/' + self.subject + '_desc-brain_T1w.nii.gz',
                brain_mask='anat/' + self.subject + '_desc-brain_mask.nii.gz',
                wm_mask_file='anat/' + self.subject + '_label-WM_dseg.nii.gz',
                wm_eroded='anat/' + self.subject +
                '_label-WM_desc-eroded_dseg.nii.gz',
                brain_eroded='anat/' + self.subject +
                '_label-brain_desc-eroded_dseg.nii.gz',
                csf_eroded='anat/' + self.subject +
                '_label-CSF_desc-eroded_dseg.nii.gz',
                roi_volume_s1='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale1_atlas.nii.gz',
                roi_volume_s2='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale2_atlas.nii.gz',
                roi_volume_s3='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale3_atlas.nii.gz',
                roi_volume_s4='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale4_atlas.nii.gz',
                roi_volume_s5='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale5_atlas.nii.gz',
                roi_graphml_s1='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale1_atlas.graphml',
                roi_graphml_s2='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale2_atlas.graphml',
                roi_graphml_s3='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale3_atlas.graphml',
                roi_graphml_s4='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale4_atlas.graphml',
                roi_graphml_s5='anat/' + self.subject + '_label-' +
                bids_atlas_label + '_desc-scale5_atlas.graphml')

        datasource.inputs.sort_filelist = False

        # Clear previous outputs
        self.clear_stages_outputs()

        # Create fMRI flow
        fMRI_flow = pe.Workflow(
            name='fMRI_pipeline',
            base_dir=os.path.abspath(nipype_deriv_subject_directory))
        fMRI_inputnode = pe.Node(interface=util.IdentityInterface(fields=[
            "fMRI", "T1", "T2", "aseg", "subjects_dir", "subject_id",
            "wm_mask_file", "roi_volumes", "roi_graphMLs", "wm_eroded",
            "brain_eroded", "csf_eroded", "parcellation_scheme", "atlas_info"
        ]),
                                 name="inputnode")
        fMRI_inputnode.inputs.parcellation_scheme = self.parcellation_scheme
        fMRI_inputnode.inputs.atlas_info = self.atlas_info
        fMRI_inputnode.inputs.subjects_dir = self.subjects_dir
        fMRI_inputnode.inputs.subject_id = os.path.basename(self.subject_id)

        # print('fMRI_inputnode.subjects_dir : {}'.format(fMRI_inputnode.subjects_dir))
        # print('fMRI_inputnode.subject_id : {}'.format(fMRI_inputnode.subject_id))

        fMRI_outputnode = pe.Node(
            interface=util.IdentityInterface(fields=["connectivity_matrices"]),
            name="outputnode")
        fMRI_flow.add_nodes([fMRI_inputnode, fMRI_outputnode])

        merge_roi_volumes = pe.Node(interface=Merge(5),
                                    name='merge_roi_volumes')
        merge_roi_graphmls = pe.Node(interface=Merge(5),
                                     name='merge_roi_graphmls')

        def remove_non_existing_scales(roi_volumes):
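            # DataGrabber yields None for scales whose placeholder template
            # matched no file; keep only the volumes that actually exist.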
            out_roi_volumes = []
            for vol in roi_volumes:
                if vol is not None:
                    out_roi_volumes.append(vol)
            return out_roi_volumes

        fMRI_flow.connect([(datasource,
                            merge_roi_volumes, [("roi_volume_s1", "in1"),
                                                ("roi_volume_s2", "in2"),
                                                ("roi_volume_s3", "in3"),
                                                ("roi_volume_s4", "in4"),
                                                ("roi_volume_s5", "in5")])])

        fMRI_flow.connect([(datasource, merge_roi_graphmls,
                            [("roi_graphml_s1", "in1"),
                             ("roi_graphml_s2", "in2"),
                             ("roi_graphml_s3", "in3"),
                             ("roi_graphml_s4", "in4"),
                             ("roi_graphml_s5", "in5")])])

        fMRI_flow.connect([
            (datasource, fMRI_inputnode, [("fMRI", "fMRI"), ("T1", "T1"),
                                          ("T2", "T2"), ("aseg", "aseg"),
                                          ("wm_mask_file", "wm_mask_file"),
                                          ("brain_eroded", "brain_eroded"),
                                          ("wm_eroded", "wm_eroded"),
                                          ("csf_eroded", "csf_eroded")]),
            # ,( "roi_volumes","roi_volumes")])
            (merge_roi_volumes, fMRI_inputnode,
             [(("out", remove_non_existing_scales), "roi_volumes")]),
            (merge_roi_graphmls, fMRI_inputnode,
             [(("out", remove_non_existing_scales), "roi_graphMLs")]),
        ])

        if self.stages['Preprocessing'].enabled:
            preproc_flow = self.create_stage_flow("Preprocessing")
            fMRI_flow.connect([
                (fMRI_inputnode, preproc_flow, [("fMRI",
                                                 "inputnode.functional")]),
                (preproc_flow, sinker, [("outputnode.mean_vol",
                                         "func.@mean_vol")]),
            ])

        if self.stages['Registration'].enabled:
            reg_flow = self.create_stage_flow("Registration")
            fMRI_flow.connect([
                (fMRI_inputnode, reg_flow, [('T1', 'inputnode.T1')]),
                (fMRI_inputnode, reg_flow, [('T2', 'inputnode.T2')]),
                (preproc_flow, reg_flow, [('outputnode.mean_vol',
                                           'inputnode.target')]),
                (fMRI_inputnode, reg_flow,
                 [('wm_mask_file', 'inputnode.wm_mask'),
                  ('roi_volumes', 'inputnode.roi_volumes'),
                  ('brain_eroded', 'inputnode.eroded_brain'),
                  ('wm_eroded', 'inputnode.eroded_wm'),
                  ('csf_eroded', 'inputnode.eroded_csf')]),
                (reg_flow, sinker, [('outputnode.wm_mask_registered_crop',
                                     'anat.@registered_wm'),
                                    ('outputnode.roi_volumes_registered_crop',
                                     'anat.@registered_roi_volumes'),
                                    ('outputnode.eroded_wm_registered_crop',
                                     'anat.@eroded_wm'),
                                    ('outputnode.eroded_csf_registered_crop',
                                     'anat.@eroded_csf'),
                                    ('outputnode.eroded_brain_registered_crop',
                                     'anat.@eroded_brain')]),
            ])
            # if self.stages['FunctionalMRI'].config.global_nuisance:
            #     fMRI_flow.connect([
            #                   (fMRI_inputnode,reg_flow,[('brain_eroded','inputnode.eroded_brain')])
            #                 ])
            # if self.stages['FunctionalMRI'].config.csf:
            #     fMRI_flow.connect([
            #                   (fMRI_inputnode,reg_flow,[('csf_eroded','inputnode.eroded_csf')])
            #                 ])

        if self.stages['FunctionalMRI'].enabled:
            func_flow = self.create_stage_flow("FunctionalMRI")
            fMRI_flow.connect([
                (preproc_flow, func_flow, [('outputnode.functional_preproc',
                                            'inputnode.preproc_file')]),
                (reg_flow, func_flow,
                 [('outputnode.wm_mask_registered_crop',
                   'inputnode.registered_wm'),
                  ('outputnode.roi_volumes_registered_crop',
                   'inputnode.registered_roi_volumes'),
                  ('outputnode.eroded_wm_registered_crop',
                   'inputnode.eroded_wm'),
                  ('outputnode.eroded_csf_registered_crop',
                   'inputnode.eroded_csf'),
                  ('outputnode.eroded_brain_registered_crop',
                   'inputnode.eroded_brain')]),
                (func_flow, sinker, [('outputnode.func_file',
                                      'func.@func_file'),
                                     ("outputnode.FD", "func.@FD"),
                                     ("outputnode.DVARS", "func.@DVARS")]),
            ])
            if self.stages['FunctionalMRI'].config.scrubbing or self.stages[
                    'FunctionalMRI'].config.motion:
                fMRI_flow.connect([
                    (preproc_flow, func_flow, [("outputnode.par_file",
                                                "inputnode.motion_par_file")]),
                    (preproc_flow, sinker, [("outputnode.par_file",
                                             "func.@motion_par_file")])
                ])

        if self.stages['Connectome'].enabled:
            self.stages['Connectome'].config.subject = self.global_conf.subject
            con_flow = self.create_stage_flow("Connectome")
            fMRI_flow.connect([
                (fMRI_inputnode, con_flow,
                 [('parcellation_scheme', 'inputnode.parcellation_scheme'),
                  ('roi_graphMLs', 'inputnode.roi_graphMLs')]),
                (func_flow, con_flow,
                 [('outputnode.func_file', 'inputnode.func_file'),
                  ("outputnode.FD", "inputnode.FD"),
                  ("outputnode.DVARS", "inputnode.DVARS")]),
                (reg_flow, con_flow,
                 [("outputnode.roi_volumes_registered_crop",
                   "inputnode.roi_volumes_registered")]),
                (con_flow, fMRI_outputnode,
                 [("outputnode.connectivity_matrices", "connectivity_matrices")
                  ]),
                (con_flow, sinker, [("outputnode.connectivity_matrices",
                                     "func.@connectivity_matrices")]),
                (con_flow, sinker, [("outputnode.avg_timeseries",
                                     "func.@avg_timeseries")])
            ])

            # if self.parcellation_scheme == "Custom":
            #     fMRI_flow.connect(
            #         [(fMRI_inputnode, con_flow, [('atlas_info', 'inputnode.atlas_info')])])

        return fMRI_flow
Example no. 20
def petpvc_mask(wf_name="petpvc_mask"):
    """ A Workflow that returns a 4D merge of 4 volumes for PETPVC: GM, WM, CSF and background.

    Parameters
    ----------
    wf_name: str
        The name of the workflow.

    Nipype.Inputs
    -------------
    pvcmask_input.tissues: list of existing files
        List of tissue files in anatomical space; the three file paths
        must be in this order: GM, WM, CSF.

    Nipype.Outputs
    --------------
    pvcmask_output.petpvc_mask: existing file
        A 4D volume file with these maps in order: GM, WM, CSF, background

    pvcmask_output.brain_mask: existing file
        A binary mask obtained by thresholding the sum of the tissue maps.
        Can be used as a brain mask in anatomical space for the PET image.

    Returns
    -------
    wf: nipype Workflow
    """
    # define nodes
    # specify input and output fields
    in_fields = ["tissues"]

    out_fields = [
        "petpvc_mask",
        "brain_mask",
    ]

    # input
    pvcmask_input = setup_node(IdentityInterface(fields=in_fields,
                                                 mandatory_inputs=True),
                               name="pvcmask_input")

    tissues = setup_node(IdentityInterface(fields=["gm", "wm", "csf"],
                                           mandatory_inputs=True),
                         name="tissues")

    merge_list = setup_node(Merge(4), name="merge_list")

    # maths for background
    img_bkg = setup_node(Function(
        function=math_img,
        input_names=["formula", "out_file", "gm", "wm", "csf"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                         name='background')
    img_bkg.inputs.out_file = "tissue_bkg.nii.gz"
    img_bkg.inputs.formula = "np.maximum((-((gm + wm + csf) - 1)), 0)"
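    # The formula equals max(1 - (gm + wm + csf), 0): whatever probability
    # the three tissue maps leave unexplained is assigned to background,
    # clipped at zero.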

    # maths for brain mask
    brain_mask = setup_node(Function(
        function=math_img,
        input_names=["formula", "out_file", "gm", "wm", "csf"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                            name='brain_mask')
    brain_mask.inputs.out_file = "tissues_brain_mask.nii.gz"
    brain_mask.inputs.formula = "np.abs(gm + wm + csf) > 0"
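    # Any voxel with a non-zero total tissue probability is kept, giving a
    # binary brain mask in anatomical space.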

    # concat the tissues images and the background for PETPVC
    merge_tissues = setup_node(Function(
        function=concat_imgs,
        input_names=["in_files"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                               name='merge_tissues')
    merge_tissues.inputs.out_file = "petpvc_mask.nii.gz"

    # output
    pvcmask_output = setup_node(IdentityInterface(fields=out_fields),
                                name="pvcmask_output")

    # Create the workflow object
    wf = Workflow(name=wf_name)

    # Connect the nodes
    wf.connect([
        # split the tissues list into separate gm/wm/csf fields
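        # nipype evaluates (("tissues", selectindex, 0), "gm") by calling
        # selectindex(tissues, 0) on the source value before it reaches "gm".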
        (pvcmask_input, tissues, [(("tissues", selectindex, 0), "gm"),
                                  (("tissues", selectindex, 1), "wm"),
                                  (("tissues", selectindex, 2), "csf")]),
        (tissues, img_bkg, [("gm", "gm"), ("wm", "wm"), ("csf", "csf")]),
        (tissues, brain_mask, [("gm", "gm"), ("wm", "wm"), ("csf", "csf")]),
        (tissues, merge_list, [("gm", "in1"), ("wm", "in2"), ("csf", "in3")]),

        # create a list of [GM, WM, CSF, BKG]
        (img_bkg, merge_list, [("out_file", "in4")]),

        # merge into 4D: [GM, WM, CSF, BKG]
        (merge_list, merge_tissues, [("out", "in_files")]),

        # output
        (merge_tissues, pvcmask_output, [("out_file", "petpvc_mask")]),
        (brain_mask, pvcmask_output, [("out_file", "brain_mask")]),
    ])

    return wf
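
# Usage sketch: the tissue paths below are hypothetical placeholders for
# ordered [GM, WM, CSF] probability maps in anatomical space.
if __name__ == "__main__":
    wf = petpvc_mask()
    wf.inputs.pvcmask_input.tissues = [
        "gm_probmap.nii.gz",   # grey matter
        "wm_probmap.nii.gz",   # white matter
        "csf_probmap.nii.gz",  # cerebrospinal fluid
    ]
    wf.run()  # yields pvcmask_output.petpvc_mask and pvcmask_output.brain_mask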
Example no. 21
def firstlevel_wf(subject_id, sink_directory, name='wmaze_frstlvl_wf'):
    frstlvl_wf = Workflow(name=name)

    info = dict(
        task_mri_files=[['subject_id',
                         'wmaze']],  #dictionary used in datasource
        motion_noise_files=[['subject_id']])

    #function node to call subjectinfo function with name, onset, duration, and amplitude info
    subject_info = Node(Function(input_names=['subject_id'],
                                 output_names=['output'],
                                 function=subjectinfo),
                        name='subject_info')
    subject_info.inputs.ignore_exception = False
    subject_info.inputs.subject_id = subject_id

    #function node to define contrasts
    getcontrasts = Node(Function(input_names=['subject_id', 'info'],
                                 output_names=['contrasts'],
                                 function=get_contrasts),
                        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    getcontrasts.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getcontrasts, 'info')

    #function node to substitute names of folders and files created during pipeline
    getsubs = Node(
        Function(
            input_names=['subject_id', 'info', 'cons'],
            output_names=['subs'],
            # Calls the function 'get_subs'
            function=get_subs),
        name='getsubs')
    getsubs.inputs.ignore_exception = False
    getsubs.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getsubs, 'info')
    frstlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    #datasource node to get task_mri and motion-noise files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=list(info.keys())),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/preproc/')
    datasource.inputs.field_template = dict(
        task_mri_files='%s/func/smoothed_fullspectrum/_maskfunc2*/*%s*.nii.gz',  #functional files
        motion_noise_files='%s/noise/filter_regressor??.txt')  #filter regressor noise files
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    #MapNode to remove the last three volumes from each functional run
    fslroi_epi = MapNode(
        ExtractROI(t_min=0,
                   t_size=197),  #start from first volume and end on -3
        iterfield=['in_file'],
        name='fslroi_epi')
    fslroi_epi.inputs.output_type = 'NIFTI_GZ'
    fslroi_epi.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(datasource, 'task_mri_files', fslroi_epi, 'in_file')

    #function node to modify the motion and noise files to be single regressors
    motionnoise = Node(Function(input_names=['subjinfo', 'files'],
                                output_names=['subjinfo'],
                                function=motion_noise),
                       name='motionnoise')
    motionnoise.inputs.ignore_exception = False
    frstlvl_wf.connect(subject_info, 'output', motionnoise, 'subjinfo')
    frstlvl_wf.connect(datasource, 'motion_noise_files', motionnoise, 'files')

    #node to create model specifications compatible with spm/fsl designers (requires subjectinfo to be received in the form of a Bunch)
    specify_model = Node(SpecifyModel(), name='specify_model')
    specify_model.inputs.high_pass_filter_cutoff = -1.0  #high-pass filter cutoff in seconds
    specify_model.inputs.ignore_exception = False
    specify_model.inputs.input_units = 'secs'  #input units in either 'secs' or 'scans'
    specify_model.inputs.time_repetition = 2.0  #TR
    frstlvl_wf.connect(
        fslroi_epi, 'roi_file', specify_model,
        'functional_runs')  #edited data files for model -- list of 4D files
    #list of event description files in 3 column format corresponding to onsets, durations, and amplitudes
    frstlvl_wf.connect(motionnoise, 'subjinfo', specify_model, 'subject_info')

    #node for basic interface class generating identity mappings
    modelfit_inputspec = Node(IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ],
                                                mandatory_inputs=True),
                              name='modelfit_inputspec')
    modelfit_inputspec.inputs.bases = {'dgamma': {'derivs': False}}
    modelfit_inputspec.inputs.film_threshold = 0.0
    modelfit_inputspec.inputs.interscan_interval = 2.0
    modelfit_inputspec.inputs.model_serial_correlations = True
    frstlvl_wf.connect(fslroi_epi, 'roi_file', modelfit_inputspec,
                       'functional_data')
    frstlvl_wf.connect(getcontrasts, 'contrasts', modelfit_inputspec,
                       'contrasts')
    frstlvl_wf.connect(specify_model, 'session_info', modelfit_inputspec,
                       'session_info')

    #node for first level SPM design matrix to demonstrate contrasts and motion/noise regressors
    level1_design = MapNode(Level1Design(),
                            iterfield=['contrasts', 'session_info'],
                            name='level1_design')
    level1_design.inputs.ignore_exception = False
    frstlvl_wf.connect(modelfit_inputspec, 'interscan_interval', level1_design,
                       'interscan_interval')
    frstlvl_wf.connect(modelfit_inputspec, 'session_info', level1_design,
                       'session_info')
    frstlvl_wf.connect(modelfit_inputspec, 'contrasts', level1_design,
                       'contrasts')
    frstlvl_wf.connect(modelfit_inputspec, 'bases', level1_design, 'bases')
    frstlvl_wf.connect(modelfit_inputspec, 'model_serial_correlations',
                       level1_design, 'model_serial_correlations')

    #MapNode to generate a design.mat file for each run
    generate_model = MapNode(FEATModel(),
                             iterfield=['fsf_file', 'ev_files'],
                             name='generate_model')
    generate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    generate_model.inputs.ignore_exception = False
    generate_model.inputs.output_type = 'NIFTI_GZ'
    generate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(level1_design, 'fsf_files', generate_model, 'fsf_file')
    frstlvl_wf.connect(level1_design, 'ev_files', generate_model, 'ev_files')

    #MapNode to estimate the model using FILMGLS -- fits the design matrix to the voxel timeseries
    estimate_model = MapNode(FILMGLS(),
                             iterfield=['design_file', 'in_file', 'tcon_file'],
                             name='estimate_model')
    estimate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    estimate_model.inputs.ignore_exception = False
    estimate_model.inputs.mask_size = 5  #Susan-smooth mask size
    estimate_model.inputs.output_type = 'NIFTI_GZ'
    estimate_model.inputs.results_dir = 'results'
    estimate_model.inputs.smooth_autocorr = True  #smooth auto-correlation estimates
    estimate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(modelfit_inputspec, 'film_threshold', estimate_model,
                       'threshold')
    frstlvl_wf.connect(modelfit_inputspec, 'functional_data', estimate_model,
                       'in_file')
    frstlvl_wf.connect(
        generate_model, 'design_file', estimate_model,
        'design_file')  #mat file containing ascii matrix for design
    frstlvl_wf.connect(generate_model, 'con_file', estimate_model,
                       'tcon_file')  #contrast file containing contrast vectors

    #merge node to combine the contrasts - necessary for FSL 5.0.7 and greater
    merge_contrasts = MapNode(Merge(2),
                              iterfield=['in1'],
                              name='merge_contrasts')
    frstlvl_wf.connect(estimate_model, 'zstats', merge_contrasts, 'in1')

    #MapNode to convert z-statistics to p-values
    z2pval = MapNode(ImageMaths(), iterfield=['in_file'], name='z2pval')
    z2pval.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    z2pval.inputs.ignore_exception = False
    z2pval.inputs.op_string = '-ztop'  #fslmaths operation: z-stat to uncorrected p-value
    z2pval.inputs.output_type = 'NIFTI_GZ'
    z2pval.inputs.suffix = '_pval'
    z2pval.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(merge_contrasts, ('out', pop_lambda), z2pval, 'in_file')

    #outputspec node using IdentityInterface() to receive information from
    #estimate_model, merge_contrasts, z2pval, and generate_model
    modelfit_outputspec = Node(IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates',
        'zstats', 'design_image', 'design_file', 'design_cov', 'sigmasquareds'
    ],
                                                 mandatory_inputs=True),
                               name='modelfit_outputspec')
    frstlvl_wf.connect(estimate_model, 'copes', modelfit_outputspec,
                       'copes')  #lvl1 cope files
    frstlvl_wf.connect(estimate_model, 'varcopes', modelfit_outputspec,
                       'varcopes')  #lvl1 varcope files
    frstlvl_wf.connect(merge_contrasts, 'out', modelfit_outputspec,
                       'zstats')  #zstats across runs
    frstlvl_wf.connect(z2pval, 'out_file', modelfit_outputspec, 'pfiles')
    frstlvl_wf.connect(
        generate_model, 'design_image', modelfit_outputspec,
        'design_image')  #graphical representation of design matrix
    frstlvl_wf.connect(
        generate_model, 'design_file', modelfit_outputspec,
        'design_file')  #mat file containing ascii matrix for design
    frstlvl_wf.connect(
        generate_model, 'design_cov', modelfit_outputspec,
        'design_cov')  #graphical representation of design covariance
    frstlvl_wf.connect(estimate_model, 'param_estimates', modelfit_outputspec,
                       'parameter_estimates'
                       )  #parameter estimates for columns of design matrix
    frstlvl_wf.connect(estimate_model, 'dof_file', modelfit_outputspec,
                       'dof_file')  #degrees of freedom
    frstlvl_wf.connect(estimate_model, 'sigmasquareds', modelfit_outputspec,
                       'sigmasquareds')  #summary of residuals

    #datasink node to save output from multiple points in the pipeline
    sinkd = MapNode(DataSink(),
                    iterfield=[
                        'substitutions', 'modelfit.contrasts.@copes',
                        'modelfit.contrasts.@varcopes', 'modelfit.estimates',
                        'modelfit.contrasts.@zstats'
                    ],
                    name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    frstlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')
    frstlvl_wf.connect(modelfit_outputspec, 'parameter_estimates', sinkd,
                       'modelfit.estimates')
    frstlvl_wf.connect(modelfit_outputspec, 'sigmasquareds', sinkd,
                       'modelfit.estimates.@sigsq')
    frstlvl_wf.connect(modelfit_outputspec, 'dof_file', sinkd, 'modelfit.dofs')
    frstlvl_wf.connect(modelfit_outputspec, 'copes', sinkd,
                       'modelfit.contrasts.@copes')
    frstlvl_wf.connect(modelfit_outputspec, 'varcopes', sinkd,
                       'modelfit.contrasts.@varcopes')
    frstlvl_wf.connect(modelfit_outputspec, 'zstats', sinkd,
                       'modelfit.contrasts.@zstats')
    frstlvl_wf.connect(modelfit_outputspec, 'design_image', sinkd,
                       'modelfit.design')
    frstlvl_wf.connect(modelfit_outputspec, 'design_cov', sinkd,
                       'modelfit.design.@cov')
    frstlvl_wf.connect(modelfit_outputspec, 'design_file', sinkd,
                       'modelfit.design.@matrix')
    frstlvl_wf.connect(modelfit_outputspec, 'pfiles', sinkd,
                       'modelfit.contrasts.@pstats')

    return frstlvl_wf
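
# Usage sketch: the subject ID and sink directory are hypothetical
# placeholders.
if __name__ == "__main__":
    wf = firstlevel_wf("WMAZE_001", "/scratch/wmaze/frstlvl")
    wf.run(plugin="MultiProc")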
Example no. 22
def segmentation(projectid, subjectid, sessionid, master_config, onlyT1=True, pipeline_name=''):
    import os.path
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from nipype.interfaces import ants
    from nipype.interfaces.utility import IdentityInterface, Function, Merge
    # Set universal pipeline options
    from nipype import config
    config.update_config(master_config)

    from PipeLineFunctionHelpers import ClipT1ImageWithBrainMask
    from .WorkupT1T2BRAINSCut import CreateBRAINSCutWorkflow
    from utilities.distributed import modify_qsub_args
    from nipype.interfaces.semtools import BRAINSSnapShotWriter

    # CLUSTER_QUEUE=master_config['queue']
    CLUSTER_QUEUE_LONG = master_config['long_q']
    baw200 = pe.Workflow(name=pipeline_name)

    # HACK: print for debugging
    for key, item in list(master_config.items()):
        print(("-" * 30))
        print((key, ":", item))
    print(("-" * 30))
    # END HACK

    inputsSpec = pe.Node(interface=IdentityInterface(fields=['t1_average',
                                                             't2_average',
                                                             'template_t1',
                                                             'hncma_atlas',
                                                             'LMIatlasToSubject_tx',
                                                             'inputLabels',
                                                             'inputHeadLabels',
                                                             'posteriorImages',
                                                             'UpdatedPosteriorsList',
                                                             'atlasToSubjectRegistrationState',
                                                             'rho',
                                                             'phi',
                                                             'theta',
                                                             'l_caudate_ProbabilityMap',
                                                             'r_caudate_ProbabilityMap',
                                                             'l_hippocampus_ProbabilityMap',
                                                             'r_hippocampus_ProbabilityMap',
                                                             'l_putamen_ProbabilityMap',
                                                             'r_putamen_ProbabilityMap',
                                                             'l_thalamus_ProbabilityMap',
                                                             'r_thalamus_ProbabilityMap',
                                                             'l_accumben_ProbabilityMap',
                                                             'r_accumben_ProbabilityMap',
                                                             'l_globus_ProbabilityMap',
                                                             'r_globus_ProbabilityMap',
                                                             'trainModelFile_txtD0060NT0060_gz',
                                                             ]),
                         run_without_submitting=True, name='inputspec')

    # outputsSpec = pe.Node(interface=IdentityInterface(fields=[...]),
    #                       run_without_submitting=True, name='outputspec')

    currentClipT1ImageWithBrainMaskName = 'ClipT1ImageWithBrainMask_' + str(subjectid) + "_" + str(sessionid)
    ClipT1ImageWithBrainMaskNode = pe.Node(interface=Function(function=ClipT1ImageWithBrainMask,
                                                              input_names=['t1_image', 'brain_labels',
                                                                           'clipped_file_name'],
                                                              output_names=['clipped_file']),
                                           name=currentClipT1ImageWithBrainMaskName)
    ClipT1ImageWithBrainMaskNode.inputs.clipped_file_name = 'clipped_from_BABC_labels_t1.nii.gz'

    baw200.connect([(inputsSpec, ClipT1ImageWithBrainMaskNode, [('t1_average', 't1_image'),
                                                                ('inputLabels', 'brain_labels')])])

    currentA2SantsRegistrationPostABCSyN = 'A2SantsRegistrationPostABCSyN_' + str(subjectid) + "_" + str(sessionid)
    ## TODO: It would be great to update the BRAINSABC atlasToSubjectTransform at this point, but
    ##       That requires more testing, and fixes to ANTS to properly collapse transforms.
    ##       For now we are simply creating a dummy node to pass through


    A2SantsRegistrationPostABCSyN = pe.Node(interface=ants.Registration(), name=currentA2SantsRegistrationPostABCSyN)

    many_cpu_ANTsSyN_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE_LONG, 8, 8, 16),
                                           'overwrite': True}
    A2SantsRegistrationPostABCSyN.plugin_args = many_cpu_ANTsSyN_options_dictionary
    CommonANTsRegistrationSettings(
        antsRegistrationNode=A2SantsRegistrationPostABCSyN,
        registrationTypeDescription="A2SantsRegistrationPostABCSyN",
        output_transform_prefix='AtlasToSubjectPostBABC_SyN',
        output_warped_image='atlas2subjectPostBABC.nii.gz',
        output_inverse_warped_image='subject2atlasPostBABC.nii.gz',
        save_state='SavedInternalSyNStatePostBABC.h5',
        invert_initial_moving_transform=False,
        initial_moving_transform=None)

    ## TODO: Try multi-modal registration here
    baw200.connect([(inputsSpec, A2SantsRegistrationPostABCSyN, [('atlasToSubjectRegistrationState', 'restore_state'),
                                                                 ('t1_average', 'fixed_image'),
                                                                 ('template_t1', 'moving_image')])
                    ])

    myLocalSegWF = CreateBRAINSCutWorkflow(projectid,
                                           subjectid,
                                           sessionid,
                                           master_config['queue'],
                                           master_config['long_q'],
                                           "Segmentation",
                                           onlyT1)
    MergeStage2AverageImagesName = "99_mergeAverageStage2Images_" + str(sessionid)
    MergeStage2AverageImages = pe.Node(interface=Merge(2), run_without_submitting=True,
                                       name=MergeStage2AverageImagesName)

    baw200.connect([(inputsSpec, myLocalSegWF, [('t1_average', 'inputspec.T1Volume'),
                                                ('template_t1', 'inputspec.template_t1'),
                                                ('posteriorImages', "inputspec.posteriorDictionary"),
                                                ('inputLabels', 'inputspec.RegistrationROI'), ]),
                    (inputsSpec, MergeStage2AverageImages, [('t1_average', 'in1')]),
                    (A2SantsRegistrationPostABCSyN, myLocalSegWF, [('composite_transform',
                                                                    'inputspec.atlasToSubjectTransform')])
                    ])

    baw200.connect([(inputsSpec, myLocalSegWF,
                     [
                         ('rho', 'inputspec.rho'),
                         ('phi', 'inputspec.phi'),
                         ('theta', 'inputspec.theta'),
                         ('l_caudate_ProbabilityMap', 'inputspec.l_caudate_ProbabilityMap'),
                         ('r_caudate_ProbabilityMap', 'inputspec.r_caudate_ProbabilityMap'),
                         ('l_hippocampus_ProbabilityMap', 'inputspec.l_hippocampus_ProbabilityMap'),
                         ('r_hippocampus_ProbabilityMap', 'inputspec.r_hippocampus_ProbabilityMap'),
                         ('l_putamen_ProbabilityMap', 'inputspec.l_putamen_ProbabilityMap'),
                         ('r_putamen_ProbabilityMap', 'inputspec.r_putamen_ProbabilityMap'),
                         ('l_thalamus_ProbabilityMap', 'inputspec.l_thalamus_ProbabilityMap'),
                         ('r_thalamus_ProbabilityMap', 'inputspec.r_thalamus_ProbabilityMap'),
                         ('l_accumben_ProbabilityMap', 'inputspec.l_accumben_ProbabilityMap'),
                         ('r_accumben_ProbabilityMap', 'inputspec.r_accumben_ProbabilityMap'),
                         ('l_globus_ProbabilityMap', 'inputspec.l_globus_ProbabilityMap'),
                         ('r_globus_ProbabilityMap', 'inputspec.r_globus_ProbabilityMap'),
                         ('trainModelFile_txtD0060NT0060_gz', 'inputspec.trainModelFile_txtD0060NT0060_gz')
                     ]
                     )]
                   )

    if not onlyT1:
        baw200.connect([(inputsSpec, myLocalSegWF, [('t2_average', 'inputspec.T2Volume')]),
                        (inputsSpec, MergeStage2AverageImages, [('t2_average', 'in2')])])
        file_count = 15  # Count of files to merge into MergeSessionSubjectToAtlas
    else:
        file_count = 14  # Count of files to merge into MergeSessionSubjectToAtlas

    ## NOTE: Element 0 of AccumulatePriorsList is the accumulated GM tissue
    # baw200.connect([(AccumulateLikeTissuePosteriorsNode, myLocalSegWF,
    #               [(('AccumulatePriorsList', getListIndex, 0), "inputspec.TotalGM")]),
    #               ])

    ### Now define where the final organized outputs should go.
    DataSink = pe.Node(nio.DataSink(), name="CleanedDenoisedSegmentation_DS_" + str(subjectid) + "_" + str(sessionid))
    DataSink.overwrite = master_config['ds_overwrite']
    DataSink.inputs.base_directory = master_config['resultdir']
    # DataSink.inputs.regexp_substitutions = GenerateOutputPattern(projectid, subjectid, sessionid,'BRAINSCut')
    # DataSink.inputs.regexp_substitutions = GenerateBRAINSCutImagesOutputPattern(projectid, subjectid, sessionid)
    DataSink.inputs.substitutions = [
        ('Segmentations', os.path.join(projectid, subjectid, sessionid, 'CleanedDenoisedRFSegmentations')),
        ('subjectANNLabel_', ''),
        ('ANNContinuousPrediction', ''),
        ('subject.nii.gz', '.nii.gz'),
        ('_seg.nii.gz', '_seg.nii.gz'),
        ('.nii.gz', '_seg.nii.gz'),
        ('_seg_seg', '_seg')]
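    # The last three substitutions act as an ordered chain: '.nii.gz' first
    # gains a '_seg' suffix, and '_seg_seg' then collapses the doubled suffix
    # on files that already ended in '_seg.nii.gz', so every sunk image ends
    # in '_seg.nii.gz' exactly once.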

    baw200.connect([(myLocalSegWF, DataSink, [('outputspec.outputBinaryLeftCaudate', 'Segmentations.@LeftCaudate'),
                                              ('outputspec.outputBinaryRightCaudate', 'Segmentations.@RightCaudate'),
                                              ('outputspec.outputBinaryLeftHippocampus',
                                               'Segmentations.@LeftHippocampus'),
                                              ('outputspec.outputBinaryRightHippocampus',
                                               'Segmentations.@RightHippocampus'),
                                              ('outputspec.outputBinaryLeftPutamen', 'Segmentations.@LeftPutamen'),
                                              ('outputspec.outputBinaryRightPutamen', 'Segmentations.@RightPutamen'),
                                              ('outputspec.outputBinaryLeftThalamus', 'Segmentations.@LeftThalamus'),
                                              ('outputspec.outputBinaryRightThalamus', 'Segmentations.@RightThalamus'),
                                              ('outputspec.outputBinaryLeftAccumben', 'Segmentations.@LeftAccumben'),
                                              ('outputspec.outputBinaryRightAccumben', 'Segmentations.@RightAccumben'),
                                              ('outputspec.outputBinaryLeftGlobus', 'Segmentations.@LeftGlobus'),
                                              ('outputspec.outputBinaryRightGlobus', 'Segmentations.@RightGlobus'),
                                              ('outputspec.outputLabelImageName', 'Segmentations.@LabelImageName'),
                                              ('outputspec.outputCSVFileName', 'Segmentations.@CSVFileName')]),
                    # (myLocalSegWF, DataSink, [('outputspec.cleaned_labels', 'Segmentations.@cleaned_labels')])
                    ])

    MergeStage2BinaryVolumesName = "99_MergeStage2BinaryVolumes_" + str(sessionid)
    MergeStage2BinaryVolumes = pe.Node(interface=Merge(12), run_without_submitting=True,
                                       name=MergeStage2BinaryVolumesName)

    baw200.connect([(myLocalSegWF, MergeStage2BinaryVolumes, [('outputspec.outputBinaryLeftAccumben', 'in1'),
                                                              ('outputspec.outputBinaryLeftCaudate', 'in2'),
                                                              ('outputspec.outputBinaryLeftPutamen', 'in3'),
                                                              ('outputspec.outputBinaryLeftGlobus', 'in4'),
                                                              ('outputspec.outputBinaryLeftThalamus', 'in5'),
                                                              ('outputspec.outputBinaryLeftHippocampus', 'in6'),
                                                              ('outputspec.outputBinaryRightAccumben', 'in7'),
                                                              ('outputspec.outputBinaryRightCaudate', 'in8'),
                                                              ('outputspec.outputBinaryRightPutamen', 'in9'),
                                                              ('outputspec.outputBinaryRightGlobus', 'in10'),
                                                              ('outputspec.outputBinaryRightThalamus', 'in11'),
                                                              ('outputspec.outputBinaryRightHippocampus', 'in12')])
                    ])

    ## SnapShotWriter for Segmented result checking:
    SnapShotWriterNodeName = "SnapShotWriter_" + str(sessionid)
    SnapShotWriter = pe.Node(interface=BRAINSSnapShotWriter(), name=SnapShotWriterNodeName)

    SnapShotWriter.inputs.outputFilename = 'snapShot' + str(sessionid) + '.png'  # output specification
    SnapShotWriter.inputs.inputPlaneDirection = [2, 1, 1, 1, 1, 0, 0]
    SnapShotWriter.inputs.inputSliceToExtractInPhysicalPoint = [-3, -7, -3, 5, 7, 22, -22]

    baw200.connect([(MergeStage2AverageImages, SnapShotWriter, [('out', 'inputVolumes')]),
                    (MergeStage2BinaryVolumes, SnapShotWriter, [('out', 'inputBinaryVolumes')]),
                    (SnapShotWriter, DataSink, [('outputFilename', 'Segmentations.@outputSnapShot')])
                    ])

    # currentAntsLabelWarpToSubject = 'AntsLabelWarpToSubject' + str(subjectid) + "_" + str(sessionid)
    # AntsLabelWarpToSubject = pe.Node(interface=ants.ApplyTransforms(), name=currentAntsLabelWarpToSubject)
    #
    # AntsLabelWarpToSubject.inputs.num_threads = -1
    # AntsLabelWarpToSubject.inputs.dimension = 3
    # AntsLabelWarpToSubject.inputs.output_image = 'warped_hncma_atlas_seg.nii.gz'
    # AntsLabelWarpToSubject.inputs.interpolation = "MultiLabel"
    #
    # baw200.connect([(A2SantsRegistrationPostABCSyN, AntsLabelWarpToSubject, [('composite_transform', 'transforms')]),
    #                 (inputsSpec, AntsLabelWarpToSubject, [('t1_average', 'reference_image'),
    #                                                       ('hncma_atlas', 'input_image')])
    #                 ])
    # #####
    # ### Now define where the final organized outputs should go.
    # AntsLabelWarpedToSubject_DSName = "AntsLabelWarpedToSubject_DS_" + str(sessionid)
    # AntsLabelWarpedToSubject_DS = pe.Node(nio.DataSink(), name=AntsLabelWarpedToSubject_DSName)
    # AntsLabelWarpedToSubject_DS.overwrite = master_config['ds_overwrite']
    # AntsLabelWarpedToSubject_DS.inputs.base_directory = master_config['resultdir']
    # AntsLabelWarpedToSubject_DS.inputs.substitutions = [('AntsLabelWarpedToSubject', os.path.join(projectid, subjectid, sessionid, 'AntsLabelWarpedToSubject'))]
    #
    # baw200.connect([(AntsLabelWarpToSubject, AntsLabelWarpedToSubject_DS, [('output_image', 'AntsLabelWarpedToSubject')])])

    MergeSessionSubjectToAtlasName = "99_MergeSessionSubjectToAtlas_" + str(sessionid)
    MergeSessionSubjectToAtlas = pe.Node(interface=Merge(file_count), run_without_submitting=True,
                                         name=MergeSessionSubjectToAtlasName)

    baw200.connect([(myLocalSegWF, MergeSessionSubjectToAtlas, [('outputspec.outputBinaryLeftAccumben', 'in1'),
                                                                ('outputspec.outputBinaryLeftCaudate', 'in2'),
                                                                ('outputspec.outputBinaryLeftPutamen', 'in3'),
                                                                ('outputspec.outputBinaryLeftGlobus', 'in4'),
                                                                ('outputspec.outputBinaryLeftThalamus', 'in5'),
                                                                ('outputspec.outputBinaryLeftHippocampus', 'in6'),
                                                                ('outputspec.outputBinaryRightAccumben', 'in7'),
                                                                ('outputspec.outputBinaryRightCaudate', 'in8'),
                                                                ('outputspec.outputBinaryRightPutamen', 'in9'),
                                                                ('outputspec.outputBinaryRightGlobus', 'in10'),
                                                                ('outputspec.outputBinaryRightThalamus', 'in11'),
                                                                ('outputspec.outputBinaryRightHippocampus', 'in12')]),
                    # (FixWMPartitioningNode, MergeSessionSubjectToAtlas, [('UpdatedPosteriorsList', 'in13')]),
                    (inputsSpec, MergeSessionSubjectToAtlas, [('UpdatedPosteriorsList', 'in13')]),
                    (inputsSpec, MergeSessionSubjectToAtlas, [('t1_average', 'in14')])
                    ])

    if not onlyT1:
        assert file_count == 15
        baw200.connect([(inputsSpec, MergeSessionSubjectToAtlas, [('t2_average', 'in15')])])

    LinearSubjectToAtlasANTsApplyTransformsName = 'LinearSubjectToAtlasANTsApplyTransforms_' + str(sessionid)
    LinearSubjectToAtlasANTsApplyTransforms = pe.MapNode(interface=ants.ApplyTransforms(), iterfield=['input_image'],
                                                         name=LinearSubjectToAtlasANTsApplyTransformsName)
    LinearSubjectToAtlasANTsApplyTransforms.inputs.num_threads = -1
    LinearSubjectToAtlasANTsApplyTransforms.inputs.interpolation = 'Linear'

    baw200.connect(
        [(A2SantsRegistrationPostABCSyN, LinearSubjectToAtlasANTsApplyTransforms, [('inverse_composite_transform',
                                                                                    'transforms')]),
         (inputsSpec, LinearSubjectToAtlasANTsApplyTransforms, [('template_t1', 'reference_image')]),
         (MergeSessionSubjectToAtlas, LinearSubjectToAtlasANTsApplyTransforms, [('out', 'input_image')])
         ])

    MergeMultiLabelSessionSubjectToAtlasName = "99_MergeMultiLabelSessionSubjectToAtlas_" + str(sessionid)
    MergeMultiLabelSessionSubjectToAtlas = pe.Node(interface=Merge(2), run_without_submitting=True,
                                                   name=MergeMultiLabelSessionSubjectToAtlasName)

    baw200.connect([(inputsSpec, MergeMultiLabelSessionSubjectToAtlas, [('inputLabels', 'in1'),
                                                                        ('inputHeadLabels', 'in2')])
                    ])

    ### This is taking this session's RF label map back into NAC atlas space.
    # {
    MultiLabelSubjectToAtlasANTsApplyTransformsName = 'MultiLabelSubjectToAtlasANTsApplyTransforms_' + str(
        sessionid) + '_map'
    MultiLabelSubjectToAtlasANTsApplyTransforms = pe.MapNode(interface=ants.ApplyTransforms(),
                                                             iterfield=['input_image'],
                                                             name=MultiLabelSubjectToAtlasANTsApplyTransformsName)
    MultiLabelSubjectToAtlasANTsApplyTransforms.inputs.num_threads = -1
    MultiLabelSubjectToAtlasANTsApplyTransforms.inputs.interpolation = 'MultiLabel'

    baw200.connect([(A2SantsRegistrationPostABCSyN, MultiLabelSubjectToAtlasANTsApplyTransforms,
                     [('inverse_composite_transform', 'transforms')]),
                    (inputsSpec, MultiLabelSubjectToAtlasANTsApplyTransforms, [('template_t1', 'reference_image')]),
                    (MergeMultiLabelSessionSubjectToAtlas, MultiLabelSubjectToAtlasANTsApplyTransforms,
                     [('out', 'input_image')])
                    ])
    # }
    ### Now we must take the sessions to THIS SUBJECT'S personalized atlas.
    # {
    # }

    ### Now define where the final organized outputs should go.
    Subj2Atlas_DSName = "SubjectToAtlas_DS_" + str(sessionid)
    Subj2Atlas_DS = pe.Node(nio.DataSink(), name=Subj2Atlas_DSName)
    Subj2Atlas_DS.overwrite = master_config['ds_overwrite']
    Subj2Atlas_DS.inputs.base_directory = master_config['resultdir']
    Subj2Atlas_DS.inputs.regexp_substitutions = [(r'_LinearSubjectToAtlasANTsApplyTransforms_[^/]*',
                                                  r'' + sessionid + '/')]

    baw200.connect([(LinearSubjectToAtlasANTsApplyTransforms, Subj2Atlas_DS,
                     [('output_image', 'SubjectToAtlasWarped.@linear_output_images')])])

    Subj2AtlasTransforms_DSName = "SubjectToAtlasTransforms_DS_" + str(sessionid)
    Subj2AtlasTransforms_DS = pe.Node(nio.DataSink(), name=Subj2AtlasTransforms_DSName)
    Subj2AtlasTransforms_DS.overwrite = master_config['ds_overwrite']
    Subj2AtlasTransforms_DS.inputs.base_directory = master_config['resultdir']
    Subj2AtlasTransforms_DS.inputs.regexp_substitutions = [(r'SubjectToAtlasWarped',
                                                            r'SubjectToAtlasWarped/' + sessionid + '/')]

    baw200.connect([(A2SantsRegistrationPostABCSyN, Subj2AtlasTransforms_DS,
                     [('composite_transform', 'SubjectToAtlasWarped.@composite_transform'),
                      ('inverse_composite_transform', 'SubjectToAtlasWarped.@inverse_composite_transform')])])
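
    # Illustrative effect of the regexp substitutions above (paths are
    # hypothetical): '.../SubjectToAtlasWarped/xyz_Composite.h5' lands at
    # '.../SubjectToAtlasWarped/<sessionid>/xyz_Composite.h5', and any
    # '_LinearSubjectToAtlasANTsApplyTransforms_<n>/' path component
    # collapses to just '<sessionid>/'.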
    # baw200.connect([(MultiLabelSubjectToAtlasANTsApplyTransforms, Subj2Atlas_DS, [('output_image', 'SubjectToAtlasWarped.@multilabel_output_images')])])

    if master_config['plugin_name'].startswith(
            'SGE'):  # for some nodes, the qsub call needs to be modified on the cluster
        A2SantsRegistrationPostABCSyN.plugin_args = {'template': master_config['plugin_args']['template'],
                                                     'overwrite': True,
                                                     'qsub_args': modify_qsub_args(master_config['queue'], 8, 8, 24)}
        SnapShotWriter.plugin_args = {'template': master_config['plugin_args']['template'], 'overwrite': True,
                                      'qsub_args': modify_qsub_args(master_config['queue'], 1, 1, 1)}
        LinearSubjectToAtlasANTsApplyTransforms.plugin_args = {'template': master_config['plugin_args']['template'],
                                                               'overwrite': True,
                                                               'qsub_args': modify_qsub_args(master_config['queue'], 1,
                                                                                             1, 1)}
        MultiLabelSubjectToAtlasANTsApplyTransforms.plugin_args = {'template': master_config['plugin_args']['template'],
                                                                   'overwrite': True,
                                                                   'qsub_args': modify_qsub_args(master_config['queue'],
                                                                                                 1, 1, 1)}

    return baw200
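
# A hedged sketch of what a modify_qsub_args-style helper could look like.
# The real helper is imported elsewhere in this module; its signature and
# the exact qsub flags it emits are assumptions here, named *_sketch to
# make that explicit.
def modify_qsub_args_sketch(queue, memory_gb, min_threads, max_threads):
    """Build an SGE qsub argument string sized for one node's needs."""
    return ('-S /bin/bash -cwd -q %s -pe smp %d-%d -l h_vmem=%dG'
            % (queue, min_threads, max_threads, memory_gb))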
Example 23
def CreateBRAINSCutWorkflow(projectid, subjectid, sessionid, CLUSTER_QUEUE,
                            CLUSTER_QUEUE_LONG, WFName, t1Only):
    cutWF = pe.Workflow(
        name=GenerateWFName(projectid, subjectid, sessionid, WFName))

    inputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'T1Volume', 'T2Volume', 'posteriorDictionary', 'RegistrationROI',
        'atlasToSubjectTransform', 'template_t1', 'rho', 'phi', 'theta',
        'l_caudate_ProbabilityMap', 'r_caudate_ProbabilityMap',
        'l_hippocampus_ProbabilityMap', 'r_hippocampus_ProbabilityMap',
        'l_putamen_ProbabilityMap', 'r_putamen_ProbabilityMap',
        'l_thalamus_ProbabilityMap', 'r_thalamus_ProbabilityMap',
        'l_accumben_ProbabilityMap', 'r_accumben_ProbabilityMap',
        'l_globus_ProbabilityMap', 'r_globus_ProbabilityMap',
        'trainModelFile_txtD0060NT0060_gz'
    ]),
                         name='inputspec')

    # Denoised T1 input for BRAINSCut
    denoisingTimeStep = 0.0625
    denoisingConductance = 0.4
    denoisingIteration = 5

    DenoisedT1 = pe.Node(interface=GradientAnisotropicDiffusionImageFilter(),
                         name="DenoisedT1")
    DenoisedT1.inputs.timeStep = denoisingTimeStep
    DenoisedT1.inputs.conductance = denoisingConductance
    DenoisedT1.inputs.numberOfIterations = denoisingIteration
    DenoisedT1.inputs.outputVolume = "DenoisedT1.nii.gz"

    cutWF.connect(inputsSpec, 'T1Volume', DenoisedT1, 'inputVolume')

    # Gradient Anisotropic Diffusion T1 images for BRAINSCut
    GADT1 = pe.Node(interface=GradientAnisotropicDiffusionImageFilter(),
                    name="GADT1")
    GADT1.inputs.timeStep = 0.025
    GADT1.inputs.conductance = 1
    GADT1.inputs.numberOfIterations = 5
    GADT1.inputs.outputVolume = "GADT1.nii.gz"

    cutWF.connect(inputsSpec, 'T1Volume', GADT1, 'inputVolume')

    if not t1Only:
        # Denoised T2 input for BRAINSCut
        DenoisedT2 = pe.Node(
            interface=GradientAnisotropicDiffusionImageFilter(),
            name="DenoisedT2")
        DenoisedT2.inputs.timeStep = denoisingTimeStep
        DenoisedT2.inputs.conductance = denoisingConductance
        DenoisedT2.inputs.numberOfIterations = denoisingIteration
        DenoisedT2.inputs.outputVolume = "DenoisedT2.nii.gz"

        cutWF.connect(inputsSpec, 'T2Volume', DenoisedT2, 'inputVolume')

        # Gradient Anisotropic Diffusion T2 images for BRAINSCut
        GADT2 = pe.Node(interface=GradientAnisotropicDiffusionImageFilter(),
                        name="GADT2")
        GADT2.inputs.timeStep = 0.025
        GADT2.inputs.conductance = 1
        GADT2.inputs.numberOfIterations = 5
        GADT2.inputs.outputVolume = "GADT2.nii.gz"
        cutWF.connect(inputsSpec, 'T2Volume', GADT2, 'inputVolume')

        # Sum the gradient images for BRAINSCut
        SGI = pe.Node(interface=GenerateSummedGradientImage(), name="SGI")
        SGI.inputs.outputFileName = "SummedGradImage.nii.gz"

        cutWF.connect(GADT1, 'outputVolume', SGI, 'inputVolume1')
        cutWF.connect(GADT2, 'outputVolume', SGI, 'inputVolume2')

    # BRAINSCut
    RF12BC = pe.Node(interface=RF12BRAINSCutWrapper(),
                     name="IQR_NORM_SEP_RF12_BRAINSCut")
    # HACK
    # import os
    # RF12BC.inputs.environ = dict(os.environ)
    # many_cpu_RF12BC_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE,4,2,2), 'overwrite': True}
    # RF12BC.plugin_args = many_cpu_RF12BC_options_dictionary
    # END HACK
    RF12BC.inputs.trainingVectorFilename = "trainingVectorFilename.txt"
    RF12BC.inputs.xmlFilename = "BRAINSCutSegmentationDefinition.xml"
    RF12BC.inputs.vectorNormalization = "IQR"

    RF12BC.inputs.outputBinaryLeftCaudate = 'subjectANNLabel_l_caudate.nii.gz'
    RF12BC.inputs.outputBinaryRightCaudate = 'subjectANNLabel_r_caudate.nii.gz'
    RF12BC.inputs.outputBinaryLeftHippocampus = 'subjectANNLabel_l_hippocampus.nii.gz'
    RF12BC.inputs.outputBinaryRightHippocampus = 'subjectANNLabel_r_hippocampus.nii.gz'
    RF12BC.inputs.outputBinaryLeftPutamen = 'subjectANNLabel_l_putamen.nii.gz'
    RF12BC.inputs.outputBinaryRightPutamen = 'subjectANNLabel_r_putamen.nii.gz'
    RF12BC.inputs.outputBinaryLeftThalamus = 'subjectANNLabel_l_thalamus.nii.gz'
    RF12BC.inputs.outputBinaryRightThalamus = 'subjectANNLabel_r_thalamus.nii.gz'
    RF12BC.inputs.outputBinaryLeftAccumben = 'subjectANNLabel_l_accumben.nii.gz'
    RF12BC.inputs.outputBinaryRightAccumben = 'subjectANNLabel_r_accumben.nii.gz'
    RF12BC.inputs.outputBinaryLeftGlobus = 'subjectANNLabel_l_globus.nii.gz'
    RF12BC.inputs.outputBinaryRightGlobus = 'subjectANNLabel_r_globus.nii.gz'

    cutWF.connect(DenoisedT1, 'outputVolume', RF12BC, 'inputSubjectT1Filename')

    from PipeLineFunctionHelpers import MakeInclusionMaskForGMStructures
    makeCandidateRegionNode = pe.Node(
        interface=Function(['posteriorDictionary', 'candidateRegionFileName'],
                           ['outputCandidateRegionFileName'],
                           function=MakeInclusionMaskForGMStructures),
        name="MakeCandidateRegion")
    makeCandidateRegionNode.inputs.candidateRegionFileName = "RF12_CandidateRegionMask.nii.gz"
    cutWF.connect(inputsSpec, 'posteriorDictionary', makeCandidateRegionNode,
                  'posteriorDictionary')
    cutWF.connect(makeCandidateRegionNode, 'outputCandidateRegionFileName',
                  RF12BC, 'candidateRegion')

    cutWF.connect([(
        inputsSpec,
        RF12BC,
        [
            ('template_t1', 'inputTemplateT1'),
            # ('template_brain', 'inputTemplateRegistrationROIFilename'),
            ('rho', 'inputTemplateRhoFilename'),
            ('phi', 'inputTemplatePhiFilename'),
            ('theta', 'inputTemplateThetaFilename'),
            ('l_caudate_ProbabilityMap', 'probabilityMapsLeftCaudate'),
            ('r_caudate_ProbabilityMap', 'probabilityMapsRightCaudate'),
            ('l_hippocampus_ProbabilityMap', 'probabilityMapsLeftHippocampus'),
            ('r_hippocampus_ProbabilityMap',
             'probabilityMapsRightHippocampus'),
            ('l_putamen_ProbabilityMap', 'probabilityMapsLeftPutamen'),
            ('r_putamen_ProbabilityMap', 'probabilityMapsRightPutamen'),
            ('l_thalamus_ProbabilityMap', 'probabilityMapsLeftThalamus'),
            ('r_thalamus_ProbabilityMap', 'probabilityMapsRightThalamus'),
            ('l_accumben_ProbabilityMap', 'probabilityMapsLeftAccumben'),
            ('r_accumben_ProbabilityMap', 'probabilityMapsRightAccumben'),
            ('l_globus_ProbabilityMap', 'probabilityMapsLeftGlobus'),
            ('r_globus_ProbabilityMap', 'probabilityMapsRightGlobus'),
        ])])

    # TODO:
    if not t1Only:
        cutWF.connect(DenoisedT2, 'outputVolume', RF12BC,
                      'inputSubjectT2Filename')
        # cutWF.connect(inputsSpec,'TotalGM',RF12BC,'inputSubjectTotalGMFilename')
        # cutWF.connect(inputsSpec,'RegistrationROI',RF12BC,'inputSubjectRegistrationROIFilename')
        # Error cutWF.connect(SGI,'outputVolume',RF12BC,'inputSubjectGadSGFilename')
        cutWF.connect(SGI, 'outputFileName', RF12BC,
                      'inputSubjectGadSGFilename')
        cutWF.connect(inputsSpec, 'trainModelFile_txtD0060NT0060_gz', RF12BC,
                      'modelFilename')
    else:
        ### TODO:  Replace with proper atlas file name in the future!!! This is a HACK
        ### to avoid changing the hash keys of the input files from the atlas.
        def ChangeModelPathDirectory(multiModalFileName):
            return multiModalFileName.replace('modelFiles', 'T1OnlyModels')

        cutWF.connect([(inputsSpec, RF12BC,
                        [(('trainModelFile_txtD0060NT0060_gz',
                           ChangeModelPathDirectory), 'modelFilename')])])

    ## 'atlasToSubjectTransform' is a list, so we index into it below rather than connecting it directly: cutWF.connect(inputsSpec,'atlasToSubjectTransform',RF12BC,'deformationFromTemplateToSubject')
    cutWF.connect([
        (inputsSpec, RF12BC, [(('atlasToSubjectTransform', getListIndex, 0),
                               'deformationFromTemplateToSubject')]),
    ])
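
    # getListIndex is imported elsewhere in this module; it is assumed to
    # behave like "lambda lst, idx: lst[idx]", so the connection above
    # hands only the first transform in 'atlasToSubjectTransform' to
    # BRAINSCut.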

    mergeAllLabels = pe.Node(interface=Merge(12), name="labelMergeNode")
    # NOTE: Ordering is important
    cutWF.connect(RF12BC, 'outputBinaryLeftCaudate', mergeAllLabels, 'in1')
    cutWF.connect(RF12BC, 'outputBinaryRightCaudate', mergeAllLabels, 'in2')
    cutWF.connect(RF12BC, 'outputBinaryLeftPutamen', mergeAllLabels, 'in3')
    cutWF.connect(RF12BC, 'outputBinaryRightPutamen', mergeAllLabels, 'in4')
    cutWF.connect(RF12BC, 'outputBinaryLeftHippocampus', mergeAllLabels, 'in5')
    cutWF.connect(RF12BC, 'outputBinaryRightHippocampus', mergeAllLabels,
                  'in6')
    cutWF.connect(RF12BC, 'outputBinaryLeftThalamus', mergeAllLabels, 'in7')
    cutWF.connect(RF12BC, 'outputBinaryRightThalamus', mergeAllLabels,
                  'in8')  # HACK:  CHECK ORDERING
    cutWF.connect(RF12BC, 'outputBinaryLeftAccumben', mergeAllLabels, 'in9')
    cutWF.connect(RF12BC, 'outputBinaryRightAccumben', mergeAllLabels, 'in10')
    cutWF.connect(RF12BC, 'outputBinaryLeftGlobus', mergeAllLabels, 'in11')
    cutWF.connect(RF12BC, 'outputBinaryRightGlobus', mergeAllLabels, 'in12')

    computeOneLabelMap = pe.Node(interface=Function([
        'listOfImages', 'LabelImageName', 'CSVFileName', 'posteriorDictionary',
        'projectid', 'subjectid', 'sessionid'
    ], [
        'outputLabelImageName', 'outputCSVFileName', 'CleanedLeftCaudate',
        'CleanedRightCaudate', 'CleanedLeftHippocampus',
        'CleanedRightHippocampus', 'CleanedLeftPutamen', 'CleanedRightPutamen',
        'CleanedLeftThalamus', 'CleanedRightThalamus', 'CleanedLeftAccumben',
        'CleanedRightAccumben', 'CleanedLeftGlobus', 'CleanedRightGlobus'
    ],
                                                    function=CreateLabelMap),
                                 name="ComputeOneLabelMap")
    computeOneLabelMap.inputs.projectid = projectid
    computeOneLabelMap.inputs.subjectid = subjectid
    computeOneLabelMap.inputs.sessionid = sessionid
    computeOneLabelMap.inputs.LabelImageName = "allLabels.nii.gz"
    computeOneLabelMap.inputs.CSVFileName = "allLabels_seg.csv"
    cutWF.connect(inputsSpec, 'posteriorDictionary', computeOneLabelMap,
                  'posteriorDictionary')
    cutWF.connect(mergeAllLabels, 'out', computeOneLabelMap, 'listOfImages')

    outputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'outputBinaryLeftCaudate', 'outputBinaryRightCaudate',
        'outputBinaryLeftHippocampus', 'outputBinaryRightHippocampus',
        'outputBinaryLeftPutamen', 'outputBinaryRightPutamen',
        'outputBinaryLeftThalamus', 'outputBinaryRightThalamus',
        'outputBinaryLeftAccumben', 'outputBinaryRightAccumben',
        'outputBinaryLeftGlobus', 'outputBinaryRightGlobus',
        'outputLabelImageName', 'outputCSVFileName', 'xmlFilename'
    ]),
                          name='outputspec')

    cutWF.connect(computeOneLabelMap, 'outputLabelImageName', outputsSpec,
                  'outputLabelImageName')
    cutWF.connect(computeOneLabelMap, 'outputCSVFileName', outputsSpec,
                  'outputCSVFileName')
    cutWF.connect(computeOneLabelMap, 'CleanedLeftCaudate', outputsSpec,
                  'outputBinaryLeftCaudate')
    cutWF.connect(computeOneLabelMap, 'CleanedRightCaudate', outputsSpec,
                  'outputBinaryRightCaudate')
    cutWF.connect(computeOneLabelMap, 'CleanedLeftHippocampus', outputsSpec,
                  'outputBinaryLeftHippocampus')
    cutWF.connect(computeOneLabelMap, 'CleanedRightHippocampus', outputsSpec,
                  'outputBinaryRightHippocampus')
    cutWF.connect(computeOneLabelMap, 'CleanedLeftPutamen', outputsSpec,
                  'outputBinaryLeftPutamen')
    cutWF.connect(computeOneLabelMap, 'CleanedRightPutamen', outputsSpec,
                  'outputBinaryRightPutamen')
    cutWF.connect(computeOneLabelMap, 'CleanedLeftThalamus', outputsSpec,
                  'outputBinaryLeftThalamus')
    cutWF.connect(computeOneLabelMap, 'CleanedRightThalamus', outputsSpec,
                  'outputBinaryRightThalamus')
    cutWF.connect(computeOneLabelMap, 'CleanedLeftAccumben', outputsSpec,
                  'outputBinaryLeftAccumben')
    cutWF.connect(computeOneLabelMap, 'CleanedRightAccumben', outputsSpec,
                  'outputBinaryRightAccumben')
    cutWF.connect(computeOneLabelMap, 'CleanedLeftGlobus', outputsSpec,
                  'outputBinaryLeftGlobus')
    cutWF.connect(computeOneLabelMap, 'CleanedRightGlobus', outputsSpec,
                  'outputBinaryRightGlobus')

    cutWF.connect(RF12BC, 'xmlFilename', outputsSpec, 'xmlFilename')

    return cutWF
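
# A minimal, self-contained sketch of the nipype Function-node pattern used
# by MakeCandidateRegion and ComputeOneLabelMap above. The node and field
# names here are illustrative only.
import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function

def _add(a, b):
    return a + b

adder = pe.Node(Function(input_names=['a', 'b'],
                         output_names=['total'],
                         function=_add),
                name='adder')
adder.inputs.a = 1
adder.inputs.b = 2
# adder.run().outputs.total == 3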
Example 24
def CreateANTSRegistrationWorkflow(WFname,
                                   CLUSTER_QUEUE,
                                   CLUSTER_QUEUE_LONG,
                                   NumberOfThreads=-1):
    ANTSWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'fixedVolumesList', 'movingVolumesList', 'initial_moving_transform',
        'fixedBinaryVolume', 'movingBinaryVolume', 'warpFixedVolumesList'
    ]),
                         name='inputspec')

    print("""Run ANTS Registration""")

    BFitAtlasToSubject = pe.Node(interface=BRAINSFit(), name="bfA2S")
    BF_cpu_sge_options_dictionary = {
        'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 4, 4, 24),
        'overwrite': True
    }
    BFitAtlasToSubject.plugin_args = BF_cpu_sge_options_dictionary
    BFitAtlasToSubject.inputs.costMetric = "MMI"
    BFitAtlasToSubject.inputs.numberOfSamples = 1000000
    BFitAtlasToSubject.inputs.numberOfIterations = [1500]
    BFitAtlasToSubject.inputs.numberOfHistogramBins = 50
    BFitAtlasToSubject.inputs.maximumStepLength = 0.2
    BFitAtlasToSubject.inputs.minimumStepLength = [0.000005]
    BFitAtlasToSubject.inputs.useAffine = True  # Using initial transform from BRAINSABC
    BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter = 65
    BFitAtlasToSubject.inputs.outputVolume = "Trial_Initializer_Output.nii.gz"
    # Bug in BRAINSFit PREDICTIMG-1379 BFitAtlasToSubject.inputs.outputFixedVolumeROI="FixedROI.nii.gz"
    # Bug in BRAINSFit PREDICTIMG-1379 BFitAtlasToSubject.inputs.outputMovingVolumeROI="MovingROI.nii.gz"
    BFitAtlasToSubject.inputs.outputTransform = "Trial_Initializer_Output.h5"
    BFitAtlasToSubject.inputs.maskProcessingMode = "ROIAUTO"
    BFitAtlasToSubject.inputs.ROIAutoDilateSize = 4
    # BFitAtlasToSubject.inputs.maskProcessingMode="ROI"
    # ANTSWF.connect(inputsSpec,'fixedBinaryVolume',BFitAtlasToSubject,'fixedBinaryVolume')
    # ANTSWF.connect(inputsSpec,'movingBinaryVolume',BFitAtlasToSubject,'movingBinaryVolume')
    ANTSWF.connect(inputsSpec, 'fixedVolumesList', BFitAtlasToSubject,
                   'fixedVolume')
    ANTSWF.connect(inputsSpec, 'movingVolumesList', BFitAtlasToSubject,
                   'movingVolume')
    ANTSWF.connect(inputsSpec, 'initial_moving_transform', BFitAtlasToSubject,
                   'initialTransform')

    ComputeAtlasToSubjectTransform = pe.Node(interface=antsRegistration(),
                                             name="antsA2S")
    many_cpu_sge_options_dictionary = {
        'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 8, 8, 24),
        'overwrite': True
    }
    ComputeAtlasToSubjectTransform.plugin_args = many_cpu_sge_options_dictionary

    ComputeAtlasToSubjectTransform.inputs.dimension = 3
    ComputeAtlasToSubjectTransform.inputs.metric = 'CC'  # One of a family of metrics: CC, MeanSquares, Demons, GC, MI, Mattes
    ComputeAtlasToSubjectTransform.inputs.transform = 'SyN[0.25,3.0,0.0]'
    ComputeAtlasToSubjectTransform.inputs.number_of_iterations = [250, 100, 20]
    ComputeAtlasToSubjectTransform.inputs.convergence_threshold = 1e-7
    ComputeAtlasToSubjectTransform.inputs.smoothing_sigmas = [0, 0, 0]
    ComputeAtlasToSubjectTransform.inputs.sigma_units = ["vox"]
    ComputeAtlasToSubjectTransform.inputs.shrink_factors = [3, 2, 1]
    ComputeAtlasToSubjectTransform.inputs.use_estimate_learning_rate_once = True
    ComputeAtlasToSubjectTransform.inputs.use_histogram_matching = True
    ComputeAtlasToSubjectTransform.inputs.invert_initial_moving_transform = False
    ComputeAtlasToSubjectTransform.inputs.output_transform_prefix = 'antsRegPrefix_'
    ComputeAtlasToSubjectTransform.inputs.output_warped_image = 'moving_to_fixed.nii.gz'
    ComputeAtlasToSubjectTransform.inputs.output_inverse_warped_image = 'fixed_to_moving.nii.gz'
    # ComputeAtlasToSubjectTransform.inputs.num_threads=-1
    # if 'NSLOTS' in os.environ:
    #    ComputeAtlasToSubjectTransform.inputs.num_threads=int(os.environ['NSLOTS'])
    # else:
    #    ComputeAtlasToSubjectTransform.inputs.num_threads=NumberOfThreads
    # ComputeAtlasToSubjectTransform.inputs.fixedMask=SUBJ_A_small_T2_mask.nii.gz
    # ComputeAtlasToSubjectTransform.inputs.movingMask=SUBJ_B_small_T2_mask.nii.gz

    ANTSWF.connect(inputsSpec, 'fixedVolumesList',
                   ComputeAtlasToSubjectTransform, "fixed_image")
    ANTSWF.connect(inputsSpec, 'movingVolumesList',
                   ComputeAtlasToSubjectTransform, "moving_image")
    ANTSWF.connect(BFitAtlasToSubject, 'outputTransform',
                   ComputeAtlasToSubjectTransform, 'initial_moving_transform')

    if True:  # debug block for checking the composed warp
        mergeAffineWarp = pe.Node(interface=Merge(2), name="Merge_AffineWarp")
        ANTSWF.connect(ComputeAtlasToSubjectTransform, 'warp_transform',
                       mergeAffineWarp, 'in1')
        ANTSWF.connect(BFitAtlasToSubject, 'outputTransform', mergeAffineWarp,
                       'in2')

        from nipype.interfaces.ants import WarpImageMultiTransform
        debugWarpTest = pe.Node(interface=WarpImageMultiTransform(),
                                name="dbgWarpTest")
        # 'output_image' is not allowed as an input: debugWarpTest.inputs.output_image = 'debugWarpedMovingToFixed.nii.gz'

        ANTSWF.connect(inputsSpec, 'fixedVolumesList', debugWarpTest,
                       'reference_image')
        ANTSWF.connect(inputsSpec, 'movingVolumesList', debugWarpTest,
                       'moving_image')
        ANTSWF.connect(mergeAffineWarp, 'out', debugWarpTest,
                       'transformation_series')
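
        # WarpImageMultiTransform applies 'transformation_series' in list
        # order, so the nonlinear warp (in1) is listed ahead of the affine
        # (in2) -- the ordering ANTS expects when resampling moving-to-fixed.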

    #############
    outputsSpec = pe.Node(interface=IdentityInterface(fields=[
        'warped_image', 'inverse_warped_image', 'warp_transform',
        'inverse_warp_transform', 'affine_transform'
    ]),
                          name='outputspec')

    ANTSWF.connect(ComputeAtlasToSubjectTransform, 'warped_image', outputsSpec,
                   'warped_image')
    ANTSWF.connect(ComputeAtlasToSubjectTransform, 'inverse_warped_image',
                   outputsSpec, 'inverse_warped_image')
    ANTSWF.connect(ComputeAtlasToSubjectTransform, 'warp_transform',
                   outputsSpec, 'warp_transform')
    ANTSWF.connect(ComputeAtlasToSubjectTransform, 'inverse_warp_transform',
                   outputsSpec, 'inverse_warp_transform')
    ANTSWF.connect(BFitAtlasToSubject, 'outputTransform', outputsSpec,
                   'affine_transform')

    return ANTSWF
def create_workflow(files,
                    target_file,
                    subject_id,
                    TR,
                    slice_times,
                    norm_threshold=1,
                    num_components=5,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    subjects_dir=None,
                    sink_directory=os.getcwd(),
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Rename files in case they are named identically
    name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
                          iterfield=['in_file', 'run'],
                          name='rename')
    name_unique.inputs.keep_ext = True
    name_unique.inputs.run = list(range(1, len(files) + 1))
    name_unique.inputs.in_file = files

    realign = Node(interface=spm.Realign(), name="realign")
    realign.inputs.jobtype = 'estwrite'

    num_slices = len(slice_times)
    slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing")
    slice_timing.inputs.num_slices = num_slices
    slice_timing.inputs.time_repetition = TR
    slice_timing.inputs.time_acquisition = TR - TR / float(num_slices)
    slice_timing.inputs.slice_order = (np.argsort(slice_times) + 1).tolist()
    slice_timing.inputs.ref_slice = int(num_slices / 2)
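
    # Worked example with illustrative values (not from this dataset):
    # TR = 2.0 s and 30 slices give time_acquisition = 2.0 - 2.0/30
    # ~= 1.933 s and ref_slice = 15, i.e. the middle slice.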

    # Compute TSNR on realigned data, regressing out polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file')

    # Compute the median image across runs
    calc_median = Node(Function(input_names=['in_files'],
                                output_names=['median_file'],
                                function=median,
                                imports=imports),
                       name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
    """Segment and Register
    """

    registration = create_reg_workflow(name='registration')
    wf.connect(calc_median, 'median_file', registration,
               'inputspec.mean_image')
    registration.inputs.inputspec.subject_id = subject_id
    registration.inputs.inputspec.subjects_dir = subjects_dir
    registration.inputs.inputspec.target_image = target_file
    """Use :class:`nipype.algorithms.rapidart` to determine which of the
    images in the functional series are outliers based on deviations in
    intensity or movement.
    """

    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, True]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = norm_threshold
    art.inputs.zintensity_threshold = 9
    art.inputs.mask_type = 'spm_global'
    art.inputs.parameter_source = 'SPM'
    """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
    to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
    voxel sizes.
    """

    wf.connect([
        (name_unique, realign, [('out_file', 'in_files')]),
        (realign, slice_timing, [('realigned_files', 'in_files')]),
        (slice_timing, art, [('timecorrected_files', 'realigned_files')]),
        (realign, art, [('realignment_parameters', 'realignment_parameters')]),
    ])

    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(
            np.array(filename_to_list(files))[idx].tolist())
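
    # e.g. selectindex(['a.nii', 'b.nii', 'c.nii'], [0, 2]) -> ['a.nii', 'c.nii'];
    # a single surviving entry would come back as a bare filename string.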

    mask = Node(fsl.BET(), name='getmask')
    mask.inputs.mask = True
    wf.connect(calc_median, 'median_file', mask, 'in_file')

    # get segmentation in normalized functional space

    def merge_files(in1, in2):
        out_files = filename_to_list(in1)
        out_files.extend(filename_to_list(in2))
        return out_files
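
    # e.g. merge_files('a.nii', ['b.nii', 'c.nii']) -> ['a.nii', 'b.nii', 'c.nii']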

    # filter some noise

    # Compute motion regressors
    motreg = Node(Function(
        input_names=['motion_params', 'order', 'derivatives'],
        output_names=['out_files'],
        function=motion_regressors,
        imports=imports),
                  name='getmotionregress')
    wf.connect(realign, 'realignment_parameters', motreg, 'motion_params')

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=build_filter1,
        imports=imports),
                         name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')

    filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii',
                              out_pf_name='pF_mcart.nii',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filtermotion')

    wf.connect(slice_timing, 'timecorrected_files', filter1, 'in_file')
    wf.connect(slice_timing, ('timecorrected_files', rename, '_filtermotart'),
               filter1, 'out_res_name')
    wf.connect(createfilter1, 'out_files', filter1, 'design')

    createfilter2 = MapNode(Function(input_names=[
        'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
    ],
                                     output_names=['out_files'],
                                     function=extract_noise_components,
                                     imports=imports),
                            iterfield=['realigned_file', 'extra_regressors'],
                            name='makecompcorrfilter')
    createfilter2.inputs.num_components = num_components

    wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(registration,
               ('outputspec.segmentation_files', selectindex, [0, 2]),
               createfilter2, 'mask_file')

    filter2 = MapNode(fsl.GLM(out_f_name='F.nii',
                              out_pf_name='pF.nii',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filter_noise_nosmooth')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2,
               'out_res_name')
    wf.connect(createfilter2, 'out_files', filter2, 'design')
    wf.connect(mask, 'mask_file', filter2, 'mask')

    bandpass = Node(Function(
        input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'],
        output_names=['out_files'],
        function=bandpass_filter,
        imports=imports),
                    name='bandpass_unsmooth')
    bandpass.inputs.fs = 1. / TR
    bandpass.inputs.highpass_freq = highpass_freq
    bandpass.inputs.lowpass_freq = lowpass_freq
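
    # e.g. with TR = 2.0 s the sampling frequency is fs = 0.5 Hz; the -1
    # defaults presumably tell bandpass_filter (defined elsewhere) to skip
    # the corresponding cutoff.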
    wf.connect(filter2, 'out_res', bandpass, 'files')
    """Smooth the functional data using
    :class:`nipype.interfaces.spm.Smooth`.
    """

    smooth = Node(interface=spm.Smooth(), name="smooth")
    smooth.inputs.fwhm = vol_fwhm

    wf.connect(bandpass, 'out_files', smooth, 'in_files')

    collector = Node(Merge(2), name='collect_streams')
    wf.connect(smooth, 'smoothed_files', collector, 'in1')
    wf.connect(bandpass, 'out_files', collector, 'in2')
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = MapNode(ants.ApplyTransforms(),
                      iterfield=['input_image'],
                      name='warpall')
    warpall.inputs.input_image_type = 3
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = 'file'
    warpall.inputs.reference_image = target_file
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 1

    # transform to target
    wf.connect(collector, 'out', warpall, 'input_image')
    wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')

    mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')

    wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')

    maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
    wf.connect(warpall, 'output_image', maskts, 'in_file')
    wf.connect(mask_target, 'out_file', maskts, 'mask_file')

    # map to surface
    # extract aparc+aseg ROIs
    # extract subcortical ROIs
    # extract target space ROIs
    # combine subcortical and cortical rois into a single cifti file

    #######
    # Convert aparc to subject functional space

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(
        freesurfer.SegStats(default_color_table=True),
        iterfield=['in_file', 'summary_file', 'avgwf_txt_file'],
        name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) +
                                     [17, 18, 26, 47] + list(range(49, 55)) +
                                     [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))

    wf.connect(registration, 'outputspec.aparc', sampleaparc,
               'segmentation_file')
    wf.connect(collector, 'out', sampleaparc, 'in_file')

    def get_names(files, suffix):
        """Generate appropriate names for output files
        """
        from nipype.utils.filemanip import (split_filename, filename_to_list,
                                            list_to_filename)
        out_names = []
        for filename in files:
            _, name, _ = split_filename(filename)
            out_names.append(name + suffix)
        return list_to_filename(out_names)
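
    # e.g. get_names(['/data/sub1.nii.gz'], '_avgwf.txt') -> 'sub1_avgwf.txt'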

    wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc,
               'avgwf_txt_file')
    wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc,
               'summary_file')

    # Sample the time series onto the surface of the target surface. Performs
    # sampling into left and right hemisphere
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    samplerlh.inputs.smooth_surf = surf_fwhm
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = subjects_dir

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(collector, 'out', samplerlh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(collector, 'out', samplerrh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(Function(
        input_names=['timeseries_file', 'label_file', 'indices'],
        output_names=['out_file'],
        function=extract_subrois,
        imports=imports),
                     iterfield=['timeseries_file'],
                     name='getsubcortts')
    ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
        list(range(49, 55)) + [58]
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm_v2.nii.gz'))
    wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')

    ######

    substitutions = [('_target_subject_', ''),
                     ('_filtermotart_cleaned_bp_trans_masked', ''),
                     ('_filtermotart_cleaned_bp', '')]
    regex_subs = [
        ('_ts_masker.*/sar', '/smooth/'),
        ('_ts_masker.*/ar', '/unsmooth/'),
        ('_combiner.*/sar', '/smooth/'),
        ('_combiner.*/ar', '/unsmooth/'),
        ('_aparc_ts.*/sar', '/smooth/'),
        ('_aparc_ts.*/ar', '/unsmooth/'),
        ('_getsubcortts.*/sar', '/smooth/'),
        ('_getsubcortts.*/ar', '/unsmooth/'),
        ('series/sar', 'series/smooth/'),
        ('series/ar', 'series/unsmooth/'),
        ('_inverse_transform./', ''),
    ]
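
    # Illustrative effect (hypothetical sink path): a segment like
    # '_ts_masker0/sarrest.nii' is rewritten to '/smooth/rest.nii',
    # routing smoothed and unsmoothed streams into separate folders.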
    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = substitutions
    datasink.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(realign, 'realignment_parameters', datasink,
               'resting.qa.motion')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.segmentation_files', datasink,
               'resting.mask_files')
    wf.connect(registration, 'outputspec.anat2target', datasink,
               'resting.qa.ants')
    wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
    wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
    wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
    wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
    wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
    wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
    wf.connect(bandpass, 'out_files', datasink,
               'resting.timeseries.@bandpassed')
    wf.connect(smooth, 'smoothed_files', datasink,
               'resting.timeseries.@smoothed')
    wf.connect(createfilter1, 'out_files', datasink,
               'resting.regress.@regressors')
    wf.connect(createfilter2, 'out_files', datasink,
               'resting.regress.@compcorr')
    wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
    wf.connect(sampleaparc, 'summary_file', datasink,
               'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file', datasink,
               'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file', datasink,
               'resting.parcellations.grayo.@subcortical')

    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = substitutions
    datasink2.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file', datasink2,
               'resting.parcellations.grayo.@surface')
    return wf
Example 26
def spm_mrpet_preprocessing(wf_name="spm_mrpet_preproc"):
    """ Run the PET pre-processing workflow against the
    gunzip_pet.in_file files.
    It depends on the anat_preproc_workflow, so if this
    has not been run, this function will run it too.

    # TODO: organize the anat2pet hack/condition somehow:
    If anat2pet:
    - SPM12 Coregister T1 and tissues to PET
    - PVC the PET image in PET space
    - SPM12 Warp PET to MNI
    else:
    - SPM12 Coregister PET to T1
    - PVC the PET image in anatomical space
    - SPM12 Warp PET in anatomical space to MNI through the
    `anat_to_mni_warp`.

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pet_input.in_file: traits.File
        The raw NIFTI_GZ PET image file

    pet_input.anat: traits.File
        Path to the high-contrast anatomical image.
        Reference file of the warp_field, i.e., the
        anatomical image in its native space.

    pet_input.anat_to_mni_warp: traits.File
        The warp field from the transformation of the
        anatomical image to the standard MNI space.

    pet_input.atlas_anat: traits.File
        The atlas file in anatomical space.

    pet_input.tissues: list of traits.File
        List of tissues files from the New Segment process.
        At least the first 3 tissues must be present.

    Nipype outputs
    --------------
    pet_output.pvc_out: existing file
        The results of the PVC process

    pet_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pet_output.coreg_ref: existing file
        The coregistered reference image to PET space.

    pet_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files

    pet_output.pvc_warped: existing file
        Results from PETPVC normalized to MNI.
        The result of every internal pre-processing step
        is normalized to MNI here.

    pet_output.warp_field: existing files
        Spatial normalization parameters .mat files

    pet_output.gm_norm: existing file
        The output of the grey matter intensity
        normalization process.
        This is the last step in the PET signal correction,
        before registration.

    pet_output.atlas_pet: existing file
        Atlas image warped to PET space.
        If the `atlas_file` option is an existing file and
        `normalize_atlas` is True.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields  = ["in_file",
                  "anat",
                  "anat_to_mni_warp",
                  "tissues",]

    out_fields = ["brain_mask",
                  "coreg_others",
                  "coreg_ref",
                  "pvc_warped",
                  "pet_warped", # 'pet_warped' is a dummy entry to keep the fields pattern.
                  "warp_field",
                  "pvc_out",
                  "pvc_mask",
                  "gm_norm",]

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields  += ["atlas_anat"]
        out_fields += ["atlas_pet" ]

    # input
    pet_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                           name="pet_input")

    # workflow to perform partial volume correction
    petpvc    = petpvc_workflow(wf_name="petpvc")

    merge_list = setup_node(Merge(4), name='merge_for_unzip')
    gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    warp_pet = setup_node(spm_normalize(), name="warp_pet")

    tpm_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="tpm_bbox")
    tpm_bbox.inputs.in_file = spm_tpm_priors_path()

    # output
    pet_output = setup_node(IdentityInterface(fields=out_fields), name="pet_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # check how to perform the registration, to decide how to build the pipeline
    anat2pet = get_config_setting('registration.anat2pet', False)
    if anat2pet:
        wf.connect([
                    # inputs
                    (pet_input, petpvc,     [("in_file", "pvc_input.in_file"),
                                             ("anat",    "pvc_input.reference_file"),
                                             ("tissues", "pvc_input.tissues")]),

                    # gunzip some files for SPM Normalize
                    (petpvc,    merge_list, [("pvc_output.pvc_out",    "in1"),
                                             ("pvc_output.brain_mask", "in2"),
                                             ("pvc_output.gm_norm",    "in3")]),
                    (pet_input, merge_list, [("in_file",               "in4")]),

                    (merge_list, gunzipper, [("out", "in_file")]),

                    # warp the PET PVCed to MNI
                    (petpvc,    warp_pet,   [("pvc_output.coreg_ref", "image_to_align")]),
                    (gunzipper, warp_pet,   [("out_file",             "apply_to_files")]),
                    (tpm_bbox,  warp_pet,   [("bbox",                 "write_bounding_box")]),

                    # output
                    (petpvc,    pet_output, [("pvc_output.pvc_out",      "pvc_out"),
                                             ("pvc_output.brain_mask",   "brain_mask"),
                                             ("pvc_output.coreg_ref",    "coreg_ref"),
                                             ("pvc_output.coreg_others", "coreg_others"),
                                             ("pvc_output.gm_norm",      "gm_norm")]),

                    # output
                    (warp_pet,  pet_output, [("normalized_files",  "pvc_warped"),
                                             ("deformation_field", "warp_field")]),
                   ])
    else:  # PET to ANAT
        collector  = setup_node(Merge(2), name='merge_for_warp')
        apply_warp = setup_node(spm_apply_deformations(), name="warp_pet")

        wf.connect([
                    # inputs
                    (pet_input, petpvc,     [("in_file", "pvc_input.in_file"),
                                             ("anat",    "pvc_input.reference_file"),
                                             ("tissues", "pvc_input.tissues")]),

                    # gunzip some files for SPM Normalize
                    (petpvc,    merge_list, [("pvc_output.pvc_out",    "in1"),
                                             ("pvc_output.brain_mask", "in2"),
                                             ("pvc_output.gm_norm",    "in3")]),
                    (pet_input, merge_list, [("in_file",               "in4")]),

                    (merge_list, gunzipper, [("out",                   "in_file")]),

                    # warp the PET PVCed to MNI
                    (gunzipper,   collector,   [("out_file",             "in1")]),
                    (petpvc,      collector,   [("pvc_output.coreg_ref", "in2")]),

                    (pet_input,   apply_warp,  [("anat_to_mni_warp", "deformation_file")]),
                    (collector,   apply_warp,  [("out",              "apply_to_files")]),
                    (tpm_bbox,    apply_warp,  [("bbox",             "write_bounding_box")]),

                    # output
                    (petpvc,    pet_output, [("pvc_output.pvc_out",      "pvc_out"),
                                             ("pvc_output.brain_mask",   "brain_mask"),
                                             ("pvc_output.petpvc_mask",  "petpvc_mask"),
                                             ("pvc_output.coreg_ref",    "coreg_ref"),
                                             ("pvc_output.coreg_others", "coreg_others"),
                                             ("pvc_output.gm_norm",      "gm_norm")]),

                    # output
                    (apply_warp,  pet_output, [("normalized_files",  "pvc_warped"),
                                               ("deformation_field", "warp_field")]),
                   ])


    if do_atlas:
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"), name="coreg_atlas")

        # set the registration interpolation to nearest neighbour.
        coreg_atlas.inputs.write_interp = 0
        wf.connect([
                    (pet_input,   coreg_atlas, [("anat",                 "source")]),
                    (petpvc,      coreg_atlas, [("pvc_output.coreg_ref", "target")]),
                    (pet_input,   coreg_atlas, [("atlas_anat",           "apply_to_files")]),
                    (coreg_atlas, pet_output,  [("coregistered_files",   "atlas_pet")]),
        ])

    return wf
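
# A minimal sketch of the Merge -> MapNode(Gunzip) fan-out used above to
# decompress several results before SPM Normalize. Node names and file
# paths are illustrative only.
import nipype.pipeline.engine as pe
from nipype.algorithms.misc import Gunzip
from nipype.interfaces.utility import Merge

unzip_wf = pe.Workflow(name='unzip_sketch')
mrg = pe.Node(Merge(2), name='mrg')
mrg.inputs.in1 = '/tmp/a.nii.gz'  # hypothetical inputs
mrg.inputs.in2 = '/tmp/b.nii.gz'
gz = pe.MapNode(Gunzip(), name='gz', iterfield=['in_file'])
unzip_wf.connect(mrg, 'out', gz, 'in_file')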
Example 27
def spm_mrpet_grouptemplate_preprocessing(wf_name="spm_mrpet_grouptemplate_preproc"):
    """ Run the PET pre-processing workflow against the gunzip_pet.in_file files.
    It depends on the anat_preproc_workflow, so if this has not been run, this function
    will run it too.

    This is identical to the workflow defined in `spm_mrpet_preprocessing`,
    with the only difference that we now normalize all subjects against a
    custom template using the SPM Old Normalize interface.

    It does:
    - SPM12 Coregister T1 and tissues to PET
    - PVC the PET image in PET space
    - SPM12 Warp PET to the given template

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pet_input.in_file: traits.File
        The raw NIFTI_GZ PET image file.

    pet_input.atlas_anat: traits.File
        The atlas file in anatomical space.

    pet_input.anat: traits.File
        Path to the high-contrast anatomical image.
        Reference file of the warp_field, i.e., the anatomical image in its native space.

    pet_input.tissues: list of traits.File
        List of tissues files from the New Segment process. At least the first
        3 tissues must be present.

    pet_input.pet_template: traits.File
        The template file for inter-subject registration reference.

    Nipype outputs
    --------------
    pet_output.pvc_out: existing file
        The results of the PVC process.

    pet_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pet_output.coreg_ref: existing file
        The coregistered reference image to PET space.

    pet_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files.

    pet_output.pet_warped: existing file
        PET image normalized to the group template.

    pet_output.pvc_warped: existing file
        The outputs of the PETPVC workflow normalized to the group template.
        The result of every internal pre-processing step is normalized to the
        group template here.

    pet_output.warp_field: existing files
        Spatial normalization parameters .mat files.

    pet_output.gm_norm: existing file
        The output of the grey matter intensity normalization process.
        This is the last step in the PET signal correction, before registration.

    pet_output.atlas_pet: existing file
        Atlas image warped to PET space.
        If the `atlas_file` option is an existing file and `normalize_atlas` is True.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields  = ["in_file",
                  "anat",
                  "tissues",
                  "pet_template"]

    out_fields = ["brain_mask",
                  "coreg_others",
                  "coreg_ref",
                  "pvc_warped",
                  "pet_warped",
                  "warp_field",
                  "pvc_out",
                  "pvc_mask",
                  "gm_norm",]

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields  += ["atlas_anat"]
        out_fields += ["atlas_pet" ]

    # input
    pet_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                           name="pet_input")

    # workflow to perform partial volume correction
    petpvc = petpvc_workflow(wf_name="petpvc")

    unzip_mrg = setup_node(Merge(4), name='merge_for_unzip')
    gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    # warp each subject to the group template
    gunzip_template = setup_node(Gunzip(), name="gunzip_template",)
    gunzip_pet      = setup_node(Gunzip(), name="gunzip_pet",)

    warp_mrg = setup_node(Merge(2), name='merge_for_warp')
    warp2template = setup_node(spm.Normalize(jobtype="estwrite", out_prefix="wgrptemplate_"),
                               name="warp2template",)

    get_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="get_bbox")

    # output
    pet_output = setup_node(IdentityInterface(fields=out_fields), name="pet_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    wf.connect([
                # inputs
                (pet_input,   petpvc,  [("in_file", "pvc_input.in_file"),
                                        ("anat",    "pvc_input.reference_file"),
                                        ("tissues", "pvc_input.tissues")]),

                # get template bounding box to apply to results
                (pet_input, get_bbox,  [("pet_template", "in_file")]),

                # gunzip some inputs
                (pet_input, gunzip_pet,      [("in_file",      "in_file")]),
                (pet_input, gunzip_template, [("pet_template", "in_file")]),

                # gunzip some files for SPM Normalize
                (petpvc,    unzip_mrg, [("pvc_output.pvc_out",    "in1"),
                                        ("pvc_output.brain_mask", "in2"),
                                        ("pvc_output.gm_norm",    "in3")]),
                (pet_input, unzip_mrg, [("in_file",               "in4")]),

                (unzip_mrg, gunzipper, [("out", "in_file")]),

                (gunzipper, warp_mrg,  [("out_file", "in1")]),

                (warp_mrg, warp2template, [(("out", flatten_list), "apply_to_files")]),

                # prepare the target parameters of the warp to template
                (gunzip_pet,      warp2template, [("out_file", "source")]),
                (gunzip_template, warp2template, [("out_file", "template")]),
                (get_bbox,        warp2template, [("bbox",     "write_bounding_box")]),

                # output
                (warp2template, pet_output, [("normalization_parameters", "warp_field"),
                                             ("normalized_files" ,        "pvc_warped"),
                                             ("normalized_source",        "pet_warped"),
                                            ]),

                # output
                (petpvc,   pet_output, [("pvc_output.pvc_out",      "pvc_out"),
                                        ("pvc_output.brain_mask",   "brain_mask"),
                                        ("pvc_output.coreg_ref",    "coreg_ref"),
                                        ("pvc_output.coreg_others", "coreg_others"),
                                        ("pvc_output.gm_norm",      "gm_norm")]),
                ])
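
    # flatten_list is imported elsewhere in this module; it is assumed to
    # flatten the nested merge output, e.g. [['a.nii'], 'b.nii'] ->
    # ['a.nii', 'b.nii'], before Normalize's apply_to_files receives it.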

    if do_atlas:
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"), name="coreg_atlas")

        # set the registration interpolation to nearest neighbour.
        coreg_atlas.inputs.write_interp = 0
        wf.connect([
                    (pet_input,   coreg_atlas, [("anat",                 "source")]),
                    (petpvc,      coreg_atlas, [("pvc_output.coreg_ref", "target")]),
                    (pet_input,   coreg_atlas, [("atlas_anat",           "apply_to_files")]),
                    (coreg_atlas, pet_output,  [("coregistered_files",   "atlas_pet")]),

                    # warp the atlas to the template space as well
                    (coreg_atlas, warp_mrg,    [("coregistered_files",   "in2")]),
        ])

    return wf
    def connectome(subject_list, base_directory, out_directory):

        # ==================================================================
        # Loading required packages
        import nipype.pipeline.engine as pe
        import nipype.interfaces.utility as util
        from nipype.interfaces.freesurfer import ApplyVolTransform
        from nipype.interfaces.freesurfer import BBRegister
        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.diffusion_toolkit as dtk
        from nipype.interfaces.utility import Merge
        import numpy as np
        from additional_interfaces import AtlasValues
        from additional_interfaces import AparcStats
        from additional_interfaces import CalcMatrix
        from additional_interfaces import FreeSurferValues
        from additional_interfaces import Tractography
        from additional_pipelines import DWIPreproc
        from additional_pipelines import SubjectSpaceParcellation
        from additional_pipelines import T1Preproc

        from nipype import SelectFiles
        import os

        # ==================================================================
        # Defining the nodes for the workflow

        # Getting the subject ID
        infosource = pe.Node(
            interface=util.IdentityInterface(fields=['subject_id']),
            name='infosource')
        infosource.iterables = ('subject_id', subject_list)
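        # `iterables` expands the graph at run time: each subject in
        # subject_list gets its own copy of all downstream nodes.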

        # Getting the relevant diffusion-weighted data
        templates = dict(T1='{subject_id}/anat/{subject_id}_T1w.nii.gz',
                         dwi='{subject_id}/dwi/{subject_id}_dwi.nii.gz',
                         bvec='{subject_id}/dwi/{subject_id}_dwi.bvec',
                         bval='{subject_id}/dwi/{subject_id}_dwi.bval')

        selectfiles = pe.Node(SelectFiles(templates), name='selectfiles')
        selectfiles.inputs.base_directory = os.path.abspath(base_directory)

        # ==============================================================
        # T1 processing
        t1_preproc = pe.Node(interface=T1Preproc(), name='t1_preproc')
        t1_preproc.inputs.out_directory = out_directory + '/connectome/'
        t1_preproc.inputs.template_directory = template_directory

        # DWI processing
        dwi_preproc = pe.Node(interface=DWIPreproc(), name='dwi_preproc')
        dwi_preproc.inputs.out_directory = out_directory + '/connectome/'
        dwi_preproc.inputs.acqparams = acquisition_parameters
        dwi_preproc.inputs.index_file = index_file

        # Eroding the brain mask
        erode_mask = pe.Node(interface=fsl.maths.ErodeImage(),
                             name='erode_mask')

        # Reconstruction and tractography
        tractography = pe.Node(interface=Tractography(), name='tractography')
        tractography.iterables = ('model', ['CSA', 'CSD'])

        # Smoothing the tracts
        smooth = pe.Node(interface=dtk.SplineFilter(step_length=0.5),
                         name='smooth')

        # Moving to subject space
        subject_parcellation = pe.Node(interface=SubjectSpaceParcellation(),
                                       name='subject_parcellation')
        subject_parcellation.inputs.source_subject = 'fsaverage'
        subject_parcellation.inputs.source_annot_file = 'aparc'
        subject_parcellation.inputs.out_directory = out_directory + '/connectome/'
        subject_parcellation.inputs.parcellation_directory = parcellation_directory

        # Co-registering T1 and dwi
        bbreg = pe.Node(interface=BBRegister(), name='bbreg')
        bbreg.inputs.init = 'fsl'
        bbreg.inputs.contrast_type = 't2'

        applyreg = pe.Node(interface=ApplyVolTransform(), name='applyreg')
        applyreg.inputs.interp = 'nearest'
        applyreg.inputs.inverse = True

        # Merge outputs to pass on to CalcMatrix
        merge = pe.Node(interface=Merge(3), name='merge')

        # Calculating the connectome matrix
        calc_matrix = pe.MapNode(interface=CalcMatrix(),
                                 name='calc_matrix',
                                 iterfield=['scalar_file'])
        calc_matrix.iterables = ('threshold', np.arange(0, 100, 10))
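        # As a MapNode with iterfield=['scalar_file'], calc_matrix runs once
        # per entry of the merged scalar list (FA, RD, GFA); the 'threshold'
        # iterables then replicate each run for thresholds 0, 10, ..., 90.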

        # Getting values of diffusion measures
        FA_values = pe.Node(interface=AtlasValues(), name='FA_values')
        RD_values = pe.Node(interface=AtlasValues(), name='RD_values')
        AD_values = pe.Node(interface=AtlasValues(), name='AD_values')
        MD_values = pe.Node(interface=AtlasValues(), name='MD_values')

        # Getting additional surface measures
        aparcstats = pe.Node(interface=AparcStats(), name='aparcstats')
        aparcstats.inputs.parcellation_name = 'aparc'

        freesurfer_values = pe.Node(interface=FreeSurferValues(),
                                    name='freesurfer_values')
        freesurfer_values.inputs.parcellation_name = 'aparc'

        # ==================================================================
        # Setting up the workflow
        connectome = pe.Workflow(name='connectome')

        # Reading in files
        connectome.connect(infosource, 'subject_id', selectfiles, 'subject_id')

        # DWI preprocessing
        connectome.connect(infosource, 'subject_id', dwi_preproc, 'subject_id')
        connectome.connect(selectfiles, 'dwi', dwi_preproc, 'dwi')
        connectome.connect(selectfiles, 'bval', dwi_preproc, 'bvals')
        connectome.connect(selectfiles, 'bvec', dwi_preproc, 'bvecs')

        # CSD model and streamline tracking
        connectome.connect(dwi_preproc, 'mask', erode_mask, 'in_file')

        connectome.connect(selectfiles, 'bvec', tractography, 'bvec')
        connectome.connect(selectfiles, 'bval', tractography, 'bval')
        connectome.connect(dwi_preproc, 'dwi', tractography, 'in_file')
        connectome.connect(dwi_preproc, 'FA', tractography, 'FA')
        connectome.connect(erode_mask, 'out_file', tractography, 'brain_mask')

        # Smoothing the trackfile
        connectome.connect(tractography, 'out_track', smooth, 'track_file')

        # Preprocessing the T1-weighted file
        connectome.connect(infosource, 'subject_id', t1_preproc, 'subject_id')
        connectome.connect(selectfiles, 'T1', t1_preproc, 'T1')
        connectome.connect(t1_preproc, 'wm', subject_parcellation, 'wm')
        connectome.connect(t1_preproc, 'subjects_dir', subject_parcellation,
                           'subjects_dir')
        connectome.connect(t1_preproc, 'subject_id', subject_parcellation,
                           'subject_id')

        # Getting the parcellation into diffusion space
        connectome.connect(t1_preproc, 'subject_id', bbreg, 'subject_id')
        connectome.connect(t1_preproc, 'subjects_dir', bbreg, 'subjects_dir')
        connectome.connect(dwi_preproc, 'b0', bbreg, 'source_file')

        connectome.connect(dwi_preproc, 'b0', applyreg, 'source_file')
        connectome.connect(bbreg, 'out_reg_file', applyreg, 'reg_file')
        connectome.connect(subject_parcellation, 'renum_expanded', applyreg,
                           'target_file')

        # Calculating the FA connectome
        connectome.connect(tractography, 'out_file', calc_matrix, 'track_file')
        connectome.connect(dwi_preproc, 'FA', merge, 'in1')
        connectome.connect(dwi_preproc, 'RD', merge, 'in2')
        connectome.connect(tractography, 'GFA', merge, 'in3')
        connectome.connect(merge, 'out', calc_matrix, 'scalar_file')
        connectome.connect(applyreg, 'transformed_file', calc_matrix,
                           'ROI_file')

        # Getting values for additional measures
        connectome.connect(dwi_preproc, 'FA', FA_values, 'morpho_filename')
        connectome.connect(dwi_preproc, 'RD', RD_values, 'morpho_filename')
        connectome.connect(dwi_preproc, 'AD', AD_values, 'morpho_filename')
        connectome.connect(dwi_preproc, 'MD', MD_values, 'morpho_filename')
        connectome.connect(applyreg, 'transformed_file', FA_values,
                           'atlas_filename')
        connectome.connect(applyreg, 'transformed_file', RD_values,
                           'atlas_filename')
        connectome.connect(applyreg, 'transformed_file', AD_values,
                           'atlas_filename')
        connectome.connect(applyreg, 'transformed_file', MD_values,
                           'atlas_filename')

        # Getting FreeSurfer morphological values
        connectome.connect(t1_preproc, 'subject_id', aparcstats, 'subject_id')
        connectome.connect(t1_preproc, 'subjects_dir', aparcstats,
                           'subjects_dir')
        connectome.connect(aparcstats, 'lh_stats', freesurfer_values,
                           'lh_filename')
        connectome.connect(aparcstats, 'rh_stats', freesurfer_values,
                           'rh_filename')

        # ==================================================================
        # Running the workflow
        connectome.base_dir = os.path.abspath(out_directory)
        connectome.write_graph()
        connectome.run()
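    # Usage sketch (paths hypothetical; illustrative only):
    #
    #     connectome(subject_list=['sub-01', 'sub-02'],
    #                base_directory='/data/bids',
    #                out_directory='/data/derivatives',
    #                template_directory='/data/templates',
    #                acquisition_parameters='/data/acqparams.txt',
    #                index_file='/data/index.txt',
    #                parcellation_directory='/data/parcellation')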
Example n. 29
def rest_noise_filter_wf(wf_name='rest_noise_removal'):
    """ Create a resting-state fMRI noise removal node.

    Nipype Inputs
    -------------
    rest_noise_input.in_file

    rest_noise_input.brain_mask

    rest_noise_input.wm_mask

    rest_noise_input.csf_mask

    rest_noise_input.motion_params
        Nipy motion parameters.

    Nipype Outputs
    --------------
    rest_noise_output.tsnr_file
        An SNR estimation volume file for QA purposes.

    rest_noise_output.motion_corrected
        The fMRI motion corrected image.

    rest_noise_output.nuis_corrected
        The resulting nuisance corrected image.
        This will be the same as 'motion_corrected' if compcor
        is disabled.

    rest_noise_output.motion_regressors
        Motion regressors file.

    rest_noise_output.compcor_regressors
        CompCor regressors file.

    rest_noise_output.art_displacement_files
        One image file containing the voxel-displacement timeseries.

    rest_noise_output.art_intensity_files
        One file containing the global intensity values determined
        from the brainmask.

    rest_noise_output.art_norm_files
        One file containing the composite norm.

    rest_noise_output.art_outlier_files
        One file containing a list of 0-based indices corresponding
        to outlier volumes.

    rest_noise_output.art_plot_files
        One image file containing the detected outliers.

    rest_noise_output.art_statistic_files
        One file containing information about the different types of
        artifacts and, if design info is provided, details of
        stimulus-correlated motion and a listing of artifacts by
        event type.

    Returns
    -------
    rm_nuisance_wf: nipype Workflow
    """

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    in_fields = [
        "in_file",
        "brain_mask",
        "wm_mask",
        "csf_mask",
        "motion_params",
    ]

    out_fields = [
        "tsnr_file",
        "motion_corrected",
        "nuis_corrected",
        "motion_regressors",
        "compcor_regressors",
        "gsr_regressors",
        "art_displacement_files",
        "art_intensity_files",
        "art_norm_files",
        "art_outlier_files",
        "art_plot_files",
        "art_statistic_files",
    ]

    # input identities
    rest_noise_input = setup_node(IdentityInterface(fields=in_fields,
                                                    mandatory_inputs=True),
                                  name="rest_noise_input")

    # get the settings for filters
    filters = _get_params_for('rest_filter')

    # Compute TSNR on realigned data, regressing out polynomials up to order 2
    tsnr = setup_node(TSNR(regress_poly=2), name='tsnr')
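    # tSNR is the voxelwise temporal mean divided by the temporal standard
    # deviation after the polynomial trend (here up to order 2) is removed.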

    # Use :class:`nipype.algorithms.rapidart` to determine which of the
    # images in the functional series are outliers based on deviations in
    # intensity or movement.
    art = setup_node(rapidart_fmri_artifact_detection(),
                     name="detect_artifacts")

    # Compute motion regressors
    motion_regs = setup_node(Function(
        input_names=[
            'motion_params',
            'order',
            'derivatives',
        ],
        output_names=['out_files'],
        function=motion_regressors,
    ),
                             name='motion_regressors')

    # Create a filter to remove motion and art confounds
    motart_pars = setup_node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=create_regressors),
                             name='motart_parameters')

    motion_filter = setup_node(fsl.GLM(out_f_name='F_mcart.nii.gz',
                                       out_pf_name='pF_mcart.nii.gz',
                                       demean=True),
                               name='motion_filter')
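    # fsl.GLM fits the design (motion + artifact regressors) to the 4D series
    # and writes the residuals; `out_res` is the motion-filtered image used
    # downstream.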

    # Noise confound regressors
    compcor_pars = setup_node(Function(
        input_names=[
            'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
        ],
        output_names=['out_files'],
        function=extract_noise_components,
    ),
                              name='compcor_pars')
    #compcor_pars = setup_node(ACompCor(), name='compcor_pars')
    #compcor_pars.inputs.components_file = 'noise_components.txt'

    compcor_filter = setup_node(fsl.GLM(out_f_name='F.nii.gz',
                                        out_pf_name='pF.nii.gz',
                                        demean=True),
                                name='compcor_filter')

    # Global signal regression
    gsr_pars = setup_node(Function(
        input_names=[
            'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
        ],
        output_names=['out_files'],
        function=extract_noise_components,
    ),
                          name='gsr_pars')

    gsr_filter = setup_node(fsl.GLM(out_f_name='F_gsr.nii.gz',
                                    out_pf_name='pF_gsr.nii.gz',
                                    demean=True),
                            name='gsr_filter')

    # output identities
    rest_noise_output = setup_node(IdentityInterface(fields=out_fields,
                                                     mandatory_inputs=True),
                                   name="rest_noise_output")

    # Connect the nodes
    wf.connect([
        # tsnr
        (rest_noise_input, tsnr, [("in_file", "in_file")]),

        # artifact detection
        (rest_noise_input, art, [
            ("in_file", "realigned_files"),
            ("motion_params", "realignment_parameters"),
            ("brain_mask", "mask_file"),
        ]),

        # calculate motion regressors
        (rest_noise_input, motion_regs, [("motion_params", "motion_params")]),

        # create motion and confound regressors parameters file
        (art, motart_pars, [
            ("norm_files", "comp_norm"),
            ("outlier_files", "outliers"),
        ]),
        (motion_regs, motart_pars, [("out_files", "motion_params")]),

        # motion filtering
        (rest_noise_input, motion_filter, [
            ("in_file", "in_file"),
            (("in_file", rename, "_filtermotart"), "out_res_name"),
        ]),
        (motart_pars, motion_filter, [(("out_files", selectindex, [0]),
                                       "design")]),

        # output
        (tsnr, rest_noise_output, [("tsnr_file", "tsnr_file")]),
        (motart_pars, rest_noise_output, [("out_files", "motion_regressors")]),
        (motion_filter, rest_noise_output, [("out_res", "motion_corrected")]),
        (art, rest_noise_output, [
            ("displacement_files", "art_displacement_files"),
            ("intensity_files", "art_intensity_files"),
            ("norm_files", "art_norm_files"),
            ("outlier_files", "art_outlier_files"),
            ("plot_files", "art_plot_files"),
            ("statistic_files", "art_statistic_files"),
        ]),
    ])

    last_filter = motion_filter
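    # `last_filter` tracks the tail of the filtering chain: the optional
    # compcor and gsr blocks below rebind it, so whichever filter ran last
    # provides the final `nuis_corrected` output.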

    # compcor filter
    if filters['compcor_csf'] or filters['compcor_wm']:
        wf.connect([
            # calculate compcor regressor and parameters file
            (motart_pars, compcor_pars, [
                (("out_files", selectindex, [0]), "extra_regressors"),
            ]),
            (motion_filter, compcor_pars, [
                ("out_res", "realigned_file"),
            ]),

            # the compcor filter
            (motion_filter, compcor_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_cleaned"), "out_res_name"),
            ]),
            (compcor_pars, compcor_filter, [(("out_files", selectindex, [0]),
                                             "design")]),
            #(compcor_pars,     compcor_filter,    [("components_file",  "design")]),
            (rest_noise_input, compcor_filter, [("brain_mask", "mask")]),

            # output
            (compcor_pars, rest_noise_output, [("out_files",
                                                "compcor_regressors")]),
            #(compcor_pars,     rest_noise_output, [("components_file",   "compcor_regressors")]),
        ])
        last_filter = compcor_filter

    # global signal regression
    if filters['gsr']:
        wf.connect([
            # calculate gsr regressors parameters file
            (last_filter, gsr_pars, [("out_res", "realigned_file")]),
            (rest_noise_input, gsr_pars, [("brain_mask", "mask_file")]),

            # the output file name
            (rest_noise_input, gsr_filter, [("brain_mask", "mask")]),
            (last_filter, gsr_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_gsr"), "out_res_name"),
            ]),
            (gsr_pars, gsr_filter, [(("out_files", selectindex, [0]), "design")
                                    ]),

            # output
            (gsr_pars, rest_noise_output, [("out_files", "gsr_regressors")]),
        ])
        last_filter = gsr_filter

    # connect the final nuisance correction output node
    wf.connect([
        (last_filter, rest_noise_output, [("out_res", "nuis_corrected")]),
    ])

    if filters['compcor_csf'] and filters['compcor_wm']:
        mask_merge = setup_node(Merge(2), name="mask_merge")
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, mask_merge, [("wm_mask", "in1")]),
            (rest_noise_input, mask_merge, [("csf_mask", "in2")]),
            (mask_merge, compcor_pars, [("out", "mask_file")]),
        ])

    elif filters['compcor_csf']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("csf_mask", "mask_file")]),
        ])

    elif filters['compcor_wm']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("wm_mask", "mask_file")]),
        ])

    return wf
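# Usage sketch for rest_noise_filter_wf, assuming the configuration that
# setup_node() and _get_params_for() read has been initialised elsewhere;
# the file paths are hypothetical:
#
#     wf = rest_noise_filter_wf()
#     wf.inputs.rest_noise_input.in_file = 'rest_realigned.nii.gz'
#     wf.inputs.rest_noise_input.brain_mask = 'brain_mask.nii.gz'
#     wf.inputs.rest_noise_input.wm_mask = 'wm_mask.nii.gz'
#     wf.inputs.rest_noise_input.csf_mask = 'csf_mask.nii.gz'
#     wf.inputs.rest_noise_input.motion_params = 'motion.par'
#     wf.base_dir = '/tmp/rest_noise_work'
#     wf.run()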
    def make_neuromet_fs_workflow(self):

        # Infosource: Iterate through subject names
        infosource = Node(interface=IdentityInterface(fields=['subject_id']),
                          name="infosource")
        infosource.iterables = ('subject_id', self.subject_list)

        mask_source = Node(interface=GetMaskValue(csv_file=self.mask_file),
                           name='get_mask')

        split_sub_str = Node(Function(['subject_str'],
                                      ['subject_id', 'session_id'],
                                      self.split_subject_ses),
                             name='split_sub_str')

        # Datasource: Build subjects' filenames from IDs
        info = dict(mask=[[
            'subject_id', 'session_id', 'anat', 'subject_id', 'session_id',
            'mask', 'ro_brain_bin.nii.gz'
        ]],
                    uni_bias_corr=[[
                        'subject_id', 'session_id', 'anat', 'subject_id',
                        'session_id', 'UNI', 'ro_bfcorr.nii'
                    ]],
                    den_ro=[[
                        'subject_id', 'session_id', 'anat', 'subject_id',
                        'session_id', 'UNIDEN', 'ro_bfcorr.nii'
                    ]])

        datasource = Node(interface=DataGrabber(
            infields=['subject_id', 'session_id', 'mask'],
            outfields=['mask', 'uni_bias_corr', 'den_ro']),
                          name='datasource')
        datasource.inputs.base_directory = self.derivatives_dir
        datasource.inputs.template = 'sub-NeuroMET%s/ses-0%s/%s/sub-NeuroMET%s_ses-0%s_desc-%s_%s'
        datasource.inputs.template_args = info
        datasource.inputs.sort_filelist = False

        sink = self.make_sink()

        comb_imgs = self.make_comb_imgs()

        freesurfer = self.make_freesurfer()

        neuromet_fs = Workflow(name='NeuroMET', base_dir=self.temp_dir)
        neuromet_fs.connect(infosource, 'subject_id', split_sub_str,
                            'subject_str')
        neuromet_fs.connect(split_sub_str, 'subject_id', datasource,
                            'subject_id')
        neuromet_fs.connect(split_sub_str, 'session_id', datasource,
                            'session_id')
        neuromet_fs.connect(infosource, 'subject_id', mask_source,
                            'subject_id')
        neuromet_fs.connect(mask_source, 'mask_value', datasource, 'mask')
        neuromet_fs.connect(datasource, 'uni_bias_corr', comb_imgs,
                            'mask_uni_bias.in_file')
        neuromet_fs.connect(datasource, 'mask', comb_imgs,
                            'mask_uni_bias.mask_file')
        neuromet_fs.connect(datasource, 'den_ro', comb_imgs,
                            'uni_brain_den_surr_mas.in_file')

        neuromet_fs.connect(comb_imgs, 'uni_brain_den_surr_add.out_file',
                            freesurfer, 'fs_recon1.T1_files')
        neuromet_fs.connect(datasource, 'mask', freesurfer,
                            'fs_mriconv.in_file')

        out_dir_source = Node(interface=IdentityInterface(
            fields=['out_dir'], mandatory_inputs=True),
                              name='out_dir_source')
        out_dir_source.inputs.out_dir = self.bids_root
        make_list_str = Node(interface=Merge(2), name='make_list_of_paths')
        merge_strs = Node(interface=OsPathJoin(), name='merge_sub_id_dir')

        neuromet_fs.connect(comb_imgs, 'uni_brain_den_surr_add.out_file', sink,
                            '@img')
        #neuromet_fs.connect(infosource, 'subject_id', copy_freesurfer_dir, 'sub_id')
        #neuromet_fs.connect(freesurfer, 'segment_hp.subjects_dir', copy_freesurfer_dir, 'in_dir')
        neuromet_fs.connect(freesurfer, 'segment_hp.subjects_dir',
                            make_list_str, 'in1')
        neuromet_fs.connect(freesurfer, 'segment_hp.subject_id', make_list_str,
                            'in2')
        neuromet_fs.connect(make_list_str, 'out', merge_strs, 'str_list')
        neuromet_fs.connect(merge_strs, 'out_path', sink, '@recon_all')
        #neuromet_fs.connect(out_dir_source, 'out_dir', copy_freesurfer_dir, 'out_dir')

        #ToDo:
        # 04.12.2020 QDec + Adjust volumes. It hangs if qdec is in a workflow, works as a single interface
        #neuromet_fs.connect(freesurfer, 'segment_hp.subject_id', qdec, 'devnull')
        #neuromet_fs.connect(datasource, 'base_directory', qdec, 'basedir')
        #neuromet_fs.connect(qdec, 'stats_directory', adj_vol, 'stats_directory')
        #neuromet_fs.connect(qdec, 'stats_directory', sink, '@stat_dir')
        #neuromet_fs.connect(adj_vol, 'adjusted_stats', sink, '@adj_stats')

        return neuromet_fs
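    # Usage sketch, assuming this method sits on a pipeline class (not shown
    # here) that provides subject_list, mask_file, derivatives_dir, temp_dir
    # and bids_root; the class name below is hypothetical:
    #
    #     pipeline = NeuroMet(...)
    #     wf = pipeline.make_neuromet_fs_workflow()
    #     wf.run()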