Example #1
def _bgplot(in_file, base_directory):
    from nipype.interfaces.io import DataSink
    if not in_file:
        return ''

    ds = DataSink(base_directory=base_directory, parameterization=False)
    setattr(ds.inputs, '@bg_fitting', in_file)
    return ds.run().outputs.out_file
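A minimal usage sketch for the helper above; the paths are placeholders, and the return value is the path of the file copied under the sink's base directory:

# Hypothetical call with placeholder paths.
out_file = _bgplot('/tmp/work/bg_fitting.svg', '/tmp/reports')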
Example #2
def attach_canica(main_wf, wf_name="canica", **kwargs):
    """ Attach a nilearn CanICA interface to `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the CanICA workflow; used to prefix the node names.

    kwargs: dict[str]->str
        input_node: str
            Name of the node in `main_wf` from which to obtain the source files through `input_connection`.

        input_connection: str
            Name of the connection to obtain the source files.

    Nipype Inputs for `main_wf`
    ---------------------------
    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    srcwf_name   = kwargs['input_node']
    srcconn_name = kwargs['input_connection']

    src_wf   = main_wf.get_node(srcwf_name)
    datasink = get_datasink(main_wf, name='datasink')

    base_outdir  = datasink.inputs.base_directory
    ica_datasink = pe.Node(DataSink(parameterization=False,
                                    base_directory=base_outdir,),
                           name="{}_datasink".format(wf_name))

    # the list of the subjects' files
    ica_subjs = pe.JoinNode(interface=IdentityInterface(fields=["ica_subjs"]),
                            joinsource="infosrc",
                            joinfield="ica_subjs",
                            name="ica_subjs")

    # run group-level CanICA over the subject file list
    ica = setup_node(CanICAInterface(), name="{}_ica".format(wf_name),)

    # Connect the nodes
    main_wf.connect([
                     # file list input
                     (src_wf,     ica_subjs, [(srcconn_name, "ica_subjs")]),

                     # canica
                     (ica_subjs,  ica,    [("ica_subjs",  "in_files")]),

                     # canica output
                     (ica, ica_datasink,  [("components", "canica.@components")]),
                     (ica, ica_datasink,  [("loadings",   "canica.@loadings")]),
                     (ica, ica_datasink,  [("score",      "canica.@score")]),
                   ])
    return main_wf
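A minimal usage sketch for `attach_canica`, assuming `main_wf` already contains a source node (hypothetically named "selectfiles") that exposes the preprocessed files on an output called "out_file":

# Hypothetical call; the node and connection names are placeholders.
main_wf = attach_canica(main_wf,
                        wf_name="canica",
                        input_node="selectfiles",
                        input_connection="out_file")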
Example #3
    def build_output_node(self):
        """Build and connect an output node to the pipeline."""
        import nipype.interfaces.utility as nutil
        from nipype.interfaces.io import DataSink
        import nipype.pipeline.engine as npe
        from clinica.utils.nipype import (fix_join, container_from_filename)
        from clinica.utils.filemanip import get_subject_id

        # Write node
        # ----------------------
        write_node = npe.Node(name="WriteCaps", interface=DataSink())
        write_node.inputs.base_directory = self.caps_directory
        write_node.inputs.parameterization = False

        # Get subject ID node
        # ----------------------
        image_id_node = npe.Node(interface=nutil.Function(
            input_names=['bids_or_caps_file'],
            output_names=['image_id'],
            function=get_subject_id),
                                 name='ImageID')

        # Find container path from t1w filename
        # ----------------------
        container_path = npe.Node(nutil.Function(
            input_names=['bids_or_caps_filename'],
            output_names=['container'],
            function=container_from_filename),
                                  name='ContainerPath')

        self.connect([
            (self.input_node, image_id_node, [('input_nifti',
                                               'bids_or_caps_file')]),
            (self.input_node, container_path, [('input_nifti',
                                                'bids_or_caps_filename')]),
            # (image_id_node, write_node, [('image_id', '@image_id')]),
            (image_id_node, write_node, [('image_id', '@image_id')]),
        ])

        subfolder = 'image_based'
        if self.parameters.get('extract_method') == 'slice':
            subfolder = 'slice_based'
            self.connect([(self.output_node, write_node, [('slices_rgb_T1',
                                                           '@slices_rgb_T1')]),
                          (self.output_node, write_node,
                           [('slices_original_T1', '@slices_original_T1')])])

        elif self.parameters.get('extract_method') == 'patch':
            subfolder = 'patch_based'
            self.connect([(self.output_node, write_node, [('patches_T1',
                                                           '@patches_T1')])])
        else:
            self.connect([(self.output_node, write_node,
                           [('output_pt_file', '@output_pt_file')])])

        self.connect([
            (container_path, write_node,
             [(('container', fix_join, 'deeplearning_prepare_data', subfolder,
                't1_linear'), 'container')]),
        ])
Example #4
def test_DataSink_outputs():
    output_map = dict(out_file=dict())
    outputs = DataSink.output_spec()

    for key, metadata in output_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(outputs.traits()[key], metakey), value
Example #5
def datasink(base_directory, name, container=None, overwrite=False):
    output = pipe.Node(interface=DataSink(), name=name)
    output.inputs.base_directory = base_directory
    if container is not None:
        output.inputs.container = container
    output.inputs.parameterization = False
    output.overwrite = overwrite
    return output
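A brief usage sketch of the factory above; the base directory and container values are placeholders:

# Hypothetical usage: one sink per subject, each writing into its own container.
sink = datasink('/data/study/derivatives', name='sink_sub01',
                container='sub-01', overwrite=True)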
Example #6
File: base.py Project: p3proc/p3
    def __init__(self, settings):
        # Define datasink node
        self.datasink = Node(
            DataSink(
                base_directory=os.path.join(settings['output_dir']),
                substitutions=[
                    ('_subject_', 'sub-')
                ]
            ),
            name='datasink'
        )
Example #7
def create_DARTEL_wf(subj_list, file_template, work_dir, out_dir):
    '''
    Aligns all images to a template (average of all images), then warps images into MNI space (using an SPM tissue probability map, see https://www.fil.ion.ucl.ac.uk/spm/doc/manual.pdf, section 25.4).

    subj_list: list of subject IDs
        e.g. [sub-001, sub-002]

    file_template: string used to identify all files to align (via glob).
        e.g. file_template = os.path.join(work_dir, 'pag_mask', '*_pag_mask.nii')
            The template may match a larger set of files; only the subjects in subj_list are used.
                e.g. the template may match sub-001, sub-002, sub-003, ...
                but if subj_list only includes sub-001, then only sub-001 will be used.
                This means the template can overgeneralize, while specific subjects can still be easily excluded (e.g. for excessive movement).

    work_dir: string, denoting path to working directory.

    out_dir: string, denoting the output directory (results are saved both to the working directory and to this output directory)
    '''
    import nibabel as nib
    import numpy as np
    from nipype.interfaces.spm.preprocess import DARTEL, CreateWarped
    from nipype.interfaces.io import DataSink
    import nipype.pipeline.engine as pe
    import os
    from jtnipyutil.util import files_from_template
    # set up workflow.
    DARTEL_wf = pe.Workflow(name='DARTEL_wf')
    DARTEL_wf.base_dir = work_dir

    # get images
    images = files_from_template(subj_list, file_template)

    # set up DARTEL.
    dartel = pe.Node(interface=DARTEL(), name='dartel')
    dartel.inputs.image_files = [images]

    dartel_warp = pe.Node(interface=CreateWarped(), name='dartel_warp')
    dartel_warp.inputs.image_files = images
    #     warp_data.inputs.flowfield_files = # from inputspec

    ################## Setup datasink.
    sinker = pe.Node(DataSink(parameterization=True), name='sinker')
    sinker.inputs.base_directory = out_dir

    DARTEL_wf.connect([
        (dartel, dartel_warp, [('dartel_flow_fields', 'flowfield_files')]),
        (dartel, sinker, [('final_template_file', 'avg_template'),
                          ('template_files', 'avg_template.@template_stages'),
                          ('dartel_flow_fields', 'dartel_flow')]),
        (dartel_warp, sinker, [('warped_files', 'warped_PAG')])
    ])

    return DARTEL_wf
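A minimal invocation sketch following the docstring above; the paths and subject IDs are placeholders:

import os

# Hypothetical call mirroring the docstring's example template.
subj_list = ['sub-001', 'sub-002']
file_template = os.path.join('/data/work', 'pag_mask', '*_pag_mask.nii')
wf = create_DARTEL_wf(subj_list, file_template,
                      work_dir='/data/work', out_dir='/data/out')
wf.run(plugin='MultiProc')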
Example #8
    def build_output_node(self):
        """Build and connect an output node to the pipeline."""
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from nipype.interfaces.io import DataSink

        from clinica.utils.nipype import container_from_filename, fix_join

        from .t1_linear_utils import get_substitutions_datasink

        # Writing node
        write_node = npe.Node(name="WriteCaps", interface=DataSink())
        write_node.inputs.base_directory = self.caps_directory
        write_node.inputs.parameterization = False

        # Other nodes
        # =====================================
        # Get substitutions to rename files
        get_ids = npe.Node(
            interface=nutil.Function(
                input_names=["bids_file"],
                output_names=["image_id_out", "subst_ls"],
                function=get_substitutions_datasink,
            ),
            name="GetIDs",
        )
        # Find container path from t1w filename
        container_path = npe.Node(
            nutil.Function(
                input_names=["bids_or_caps_filename"],
                output_names=["container"],
                function=container_from_filename,
            ),
            name="ContainerPath",
        )
        # fmt: off
        self.connect([
            (self.input_node, container_path, [("t1w", "bids_or_caps_filename")
                                               ]),
            (container_path, write_node, [(("container", fix_join,
                                            "t1_linear"), "container")]),
            (self.output_node, get_ids, [("image_id", "bids_file")]),
            (get_ids, write_node, [("subst_ls", "substitutions")]),
            (get_ids, write_node, [("image_id_out", "@image_id")]),
            (self.output_node, write_node, [("outfile_reg", "@outfile_reg")]),
            (self.output_node, write_node, [("affine_mat", "@affine_mat")]),
        ])

        if not (self.parameters.get("uncropped_image")):
            self.connect([
                (self.output_node, write_node, [("outfile_crop",
                                                 "@outfile_crop")]),
            ])
Example #9
def define_workflow(subject_list, run_list, experiment_dir, output_dir):
    """run the smooth workflow given subject and runs"""
    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=4, t_size=-1, output_type='NIFTI'),
                   name="extract")

    # Smooth - image smoothing
    smooth = Node(Smooth(fwhm=[8, 8, 8]), name="smooth")

    # Mask - applying mask to smoothed
    # mask_func = Node(ApplyMask(output_type='NIFTI'),
    # name="mask_func")

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'run_num']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list),
                            ('run_num', run_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    func_file = opj(
        'sub-{subject_id}', 'func',
        'sub-{subject_id}_task-tsl_run-{run_num}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz'
    )
    templates = {'func': func_file}
    selectfiles = Node(SelectFiles(templates, base_directory=data_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    ## Use the following DataSink output substitutions
    substitutions = [('_subject_id_', 'sub-'), ('ssub', 'sub'),
                     ('_space-MNI152NLin2009cAsym_desc-preproc_', '_fwhm-8_'),
                     ('_fwhm_', ''), ('_roi', '')]
    substitutions += [('_run_num_%s' % r, '') for r in run_list]
    datasink.inputs.substitutions = substitutions

    # Create a preprocessing workflow
    preproc = Workflow(name='preproc')
    preproc.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the preprocessing workflow (spm smooth)
    preproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                                ('run_num', 'run_num')]),
                     (selectfiles, extract, [('func', 'in_file')]),
                     (extract, smooth, [('roi_file', 'in_files')]),
                     (smooth, datasink, [('smoothed_files', 'preproc.@smooth')
                                         ])])
    return preproc
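A minimal invocation sketch, assuming the module-level `data_dir` and `working_dir` globals referenced inside the function are defined; the directories and IDs are placeholders:

# Hypothetical call with placeholder subject and run IDs.
preproc = define_workflow(subject_list=['001', '002'],
                          run_list=['1', '2'],
                          experiment_dir='/data/experiment',
                          output_dir='datasink')
preproc.run('MultiProc', plugin_args={'n_procs': 4})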
Example #10
def cope_merge_wf(subject_id, sink_directory, name='cope_merge_wf'):
    cope_merge_wf = Workflow(name='cope_merge_wf')

    info = dict(
        learning_cope=[['subject_id']],  #dictionary for Datagrabber
        nonlearning_cope=[['subject_id']])

    #node to grab corr and incorr cope files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=info.keys()),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/frstlvl/model_LSS2')
    datasource.inputs.field_template = dict(
        learning_cope='%s/deriv/learn/*.nii.gz',
        nonlearning_cope='%s/deriv/nonlearn/*.nii.gz')
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    #node to merge learning trials across all 6 runs
    merge_learning = Node(Merge(), name='merge_learning')
    merge_learning.inputs.dimension = 't'
    merge_learning.inputs.output_type = 'NIFTI_GZ'
    merge_learning.inputs.merged_file = 'cope_learning.nii.gz'
    merge_learning.inputs.tr = 2.00
    cope_merge_wf.connect(datasource, 'learning_cope', merge_learning,
                          'in_files')

    #node to merge nonlearning trials across all 6 runs
    merge_nonlearning = Node(Merge(), name='merge_nonlearning')
    merge_nonlearning.inputs.dimension = 't'
    merge_nonlearning.inputs.output_type = 'NIFTI_GZ'
    merge_nonlearning.inputs.merged_file = 'cope_nonlearning.nii.gz'
    merge_nonlearning.inputs.tr = 2.00
    cope_merge_wf.connect(datasource, 'nonlearning_cope', merge_nonlearning,
                          'in_files')

    #node to output data
    dsink = Node(DataSink(), name='dsink')
    dsink.inputs.base_directory = sink_directory
    dsink.inputs.container = subject_id
    cope_merge_wf.connect(merge_learning, 'merged_file', dsink,
                          'merged.@learning')
    cope_merge_wf.connect(merge_nonlearning, 'merged_file', dsink,
                          'merged.@nonlearning')

    return cope_merge_wf
Example #11
def create_workflow(xfm_dir,
                    xfm_pattern,
                    atlas_dir,
                    atlas_pattern,
                    source_dir,
                    source_pattern,
                    work_dir,
                    out_dir,
                    name="new_data_to_atlas_space"):

    wf = Workflow(name=name)
    wf.base_dir = os.path.join(work_dir)

    datasource_source = Node(interface=DataGrabber(sort_filelist=True),
                             name='datasource_source')
    datasource_source.inputs.base_directory = os.path.abspath(source_dir)
    datasource_source.inputs.template = source_pattern

    datasource_xfm = Node(interface=DataGrabber(sort_filelist=True),
                          name='datasource_xfm')
    datasource_xfm.inputs.base_directory = os.path.abspath(xfm_dir)
    datasource_xfm.inputs.template = xfm_pattern

    datasource_atlas = Node(interface=DataGrabber(sort_filelist=True),
                            name='datasource_atlas')
    datasource_atlas.inputs.base_directory = os.path.abspath(atlas_dir)
    datasource_atlas.inputs.template = atlas_pattern

    resample = MapNode(interface=Resample(sinc_interpolation=True),
                       name='resample_',
                       iterfield=['input_file', 'transformation'])
    wf.connect(datasource_source, 'outfiles', resample, 'input_file')
    wf.connect(datasource_xfm, 'outfiles', resample, 'transformation')
    wf.connect(datasource_atlas, 'outfiles', resample, 'like')

    bigaverage = Node(interface=BigAverage(output_float=True, robust=False),
                      name='bigaverage',
                      iterfield=['input_file'])

    wf.connect(resample, 'output_file', bigaverage, 'input_files')

    datasink = Node(interface=DataSink(base_directory=out_dir,
                                       container=out_dir),
                    name='datasink')

    wf.connect([(bigaverage, datasink, [('output_file', 'average')])])
    wf.connect([(resample, datasink, [('output_file', 'atlas_space')])])
    wf.connect([(datasource_xfm, datasink, [('outfiles', 'transforms')])])

    return wf
Example #12
def cli(ctx, working_dir, name, results, save, container, image, keep, force):
    if not ctx.obj:
        ctx.obj = {}

    ctx.obj['save'] = save
    ctx.obj['container'] = container
    ctx.obj['wdir'] = click.format_filename(working_dir)
    ctx.obj['output'] = results
    ctx.obj['force'] = force
    ctx.obj['temp'] = ''
    ctx.obj['container_dir'] = ''

    if not ctx.obj['container']:
        datasink = pe.Node(DataSink(base_directory=ctx.obj['wdir'],
                                    container=ctx.obj['output']),
                           name="datasink")
        wf = pe.Workflow(name=name, base_dir=ctx.obj['wdir'])
    else:
        if find_spec("docker") is None:
            click.echo(
                'The --container option was specified but the docker package is not installed.'
            )
            sys.exit(1)

        ctx.obj['save'] = True
        ctx.obj['temp'] = tempfile.mkdtemp(dir=ctx.obj['wdir'])
        ctx.obj['container_dir'] = '/tmp'
        datasink = pe.Node(DataSink(base_directory=ctx.obj['container_dir'],
                                    container=os.path.join(
                                        ctx.obj['container_dir'], 'output')),
                           name="datasink")
        wf = pe.Workflow(name=name, base_dir=ctx.obj['temp'])

    wf.add_nodes([datasink])
    ctx.obj['workflow'] = wf
    ctx.obj['results'] = datasink
Example #13
    def build_output_node(self):
        """Build and connect an output node to the pipeline.
        """
        import nipype.interfaces.utility as nutil
        from nipype.interfaces.io import DataSink
        import nipype.pipeline.engine as npe
        from clinica.utils.nipype import fix_join
        from .t1_linear_utils import (container_from_filename, get_substitutions_datasink)

        # Writing node
        write_node = npe.Node(
                name="WriteCaps",
                interface=DataSink()
                )
        write_node.inputs.base_directory = self.caps_directory
        write_node.inputs.parameterization = False

        # Other nodes
        # =====================================
        # Get substitutions to rename files
        get_ids = npe.Node(
                interface=nutil.Function(
                    input_names=['bids_file'],
                    output_names=['image_id_out', 'subst_ls'],
                    function=get_substitutions_datasink),
                name="GetIDs")
        # Find container path from t1w filename
        container_path = npe.Node(
                nutil.Function(
                    input_names=['bids_or_caps_filename'],
                    output_names=['container'],
                    function=container_from_filename),
                name='ContainerPath')
        self.connect([
            (self.input_node, container_path, [('t1w', 'bids_or_caps_filename')]),
            (container_path, write_node, [(('container', fix_join, 't1_linear'), 'container')]),
            (self.output_node, get_ids, [('image_id', 'bids_file')]),
            (get_ids, write_node, [('subst_ls', 'substitutions')]),
            (get_ids, write_node, [('image_id_out', '@image_id')]),
            (self.output_node, write_node, [('outfile_reg', '@outfile_reg')]),
            (self.output_node, write_node, [('affine_mat', '@affine_mat')]),
            ])

        if (self.parameters.get('crop_image')):
            self.connect([
                (self.output_node, write_node, [('outfile_crop', '@outfile_crop')]),
                ])
Example #14
def smooth_images(write_dir, **template_dict):
    """This function runs smoothing on input images. Ex: modulated images"""
    from nipype.interfaces import spm
    from nipype.interfaces.io import DataSink
    smooth = pe.Node(interface=spm.Smooth(), name='smooth')
    smooth.inputs.paths = template_dict['spm_path']
    smooth.inputs.implicit_masking = template_dict['implicit_masking']
    smooth.inputs.in_files = glob.glob(os.path.join(write_dir, 'mwc*.nii'))
    smooth.inputs.fwhm = template_dict['FWHM_SMOOTH']
    vbm_smooth_modulated_images = pe.Workflow(
        name="vbm_smooth_modulated_images")
    datasink = pe.Node(interface=DataSink(), name='datasink')
    datasink.inputs.base_directory = write_dir
    vbm_smooth_modulated_images.connect([(smooth, datasink, [('smoothed_files',
                                                              write_dir)])])
    with stdchannel_redirected(sys.stderr, os.devnull):
        vbm_smooth_modulated_images.run()
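A brief usage sketch, assuming the module-level helpers the function relies on (`pe`, `glob`, `os`, `sys`, `stdchannel_redirected`) are available; the keyword arguments below are collected into `template_dict` and the values are placeholders:

# Hypothetical call with placeholder paths and parameters.
smooth_images('/data/vbm/outputs',
              spm_path='/opt/spm12',
              implicit_masking=False,
              FWHM_SMOOTH=[8, 8, 8])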
Example #15
def index_lesion_workflow(msid, mseid, lesion):
    import nipype.interfaces.ants as ants
    from nipype.pipeline.engine import Node, Workflow, MapNode
    from nipype.interfaces.io import DataSink, DataGrabber
    from nipype.interfaces.utility import IdentityInterface, Function
    import nipype.interfaces.fsl as fsl
    from nipype.utils.filemanip import load_json

    working_directory = '/working/henry_temp/keshavan/'
    output_directory = os.path.split(lesion)[0]

    register = Workflow(name="indexed_lesion_{0}_{1}".format(msid, mseid))
    register.base_dir = working_directory
    inputnode = Node(IdentityInterface(fields=["lesion"]), name="inputspec")
    inputnode.inputs.lesion = lesion

    bin_math = Node(fsl.BinaryMaths(), name="Convert_to_binary")
    bin_math.inputs.operand_value = 1
    bin_math.inputs.operation = 'min'
    register.connect(inputnode, "lesion", bin_math, "in_file")

    cluster_lesion = Node(fsl.Cluster(threshold=0.0001,
                                      out_index_file=True,
                                      use_mm=True),
                          name="cluster_lesion")

    sinker = Node(DataSink(), name="sinker")
    sinker.inputs.base_directory = output_directory
    sinker.inputs.container = '.'
    sinker.inputs.substitutions = [('_maths', '')]

    register.connect(bin_math, "out_file", cluster_lesion, "in_file")
    register.connect(cluster_lesion, "index_file", sinker, "@cluster")

    from nipype.interfaces.freesurfer import SegStats
    segstats_lesion = Node(SegStats(), name="segstats_lesion")
    register.connect(cluster_lesion, "index_file", segstats_lesion,
                     "segmentation_file")
    register.connect(segstats_lesion, "summary_file", sinker, "@summaryfile")

    register.write_graph(graph2use='orig')
    register.config["execution"] = {
        "keep_inputs": True,
        "remove_unnecessary_outputs": False
    }
    return register
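A brief usage sketch; the IDs and lesion path are placeholders:

# Hypothetical call; running the returned workflow clusters and indexes the lesion mask.
wf = index_lesion_workflow('ms0001', 'mse0001', '/data/lesions/lesion_mask.nii.gz')
wf.run()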
Example #16
def runNipypeBet(controller, subject_list, anatomical_id, proj_directory):

    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list)]

    #anat_file = opj('{subject_id}','{subject_id}_{anatomical_id}.nii')
    separator = ''
    concat_words = ('{subject_id}_', anatomical_id, '.nii.gz')
    anat_file_name = separator.join(concat_words)

    if controller.b_radiological_convention.get() == True:
        anat_file = opj('{subject_id}', anat_file_name)
    else:
        anat_file = opj('{subject_id}', 'Intermediate_Files', 'Original_Files',
                        anat_file_name)

    templates = {'anat': anat_file}

    selectfiles = Node(SelectFiles(templates, base_directory=proj_directory),
                       name="selectfiles")

    skullstrip = Node(BET(robust=True,
                          frac=0.5,
                          vertical_gradient=0,
                          output_type='NIFTI_GZ'),
                      name="skullstrip")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=proj_directory), name="datasink")

    wf_sub = Workflow(name="wf_sub")
    wf_sub.base_dir = proj_directory
    wf_sub.connect(infosource, "subject_id", selectfiles, "subject_id")
    wf_sub.connect(selectfiles, "anat", skullstrip, "in_file")
    wf_sub.connect(skullstrip, "out_file", datasink, "bet.@out_file")

    substitutions = [('%s_brain' % (anatomical_id), 'brain')]
    # Feed the substitution strings to the DataSink node
    datasink.inputs.substitutions = substitutions
    # Run the workflow with the substitutions in place
    wf_sub.run(plugin='MultiProc')

    return 'brain'
Example #17
def deface(in_file):
    deface_wf = pe.Workflow('deface_wf')
    inputnode = pe.Node(niu.IdentityInterface(['in_file']),
                     name='inputnode')
    # outputnode = pe.Node(niu.IdentityInterface(['out_file']),
    #                      name='outputnode')
    bet = pe.Node(BET(mask=True), name='bet')
    quickshear = pe.Node(Quickshear(), name='quickshear')
    sinker = pe.Node(DataSink(), name='store_results')
    sinker.inputs.base_directory = os.getcwd()

    deface_wf.connect([
        (inputnode, bet, [('in_file', 'in_file')]),
        (inputnode, quickshear, [('in_file', 'in_file')]),
        (bet, quickshear, [('mask_file', 'mask_file')]),
        (quickshear, sinker, [('out_file', '@')]),
    ])
    inputnode.inputs.in_file = in_file
    res = deface_wf.run()
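A one-line usage sketch; the input path is a placeholder, and the outputs land under the current working directory because the sink's base_directory is os.getcwd():

# Hypothetical call with a placeholder path.
deface('/data/bids/sub-01/anat/sub-01_T1w.nii.gz')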
Example #18
def cli(ctx, working_dir, name, results):
    if not ctx.obj:
        ctx.obj = {}
    if not working_dir:
        ctx.obj['wdir'] = os.path.abspath('.')
    else:
        ctx.obj['wdir'] = click.format_filename(working_dir)
    if not results:
        ctx.obj['output'] = 'trampolino'
    else:
        ctx.obj['output'] = results
    datasink = pe.Node(DataSink(base_directory=ctx.obj['wdir'],
                                container=ctx.obj['output']),
                       name="datasink")
    if not name:
        name = 'meta'
    wf = pe.Workflow(name=name, base_dir=ctx.obj['wdir'])
    wf.add_nodes([datasink])
    ctx.obj['workflow'] = wf
    ctx.obj['results'] = datasink
Example #19
def test_DataSink_inputs():
    input_map = dict(
        _outputs=dict(usedefault=True),
        base_directory=dict(),
        container=dict(),
        ignore_exception=dict(nohash=True, usedefault=True),
        parameterization=dict(usedefault=True),
        regexp_substitutions=dict(),
        remove_dest_dir=dict(usedefault=True),
        strip_dir=dict(),
        substitutions=dict(),
    )
    inputs = DataSink.input_spec()

    for key, metadata in input_map.items():
        for metakey, value in metadata.items():
            yield assert_equal, getattr(inputs.traits()[key], metakey), value
Example #20
def make_workflow(config, strip_standard, moving_image, fixed_image,
                  fixed_is_standard, output_dir):
    log.info('Creating Workflow...')
    # Declare and create output directories
    workflow_directory = '/flywheel/v0/nipype/workflow'
    Path(workflow_directory).mkdir(parents=True)

    wf = Workflow(name='moving2fixed', base_dir=workflow_directory)

    # Create the registration node
    registration_node = create_registration_node(config, moving_image)

    # If the fixed image is standard and we are to skullstrip it, do that
    log.debug('\tChecking if fixed is standard')
    if strip_standard and fixed_is_standard:
        log.debug('\t fixed is standard, and skull strip true.  Adding strip node')
        skullstrip_node = skullstrip_standard_node()
        log.debug('\tconnecting to wf')
        wf.connect(skullstrip_node, "output_product_image", registration_node, "fixed_image")
    else:
        log.debug('adding fixed image to registration node')
        registration_node.inputs.fixed_image = fixed_image

    # Create data sink for output
    log.debug('\tCreating sink')
    sink = Node(DataSink(), name='sinker')

    # Set the sink output to the flywheel output
    log.debug(f'\tSetting sink output to {output_dir}')
    sink.inputs.base_directory = str(output_dir)
    wf.connect(registration_node, 'warped_image', sink, 'Registered_image')
    if config['save_transform']:
        log.debug('\tsaving transformation parameters')
        wf.connect(registration_node, 'forward_warp_field', sink, 'forward_warp_field')
        wf.connect(registration_node, 'inverse_warp_field', sink, 'inverse_warp_field')
        wf.connect(registration_node, 'out_matrix', sink, 'out_matrix')

    log.info('...Complete!')

    return (wf)
Example #21
def create_corthick_wf():
    import nipype.pipeline.engine as pe  # pypeline engine
    import os
    from nipype import IdentityInterface
    from nipype.interfaces.ants.segmentation import KellyKapowski
    from nipype.interfaces.io import DataSink

    corthick_wf = pe.Workflow(name='corthick_wf')

    inputspec = pe.Node(IdentityInterface(
        fields=['seg_file', 'wmprob_file', 'out_dir'], mandatory_inputs=False),
                        name='inputspec')
    DiReCT = pe.Node(KellyKapowski(), name='DiReCt')
    sinker = pe.Node(DataSink(parameterization=True), name='sinker')

    corthick_wf.connect([
        (inputspec, DiReCT, [('seg_file', 'segmentation_image'),
                             ('wmprob_file', 'white_matter_prob_image')]),
        (inputspec, sinker, [('out_dir', 'base_directory')]),
        (DiReCT, sinker, [('cortical_thickness', 'out.@thick'),
                          ('warped_white_matter', 'out.@wm')]),
    ])
    return corthick_wf
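A short usage sketch for the workflow factory above; the identity inputs are set through the workflow and the paths are placeholders:

# Hypothetical usage: feed the inputspec fields, then run.
wf = create_corthick_wf()
wf.inputs.inputspec.seg_file = '/data/sub-01_seg.nii.gz'
wf.inputs.inputspec.wmprob_file = '/data/sub-01_wmprob.nii.gz'
wf.inputs.inputspec.out_dir = '/data/corthick_out'
wf.run()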
Example #22
def create_motion_confound_workflow(order=2,
                                    fd_cutoff=.2,
                                    name='motion_confound'):

    input_node = pe.Node(interface=IdentityInterface(
        fields=['par_file', 'output_directory', 'sub_id']),
                         name='inputspec')

    output_node = pe.Node(
        interface=IdentityInterface(fields=['out_fd', 'out_ext_moco']),
        name='outputspec')

    datasink = pe.Node(DataSink(), name='sinker')
    datasink.inputs.parameterization = False

    extend_motion_parameters = pe.MapNode(Extend_motion_parameters,
                                          iterfield=['par_file'],
                                          name='extend_motion_parameters')
    extend_motion_parameters.inputs.order = order

    framewise_disp = pe.MapNode(FramewiseDisplacement(parameter_source='FSL'),
                                iterfield=['in_file'],
                                name='framewise_disp')

    mcf_wf = pe.Workflow(name=name)
    mcf_wf.connect(input_node, 'output_directory', datasink, 'base_directory')
    mcf_wf.connect(input_node, 'sub_id', datasink, 'container')
    mcf_wf.connect(input_node, 'par_file', extend_motion_parameters,
                   'par_file')
    mcf_wf.connect(input_node, 'par_file', framewise_disp, 'in_file')
    mcf_wf.connect(extend_motion_parameters, 'out_ext', output_node,
                   'out_ext_moco')
    mcf_wf.connect(framewise_disp, 'out_file', output_node, 'out_fd')
    mcf_wf.connect(extend_motion_parameters, 'out_ext', datasink, 'confounds')
    mcf_wf.connect(framewise_disp, 'out_file', datasink, 'confounds.@df')

    return mcf_wf
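A short usage sketch; the inputspec fields are fed through the workflow, and the paths and IDs are placeholders:

# Hypothetical usage: set the identity inputs, then run.
wf = create_motion_confound_workflow(order=2, fd_cutoff=.2)
wf.inputs.inputspec.par_file = ['/data/sub-01/mc/prefiltered_func_data_mcf.par']
wf.inputs.inputspec.output_directory = '/data/derivatives'
wf.inputs.inputspec.sub_id = 'sub-01'
wf.run()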
Example #23
def create_volatlas_workflow(wf_name, wf_base_dir, subject_list, cnxn_names,
                             fstr_dict, ref_file, agg_cnxn_names_dict):

    from nipype.pipeline import engine as pe
    from nipype.pipeline.engine import Node, JoinNode, MapNode, Workflow

    from nipype.interfaces.utility import IdentityInterface
    from nipype.interfaces import Function

    from nipype.interfaces.io import DataSink
    """
  Variables
  """

    dpy_fstr = fstr_dict['dpy']
    warp_fstr = fstr_dict['warp']
    parc_fstr = fstr_dict['parc']
    cnxn_mapping_fstr = fstr_dict['cnxn_mapping']
    """
  Node: Infosource
  """

    # (iterates over subjects)
    def mirror(subject_id):
        return subject_id

    mirror_npfunc = Function(['subject_id'], ['subject_id'], mirror)

    node__infosource = Node(interface=mirror_npfunc, name="infosource")
    node__infosource.iterables = [("subject_id", subject_list)]
    """
  Node: Get data
  """

    # (also iterates over cnxn ids)

    def get_sub_files_func(subject_id, cnxn_name, dpy_fstr, warp_fstr,
                           parc_fstr, cnxn_mapping_fstr):
        dpy_file = dpy_fstr % subject_id
        cnxn_mapping_file = cnxn_mapping_fstr % subject_id
        parc_file = parc_fstr % subject_id
        warp_file = warp_fstr % subject_id

        return dpy_file, parc_file, warp_file, cnxn_mapping_file, cnxn_name, subject_id

    get_sub_files_npfunc = Function([
        'subject_id', 'cnxn_name', 'dpy_fstr', 'warp_fstr', 'parc_fstr',
        'cnxn_mapping_fstr'
    ], [
        'dpy_file', 'parc_file', 'warp_file', 'cnxn_mapping_file', 'cnxn_name',
        'subject_id'
    ], get_sub_files_func)

    node__datasource = Node(interface=get_sub_files_npfunc, name='datasource')
    node__datasource.inputs.dpy_fstr = dpy_fstr
    node__datasource.inputs.parc_fstr = parc_fstr
    node__datasource.inputs.warp_fstr = warp_fstr
    node__datasource.inputs.cnxn_mapping_fstr = cnxn_mapping_fstr
    node__datasource.iterables = [('cnxn_name', cnxn_names)]
    """
  Node: Make sub cnxn visitation map
  """

    make_sub_vismap_npfunc = Function([
        'sub', 'dpy_file', 'parc_file', 'warp_file', 'ref_file',
        'cnxn_inds_file', 'cnxn_name', 'vismap_fstr'
    ], ['sub_vismap_file'], make_sub_cnxn_visitation_map)

    node__makesubvismap = Node(interface=make_sub_vismap_npfunc,
                               name="make_sub_vismap")

    node__makesubvismap.inputs.ref_file = ref_file
    node__makesubvismap.inputs.vismap_fstr = 'temp_vismap_%s.nii.gz'
    #node__makesubvismap.inputs.overwrite=True
    """
  Node: make grp cnxn visitation map
  """

    make_grp_vismap_npfunc = Function(
        ['cnxn_name', 'sub_vismaps', 'grp_vismap_fstr', 'subs_list'],
        ['grp_vismap_fpath', 'grp_vismap_norm_fpath', 'subs_list_file'],
        make_group_cnxn_visitation_map)

    node__makegrpvismap = JoinNode(interface=make_grp_vismap_npfunc,
                                   name='make_grp_vismap',
                                   joinsource="infosource",
                                   joinfield=["sub_vismaps",
                                              'subs_list'])  #subject_id")

    node__makegrpvismap.inputs.grp_vismap_fstr = 'grp_vismap_%s.nii.gz'  # this needs to be changed to come from previous node in wf
    """
  Node: aggregate group cnxn visitation map
  """
    # (to do...)

    agg_grp_vismap_npfunc = Function(['in_files', 'cnxn_names', 'outfname'],
                                     ['agg_image_file', 'agg_list_file'],
                                     aggregate_grp_vismap,
                                     imports=['import os'])

    node__agggrpvismap = JoinNode(interface=agg_grp_vismap_npfunc,
                                  name='agg_grp_vismap',
                                  joinsource="datasource",
                                  joinfield=["in_files"])

    node__agggrpvismap.iterables = [("cnxn_names",
                                     agg_cnxn_names_dict.values()),
                                    ("outfname", agg_cnxn_names_dict.keys())]
    node__agggrpvismap.synchronize = True
    """
  Node: datasink
  """
    # I want to use a mapnode for this, but can't get it to work
    # so have to settle with this followed by a command line copy...

    # (if you don't have a mapnode, just get same result as outputs of agggrpvismap node...)
    node__datasink = Node(DataSink(), name='datasink')
    node__datasink.inputs.base_directory = wf_base_dir

    #node__datasinkniifile = MapNode(DataSink(infields=['agg_image_file']),name='ds_nii', iterfield=['agg_image_file'])
    #node__datasinkniifile.inputs.base_directory=wf_base_dir
    #node__datasinktxtfile = MapNode(DataSink(infields=['agg_list_file']),name='ds_txt', iterfield=['agg_list_file'])
    #node__datasinktxtfile.inputs.base_directory=wf_base_dir
    """
  Workflow: put it all together
  """

    wf = pe.Workflow(name=wf_name)
    wf.base_dir = wf_base_dir

    wf.connect(node__infosource, 'subject_id', node__datasource, 'subject_id')
    wf.connect(node__datasource, 'subject_id', node__makesubvismap, 'sub')
    wf.connect(node__datasource, 'dpy_file', node__makesubvismap, 'dpy_file')
    wf.connect(node__datasource, 'parc_file', node__makesubvismap, 'parc_file')
    wf.connect(node__datasource, 'warp_file', node__makesubvismap, 'warp_file')
    wf.connect(node__datasource, 'cnxn_mapping_file', node__makesubvismap,
               'cnxn_inds_file')
    wf.connect(node__datasource, 'cnxn_name', node__makesubvismap, 'cnxn_name')
    wf.connect(node__makesubvismap, 'sub_vismap_file', node__makegrpvismap,
               'sub_vismaps')
    wf.connect(node__datasource, 'cnxn_name', node__makegrpvismap, 'cnxn_name')
    wf.connect(node__datasource, 'subject_id', node__makegrpvismap,
               'subs_list')

    wf.connect(node__makegrpvismap, 'grp_vismap_norm_fpath',
               node__agggrpvismap, 'in_files')

    wf.connect(node__agggrpvismap, 'agg_image_file', node__datasink,
               '@agg_image_file')
    wf.connect(node__agggrpvismap, 'agg_list_file', node__datasink,
               '@agg_list_file')

    #wf.connect(node__agggrpvismap, 'agg_image_file', node__datasinkniifile, '@agg_image_file')
    #wf.connect(node__agggrpvismap, 'agg_list_file',  node__datasinktxtfile, '@agg_list_file')

    return wf
Example #24
def create_all_calcarine_reward_2_h5_workflow(
        analysis_info, name='all_calcarine_reward_nii_2_h5'):
    import os.path as op
    import tempfile
    import nipype.pipeline as pe
    from nipype.interfaces import fsl
    from nipype.interfaces.utility import Function, Merge, IdentityInterface
    from spynoza.nodes.utils import get_scaninfo, dyns_min_1, topup_scan_params, apply_scan_params
    from nipype.interfaces.io import SelectFiles, DataSink

    # Importing of custom nodes from spynoza packages; assumes that spynoza is installed:
    # pip install git+https://github.com/spinoza-centre/spynoza.git@develop
    from utils.utils import mask_nii_2_hdf5, combine_eye_hdfs_to_nii_hdf

    input_node = pe.Node(
        IdentityInterface(fields=['sub_id', 'preprocessed_data_dir']),
        name='inputspec')

    # i/o node
    datasource_templates = dict(mcf='{sub_id}/mcf/*.nii.gz',
                                psc='{sub_id}/psc/*.nii.gz',
                                tf='{sub_id}/tf/*.nii.gz',
                                GLM='{sub_id}/GLM/*.nii.gz',
                                eye='{sub_id}/eye/h5/*.h5',
                                rois='{sub_id}/roi/*_vol.nii.gz')
    datasource = pe.Node(SelectFiles(datasource_templates,
                                     sort_filelist=True,
                                     raise_on_empty=False),
                         name='datasource')

    hdf5_psc_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                              name='hdf5_psc_masker')
    hdf5_psc_masker.inputs.folder_alias = 'psc'
    hdf5_psc_masker.inputs.hdf5_file = op.join(tempfile.mkdtemp(), 'roi.h5')

    hdf5_tf_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                             name='hdf5_tf_masker')
    hdf5_tf_masker.inputs.folder_alias = 'tf'
    hdf5_psc_masker.inputs.hdf5_file = op.join(tempfile.mkdtemp(), 'roi.h5')

    hdf5_mcf_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                              name='hdf5_mcf_masker')
    hdf5_mcf_masker.inputs.folder_alias = 'mcf'

    hdf5_GLM_masker = pe.Node(Function(
        input_names=['in_files', 'mask_files', 'hdf5_file', 'folder_alias'],
        output_names=['hdf5_file'],
        function=mask_nii_2_hdf5),
                              name='hdf5_GLM_masker')
    hdf5_GLM_masker.inputs.folder_alias = 'GLM'

    eye_hdfs_to_nii_masker = pe.Node(Function(
        input_names=['nii_hdf5_file', 'eye_hdf_filelist', 'new_alias'],
        output_names=['nii_hdf5_file'],
        function=combine_eye_hdfs_to_nii_hdf),
                                     name='eye_hdfs_to_nii_masker')
    eye_hdfs_to_nii_masker.inputs.new_alias = 'eye'

    # node for datasinking
    datasink = pe.Node(DataSink(), name='sinker')
    datasink.inputs.parameterization = False

    all_calcarine_reward_nii_2_h5_workflow = pe.Workflow(name=name)

    all_calcarine_reward_nii_2_h5_workflow.connect(input_node,
                                                   'preprocessed_data_dir',
                                                   datasink, 'base_directory')
    all_calcarine_reward_nii_2_h5_workflow.connect(input_node, 'sub_id',
                                                   datasink, 'container')

    all_calcarine_reward_nii_2_h5_workflow.connect(input_node,
                                                   'preprocessed_data_dir',
                                                   datasource,
                                                   'base_directory')
    all_calcarine_reward_nii_2_h5_workflow.connect(input_node, 'sub_id',
                                                   datasource, 'sub_id')

    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'psc',
                                                   hdf5_psc_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_psc_masker,
                                                   'mask_files')

    # the hdf5_file is created by the psc node, and then passed from masker to masker on into the datasink.
    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_psc_masker,
                                                   'hdf5_file', hdf5_tf_masker,
                                                   'hdf5_file')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'tf',
                                                   hdf5_tf_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_tf_masker,
                                                   'mask_files')

    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_tf_masker, 'hdf5_file',
                                                   hdf5_mcf_masker,
                                                   'hdf5_file')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'mcf',
                                                   hdf5_mcf_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_mcf_masker,
                                                   'mask_files')

    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'GLM',
                                                   hdf5_GLM_masker, 'in_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'rois',
                                                   hdf5_GLM_masker,
                                                   'mask_files')
    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_mcf_masker,
                                                   'hdf5_file',
                                                   hdf5_GLM_masker,
                                                   'hdf5_file')

    all_calcarine_reward_nii_2_h5_workflow.connect(hdf5_GLM_masker,
                                                   'hdf5_file',
                                                   eye_hdfs_to_nii_masker,
                                                   'nii_hdf5_file')
    all_calcarine_reward_nii_2_h5_workflow.connect(datasource, 'eye',
                                                   eye_hdfs_to_nii_masker,
                                                   'eye_hdf_filelist')

    all_calcarine_reward_nii_2_h5_workflow.connect(eye_hdfs_to_nii_masker,
                                                   'nii_hdf5_file', datasink,
                                                   'h5')

    return all_calcarine_reward_nii_2_h5_workflow
Example #25
copemerge = Node(fsl.Merge(dimension='t', in_files=listCopeFiles),
                 name="copemerge")

# merging varcope files
varcopemerge = Node(fsl.Merge(dimension='t', in_files=listVarcopeFiles),
                    name="varcopemerge")

# merging mask files
maskmerge = Node(fsl.Merge(dimension='t', in_files=listMaskFiles),
                 name="maskmerge")

# calculating the minimum across time points on merged mask image
minmask = Node(fsl.MinImage(), name="minmask")

# creating datasink to collect outputs
datasink = Node(DataSink(base_directory=os.path.join(
    outDir, 'FingerFootLips_Test_Cope6_Handedness')),
                name='datasink')

###########
#
# SETTING UP THE WORKFLOW NODES
#
###########

# creating the workflow
secondLevel = Workflow(name="Level2", base_dir=outDir)

# connecting nodes
secondLevel.connect(level2design, 'design_mat', flameo, 'design_file')
secondLevel.connect(level2design, 'design_con', flameo, 't_con_file')
secondLevel.connect(level2design, 'design_grp', flameo, 'cov_split_file')
Example #26
            'fwhm-{fwhm_id}_sasub-{subject_id}_task-{task_name}_bold.nii'),
        'anat':
        opj('/data/wellbeing_analysis/datasink/preproc', 'sub-{subject_id}',
            'task-{task_name}', 'sub-{subject_id}_T1w_brain.nii.gz'),
        'transform':
        opj('/data/wellbeing_analysis/datasink/antsreg', 'sub-{subject_id}',
            'transformComposite.h5')
    }

    selectfiles = Node(SelectFiles(templates,
                                   base_directory=experiment_dir,
                                   sort_filelist=True),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    # Use the following DataSink output substitutions
    substitutions = [
        ('_fwhm_id_%s_subject_id_%s_task_name_empathy/_apply_norm_anat0' %
         (f, sub), 'sub-%s/anat/' % (sub)) for f in fwhm
        for sub in subject_list
    ]
    subjFolders = [
        ('_fwhm_id_%s_subject_id_%s_task_name_empathy/_apply_norm_bold0' %
         (f, sub), 'sub-%s/bold/task-empathy/' % (sub)) for f in fwhm
        for sub in subject_list
    ]

    substitutions.extend(subjFolders)
Example #27
def create_workflow(files,
                    target_file,
                    subject_id,
                    TR,
                    slice_times,
                    norm_threshold=1,
                    num_components=5,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    subjects_dir=None,
                    sink_directory=os.getcwd(),
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Rename files in case they are named identically
    name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
                          iterfield=['in_file', 'run'],
                          name='rename')
    name_unique.inputs.keep_ext = True
    name_unique.inputs.run = list(range(1, len(files) + 1))
    name_unique.inputs.in_file = files

    realign = Node(interface=spm.Realign(), name="realign")
    realign.inputs.jobtype = 'estwrite'

    num_slices = len(slice_times)
    slice_timing = Node(interface=spm.SliceTiming(), name="slice_timing")
    slice_timing.inputs.num_slices = num_slices
    slice_timing.inputs.time_repetition = TR
    slice_timing.inputs.time_acquisition = TR - TR / float(num_slices)
    slice_timing.inputs.slice_order = (np.argsort(slice_times) + 1).tolist()
    slice_timing.inputs.ref_slice = int(num_slices / 2)

    # Compute TSNR on realigned data, regressing out polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(slice_timing, 'timecorrected_files', tsnr, 'in_file')

    # Compute the median image across runs
    calc_median = Node(Function(input_names=['in_files'],
                                output_names=['median_file'],
                                function=median,
                                imports=imports),
                       name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
    """Segment and Register
    """

    registration = create_reg_workflow(name='registration')
    wf.connect(calc_median, 'median_file', registration,
               'inputspec.mean_image')
    registration.inputs.inputspec.subject_id = subject_id
    registration.inputs.inputspec.subjects_dir = subjects_dir
    registration.inputs.inputspec.target_image = target_file
    """Use :class:`nipype.algorithms.rapidart` to determine which of the
    images in the functional series are outliers based on deviations in
    intensity or movement.
    """

    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, True]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = norm_threshold
    art.inputs.zintensity_threshold = 9
    art.inputs.mask_type = 'spm_global'
    art.inputs.parameter_source = 'SPM'
    """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
    to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
    voxel sizes.
    """

    wf.connect([
        (name_unique, realign, [('out_file', 'in_files')]),
        (realign, slice_timing, [('realigned_files', 'in_files')]),
        (slice_timing, art, [('timecorrected_files', 'realigned_files')]),
        (realign, art, [('realignment_parameters', 'realignment_parameters')]),
    ])

    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(
            np.array(filename_to_list(files))[idx].tolist())

    mask = Node(fsl.BET(), name='getmask')
    mask.inputs.mask = True
    wf.connect(calc_median, 'median_file', mask, 'in_file')

    # get segmentation in normalized functional space

    def merge_files(in1, in2):
        out_files = filename_to_list(in1)
        out_files.extend(filename_to_list(in2))
        return out_files

    # filter some noise

    # Compute motion regressors
    motreg = Node(Function(
        input_names=['motion_params', 'order', 'derivatives'],
        output_names=['out_files'],
        function=motion_regressors,
        imports=imports),
                  name='getmotionregress')
    wf.connect(realign, 'realignment_parameters', motreg, 'motion_params')

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=build_filter1,
        imports=imports),
                         name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')

    filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii',
                              out_pf_name='pF_mcart.nii',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filtermotion')

    wf.connect(slice_timing, 'timecorrected_files', filter1, 'in_file')
    wf.connect(slice_timing, ('timecorrected_files', rename, '_filtermotart'),
               filter1, 'out_res_name')
    wf.connect(createfilter1, 'out_files', filter1, 'design')

    createfilter2 = MapNode(Function(input_names=[
        'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
    ],
                                     output_names=['out_files'],
                                     function=extract_noise_components,
                                     imports=imports),
                            iterfield=['realigned_file', 'extra_regressors'],
                            name='makecompcorrfilter')
    createfilter2.inputs.num_components = num_components

    wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(registration,
               ('outputspec.segmentation_files', selectindex, [0, 2]),
               createfilter2, 'mask_file')

    filter2 = MapNode(fsl.GLM(out_f_name='F.nii',
                              out_pf_name='pF.nii',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filter_noise_nosmooth')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2,
               'out_res_name')
    wf.connect(createfilter2, 'out_files', filter2, 'design')
    wf.connect(mask, 'mask_file', filter2, 'mask')

    bandpass = Node(Function(
        input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'],
        output_names=['out_files'],
        function=bandpass_filter,
        imports=imports),
                    name='bandpass_unsmooth')
    bandpass.inputs.fs = 1. / TR
    bandpass.inputs.highpass_freq = highpass_freq
    bandpass.inputs.lowpass_freq = lowpass_freq
    wf.connect(filter2, 'out_res', bandpass, 'files')
    """Smooth the functional data using
    :class:`nipype.interfaces.spm.Smooth`.
    """

    smooth = Node(interface=spm.Smooth(), name="smooth")
    smooth.inputs.fwhm = vol_fwhm

    wf.connect(bandpass, 'out_files', smooth, 'in_files')

    collector = Node(Merge(2), name='collect_streams')
    wf.connect(smooth, 'smoothed_files', collector, 'in1')
    wf.connect(bandpass, 'out_files', collector, 'in2')
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = MapNode(ants.ApplyTransforms(),
                      iterfield=['input_image'],
                      name='warpall')
    warpall.inputs.input_image_type = 3
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = 'file'
    warpall.inputs.reference_image = target_file
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 1

    # transform to target
    wf.connect(collector, 'out', warpall, 'input_image')
    wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')

    mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')

    wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')

    maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
    wf.connect(warpall, 'output_image', maskts, 'in_file')
    wf.connect(mask_target, 'out_file', maskts, 'mask_file')

    # map to surface
    # extract aparc+aseg ROIs
    # extract subcortical ROIs
    # extract target space ROIs
    # combine subcortical and cortical rois into a single cifti file

    #######
    # Convert aparc to subject functional space

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(
        freesurfer.SegStats(default_color_table=True),
        iterfield=['in_file', 'summary_file', 'avgwf_txt_file'],
        name='aparc_ts')
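    # aseg subcortical label IDs plus aparc cortical labels
    # (1001-1035 left hemisphere, 2001-2035 right hemisphere)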
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) +
                                     [17, 18, 26, 47] + list(range(49, 55)) +
                                     [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))

    wf.connect(registration, 'outputspec.aparc', sampleaparc,
               'segmentation_file')
    wf.connect(collector, 'out', sampleaparc, 'in_file')

    def get_names(files, suffix):
        """Append `suffix` to the base name of each input file."""
        from nipype.utils.filemanip import split_filename, list_to_filename
        out_names = []
        for filename in files:
            _, name, _ = split_filename(filename)
            out_names.append(name + suffix)
        return list_to_filename(out_names)

    wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc,
               'avgwf_txt_file')
    wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc,
               'summary_file')

    # Sample the time series onto the surface of the target subject; sampling is
    # done separately for the left and right hemispheres
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
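    # i.e. average over samples taken from 10% to 90% of cortical depth in 10% steps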
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    samplerlh.inputs.smooth_surf = surf_fwhm
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = subjects_dir

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(collector, 'out', samplerlh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(collector, 'out', samplerrh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(Function(
        input_names=['timeseries_file', 'label_file', 'indices'],
        output_names=['out_file'],
        function=extract_subrois,
        imports=imports),
                     iterfield=['timeseries_file'],
                     name='getsubcortts')
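    # FreeSurfer aseg label IDs: cerebellar cortex, thalamus, caudate, putamen,
    # pallidum, hippocampus, amygdala and accumbens in both hemispheres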
    ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
        list(range(49, 55)) + [58]
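    # os.path.abspath resolves relative to the current working directory, so the
    # OASIS-TRT label volume is expected to be available there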
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm_v2.nii.gz'))
    wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')

    ######

    substitutions = [('_target_subject_', ''),
                     ('_filtermotart_cleaned_bp_trans_masked', ''),
                     ('_filtermotart_cleaned_bp', '')]
    regex_subs = [
        ('_ts_masker.*/sar', '/smooth/'),
        ('_ts_masker.*/ar', '/unsmooth/'),
        ('_combiner.*/sar', '/smooth/'),
        ('_combiner.*/ar', '/unsmooth/'),
        ('_aparc_ts.*/sar', '/smooth/'),
        ('_aparc_ts.*/ar', '/unsmooth/'),
        ('_getsubcortts.*/sar', '/smooth/'),
        ('_getsubcortts.*/ar', '/unsmooth/'),
        ('series/sar', 'series/smooth/'),
        ('series/ar', 'series/unsmooth/'),
        ('_inverse_transform./', ''),
    ]
    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = substitutions
    datasink.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(realign, 'realignment_parameters', datasink,
               'resting.qa.motion')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.segmentation_files', datasink,
               'resting.mask_files')
    wf.connect(registration, 'outputspec.anat2target', datasink,
               'resting.qa.ants')
    wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
    wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
    wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
    wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
    wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
    wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
    wf.connect(bandpass, 'out_files', datasink,
               'resting.timeseries.@bandpassed')
    wf.connect(smooth, 'smoothed_files', datasink,
               'resting.timeseries.@smoothed')
    wf.connect(createfilter1, 'out_files', datasink,
               'resting.regress.@regressors')
    wf.connect(createfilter2, 'out_files', datasink,
               'resting.regress.@compcorr')
    wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
    wf.connect(sampleaparc, 'summary_file', datasink,
               'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file', datasink,
               'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file', datasink,
               'resting.parcellations.grayo.@subcortical')

    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = substitutions
    datasink2.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file', datasink2,
               'resting.parcellations.grayo.@surface')
    return wf
Example #28
0
def smoothing_skullstrip(
    fmriprep_dir,
    output_dir,
    work_dir,
    subject_list,
    task,
    run,
    fwhm=6.0,
    name="smoothing_skullstrip",
):
    """
    FSL smooth fMRIprep output
    """
    workflow = pe.Workflow(name=name)
    workflow.base_dir = work_dir

    template = {
        "bolds": "sub-{subject}/func/sub-{subject}_task-{task}_run-{run}_space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz",
        "mask": "sub-{subject}/func/sub-{subject}_task-{task}_run-{run}_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz",
    }

    bg = pe.Node(SelectFiles(template, base_directory=fmriprep_dir), name="datagrabber")
    bg.iterables = [("subject", subject_list), ("task", task), ("run", run)]

    # Create DataSink object
    sinker = pe.Node(DataSink(), name="sinker")
    sinker.inputs.base_directory = output_dir
    sinker.inputs.substitutions = [
        ("_run_1_subject_", "sub-"),
        ("_skip0", "func"),
        ("desc-preproc_bold_smooth_masked_roi", f"desc-preproc-fwhm{int(fwhm)}mm_bold"),
    ]

    # Smoothing
    susan = create_susan_smooth()
    susan.inputs.inputnode.fwhm = fwhm

    # mask the smoothed output
    # note: the susan workflow returns a list of files, but ApplyMask only accepts
    # a single path, hence the MapNode
    mask_results = pe.MapNode(
        ApplyMask(), name="mask_results", iterfield=["in_file", "mask_file"]
    )

    # remove first five volumes
    skip = pe.MapNode(fsl.ExtractROI(), name="skip", iterfield=["in_file"])
    skip.inputs.t_min = 5
    skip.inputs.t_size = -1

    workflow.connect(
        [
            (
                bg,
                susan,
                [("bolds", "inputnode.in_files"), ("mask", "inputnode.mask_file")],
            ),
            (bg, mask_results, [("mask", "mask_file")]),
            (susan, mask_results, [("outputnode.smoothed_files", "in_file")]),
            (mask_results, skip, [("out_file", "in_file")]),
            (skip, sinker, [("roi_file", f"func_smooth-{int(fwhm)}mm.@out_file")]),
        ]
    )
    return workflow
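# A hedged usage sketch (paths, subject labels, task and run values are
# hypothetical, not taken from the example above): build the workflow and run it
# with nipype's multiprocessing plugin.
wf = smoothing_skullstrip(
    fmriprep_dir="/data/derivatives/fmriprep",
    output_dir="/data/derivatives/smoothing",
    work_dir="/scratch/work",
    subject_list=["01", "02"],
    task=["rest"],
    run=["1"],
    fwhm=6.0,
)
wf.run(plugin="MultiProc")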
# In[4]:

templates = {
    'all_skeleton': 'Waxholm_Template/*/{map_id}/All_*_skeletonised.nii.gz',
    'skeleton_mask': 'Waxholm_Template/*/{map_id}/mean_FA_skeleton_mask.nii.gz',
    'all_image': 'Waxholm_Template/*/{map_id}/All_{map_id}_WAX.nii.gz',
    'mean_FA': 'Waxholm_Template/*/{map_id}/mean_FA.nii.gz',
}

selectfiles = Node(SelectFiles(templates, base_directory=experiment_dir),
                   name="selectfiles")
#-----------------------------------------------------------------------------------------------------
# In[5]:

datasink = Node(DataSink(), name='datasink')
datasink.inputs.container = output_dir
datasink.inputs.base_directory = experiment_dir

substitutions = [('_map_id_', '')]

datasink.inputs.substitutions = substitutions

#-----------------------------------------------------------------------------------------------------
#Design with two contrasts only

design = '/home/in/aeed/TBSS/Design_TBSS.mat'
contrast = '/home/in/aeed/TBSS/Design_TBSS.con'

#-----------------------------------------------------------------------------------------------------
#randomise on the skeletonised data
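# A hedged sketch (the node itself does not appear in the snippet; the parameter
# values are assumptions): nipype wraps FSL randomise as fsl.Randomise, which
# would take the skeletonised 4D image and skeleton mask selected above together
# with the design and contrast files defined here.
randomise = Node(fsl.Randomise(design_mat=design,
                               tcon=contrast,
                               num_perm=500,
                               tfce2D=True),
                 name='randomise')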
level1design = Node(fsl.Level1Design(bases={'dgamma':{'derivs': True}},
                                     interscan_interval=TR,
                                     model_serial_correlations=True,
                                     contrasts=contrast_list),
                    name="level1design")

# creating all the other files necessary to run the model
modelgen = Node(fsl.FEATModel(),
                name='modelgen')

# then running through FEAT
feat = Node(fsl.FEAT(),
            name="feat")

# creating datasink to collect outputs
datasink = Node(DataSink(base_directory=outDir),
                name='datasink')

## Use the following DataSink output substitutions
substitutions = [('_subject_id_', 'sub-'),
                 ('_subsession_id_', '/ses-')
                 ]

datasink.inputs.substitutions = substitutions
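# A hedged sketch (the workflow object and connections below are assumptions, not
# part of the original snippet): Level1Design's fsf/ev files typically drive
# FEATModel and FEAT, with the resulting FEAT directory collected by the datasink.
from nipype import Workflow
analysis = Workflow(name='first_level')
analysis.connect([
    (level1design, modelgen, [('fsf_files', 'fsf_file'),
                              ('ev_files', 'ev_files')]),
    (level1design, feat, [('fsf_files', 'fsf_file')]),
    (feat, datasink, [('feat_dir', 'feat.@feat_dir')]),
])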

###########
#
# SETTING UP THE WORKFLOW NODES
#
###########