Code example #1
def stub_wf(*args, **kwargs):
    wflow = Workflow(name='realigner')
    inputnode = Node(IdentityInterface(fields=['func']), name='inputspec')
    outputnode = Node(
        interface=IdentityInterface(fields=['realigned_file']),
        name='outputspec')
    wflow.connect(inputnode, 'func', outputnode, 'realigned_file')
    return wflow
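Usage sketch (not from the original project): the snippets in this listing presuppose nipype's standard imports, roughly the following, plus project-specific helpers such as setup_node that are not shown. The input file name below is hypothetical; the stub simply forwards it unchanged.

import nipype.pipeline.engine as pe
from nipype.pipeline.engine import Node, Workflow
from nipype.interfaces.utility import IdentityInterface, Function
from nipype.interfaces.io import DataSink, SelectFiles

wf = stub_wf()
wf.inputs.inputspec.func = 'rest.nii.gz'  # hypothetical input, passed straight through to 'realigned_file'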
Code example #2
def fristons_twenty_four_wf(wf_name='fristons_twenty_four'):
    """ The main purpose of this workflow is to calculate 24 parameters including
    the 6 motion parameters of the current volume and the preceeding volume,
    plus each of these values squared.

    Parameters
    ----------
    wf_name: str
        Workflow name

    Returns
    -------
    wf: workflow object

    Nipype Inputs
    -------------
    f24_input.in_file: str
        Path to the input movement file from motion correction.

    Nipype Outputs
    --------------
    f24_output.out_file: str
        Path to the 1D file containing the Friston-24 parameters.

    References
    ----------
    .. [1] Friston, K. J., Williams, S., Howard, R., Frackowiak, R. S., & Turner, R. (1996).
           Movement-related effects in fMRI time-series. Magnetic Resonance in Medicine, 35(3), 346-355.
    """
    wf = pe.Workflow(name=wf_name)

    # specify input and output fields
    in_fields = [
        "in_file",
    ]

    out_fields = [
        "out_file",
    ]

    f24_input = setup_node(IdentityInterface(fields=in_fields,
                                             mandatory_inputs=True),
                           name='f24_input')

    calc_friston = setup_node(Function(input_names=['in_file'],
                                       output_names=['out_file'],
                                       function=calc_friston_twenty_four),
                              name='calc_friston')

    f24_output = setup_node(IdentityInterface(fields=out_fields),
                            name='f24_output')

    # Connect the nodes
    wf.connect([
        (f24_input, calc_friston, [("in_file", "in_file")]),
        (calc_friston, f24_output, [("out_file", "out_file")]),
    ])
    return wf
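Usage sketch (hypothetical, assuming the module's own helpers such as setup_node and calc_friston_twenty_four are importable):

wf = fristons_twenty_four_wf(wf_name='friston24')
wf.inputs.f24_input.in_file = 'rest_mc.1D'  # hypothetical motion-parameter file from motion correction
wf.run()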
Code example #3
File: decompose.py  Project: erramuzpe/pypes
def attach_canica(main_wf, wf_name="canica", **kwargs):
    """ Attach a nilearn CanICA interface to `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    kwargs: dict[str]->str
        input_node: str
            Name of the input node from which to connect the source `input_connection`.

        input_connection: str
            Name of the connection to obtain the source files.

    Nipype Inputs for `main_wf`
    ---------------------------
    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    srcwf_name   = kwargs['input_node']
    srcconn_name = kwargs['input_connection']

    src_wf   = main_wf.get_node(srcwf_name)
    datasink = get_datasink(main_wf, name='datasink')

    base_outdir  = datasink.inputs.base_directory
    ica_datasink = pe.Node(DataSink(parameterization=False,
                                    base_directory=base_outdir,),
                           name="{}_datasink".format(wf_name))

    # the list of the subjects' files
    ica_subjs = pe.JoinNode(interface=IdentityInterface(fields=["ica_subjs"]),
                            joinsource="infosrc",
                            joinfield="ica_subjs",
                            name="ica_subjs")

    # run group ICA (CanICA) on the subject file list
    ica = setup_node(CanICAInterface(), name="{}_ica".format(wf_name),)

    # Connect the nodes
    main_wf.connect([
                     # file list input
                     (src_wf,     ica_subjs, [(srcconn_name, "ica_subjs")]),

                     # canica
                     (ica_subjs,  ica,    [("ica_subjs",  "in_files")]),

                     # canica output
                     (ica, ica_datasink,  [("components", "canica.@components")]),
                     (ica, ica_datasink,  [("loadings",   "canica.@loadings")]),
                     (ica, ica_datasink,  [("score",      "canica.@score")]),
                   ])
    return main_wf
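Usage sketch (hypothetical; the source node and connection names depend on the enclosing pipeline):

main_wf = attach_canica(
    main_wf,
    wf_name='canica',
    input_node='spm_warp_fmri',                   # hypothetical source workflow name
    input_connection='warp_output.warped_files',  # hypothetical source connection
)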
Code example #4
File: test_reportlets.py  Project: pndni/PipelineQC
def test_crash(tmp_path):
    fakenode = pe.Node(IdentityInterface(['field1']), 'name')
    traceback = ['test\n', 'string\n']
    pklfile = tmp_path / 'test.pklz'
    outfile = tmp_path / 'out.txt'
    nputils.filemanip.savepkl(str(pklfile), {
        'node': fakenode,
        'traceback': traceback
    },
                              versioning=True)
    reportlets.crash(name='testcrash',
                     crashfiles=[str(pklfile)],
                     out_file=str(outfile))
    outstr = outfile.read_text()
    assert 'class="crash"' in outstr
    assert 'class="success"' not in outstr
    reportlets.crash(name='testcrash', crashfiles=[], out_file=str(outfile))
    outstr = outfile.read_text()
    assert 'class="crash"' not in outstr
    assert 'class="success"' in outstr
    i = interfaces.Crash(name='testcrash', crashfiles=[str(pklfile)])
    r = i.run()
    outstr = Path(r.outputs.out_file).read_text()
    assert 'class="crash"' in outstr
    assert 'class="success"' not in outstr
Code example #5
def stub_node_factory(*args, **kwargs):
    if 'name' not in kwargs:
        raise Exception("stub_node_factory requires a 'name' keyword argument")
    name = kwargs['name']
    if name == 'compcor':
        return Node(*args, **kwargs)
    else:  # replace with an IdentityInterface
        return Node(IdentityInterface(fields=ALL_FIELDS), name=name)
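The factory keeps only the 'compcor' node real and stubs everything else, which suggests it is meant to be patched in for Node while unit-testing a workflow builder. A hedged sketch with unittest.mock (the patch target, builder function, and ALL_FIELDS all depend on the module under test):

from unittest import mock

# patch Node where the module under test looks it up (hypothetical path)
with mock.patch('my_pipeline.workflows.Node', side_effect=stub_node_factory):
    wf = build_compcor_workflow()  # hypothetical builder; only 'compcor' is a real Node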
Code example #6
def attach_spm_fmri_grouptemplate_wf(main_wf, wf_name='spm_epi_grouptemplate'):
    """ Attach a fMRI pre-processing workflow that uses SPM12 to `main_wf`.
    This workflow picks all spm_fmri_preproc outputs 'fmri_output.warped_files' in `main_wf`
    to create a group template.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow


    Nipype Inputs
    -------------
    rest_input.in_file: traits.File
        The slice time and motion corrected fMRI file.

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: `main_wf` is expected to have `input_files` and `datasink` nodes.

    rest_output.avg_epi_mni: input node

    datasink: nipype Node

    spm_rest_preproc_mni: nipype Workflow

    Nipype Outputs
    --------------
    group_template.fmri_template: file
        The path to the fMRI group template.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - fmri_cleanup: for the `rest_output.avg_epi` output

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    fmri_cleanup_wf = get_subworkflow(main_wf, 'fmri_cleanup')

    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf, name='datasink')

    # The base name of the 'rest' file for the substitutions
    fmri_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'rest')))

    # the group template datasink
    base_outdir = datasink.inputs.base_directory
    grp_datasink = pe.Node(
        DataSink(parameterization=False, base_directory=base_outdir),
        name='{}_grouptemplate_datasink'.format(fmri_fbasename))
    grp_datasink.inputs.container = '{}_grouptemplate'.format(fmri_fbasename)

    # the list of the average EPIs from all the subjects
    # avg_epi_map = pe.MapNode(IdentityInterface(fields=['avg_epis']), iterfield=['avg_epis'], name='avg_epi_map')

    avg_epis = pe.JoinNode(IdentityInterface(fields=['avg_epis']),
                           joinsource='infosrc',
                           joinfield='avg_epis',
                           name='avg_epis')

    # directly warp the avg EPI to the SPM standard template
    warp_epis = spm_warp_to_mni("spm_warp_avgepi_to_mni")

    # the group template workflow
    template_wf = spm_create_group_template_wf(wf_name)

    # output node
    output = setup_node(IdentityInterface(fields=['fmri_template']),
                        name='group_template')

    # group dataSink output substitutions
    regexp_subst = [
        (r'/wgrptemplate{fmri}_merged_mean_smooth.nii$',
         '/{fmri}_grouptemplate_mni.nii'),
        (r'/w{fmri}_merged_mean_smooth.nii$', '/{fmri}_grouptemplate_mni.nii'),
    ]
    regexp_subst = format_pair_list(regexp_subst, fmri=fmri_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    grp_datasink.inputs.regexp_substitutions = extend_trait_list(
        grp_datasink.inputs.regexp_substitutions, regexp_subst)

    # Connect the nodes
    main_wf.connect([
        # the avg EPI inputs
        (fmri_cleanup_wf, avg_epis, [('rest_output.avg_epi', 'avg_epis')]),

        # warp avg EPIs to MNI
        (avg_epis, warp_epis, [('avg_epis', 'warp_input.in_files')]),

        # group template wf
        (warp_epis, template_wf, [('warp_output.warped_files',
                                   'grptemplate_input.in_files')]),

        # output node
        (template_wf, output, [('grptemplate_output.template',
                                'fmri_template')]),

        # template output
        (output, grp_datasink, [('fmri_template', '@fmri_group_template')]),
        (warp_epis, grp_datasink, [('warp_output.warped_files',
                                    'individuals.@warped')]),
    ])

    return main_wf
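Usage sketch (hypothetical): the function assumes `main_wf` already provides the `input_files`, `datasink`, `infosrc`, and `fmri_cleanup` nodes it looks up.

main_wf = attach_spm_fmri_grouptemplate_wf(main_wf, wf_name='spm_epi_grouptemplate')
main_wf.run()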
Code example #7
def motion_power_stats_wf(wf_name='gen_motion_stats'):
    """ The main purpose of this workflow is to get various statistical measures from the
    movement/motion parameters obtained in functional preprocessing.

    These parameters (FD calculations) are also required to carry out scrubbing.

    Order of commands:

    - Calculate Framewise Displacement (FD) as per Power et al., 2012

      Differentiating head realignment parameters across frames yields a six-dimensional
      time series that represents instantaneous head motion.
      Rotational displacements are converted from degrees to millimeters by calculating
      displacement on the surface of a sphere of radius 50 mm. [1]

    - Calculate Framewise Displacement (FD) as per Jenkinson et al., 2002

    - Calculate Frames to exclude

      Mark for removal all frames whose FD exceeds the threshold

    - Calculate Frames to include

      Keep all frames whose FD stays below the threshold

    - Calculate DVARS

      DVARS (D referring to the temporal derivative of time courses, VARS to the RMS variance
      over voxels) indexes the rate of change of the BOLD signal across the entire brain at
      each frame of data. To calculate DVARS, the volumetric time series is differentiated
      (by backwards differences) and the RMS signal change is calculated over the whole brain.
      DVARS is thus a measure of how much the intensity of a brain image changes in comparison
      to the previous time point (as opposed to the global signal, which is the average value
      of a brain image at a time point). [1]

    - Calculate Power parameters::

        MeanFD : Mean (across time/frames) of the absolute values for Framewise
        Displacement (FD), computed as described in Power et al., NeuroImage, 2012

        rootMeanSquareFD : Root mean square (RMS; across time/frames) of the absolute values for FD

        NumFD >= threshold : Number of frames (time points) where movement (FD) exceeded the threshold

        rmsFD : Root mean square (RMS; across time/frames) of the absolute values for FD

        FDquartile (top 1/4th FD) : Mean of the top 25% highest FD values

        PercentFD (> threshold) : Number of frames (time points) where movement (FD) exceeded
                                  the threshold, expressed as a percentage of the total number
                                  of frames (time points)

        MeanDVARS : Mean of voxel DVARS

    - Calculate Motion Parameters

      The following motion parameters are calculated::

        Subject, Scan, Mean Relative RMS Displacement, Max Relative RMS Displacement,
        Movements > threshold, Mean Relative Mean Rotation, Mean Relative Maxdisp,
        Max Relative Maxdisp, Max Abs Maxdisp, Max Relative Roll, Max Relative Pitch,
        Max Relative Yaw, Max Relative dS-I, Max Relative dL-R, Max Relative dP-A,
        Mean Relative Roll, Mean Relative Pitch, Mean Relative Yaw, Mean Relative dS-I,
        Mean Relative dL-R, Mean Relative dP-A, Max Abs Roll, Max Abs Pitch, Max Abs Yaw,
        Max Abs dS-I, Max Abs dL-R, Max Abs dP-A, Mean Abs Roll, Mean Abs Pitch,
        Mean Abs Yaw, Mean Abs dS-I, Mean Abs dL-R, Mean Abs dP-A

    Parameters
    ----------
    wf_name: str
        Workflow name

    Returns
    -------
    wf: workflow object
        Workflow object containing various movement/motion and power
        parameter estimates.

    Nipype inputs
    -------------
    inputspec.motion_correct : string (func/rest file or a list of func/rest nifti files)
        Path to the motion-corrected functional data

    inputspec.mask : string (nifti file)
        Path to the file containing the brain-only mask for the functional data

    inputspec.max_displacement : string (Mat file)
        Maximum displacement (in mm) vector for brain voxels in each volume.
        This file is obtained in the functional preprocessing step

    inputspec.movement_parameters : string (Mat file)
        1D file containing the six movement/motion parameters (3 translations,
        3 rotations) in different columns (roll, pitch, yaw, dS, dL, dP),
        obtained in the functional preprocessing step

    scrubbing_input.threshold : a float
        scrubbing threshold

    scrubbing_input.remove_frames_before : an integer
        Number of frames immediately preceding each offending time frame
        (i.e., one exceeding the FD threshold) that should also be removed

    scrubbing_input.remove_frames_after : an integer
        Number of frames immediately following each offending time frame
        (i.e., one exceeding the FD threshold) that should also be removed

    Nipype outputs
    --------------
    outputspec.FD_1D : 1D file
        Framewise Displacement (FD) time series, as per Power et al., 2012

    outputspec.FDJ_1D : 1D file
        Framewise Displacement (FD) time series, as per Jenkinson et al., 2002

    outputspec.frames_ex_1D : 1D file
        Frames to censor ("scrub"): the offending time frames (i.e., those
        exceeding the threshold), the preceding frame, and the two subsequent
        frames

    outputspec.frames_in_1D : 1D file
        Frames left in after removing those marked for scrubbing

    outputspec.power_params : txt file
        Text file containing various power parameters for scrubbing.

    outputspec.motion_params : txt file
        Text file containing various movement parameters

    References
    ----------
    .. [1] Power, J. D., Barnes, K. A., Snyder, A. Z., Schlaggar, B. L., & Petersen, S. E. (2012). Spurious
           but systematic correlations in functional connectivity MRI networks arise from subject motion. NeuroImage, 59(3),
           2142-2154. doi:10.1016/j.neuroimage.2011.10.018

    .. [2] Power, J. D., Barnes, K. A., Snyder, A. Z., Schlaggar, B. L., & Petersen, S. E. (2012). Steps
           toward optimizing motion artifact removal in functional connectivity MRI; a reply to Carp.
           NeuroImage. doi:10.1016/j.neuroimage.2012.03.017

    .. [3] Jenkinson, M., Bannister, P., Brady, M., Smith, S., 2002. Improved optimization for the robust
           and accurate linear registration and motion correction of brain images. Neuroimage 17, 825-841.
    """
    wf = pe.Workflow(name=wf_name)

    # specify input and output fields
    in_fields = [
        'subject_id',
        'scan_id',
        'movement_parameters',
        'max_displacement',
        'motion_correct',
        'mask',
        'oned_matrix_save',
    ]

    out_fields = [
        'FD_1D',
        'FDJ_1D',
        'frames_ex_1D',
        'frames_in_1D',
        'power_params',
        'motion_params',
    ]

    inputNode = setup_node(IdentityInterface(fields=in_fields),
                           name='inputspec')

    scrubbing_input = setup_node(IdentityInterface(
        fields=['threshold', 'remove_frames_before', 'remove_frames_after']),
                                 name='scrubbing_input')

    outputNode = setup_node(IdentityInterface(fields=out_fields),
                            name='outputspec')

    # calculate mean DVARS
    cal_DVARS = setup_node(Function(input_names=['in_file', 'mask'],
                                    output_names=['out_file'],
                                    function=calculate_DVARS),
                           name='cal_DVARS')
    wf.connect(inputNode, 'motion_correct', cal_DVARS, 'in_file')
    wf.connect(inputNode, 'mask', cal_DVARS, 'mask')

    # calculate Framewise Displacement as per Power et al., 2012
    calculate_FD = setup_node(Function(input_names=['in_file'],
                                       output_names=['out_file'],
                                       function=calculate_FD_P),
                              name='calculate_FD')

    wf.connect(inputNode, 'movement_parameters', calculate_FD, 'in_file')
    wf.connect(calculate_FD, 'out_file', outputNode, 'FD_1D')

    # calculate Framewise Displacement as per Jenkinson et al., 2002
    calculate_FDJ = setup_node(Function(input_names=['in_file'],
                                        output_names=['out_file'],
                                        function=calculate_FD_J),
                               name='calculate_FDJ')

    wf.connect(inputNode, 'oned_matrix_save', calculate_FDJ, 'in_file')
    wf.connect(calculate_FDJ, 'out_file', outputNode, 'FDJ_1D')

    # calculate frames to exclude and include after scrubbing
    exclude_frames = setup_node(Function(
        input_names=['in_file', 'threshold', 'frames_before', 'frames_after'],
        output_names=['out_file'],
        function=set_frames_ex),
                                name='exclude_frames')

    wf.connect(calculate_FD, 'out_file', exclude_frames, 'in_file')
    wf.connect(scrubbing_input, 'threshold', exclude_frames, 'threshold')
    wf.connect(scrubbing_input, 'remove_frames_before', exclude_frames,
               'frames_before')
    wf.connect(scrubbing_input, 'remove_frames_after', exclude_frames,
               'frames_after')
    wf.connect(exclude_frames, 'out_file', outputNode, 'frames_ex_1D')

    include_frames = setup_node(Function(
        input_names=['in_file', 'threshold', 'exclude_list'],
        output_names=['out_file'],
        function=set_frames_in),
                                name='include_frames')

    wf.connect(calculate_FD, 'out_file', include_frames, 'in_file')
    wf.connect(scrubbing_input, 'threshold', include_frames, 'threshold')
    wf.connect(exclude_frames, 'out_file', include_frames, 'exclude_list')
    wf.connect(include_frames, 'out_file', outputNode, 'frames_in_1D')

    calc_motion_parameters = setup_node(Function(
        input_names=[
            "subject_id", "scan_id", "movement_parameters", "max_displacement"
        ],
        output_names=['out_file'],
        function=gen_motion_parameters),
                                        name='calc_motion_parameters')
    wf.connect(inputNode, 'subject_id', calc_motion_parameters, 'subject_id')
    wf.connect(inputNode, 'scan_id', calc_motion_parameters, 'scan_id')
    wf.connect(inputNode, 'movement_parameters', calc_motion_parameters,
               'movement_parameters')
    wf.connect(inputNode, 'max_displacement', calc_motion_parameters,
               'max_displacement')
    wf.connect(calc_motion_parameters, 'out_file', outputNode, 'motion_params')

    calc_power_parameters = setup_node(Function(input_names=[
        "subject_id", "scan_id", "FD_1D", "FDJ_1D", "threshold", "DVARS"
    ],
                                                output_names=['out_file'],
                                                function=gen_power_parameters),
                                       name='calc_power_parameters')
    wf.connect(inputNode, 'subject_id', calc_power_parameters, 'subject_id')
    wf.connect(inputNode, 'scan_id', calc_power_parameters, 'scan_id')
    wf.connect(cal_DVARS, 'out_file', calc_power_parameters, 'DVARS')
    wf.connect(calculate_FD, 'out_file', calc_power_parameters, 'FD_1D')
    wf.connect(calculate_FDJ, 'out_file', calc_power_parameters, 'FDJ_1D')
    wf.connect(scrubbing_input, 'threshold', calc_power_parameters,
               'threshold')

    wf.connect(calc_power_parameters, 'out_file', outputNode, 'power_params')

    return wf
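Usage sketch (hypothetical file names, from a prior motion-correction step):

wf = motion_power_stats_wf('gen_motion_stats')
wf.inputs.inputspec.subject_id = 'sub-01'
wf.inputs.inputspec.scan_id = 'rest-01'
wf.inputs.inputspec.motion_correct = 'rest_mc.nii.gz'
wf.inputs.inputspec.mask = 'rest_brain_mask.nii.gz'
wf.inputs.inputspec.movement_parameters = 'rest_mc.1D'
wf.inputs.inputspec.max_displacement = 'max_displacement.1D'
wf.inputs.inputspec.oned_matrix_save = 'rest_mc.aff12.1D'
wf.inputs.scrubbing_input.threshold = 0.2          # FD threshold in mm
wf.inputs.scrubbing_input.remove_frames_before = 1
wf.inputs.scrubbing_input.remove_frames_after = 2
wf.run()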
Code example #8
File: grouptemplate.py  Project: zuxfoucault/pypes
def attach_spm_pet_grouptemplate(main_wf, wf_name="spm_pet_template"):
    """ Attach a PET pre-processing workflow that uses SPM12 to `main_wf`.
    This workflow picks all spm_pet_preproc outputs 'pet_output.warped_files' in `main_wf`
    to create a group template.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: `main_wf` is expected to have `input_files` and `datasink` nodes.

    pet_output.warped_files: input node

    datasink: nipype Node

    spm_pet_preproc: nipype Workflow

    Nipype Outputs
    --------------
    group_template.pet_template: file
        The path to the PET group template.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - spm_pet_preproc
    - spm_anat_preproc if `spm_pet_template.do_petpvc` is True.

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    pet_wf = get_subworkflow(main_wf, "spm_pet_preproc")

    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf, name='datasink')

    # The base name of the 'pet' file for the substitutions
    pet_fbasename = remove_ext(os.path.basename(get_input_file_name(in_files, 'pet')))

    # the group template datasink
    base_outdir = datasink.inputs.base_directory
    grp_datasink = pe.Node(
        DataSink(parameterization=False, base_directory=base_outdir),
        name='{}_grouptemplate_datasink'.format(pet_fbasename)
    )
    grp_datasink.inputs.container = '{}_grouptemplate'.format(pet_fbasename)

    # the list of warped PET files from all the subjects
    warped_pets = pe.JoinNode(
        interface=IdentityInterface(fields=["warped_pets"]),
        joinsource="infosrc",
        joinfield="warped_pets",
        name="warped_pets"
    )

    # the group template workflow
    template_wf = spm_create_group_template_wf(wf_name)

    # output node
    output = setup_node(IdentityInterface(fields=["pet_template"]), name="group_template")

    # group dataSink output substitutions
    regexp_subst = [
        (r"/wgrptemplate{pet}_merged_mean_smooth.nii$", "/{pet}_grouptemplate_mni.nii"),
        (r"/w{pet}_merged_mean_smooth.nii$",            "/{pet}_grouptemplate_mni.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)

    grp_datasink.inputs.regexp_substitutions = extend_trait_list(
        grp_datasink.inputs.regexp_substitutions,
        regexp_subst
    )

    # Connect the nodes
    main_wf.connect([
        # warped pets file list input
        (pet_wf, warped_pets, [("warp_output.warped_files", "warped_pets")]),

        # group template wf
        (warped_pets, template_wf, [(("warped_pets", flatten_list), "grptemplate_input.in_files")]),

        # output node
        (template_wf, output, [("grptemplate_output.template", "pet_template")]),

        # template output
        (output, grp_datasink, [("pet_template", "@pet_grouptemplate")]),
    ])

    # Now we start with the correction and registration of each subject to the group template
    do_petpvc = get_config_setting('spm_pet_template.do_petpvc')
    if do_petpvc:
        get_subworkflow(main_wf, 'spm_anat_preproc')

        preproc_wf_name = "spm_mrpet_grouptemplate_preproc"
        main_wf = attach_spm_mrpet_preprocessing(main_wf, wf_name=preproc_wf_name, do_group_template=True)
        preproc_wf = get_subworkflow(main_wf, preproc_wf_name)

        main_wf.connect([(output, preproc_wf, [
            ("pet_template", "pet_input.pet_template")]),
        ])
    else:
        # add the pet template to the preproc workflow
        reg_wf = spm_register_to_template_wf(wf_name="spm_pet_register_to_grouptemplate")
        main_wf.connect([
            (output,      reg_wf, [("pet_template", "reg_input.template")]),
            (in_files,    reg_wf, [("pet",          "reg_input.in_file")]),

            (reg_wf, datasink, [
                ("reg_output.warped",     "pet.group_template.@warped"),
                ("reg_output.warp_field", "pet.group_template.@warp_field"),
            ]),
        ])

    # per-subject datasink output substitutions
    regexp_subst = [
        (r"group_template/{pet}_sn.mat$",           "group_template/{pet}_grptemplate_params.mat"),
        (r"group_template/wgrptemplate_{pet}.nii$", "group_template/{pet}_grptemplate.nii"),
        (r"group_template/w{pet}.nii",              "group_template/{pet}_grptemplate.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions,
        regexp_subst
    )

    return main_wf
Code example #9
File: main.py  Project: davidmeunier79/sctva
def create_main_pipeline(subject_list=SUBJECTS):
    RADIUS = 5  # selection sphere radius (mm)

    # create node that contains meta-variables about data
    inputnode = Node(
        IdentityInterface(fields=[
            "subject_id", "center", "modality", "acquisition", "correction"
        ]),
        name="inputnode",
    )
    inputnode.inputs.center = CENTER
    inputnode.inputs.modality = MODALITY
    inputnode.inputs.acquisition = ACQUISITION
    inputnode.inputs.correction = CORRECTION
    inputnode.iterables = [("subject_id", subject_list)]

    # file-path templates for the SelectFiles datagrabber
    templates = {
        "diffusion_volume":
        "DTI/{center}/{subject_id}/{modality}/{acquisition}/{"
        "correction}/corrected_dwi_{subject_id}.nii.gz",
        "bvals":
        "DTI/{center}/{subject_id}/{modality}/{acquisition}/raw_bvals_{subject_id}.txt",
        "bvecs":
        "DTI/{center}/{subject_id}/{modality}/{acquisition}/{correction}/corrected_bvecs_{subject_id}.txt",
        "t1_volume":
        "analysis_{subject_id}/anat/{"
        "subject_id}_ses-01_T1w_denoised_debiased_in-MNI152.nii.gz",
        "func_contrast_volume":
        "analysis_{subject_id}/spm_realign/results_8WM_9CSF_0mvt/In-MNI152_{subject_id}_res-8WM_9CSF_0mvt_human_vs_all_t.nii.gz",
    }
    datagrabber = pe.Node(SelectFiles(templates), name="datagrabber")
    datagrabber.inputs.base_directory = PRIMAVOICE

    study_pipeline = create_study_pipeline(radius=RADIUS)

    main_pipeline = pe.Workflow(name="main_pipeline")

    main_pipeline.connect([(
        inputnode,
        datagrabber,
        [
            ("subject_id", "subject_id"),
            ("center", "center"),
            ("modality", "modality"),
            ("acquisition", "acquisition"),
            ("correction", "correction"),
        ],
    )])
    main_pipeline.connect([(
        datagrabber,
        study_pipeline,
        [
            ("diffusion_volume", "inputnode.diffusion_volume"),
            ("bvals", "inputnode.bvals"),
            ("bvecs", "inputnode.bvecs"),
            ("t1_volume", "inputnode.t1_volume"),
            ("func_contrast_volume", "inputnode.func_contrast_volume"),
        ],
    )])
    return main_pipeline
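Usage sketch (hypothetical subject IDs and working directory):

pipeline = create_main_pipeline(subject_list=['sub-01', 'sub-02'])
pipeline.base_dir = '/tmp/sctva_work'
pipeline.run(plugin='MultiProc', plugin_args={'n_procs': 4})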
Code example #10
File: decompose.py  Project: erramuzpe/pypes
def attach_concat_canica(main_wf, wf_name="canica", **kwargs):
    """ Attach a Concat and a nilearn CanICA interface to `main_wf`.

    The Concat node will merge all the files into a single 4D volume before delivering it to CanICA.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    kwargs: dict[str]->str
        input_node: str
            Name of the input node from which to connect the source `input_connection`.

        input_connection: str
            Name of the connection to obtain the source files.

    Nipype Inputs for `main_wf`
    ---------------------------
    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    srcwf_name   = kwargs['input_node']
    srcconn_name = kwargs['input_connection']

    src_wf   = main_wf.get_node(srcwf_name)
    datasink = get_datasink(main_wf, name='datasink')

    base_outdir  = datasink.inputs.base_directory
    ica_datasink = pe.Node(DataSink(parameterization=False,
                                    base_directory=base_outdir,),
                           name="ica_datasink".format(wf_name))
    ica_datasink.inputs.container = 'ica_{}'.format(wf_name)

    # the list of the subjects' files
    ica_subjs = pe.JoinNode(interface=IdentityInterface(fields=["ica_subjs"]),
                            joinsource="infosrc",
                            joinfield="ica_subjs",
                            name="ica_subjs")

    # concat images
    concat = setup_node(Function(function=concat_3D_imgs,
                                 input_names=["in_files"],
                                 output_names=["out_file"],
                                 imports=['from pypes.interfaces.nilearn import ni2file']),
                        name="concat")

    # run group ICA (CanICA) on the concatenated volume
    ica = setup_node(CanICAInterface(), name="{}_ica".format(wf_name),)
    algorithm = get_config_setting("{}_ica.algorithm".format(wf_name),
                                   default=get_config_setting('canica.algorithm',
                                                              default=''))
    if algorithm:
        ica.inputs.algorithm = algorithm

    # Connect the nodes
    main_wf.connect([
                     # file list input
                     (src_wf, ica_subjs, [(srcconn_name, "ica_subjs")]),

                     # concat images
                     (ica_subjs, concat, [("ica_subjs", "in_files")]),

                     # canica
                     (concat, ica, [(("out_file", _check_list), "in_files")]),

                     # canica output
                     (ica, ica_datasink, [("components", "@components"),
                                          ("loadings",   "@loadings"),
                                          ("score",      "@score"),
                                         ]),
                   ])

    # plot the ICA results?
    do_plot = get_config_setting('canica_extra.plot', default=True)
    if not do_plot:
        return main_wf

    # get the plot threshold from the ICA node or the config file (in that order).
    plot_thr = get_config_setting('canica_extra.plot_thr', default=0)
    plot_thr = get_trait_value(ica.inputs, 'threshold', default=plot_thr)

    # plot ICA result images
    plot_ica = setup_node(Function(function=plot_ica_results,
                                   input_names=["ica_result", "application", "mask_file", "zscore", "bg_img"],
                                   output_names=["all_icc_plot", "iccs_plot", "sliced_ic_plots"],),
                          name="plot_ica")
    plot_ica.inputs.zscore      = plot_thr
    plot_ica.inputs.mask_file   = get_trait_value(ica.inputs, 'mask')
    plot_ica.inputs.application = 'nilearn'

    # Connect the plotting nodes
    main_wf.connect([
                     # canica
                     (ica,   plot_ica,        [("components",   "ica_result")]),

                     # canica output
                     (plot_ica, ica_datasink, [("all_icc_plot",     "@all_icc_plot"),
                                               ("iccs_plot",        "@iccs_plot"),
                                               ("sliced_ic_plots",  "@sliced_ic_plots"),
                                              ]),
                     ])

    return main_wf