Example #1
def attach_spm_pet_preprocessing(main_wf, wf_name='spm_pet_preproc'):
    """ Attach a FDG-PET only pre-processing workflow that uses SPM12 to `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    input_files.select.pet: input node

    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)

    # The base name of the 'pet' file for the substitutions
    pet_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'pet')))

    # get the PET preprocessing pipeline
    warp_pet_wf = spm_warp_to_mni(wf_name=wf_name)

    # dataSink output substitutions
    regexp_subst = [
        (r'/w{pet}.nii', '/{pet}_mni.nii'),
    ]
    regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    regexp_subst = concat_to_pair_list(regexp_subst, prefix='/pet')
    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions, regexp_subst)

    # Connect the nodes
    main_wf.connect([
        # pet file input
        (in_files, warp_pet_wf, [('pet', 'warp_input.in_files')]),
        (warp_pet_wf, datasink, [
            ('warp_output.warped_files', 'pet.@warped'),
        ]),
    ])

    return main_wf
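The substitution helpers used above (`format_pair_list`, `extension_duplicates`, `concat_to_pair_list`) come from the library; the sketch below only illustrates what they appear to do here, inferred from their usage, and is not the library's implementation.

# Rough, self-contained sketch of the substitution helper chain used above.
# These stand-ins are inferred from usage; the real helpers live in the library.
def format_pair_list_sketch(pair_list, **kwargs):
    # fill the '{pet}'-style placeholders in both members of each pair
    return [(pat.format(**kwargs), rep.format(**kwargs)) for pat, rep in pair_list]

def extension_duplicates_sketch(pair_list):
    # add a '.nii.gz' variant for every '.nii' pattern
    return [(pat.replace('.nii', '.nii.gz'), rep.replace('.nii', '.nii.gz'))
            for pat, rep in pair_list if '.nii' in pat]

def concat_to_pair_list_sketch(pair_list, prefix=''):
    # prepend a folder prefix to both members of each pair
    return [(prefix + pat, prefix + rep) for pat, rep in pair_list]

pairs = format_pair_list_sketch([(r'/w{pet}.nii', '/{pet}_mni.nii')], pet='subj01_fdgpet')
pairs += extension_duplicates_sketch(pairs)
pairs = concat_to_pair_list_sketch(pairs, prefix='/pet')
# pairs is now roughly:
# [('/pet/wsubj01_fdgpet.nii', '/pet/subj01_fdgpet_mni.nii'),
#  ('/pet/wsubj01_fdgpet.nii.gz', '/pet/subj01_fdgpet_mni.nii.gz')]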
Example #2
def attach_dcm2niix(main_wf, wf_name="dcm2niix"):
    """ Attach the dcm2niix workflow to the `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the dcm2niix workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    input_files.dcm_dir: input node

    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)

    # The workflow box
    d2n_wf = dcm2niix_wf(wf_name=wf_name)

    main_wf.connect([
        (in_files, d2n_wf, [("dcm_dir", "dcm2niix_input.in_dcmdir")]),
        (d2n_wf, datasink, [
            ("dcm2niix_output.bids", "@bids"),
            ("dcm2niix_output.bvals", "@bvals"),
            ("dcm2niix_output.bvecs", "@bvecs"),
            ("dcm2niix_output.converted_files", "@converted_files"),
        ])
    ])

    return main_wf
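All of these attach_* functions share the same calling pattern: the caller builds a main workflow that already holds a node named `input_files` exposing the raw file paths and a DataSink node named `datasink`; each function looks those up, wires its sub-workflow in between, and returns the extended workflow. The sketch below is hypothetical (node names, fields and paths are assumptions based on the helpers used above), not the library's own pipeline builder.

import nipype.pipeline.engine as pe
from nipype.interfaces.io import DataSink
from nipype.interfaces.utility import IdentityInterface

# hypothetical main workflow; the real pipelines build this with their own helpers
main_wf = pe.Workflow(name='main_workflow', base_dir='/tmp/work')

# node exposing the input file paths; assumed to be the 'input_files' node
# that get_input_node() looks up
input_files = pe.Node(IdentityInterface(fields=['dcm_dir']), name='input_files')
input_files.inputs.dcm_dir = '/data/subj01/dicom'

# sink for all outputs; assumed to be the 'datasink' node that get_datasink() looks up
datasink = pe.Node(DataSink(base_directory='/data/out'), name='datasink')

main_wf.add_nodes([input_files, datasink])

# attach the dcm2niix conversion and run
main_wf = attach_dcm2niix(main_wf, wf_name='dcm2niix')
# main_wf.run()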
Example #3
def attach_spm_fmri_grouptemplate_wf(main_wf, wf_name='spm_epi_grouptemplate'):
    """ Attach a fMRI pre-processing workflow that uses SPM12 to `main_wf`.
    This workflow picks all spm_fmri_preproc outputs 'fmri_output.warped_files' in `main_wf`
    to create a group template.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow


    Nipype Inputs
    -------------
    rest_input.in_file: traits.File
        The slice time and motion corrected fMRI file.

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    rest_output.avg_epi: input node

    datasink: nipype Node

    fmri_cleanup: nipype Workflow

    Nipype Outputs
    --------------
    group_template.fmri_template: file
        The path to the fMRI group template.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - fmri_cleanup: for the `rest_output.avg_epi` output

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    fmri_cleanup_wf = get_subworkflow(main_wf, 'fmri_cleanup')

    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf, name='datasink')

    # The base name of the 'rest' file for the substitutions
    fmri_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'rest')))

    # the group template datasink
    base_outdir = datasink.inputs.base_directory
    grp_datasink = pe.Node(
        DataSink(parameterization=False, base_directory=base_outdir),
        name='{}_grouptemplate_datasink'.format(fmri_fbasename))
    grp_datasink.inputs.container = '{}_grouptemplate'.format(fmri_fbasename)

    # the list of the average EPIs from all the subjects
    # avg_epi_map = pe.MapNode(IdentityInterface(fields=['avg_epis']), iterfield=['avg_epis'], name='avg_epi_map')

    avg_epis = pe.JoinNode(IdentityInterface(fields=['avg_epis']),
                           joinsource='infosrc',
                           joinfield='avg_epis',
                           name='avg_epis')

    # directly warp the avg EPI to the SPM standard template
    warp_epis = spm_warp_to_mni("spm_warp_avgepi_to_mni")

    # the group template workflow
    template_wf = spm_create_group_template_wf(wf_name)

    # output node
    output = setup_node(IdentityInterface(fields=['fmri_template']),
                        name='group_template')

    # group dataSink output substitutions
    regexp_subst = [
        (r'/wgrptemplate{fmri}_merged_mean_smooth.nii$',
         '/{fmri}_grouptemplate_mni.nii'),
        (r'/w{fmri}_merged_mean_smooth.nii$', '/{fmri}_grouptemplate_mni.nii'),
    ]
    regexp_subst = format_pair_list(regexp_subst, fmri=fmri_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    grp_datasink.inputs.regexp_substitutions = extend_trait_list(
        grp_datasink.inputs.regexp_substitutions, regexp_subst)

    # Connect the nodes
    main_wf.connect([
        # the avg EPI inputs
        (fmri_cleanup_wf, avg_epis, [('rest_output.avg_epi', 'avg_epis')]),

        # warp avg EPIs to MNI
        (avg_epis, warp_epis, [('avg_epis', 'warp_input.in_files')]),

        # group template wf
        (warp_epis, template_wf, [('warp_output.warped_files',
                                   'grptemplate_input.in_files')]),

        # output node
        (template_wf, output, [('grptemplate_output.template', 'fmri_template')]),

        # template output
        (output, grp_datasink, [('fmri_template', '@fmri_group_template')]),
        (warp_epis, grp_datasink, [('warp_output.warped_files',
                                    'individuals.@warped')]),
    ])

    return main_wf
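The JoinNode above gathers one average EPI per subject by joining over a node called 'infosrc', so `main_wf` must iterate over subjects through an iterable node with exactly that name. A minimal, hypothetical sketch of such a node (the real pipelines create it when the main workflow is built):

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

# hypothetical subject iterator; its name must match the JoinNode's joinsource
infosrc = pe.Node(IdentityInterface(fields=['subject_id']), name='infosrc')
infosrc.iterables = [('subject_id', ['subj01', 'subj02', 'subj03'])]
# every iteration yields one 'rest_output.avg_epi'; the JoinNode collapses them
# into the single list fed to spm_warp_to_mni and the group-template workflow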
Example #4
def attach_spm_pet_grouptemplate(main_wf, wf_name="spm_pet_template"):
    """ Attach a PET pre-processing workflow that uses SPM12 to `main_wf`.
    This workflow picks all spm_pet_preproc outputs 'pet_output.warped_files' in `main_wf`
    to create a group template.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    warp_output.warped_files: input node

    datasink: nipype Node

    spm_pet_preproc: nipype Workflow

    Nipype Outputs
    --------------
    group_template.pet_template: file
        The path to the PET group template.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - spm_pet_preproc
    - spm_anat_preproc if `spm_pet_template.do_petpvc` is True.

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    pet_wf = get_subworkflow(main_wf, "spm_pet_preproc")

    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf, name='datasink')

    # The base name of the 'pet' file for the substitutions
    pet_fbasename = remove_ext(os.path.basename(get_input_file_name(in_files, 'pet')))

    # the group template datasink
    base_outdir = datasink.inputs.base_directory
    grp_datasink = pe.Node(
        DataSink(parameterization=False, base_directory=base_outdir),
        name='{}_grouptemplate_datasink'.format(pet_fbasename)
    )
    grp_datasink.inputs.container = '{}_grouptemplate'.format(pet_fbasename)

    # the list of the raw pet subjects
    warped_pets = pe.JoinNode(
        interface=IdentityInterface(fields=["warped_pets"]),
        joinsource="infosrc",
        joinfield="warped_pets",
        name="warped_pets"
    )

    # the group template workflow
    template_wf = spm_create_group_template_wf(wf_name)

    # output node
    output = setup_node(IdentityInterface(fields=["pet_template"]), name="group_template")

    # group dataSink output substitutions
    regexp_subst = [
        (r"/wgrptemplate{pet}_merged_mean_smooth.nii$", "/{pet}_grouptemplate_mni.nii"),
        (r"/w{pet}_merged_mean_smooth.nii$",            "/{pet}_grouptemplate_mni.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)

    grp_datasink.inputs.regexp_substitutions = extend_trait_list(
        grp_datasink.inputs.regexp_substitutions,
        regexp_subst
    )

    # Connect the nodes
    main_wf.connect([
        # warped pets file list input
        (pet_wf, warped_pets, [("warp_output.warped_files", "warped_pets")]),

        # group template wf
        (warped_pets, template_wf, [(("warped_pets", flatten_list), "grptemplate_input.in_files")]),

        # output node
        (template_wf, output, [("grptemplate_output.template", "pet_template")]),

        # template output
        (output, grp_datasink, [("pet_template", "@pet_grouptemplate")]),
    ])

    # Now we start with the correction and registration of each subject to the group template
    do_petpvc = get_config_setting('spm_pet_template.do_petpvc')
    if do_petpvc:
        # the anatomical pre-processing workflow must already be attached;
        # the PVC-based pipeline needs its outputs
        get_subworkflow(main_wf, 'spm_anat_preproc')

        preproc_wf_name = "spm_mrpet_grouptemplate_preproc"
        main_wf = attach_spm_mrpet_preprocessing(main_wf, wf_name=preproc_wf_name, do_group_template=True)
        preproc_wf = get_subworkflow(main_wf, preproc_wf_name)

        main_wf.connect([(output, preproc_wf, [
            ("pet_template", "pet_input.pet_template")]),
        ])
    else:
        # add the pet template to the preproc workflow
        reg_wf = spm_register_to_template_wf(wf_name="spm_pet_register_to_grouptemplate")
        main_wf.connect([
            (output,      reg_wf, [("pet_template", "reg_input.template")]),
            (in_files,    reg_wf, [("pet",          "reg_input.in_file")]),

            (reg_wf, datasink, [
                ("reg_output.warped",     "pet.group_template.@warped"),
                ("reg_output.warp_field", "pet.group_template.@warp_field"),
            ]),
        ])

    # per-subject datasink output substitutions
    regexp_subst = [
        (r"group_template/{pet}_sn.mat$",           "group_template/{pet}_grptemplate_params.mat"),
        (r"group_template/wgrptemplate_{pet}.nii$", "group_template/{pet}_grptemplate.nii"),
        (r"group_template/w{pet}.nii",              "group_template/{pet}_grptemplate.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions,
        regexp_subst
    )

    return main_wf
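The `(("warped_pets", flatten_list), "grptemplate_input.in_files")` connection above uses nipype's connect-with-function form: the source output is passed through a function before it reaches the destination input. A small sketch of that mechanism with a stand-in flatten function (the library's flatten_list may behave differently):

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import IdentityInterface

def flatten_list_sketch(in_list):
    # stand-in for the library's flatten_list: flatten one level of nesting
    out = []
    for item in in_list:
        if isinstance(item, (list, tuple)):
            out.extend(item)
        else:
            out.append(item)
    return out

src = pe.Node(IdentityInterface(fields=['files']), name='src')
dst = pe.Node(IdentityInterface(fields=['in_files']), name='dst')

wf = pe.Workflow(name='connect_with_function_sketch')
# the ('files', flatten_list_sketch) tuple applies the function to the value of
# src.files before it is handed to dst.in_files
wf.connect([(src, dst, [(('files', flatten_list_sketch), 'in_files')])])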
Example #5
def attach_petpvc_workflow(main_wf, wf_name="spm_petpvc"):
    """ Attach a PETPVC workflow.

    The PET pre-processing steps here make use of the anatomical MR
    pre-processing outputs, so the `spm_anat_preproc` workflow is expected to be
    already attached to `main_wf`.

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    input_files.select.pet: input node

    datasink: nipype Node

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - spm_anat_preproc

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    anat_wf = get_subworkflow(main_wf, 'spm_anat_preproc')
    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)

    # The base names of the 'anat' and 'pet' files for the substitutions
    anat_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'anat')))
    pet_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'pet')))

    # get the PET preprocessing pipeline
    pet_wf = petpvc_workflow(wf_name=wf_name)

    # dataSink output substitutions
    regexp_subst = [
        (r"/{pet}_.*_pvc.nii.gz$", "/{pet}_pvc.nii.gz"),
        (r"/{pet}_.*_pvc_maths.nii.gz$", "/{pet}_pvc_norm.nii.gz"),
    ]
    regexp_subst = format_pair_list(regexp_subst,
                                    pet=pet_fbasename,
                                    anat=anat_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    regexp_subst = concat_to_pair_list(regexp_subst, prefix='/mrpet')
    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions, regexp_subst)

    # Connect the nodes
    main_wf.connect([
        # pet file input
        (in_files, pet_wf, [("pet", "pvc_input.in_file")]),

        # pet to anat registration
        (anat_wf, pet_wf, [
            ("new_segment.bias_corrected_images", "pet_input.reference_file"),
            ("new_segment.native_class_images", "pet_input.tissues"),
        ]),
        (pet_wf, datasink, [
            ("pvc_output.coreg_others", "mrpet.tissues"),
            ("pvc_output.coreg_ref", "mrpet.@anat"),
            ("pvc_output.pvc_out", "mrpet.@pvc"),
            ("pvc_output.petpvc_mask", "mrpet.@petpvc_mask"),
            ("pvc_output.brain_mask", "mrpet.@brain_mask"),
            ("pvc_output.gm_norm", "mrpet.@gm_norm"),
        ]),
    ])

    return main_wf
Example #6
def attach_fmri_cleanup_wf(main_wf, wf_name="fmri_cleanup"):
    """ Attach the resting-state MRI pre-processing workflow to the `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the fMRI cleanup workflow.

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    input_files.select.rest: input node

    input_files.select.anat: input node

    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)

    anat_output = get_interface_node(main_wf, "anat_output")

    # create the fMRI preprocessing pipelines
    cleanup_wf = fmri_cleanup_wf(wf_name)

    # dataSink output substitutions
    # The base names of the 'rest' and 'anat' files for the substitutions
    rest_fbasename = remove_ext(os.path.basename(get_input_file_name(in_files, 'rest')))
    anat_fbasename = remove_ext(os.path.basename(get_input_file_name(in_files, 'anat')))

    regexp_subst = [
        (r"/rc1[\w]+_corrected\.nii$", "/gm_{rest}.nii"),
        (r"/rc2[\w]+_corrected\.nii$", "/wm_{rest}.nii"),
        (r"/rc3[\w]+_corrected\.nii$", "/csf_{rest}.nii"),
        (r"/rm[\w]+_corrected\.nii$", "/{anat}_{rest}.nii"),
        (r"/corr_stc{rest}_trim\.nii$", "/slice_time_corrected.nii"),
        (r"/stc{rest}_trim\.nii\.par$", "/motion_parameters.txt"),
        (r"/corr_stc{rest}_trim_filt\.nii$", "/time_filt.nii"),
        (r"/corr_stc{rest}_trim_mean_mask\.\.nii$", "/epi_brain_mask_{rest}.nii"),
        (r"/tissue_brain_mask\.nii$", "/tissue_brain_mask_{rest}.nii"),
        (r"/corr_stc{rest}_trim_mean\.nii$", "/avg_epi.nii"),

        (r"/art\..*_outliers\.txt$", "/artifact_outliers.txt"),
        (r"/global_intensity\..*\.txt$", "/global_intensities.txt"),
        (r"/norm\..*_outliers\.txt$", "/motion_norms.txt"),
        (r"/stats\..*\.txt$", "/motion_stats.json"),
        (r"/plot\..*\.png$", "/artifact_plots.png"),

        (r"/corr_stc{rest}_trim_filtermotart\.nii$", "/{rest}_motion_corrected.nii"),
        (r"/corr_stc{rest}_trim_filtermotart[\w_]*_cleaned\.nii$", "/{rest}_nuisance_corrected.nii"),
        (r"/corr_stc{rest}_trim_filtermotart[\w_]*_gsr\.nii$", "/{rest}_nuisance_corrected.nii"),
        (r"/corr_stc{rest}_trim_filtermotart[\w_]*_bandpassed\.nii$", "/{rest}_time_filtered.nii"),
        (r"/corr_stc{rest}_trim_filtermotart[\w_]*_smooth\.nii$", "/{rest}_smooth.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst, rest=rest_fbasename, anat=anat_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions,
        regexp_subst
    )

    # input and output anat workflow to main workflow connections
    main_wf.connect([
        (in_files, cleanup_wf, [("rest", "rest_input.in_file")]),

        # anat to fMRI registration inputs
        (anat_output, cleanup_wf, [
            ("tissues_native", "rest_input.tissues"),
            ("anat_biascorr", "rest_input.anat"),
        ]),

        # clean_up_wf to datasink
        (cleanup_wf, datasink, [
            ("rest_output.epi_brain_mask", "rest.@epi_brain_mask"),
            ("rest_output.tissues_brain_mask", "rest.@tissues_brain_mask"),
            ("rest_output.tissues", "rest.@tissues"),
            ("rest_output.anat", "rest.@anat"),
            ("rest_output.motion_regressors", "rest.@motion_regressors"),
            ("rest_output.compcor_regressors", "rest.@compcor_regressors"),
            ("rest_output.gsr_regressors", "rest.@gsr_regressors"),
            ("rest_output.motion_params", "rest.@motion_params"),
            ("rest_output.motion_corrected", "rest.@motion_corrected"),
            ("rest_output.nuis_corrected", "rest.@nuis_corrected"),
            ("rest_output.time_filtered", "rest.@time_filtered"),
            ("rest_output.smooth", "rest.@smooth"),
            ("rest_output.avg_epi", "rest.@avg_epi"),
            ("rest_output.tsnr_file", "rest.@tsnr"),
            ("rest_output.art_displacement_files", "rest.artifact_stats.@displacement"),
            ("rest_output.art_intensity_files", "rest.artifact_stats.@art_intensity"),
            ("rest_output.art_norm_files", "rest.artifact_stats.@art_norm"),
            ("rest_output.art_outlier_files", "rest.artifact_stats.@art_outlier"),
            ("rest_output.art_plot_files", "rest.artifact_stats.@art_plot"),
            ("rest_output.art_statistic_files", "rest.artifact_stats.@art_statistic"),
        ]),
    ])

    return main_wf
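The regexp_substitutions set above only rename files on their way into the datasink: DataSink applies each (pattern, replacement) pair in order with re.sub to every output path, after the plain `substitutions` pairs. A small illustration with one of the pairs from this list, already formatted with rest='rest_subj01':

import re

# each (pattern, replacement) pair is applied in order to every datasink path
pairs = [(r"/corr_stcrest_subj01_trim_mean\.nii$", "/avg_epi.nii")]

path = "/data/out/rest/corr_stcrest_subj01_trim_mean.nii"
for pattern, replacement in pairs:
    path = re.sub(pattern, replacement, path)

print(path)  # /data/out/rest/avg_epi.nii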
Example #7
def attach_spm_mrpet_preprocessing(main_wf,
                                   wf_name="spm_mrpet_preproc",
                                   do_group_template=False):
    """ Attach a PET pre-processing workflow that uses SPM12 to `main_wf`.
    This workflow needs MRI based workflow.

    This function is using the workflows defined in the function above:
    spm_mrpet_preprocessing or spm_mrpet_grouptemplate_preprocessing. Depending
    if group template is enabled.

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and
    `datasink` nodes.

    input_files.select.pet: input node

    datasink: nipype Node

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    do_group_template: bool
        If True, attach the group template creation and pre-processing pipeline.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - spm_anat_preproc

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)

    anat_output = get_interface_node(main_wf, "anat_output")

    # The base names of the 'anat' and 'pet' files for the substitutions
    anat_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'anat')))
    pet_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'pet')))

    # get the PET preprocessing pipeline
    if do_group_template:
        pet_wf = spm_mrpet_grouptemplate_preprocessing(wf_name=wf_name)
        template_name = 'grptemplate'
        output_subfolder = 'group_template'
    else:
        pet_wf = spm_mrpet_preprocessing(wf_name=wf_name)
        template_name = 'stdtemplate'
        output_subfolder = 'std_template'

    # dataSink output substitutions
    regexp_subst = [
        (r"/{pet}_.*_pvc.nii.gz$", "/{pet}_pvc.nii.gz"),
        (r"/{pet}_.*_pvc_maths.nii.gz$", "/{pet}_pvc_norm.nii.gz"),
        (r"/{pet}_.*_pvc_intnormed.nii.gz$", "/{pet}_pvc_norm.nii.gz"),
        (r"/tissues_brain_mask.nii$", "/brain_mask_anat.nii"),
        (r"/w{pet}.nii", "/{pet}_{template}.nii"),
        (r"/w{pet}_.*_pvc.nii$", "/{pet}_pvc_{template}.nii"),
        (r"/w{pet}_.*_pvc_maths.nii$", "/{pet}_pvc_norm_{template}.nii"),
        (r"/w{pet}_.*_pvc_intnormed.nii$", "/{pet}_pvc_norm_{template}.nii"),
        (r"/wbrain_mask.nii", "/brain_mask_{template}.nii"),
        (r"/r{pet}.nii", "/{pet}_anat.nii"),
        (r"/r{pet}_.*_pvc.nii$", "/{pet}_pvc_anat.nii"),
        (r"/r{pet}_.*_pvc_maths.nii$", "/{pet}_pvc_norm_anat.nii"),
        (r"/r{pet}_.*_pvc_intnormed.nii$", "/{pet}_pvc_norm_anat.nii"),
        (r"/y_rm{anat}_corrected.nii", "/{anat}_{pet}_warpfield.nii"),
        (r"/rm{anat}_corrected.nii$", "/{anat}_{pet}.nii"),
        (r"/rc1{anat}_corrected.nii$", "/gm_{pet}.nii"),
        (r"/rc2{anat}_corrected.nii$", "/wm_{pet}.nii"),
        (r"/rc3{anat}_corrected.nii$", "/csf_{pet}.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst,
                                    pet=pet_fbasename,
                                    anat=anat_fbasename,
                                    template=template_name)

    # prepare substitution for atlas_file, if any
    do_atlas, atlas_file = check_atlas_file()
    if do_atlas:
        atlas_basename = remove_ext(os.path.basename(atlas_file))
        regexp_subst.extend([(r"/[\w]*{atlas}\.nii$", "/{atlas}_{pet}.nii")])
        regexp_subst = format_pair_list(regexp_subst,
                                        pet=pet_fbasename,
                                        atlas=atlas_basename)

    regexp_subst += extension_duplicates(regexp_subst)
    regexp_subst = concat_to_pair_list(regexp_subst, prefix='/mrpet')

    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions, regexp_subst)

    # Connect the nodes
    main_wf.connect([
        # pet file input
        (in_files, pet_wf, [("pet", "pet_input.in_file")]),

        # pet to anat registration
        (anat_output, pet_wf, [("anat_biascorr", "pet_input.anat"),
                               ("tissues_native", "pet_input.tissues")]),
        (pet_wf, datasink, [
            ("pet_output.gm_norm", "mrpet.@norm"),
            # careful when changing this one: the names must match regexp_subst above
            ("pet_output.coreg_others", "mrpet.tissues"),
            ("pet_output.coreg_ref", "mrpet.@anat"),
            ("pet_output.pvc_mask", "mrpet.@pvc_mask"),
            ("pet_output.pvc_out", "mrpet.@pvc"),
            ("pet_output.brain_mask", "mrpet.@brain_mask"),
            ("pet_output.pvc_warped", "mrpet.{}.@pvc".format(output_subfolder)),
            ("pet_output.warp_field", "mrpet.{}.@warp_field".format(output_subfolder)),
            ("pet_output.pet_warped", "mrpet.{}.@pet_warped".format(output_subfolder)),
        ]),
    ])

    if not do_group_template:
        # Connect the nodes
        main_wf.connect([
            # pet to anat registration
            (anat_output, pet_wf, [("warp_forward",
                                    "pet_input.anat_to_mni_warp")]),
        ])

    if do_atlas:
        main_wf.connect([
            (anat_output, pet_wf, [("atlas_anat", "pet_input.atlas_anat")]),
            (pet_wf, datasink, [("pet_output.atlas_pet", "mrpet.@atlas")]),
        ])

    return main_wf
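The datasink targets used above, such as "mrpet.{}.@pvc".format(output_subfolder), follow nipype's DataSink naming rules: every dot adds a subfolder below the sink's container, while the part after '@' only keeps the connection name unique and does not create a folder. Roughly, for the standard-template case:

# With output_subfolder = 'std_template', the targets above map to folders like
# (roughly; the '@name' parts do not become folders):
#   mrpet.@anat                     -> <base_directory>/<container>/mrpet/
#   mrpet.tissues                   -> <base_directory>/<container>/mrpet/tissues/
#   mrpet.std_template.@pvc         -> <base_directory>/<container>/mrpet/std_template/
#   mrpet.std_template.@warp_field  -> <base_directory>/<container>/mrpet/std_template/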
Example #8
def attach_spm_anat_preprocessing(main_wf, wf_name="spm_anat_preproc"):
    """ Attach the SPM12 anatomical MRI pre-processing workflow to
    the `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have
    `input_files` and `datasink` nodes.

    input_files.anat: input node

    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)

    # The workflow box
    anat_wf = spm_anat_preprocessing(wf_name=wf_name)

    # The base name of the 'anat' file for the substitutions
    anat_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'anat')))

    # dataSink output substitutions
    regexp_subst = [
        (r"/{anat}_.*corrected_seg8.mat$", "/{anat}_to_mni_affine.mat"),
        (r"/m{anat}.*_corrected.nii$", "/{anat}_biascorrected.nii"),
        (r"/wm{anat}.*_corrected.nii$", "/{anat}_mni.nii"),
        (r"/y_{anat}.*nii$", "/{anat}_to_mni_field.nii"),
        (r"/iy_{anat}.*nii$", "/{anat}_to_mni_inv_field.nii"),
        (r"/mwc1{anat}.*nii$", "/{anat}_gm_mod_mni.nii"),
        (r"/mwc2{anat}.*nii$", "/{anat}_wm_mod_mni.nii"),
        (r"/mwc3{anat}.*nii$", "/{anat}_csf_mod_mni.nii"),
        (r"/mwc4{anat}.*nii$", "/{anat}_nobrain_mod_mni.nii"),
        (r"/c1{anat}.*nii$", "/{anat}_gm.nii"),
        (r"/c2{anat}.*nii$", "/{anat}_wm.nii"),
        (r"/c3{anat}.*nii$", "/{anat}_csf.nii"),
        (r"/c4{anat}.*nii$", "/{anat}_nobrain.nii"),
        (r"/c5{anat}.*nii$", "/{anat}_nobrain_mask.nii"),
        (r"/direct_cortical_thickness.nii$",
         "/{anat}_gm_cortical_thickness.nii"),
        (r"/direct_warped_white_matter.nii$",
         "/{anat}_warped_white_matter.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst, anat=anat_fbasename)

    # prepare substitution for atlas_file, if any
    do_atlas, atlas_file = check_atlas_file()
    if do_atlas:
        atlas_basename = remove_ext(os.path.basename(atlas_file))
        regexp_subst.extend([
            (r"/w{atlas}\.nii$", "/{atlas}_anat_space.nii"),
        ])
        regexp_subst = format_pair_list(regexp_subst,
                                        anat=anat_fbasename,
                                        atlas=atlas_basename)

    # add nii.gz patterns
    regexp_subst += extension_duplicates(regexp_subst)

    # add parent folder to paths
    regexp_subst = concat_to_pair_list(regexp_subst, prefix='/anat')

    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions, regexp_subst)

    main_wf.connect([
        (in_files, anat_wf, [("anat", "anat_input.in_file")]),
        (anat_wf, datasink, [
            ("anat_output.anat_mni", "anat.@mni"),
            ("anat_output.tissues_warped", "anat.tissues.warped"),
            ("anat_output.tissues_native", "anat.tissues.native"),
            ("anat_output.affine_transform", "anat.transform.@linear"),
            ("anat_output.warp_forward", "anat.transform.@forward"),
            ("anat_output.warp_inverse", "anat.transform.@inverse"),
            ("anat_output.anat_biascorr", "anat.@biascor"),
            ("anat_output.brain_mask", "anat.@brain_mask"),
        ]),
    ])

    # check optional outputs
    if do_atlas:
        main_wf.connect([
            (anat_wf, datasink, [("anat_output.atlas_anat", "anat.@atlas")]),
        ])

    do_cortical_thickness = get_config_setting(
        'anat_preproc.do_cortical_thickness', False)
    if do_cortical_thickness:
        main_wf.connect([
            (anat_wf, datasink, [
                ("anat_output.cortical_thickness", "anat.@cortical_thickness"),
                ("anat_output.warped_white_matter",
                 "anat.@warped_white_matter"),
            ]),
        ])

    return main_wf
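get_config_setting is the library's configuration lookup; the optional atlas warping and the cortical-thickness outputs above are switched on through it, with an optional default as second argument. The sketch below only illustrates the kind of dotted-key lookup being performed; the key name is taken from the code above, everything else is hypothetical:

# hypothetical illustration of a dotted-key configuration lookup like
# get_config_setting('anat_preproc.do_cortical_thickness', False);
# how settings are actually stored and loaded belongs to the library
_settings = {'anat_preproc.do_cortical_thickness': True}

def get_config_setting_sketch(key, default=''):
    return _settings.get(key, default)

do_cortical_thickness = get_config_setting_sketch('anat_preproc.do_cortical_thickness', False)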
Example #9
def attach_camino_tractography(main_wf, wf_name="camino_tract"):
    """ Attach the Camino-based tractography workflow to the `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the tractography workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    input_files.select.diff: input node

    datasink: nipype Node

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - spm_anat_preproc
    - spm_fsl_dti_preprocessing

    Returns
    -------
    main_wf: nipype Workflow
    """
    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)
    dti_coreg_output = get_interface_node(main_wf, 'dti_co_output')
    dti_artif_output = get_interface_node(main_wf, 'dti_art_output')

    # The workflow box
    tract_wf = camino_tractography(wf_name=wf_name)

    # input and output diffusion MRI workflow to main workflow connections
    main_wf.connect([
        (in_files, tract_wf, [("bval", "tract_input.bval")]),
        (dti_coreg_output, tract_wf, [("brain_mask_diff", "tract_input.mask")]),

        (dti_artif_output, tract_wf, [
            ("eddy_corr_file", "tract_input.diff"),
            ("bvec_rotated", "tract_input.bvec"),
        ]),

        # output
        (tract_wf, datasink, [
            ("tract_output.tensor", "tract.@tensor"),
            ("tract_output.tracks", "tract.@tracks"),
            ("tract_output.connectivity", "tract.@connectivity"),
            ("tract_output.mean_fa", "tract.@mean_fa"),
            ("tract_output.fa", "tract.@fa"),
        ])
    ])

    # pass the atlas if it's the case
    do_atlas, _ = check_atlas_file()
    if do_atlas:
        main_wf.connect([(dti_coreg_output, tract_wf, [("atlas_diff", "tract_input.atlas")])])

    return main_wf
Example #10
0
def attach_spm_fsl_dti_preprocessing(main_wf,
                                     wf_name="spm_fsl_dti_preprocessing"):
    """ Attach a set of pipelines to the `main_wf` for Diffusion MR (`diff`) image processing.
    - dti_artifact_correction
    - spm_anat_to_diff_coregistration
    - dti_tensor_fitting

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    input_files.select.diff: input node

    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)
    anat_output = get_interface_node(main_wf, 'anat_output')

    # attach the artifact detection and correction pipeline
    main_wf = attach_dti_artifact_correction(main_wf)
    dti_art_output = get_interface_node(main_wf, 'dti_art_output')

    # The workflow boxes
    coreg_dti_wf = spm_anat_to_diff_coregistration(wf_name=wf_name)

    # dataSink output substitutions
    ## The base names of the 'diff' and 'anat' files for the substitutions
    diff_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'diff')))
    anat_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'anat')))

    regexp_subst = [
        (r"/brain_mask_{diff}_space\.nii$", "/brain_mask.nii"),
        (r"/eddy_corrected\.nii$", "/{diff}_eddycor.nii"),
        (r"/rc1anat_hc_corrected\.nii$", "/gm_diff.nii"),
        (r"/rc2anat_hc_corrected\.nii$", "/wm_diff.nii"),
        (r"/rc3anat_hc_corrected\.nii$", "/csf_diff.nii"),
        (r"/rmanat_hc_corrected\.nii$", "/{anat}_diff.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst,
                                    diff=diff_fbasename,
                                    anat=anat_fbasename)

    # prepare substitution for atlas_file, if any
    do_atlas, atlas_file = check_atlas_file()
    if do_atlas:
        atlas_basename = remove_ext(os.path.basename(atlas_file))
        regexp_subst.extend([
            (r"/[\w]*{atlas}.*\.nii$", "/{atlas}_{diff}_space.nii"),
        ])
        regexp_subst = format_pair_list(regexp_subst,
                                        atlas=atlas_basename,
                                        diff=diff_fbasename)

    regexp_subst += extension_duplicates(regexp_subst)
    regexp_subst = concat_to_pair_list(regexp_subst, prefix='/diff')
    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions, regexp_subst)

    # input and output diffusion MRI workflow to main workflow connections
    main_wf.connect([
        (dti_art_output, coreg_dti_wf, [
            ("avg_b0", "dti_co_input.avg_b0"),
        ]),
        (anat_output, coreg_dti_wf, [("tissues_native",
                                      "dti_co_input.tissues"),
                                     ("anat_biascorr", "dti_co_input.anat")]),
        (coreg_dti_wf, datasink, [
            ("dti_co_output.anat_diff", "diff.@anat_diff"),
            ("dti_co_output.tissues_diff", "diff.tissues.@tissues_diff"),
            ("dti_co_output.brain_mask_diff", "diff.@brain_mask"),
        ]),
    ])

    if do_atlas:
        main_wf.connect([
            (anat_output, coreg_dti_wf, [("atlas_anat",
                                          "dti_co_input.atlas_anat")]),
            (coreg_dti_wf, datasink, [("dti_co_output.atlas_diff",
                                       "diff.@atlas")]),
        ])

    return main_wf
Example #11
def attach_dti_artifact_correction(main_wf, wf_name="dti_artifact_correction"):
    """ Attach the FSL-based diffusion MRI artifact detection and correction
    workflow to the `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    input_files.select.diff: input node

    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)

    # The workflow box
    art_dti_wf = dti_artifact_correction(wf_name=wf_name)

    # dataSink output substitutions
    ## The base name of the 'diff' file for the substitutions
    diff_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'diff')))

    regexp_subst = [
        (r"/brain_mask_{diff}_space\.nii$", "/brain_mask.nii"),
        (r"/eddy_corrected\.nii$", "/{diff}_eddycor.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst, diff=diff_fbasename)

    regexp_subst += extension_duplicates(regexp_subst)
    regexp_subst = concat_to_pair_list(regexp_subst, prefix='/diff')
    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions, regexp_subst)

    # input and output diffusion MRI workflow to main workflow connections
    main_wf.connect([
        (in_files, art_dti_wf, [
            ("diff", "dti_art_input.diff"),
            ("bval", "dti_art_input.bval"),
            ("bvec", "dti_art_input.bvec"),
        ]),
        (art_dti_wf, datasink, [
            ("dti_art_output.eddy_corr_file", "diff.@eddy_corr_file"),
            ("dti_art_output.bvec_rotated", "diff.@bvec_rotated"),
            ("dti_art_output.brain_mask_1", "diff.@brain_mask_1"),
            ("dti_art_output.brain_mask_2", "diff.@brain_mask_2"),
            ("dti_art_output.acqp", "diff.@acquisition_pars"),
            ("dti_art_output.index", "diff.@acquisition_idx"),
            ("dti_art_output.avg_b0", "diff.@avg_b0"),
        ]),
    ])

    do_rapidart = get_config_setting("dmri.artifact_detect", True)
    if do_rapidart:
        main_wf.connect([
            (art_dti_wf, datasink, [
                ("dti_art_output.hmc_corr_file",
                 "diff.artifact_stats.@hmc_corr_file"),
                ("dti_art_output.hmc_corr_bvec",
                 "diff.artifact_stats.@hmc_rot_bvec"),
                ("dti_art_output.hmc_corr_xfms",
                 "diff.artifact_stats.@hmc_corr_xfms"),
                ("dti_art_output.art_displacement_files",
                 "diff.artifact_stats.@art_disp_files"),
                ("dti_art_output.art_intensity_files",
                 "diff.artifact_stats.@art_ints_files"),
                ("dti_art_output.art_norm_files",
                 "diff.artifact_stats.@art_norm_files"),
                ("dti_art_output.art_outlier_files",
                 "diff.artifact_stats.@art_outliers"),
                ("dti_art_output.art_plot_files",
                 "diff.artifact_stats.@art_plots"),
                ("dti_art_output.art_statistic_files",
                 "diff.artifact_stats.@art_stats"),
            ]),
        ])

    return main_wf
Example #12
def attach_spm_warp_fmri_wf(main_wf,
                            registration_wf_name="spm_warp_fmri",
                            do_group_template=False):
    """ Attach the fMRI inter-subject spatial normalization workflow to the `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    registration_wf_name: str
        Name of the registration workflow.

    do_group_template: bool
        If True, attach the group template creation and pre-processing pipeline.

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    input_files.select.anat: input node

    datasink: nipype Node

    Workflow Dependencies
    ---------------------
    fmri_cleanup, the cleanup and preprocessing of the fMRI data

    spm_anat_preproc, for the anatomical to MNI space transformation

    spm_fmri_template, if do_group_template is True

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    anat_wf = get_subworkflow(main_wf, 'spm_anat_preproc')
    cleanup_wf = get_subworkflow(main_wf, 'fmri_cleanup')

    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf)

    if do_group_template:
        template_name = 'grptemplate'
    else:
        template_name = 'stdtemplate'

    warp_wf_name = "{}_{}".format(registration_wf_name, template_name)
    warp_fmri_wf = spm_warp_fmri_wf(warp_wf_name,
                                    register_to_grptemplate=do_group_template)

    # dataSink output substitutions
    # The base names of the 'rest' and 'anat' files for the substitutions
    rest_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'rest')))
    anat_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'anat')))

    regexp_subst = [
        (r"/corr_stc{fmri}_trim_mean_sn.mat$",
         "/{fmri}_grptemplate_params.mat"),
        (r"/y_corr_stc{fmri}_trim_mean\.nii$", "/{fmri}_to_mni_warpfield.nii"),
        (r"/rcorr_stc{fmri}_trim_mean.nii$", "/avg_epi_anat.nii"),
        (r"/wgrptmpl_corr_stc{fmri}_trim_mean\.nii$",
         "/avg_epi_grptemplate.nii"),
        (r"/wgrptmpl_corr_stc{fmri}_trim\.nii$",
         "/{fmri}_trimmed_grptemplate.nii"),
        (r"/wgrptmpl_corr_stc{fmri}_trim_filtermotart[\w_]*_cleaned\.nii$",
         "/{fmri}_nuisance_corrected_grptemplate.nii"),
        (r"/wgrptmpl_corr_stc{fmri}_trim_filtermotart[\w_]*_gsr\.nii$",
         "/{fmri}_nuisance_corrected_grptemplate.nii"),
        (r"/wgrptmpl_corr_stc{fmri}_trim_filtermotart[\w_]*_bandpassed\.nii$",
         "/{fmri}_time_filtered_grptemplate.nii"),
        (r"/wgrptmpl_corr_stc{fmri}_trim_filtermotart[\w_]*_smooth\.nii$",
         "/{fmri}_smooth_grptemplate.nii"),
        (r"/w[r]?corr_stc{fmri}_trim_mean\.nii$", "/avg_epi_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim\.nii$", "/{fmri}_trimmed_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim_filtermotart[\w_]*_cleaned\.nii$",
         "/{fmri}_nuisance_corrected_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim_filtermotart[\w_]*_gsr\.nii$",
         "/{fmri}_nuisance_corrected_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim_filtermotart[\w_]*_bandpassed\.nii$",
         "/{fmri}_time_filtered_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim_filtermotart[\w_]*_smooth\.nii$",
         "/{fmri}_smooth_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim[\w_]*_smooth\.nii$",
         "/{fmri}_nofilt_smooth_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim[\w_]*_cleaned\.nii$",
         "/{fmri}_nofilt_nuisance_corrected_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim[\w_]*_gsr\.nii$",
         "/{fmri}_nofilt_nuisance_corrected_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim[\w_]*_bandpassed\.nii$",
         "/{fmri}_nofilt_time_filtered_mni.nii"),
        (r"/w[r]?corr_stc{fmri}_trim[\w_]*_smooth\.nii$",
         "/{fmri}_nofilt_smooth_mni.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst,
                                    fmri=rest_fbasename,
                                    anat=anat_fbasename)

    # prepare substitution for atlas_file, if any
    do_atlas, atlas_file = check_atlas_file()
    if do_atlas:
        atlas_basename = remove_ext(os.path.basename(atlas_file))
        regexp_subst.extend([
            (r"/[\w]*{atlas}.*\.nii$", "/{atlas}_{fmri}_space.nii"),
        ])
        regexp_subst = format_pair_list(regexp_subst,
                                        atlas=atlas_basename,
                                        fmri=rest_fbasename)

    regexp_subst += extension_duplicates(regexp_subst)
    regexp_subst = concat_to_pair_list(regexp_subst, prefix='/rest')

    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions, regexp_subst)

    # input and output anat workflow to main workflow connections
    main_wf.connect([
        # clean_up_wf to registration_wf
        (cleanup_wf, warp_fmri_wf, [
            ("rest_output.motion_corrected", "wfmri_input.in_file"),
            ("rest_output.anat", "wfmri_input.anat_fmri"),
            ("rest_output.time_filtered", "wfmri_input.time_filtered"),
            ("rest_output.avg_epi", "wfmri_input.avg_epi"),
            ("rest_output.tissues_brain_mask", "wfmri_input.brain_mask"),
        ]),
        # output
        (warp_fmri_wf, datasink, [
            ("wfmri_output.warped_fmri",
             "rest.{}.@warped_fmri".format(template_name)),
            ("wfmri_output.wtime_filtered",
             "rest.{}.@time_filtered".format(template_name)),
            ("wfmri_output.smooth", "rest.{}.@smooth".format(template_name)),
            ("wfmri_output.wavg_epi",
             "rest.{}.@avg_epi".format(template_name)),
            ("wfmri_output.warp_field",
             "rest.{}.@warp_field".format(template_name)),
        ]),
    ])

    if not do_group_template:
        main_wf.connect([
            (anat_wf, warp_fmri_wf, [
                ("anat_output.anat_biascorr", "wfmri_input.reference_file"),
                ("anat_output.warp_forward", "wfmri_input.anat_to_mni_warp"),
            ]),
            # output
            (warp_fmri_wf, datasink, [
                ("wfmri_output.coreg_avg_epi", "rest.@coreg_fmri_anat"),
                ("wfmri_output.coreg_others", "rest.@coreg_others"),
            ]),
        ])

    if do_atlas:
        main_wf.connect([
            (anat_wf, warp_fmri_wf, [("anat_output.atlas_anat",
                                      "wfmri_input.atlas_anat")]),
            (warp_fmri_wf, datasink, [("wfmri_output.atlas_fmri",
                                       "rest.@atlas")]),
        ])
    return main_wf