Example #1
def spm_warp_to_mni(wf_name="spm_warp_to_mni"):
    """ Run Gunzip and SPM Normalize12 to the list of files input and outputs the list of warped files.

    It does:
    - Warp each individual input image to the standard SPM template

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    warp_input.in_files: list of traits.File
        The raw NIFTI_GZ image files

    Nipype Outputs
    --------------
    warp_output.warped_files: list of existing files
        The warped files.

    Returns
    -------
    wf: nipype Workflow
    """
    # input
    in_fields = ["in_files"]
    out_fields = ["warped_files"]

    input = setup_node(
        IdentityInterface(fields=in_fields, mandatory_inputs=True),
        name="warp_input",
    )

    gunzip = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    warp = setup_node(spm.Normalize12(jobtype='estwrite',
                                      affine_regularization_type='mni'),
                      name="normalize12",
                      type="map",
                      iterfield=['image_to_align'])

    # output
    output = setup_node(IdentityInterface(fields=out_fields),
                        name="warp_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    wf.connect([
        # inputs
        (input, gunzip, [("in_files", "in_file")]),
        (gunzip, warp, [("out_file", "image_to_align")]),

        # output
        (warp, output, [("normalized_image", "warped_files")]),
    ])

    return wf
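
A minimal usage sketch for the workflow above, assuming the function is importable; the working directory and input paths are hypothetical:

wf = spm_warp_to_mni()
wf.base_dir = "/tmp/nipype_work"                              # hypothetical working directory
wf.inputs.warp_input.in_files = ["/data/sub-01/pet.nii.gz",   # hypothetical inputs
                                 "/data/sub-02/pet.nii.gz"]
wf.run()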
Example #2
def dcm2niix_wf(wf_name='dcm2niix'):
    """Run dcm2niix over one folder with DICOM files.

    Nipype Inputs
    -------------
    dcm2niix.in_dcmdir: traits.Dir
        path to the DICOM images folder.

    Nipype Outputs
    --------------
    dcm2niix.bids: (a list of items which are an existing file name)

    dcm2niix.bvals: (a list of items which are an existing file name)

    dcm2niix.bvecs: (a list of items which are an existing file name)

    dcm2niix.converted_files: (a list of items which are an existing file name)

    Returns
    -------
    wf: nipype Workflow
    """
    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # specify input and output fields
    in_fields = [
        "in_dcmdir",
    ]

    out_fields = ["bids", "bvals", "bvecs", "converted_files"]

    # input node
    dcm2niix_input = setup_node(IdentityInterface(fields=in_fields,
                                                  mandatory_inputs=True),
                                name="dcm2niix_input")
    # DICOM to NIfTI conversion node
    dcm2niix = setup_node(Dcm2niix(), name="dcm2niix")

    # output node
    dcm2niix_output = setup_node(IdentityInterface(fields=out_fields),
                                 name="dcm2niix_output")

    # Connect the nodes
    wf.connect([
        # input
        (dcm2niix_input, dcm2niix, [
            ("in_dcmdir", "source_dir"),
        ]),

        # output
        (dcm2niix, dcm2niix_output, [
            ("bids", "bids"),
            ("bvals", "bvals"),
            ("bvecs", "bvecs"),
            ("converted_files", "converted_files"),
        ]),
    ])

    return wf
Example #3
def fristons_twenty_four_wf(wf_name='fristons_twenty_four'):
    """ The main purpose of this workflow is to calculate 24 parameters including
    the 6 motion parameters of the current volume and the preceeding volume,
    plus each of these values squared.

    Parameters
    ----------
    wf_name: str
        Workflow name

    Returns
    -------
    wf: workflow object

    Nipype Inputs
    -------------
    f24_input.in_file: str
        Path to the input movement file from motion correction.

    Nipype Outputs
    --------------
    f24_output.out_file: str
        Path to the 1D file containing the Friston 24 parameters.

    References
    ----------
    .. [1] Friston, K. J., Williams, S., Howard, R., Frackowiak, R. S., & Turner, R. (1996).
       Movement-related effects in fMRI time-series. Magnetic Resonance in Medicine, 35(3), 346-355.
    """
    wf = pe.Workflow(name=wf_name)

    # specify input and output fields
    in_fields = [
        "in_file",
    ]

    out_fields = [
        "out_file",
    ]

    f24_input = setup_node(IdentityInterface(fields=in_fields,
                                             mandatory_inputs=True),
                           name='f24_input')

    calc_friston = setup_node(Function(input_names=['in_file'],
                                       output_names=['out_file'],
                                       function=calc_friston_twenty_four),
                              name='calc_friston')

    f24_output = setup_node(IdentityInterface(fields=out_fields),
                            name='f24_output')

    # Connect the nodes
    wf.connect([
        (f24_input, calc_friston, [("in_file", "in_file")]),
        (calc_friston, f24_output, [("out_file", "out_file")]),
    ])
    return wf
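
For orientation, a minimal NumPy sketch of the expansion that `calc_friston_twenty_four` is expected to perform, assuming the motion file is plain text with one row per volume and six columns; the file names are hypothetical and this is an illustration, not the library's implementation:

import numpy as np

def friston24(params):
    # parameters of the preceding volume; the first volume has no predecessor, so use zeros
    prev = np.vstack([np.zeros((1, params.shape[1])), params[:-1]])
    # [p(t), p(t-1), p(t)^2, p(t-1)^2] -> 6 * 4 = 24 columns
    return np.hstack([params, prev, params ** 2, prev ** 2])

motion = np.loadtxt("rest_mc.par")                # hypothetical 6-column motion file
np.savetxt("friston_24.1D", friston24(motion))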
Example #4
def rest_noise_filter_wf(wf_name='rest_noise_removal'):
    """ Create a resting-state fMRI noise removal node.

    Nipype Inputs
    -------------
    rest_noise_input.in_file

    rest_noise_input.brain_mask

    rest_noise_input.wm_mask

    rest_noise_input.csf_mask

    rest_noise_input.motion_params
        Nipy motion parameters.

    Nipype Outputs
    --------------
    rest_noise_output.tsnr_file
        A temporal SNR (tSNR) estimation volume for QA purposes.

    rest_noise_output.motion_corrected
        The fMRI motion corrected image.

    rest_noise_output.nuis_corrected
        The resulting nuisance corrected image.
        This will be the same as 'motion_corrected' if compcor
        is disabled.

    rest_noise_output.motion_regressors
        Motion regressors file.

    rest_noise_output.compcor_regressors
        CompCor regressors file.

    rest_noise_output.art_displacement_files
        One image file containing the voxel-displacement timeseries.

    rest_noise_output.art_intensity_files
        One file containing the global intensity values determined
        from the brainmask.

    rest_noise_output.art_norm_files
        One file containing the composite norm.

    rest_noise_output.art_outlier_files
         One file containing a list of 0-based indices corresponding
         to outlier volumes.

    rest_noise_output.art_plot_files
        One image file containing the detected outliers.

    rest_noise_output.art_statistic_files
        One file containing information about the different types of
        artifacts and, if design info is provided, details of
        stimulus-correlated motion and a listing of artifacts by
        event type.

    Returns
    -------
    rm_nuisance_wf: nipype Workflow
    """

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    in_fields = [
        "in_file",
        "brain_mask",
        "wm_mask",
        "csf_mask",
        "motion_params",
    ]

    out_fields = [
        "tsnr_file",
        "motion_corrected",
        "nuis_corrected",
        "motion_regressors",
        "compcor_regressors",
        "gsr_regressors",
        "art_displacement_files",
        "art_intensity_files",
        "art_norm_files",
        "art_outlier_files",
        "art_plot_files",
        "art_statistic_files",
    ]

    # input identities
    rest_noise_input = setup_node(IdentityInterface(fields=in_fields,
                                                    mandatory_inputs=True),
                                  name="rest_noise_input")

    # get the settings for filters
    filters = _get_params_for('rest_filter')

    # Compute TSNR on realigned data regressing polynomial up to order 2
    tsnr = setup_node(TSNR(regress_poly=2), name='tsnr')

    # Use :class:`nipype.algorithms.rapidart` to determine which of the
    # images in the functional series are outliers based on deviations in
    # intensity or movement.
    art = setup_node(rapidart_fmri_artifact_detection(),
                     name="rapidart_artifacts")

    # Compute motion regressors
    motion_regs = setup_node(Function(
        input_names=['motion_params', 'order', 'derivatives'],
        output_names=['out_files'],
        function=motion_regressors),
                             name='motion_regressors')

    # Create a filter to remove motion and art confounds
    motart_pars = setup_node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=create_regressors),
                             name='motart_parameters')

    motion_filter = setup_node(fsl.GLM(out_f_name='F_mcart.nii.gz',
                                       out_pf_name='pF_mcart.nii.gz',
                                       demean=True),
                               name='motion_filter')

    # Noise confound regressors
    compcor_pars = setup_node(Function(input_names=[
        'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
    ],
                                       output_names=['components_file'],
                                       function=extract_noise_components),
                              name='compcor_pars')
    # compcor_pars = setup_node(ACompCor(), name='compcor_pars')
    # compcor_pars.inputs.components_file = 'noise_components.txt'

    compcor_filter = setup_node(fsl.GLM(out_f_name='F.nii.gz',
                                        out_pf_name='pF.nii.gz',
                                        demean=True),
                                name='compcor_filter')

    # Global signal regression
    gsr_pars = setup_node(Function(input_names=[
        'realigned_file', 'mask_file', 'num_components', 'extra_regressors'
    ],
                                   output_names=['components_file'],
                                   function=extract_noise_components),
                          name='gsr_pars')

    gsr_filter = setup_node(fsl.GLM(out_f_name='F_gsr.nii.gz',
                                    out_pf_name='pF_gsr.nii.gz',
                                    demean=True),
                            name='gsr_filter')

    # output identities
    rest_noise_output = setup_node(IdentityInterface(fields=out_fields,
                                                     mandatory_inputs=True),
                                   name="rest_noise_output")

    # Connect the nodes
    wf.connect([
        # tsnr
        (rest_noise_input, tsnr, [("in_file", "in_file")]),

        # artifact detection
        (rest_noise_input, art, [("in_file", "realigned_files"),
                                 ("motion_params", "realignment_parameters"),
                                 ("brain_mask", "mask_file")]),

        # calculate motion regressors
        (rest_noise_input, motion_regs, [("motion_params", "motion_params")]),

        # create motion and confound regressors parameters file
        (art, motart_pars, [
            ("norm_files", "comp_norm"),
            ("outlier_files", "outliers"),
        ]),
        (motion_regs, motart_pars, [("out_files", "motion_params")]),

        # motion filtering
        (rest_noise_input, motion_filter, [
            ("in_file", "in_file"),
            (("in_file", rename, "_filtermotart"), "out_res_name"),
        ]),
        (motart_pars, motion_filter, [(("out_files", selectindex, 0), "design")
                                      ]),

        # output
        (tsnr, rest_noise_output, [("tsnr_file", "tsnr_file")]),
        (motart_pars, rest_noise_output, [("out_files", "motion_regressors")]),
        (motion_filter, rest_noise_output, [("out_res", "motion_corrected")]),
        (art, rest_noise_output, [
            ("displacement_files", "art_displacement_files"),
            ("intensity_files", "art_intensity_files"),
            ("norm_files", "art_norm_files"),
            ("outlier_files", "art_outlier_files"),
            ("plot_files", "art_plot_files"),
            ("statistic_files", "art_statistic_files"),
        ]),
    ])

    last_filter = motion_filter

    # compcor filter
    if filters['compcor_csf'] or filters['compcor_wm']:
        wf.connect([
            # calculate compcor regressor and parameters file
            (motart_pars, compcor_pars, [
                (("out_files", selectindex, 0), "extra_regressors"),
            ]),
            (motion_filter, compcor_pars, [
                ("out_res", "realigned_file"),
            ]),

            # the compcor filter
            (motion_filter, compcor_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_cleaned"), "out_res_name"),
            ]),
            (compcor_pars, compcor_filter, [("components_file", "design")]),
            (rest_noise_input, compcor_filter, [("brain_mask", "mask")]),

            # output
            (compcor_pars, rest_noise_output, [("components_file",
                                                "compcor_regressors")]),
        ])
        last_filter = compcor_filter

    # global signal regression
    if filters['gsr']:
        wf.connect([
            # calculate gsr regressors parameters file
            (last_filter, gsr_pars, [("out_res", "realigned_file")]),
            (rest_noise_input, gsr_pars, [("brain_mask", "mask_file")]),

            # the output file name
            (rest_noise_input, gsr_filter, [("brain_mask", "mask")]),
            (last_filter, gsr_filter, [
                ("out_res", "in_file"),
                (("out_res", rename, "_gsr"), "out_res_name"),
            ]),
            (gsr_pars, gsr_filter, [("components_file", "design")]),

            # output
            (gsr_pars, rest_noise_output, [("components_file",
                                            "gsr_regressors")]),
        ])
        last_filter = gsr_filter

    # connect the final nuisance correction output node
    wf.connect([
        (last_filter, rest_noise_output, [("out_res", "nuis_corrected")]),
    ])

    if filters['compcor_csf'] and filters['compcor_wm']:
        mask_merge = setup_node(Merge(2), name="mask_merge")
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, mask_merge, [("wm_mask", "in1")]),
            (rest_noise_input, mask_merge, [("csf_mask", "in2")]),
            (mask_merge, compcor_pars, [("out", "mask_file")]),
        ])

    elif filters['compcor_csf']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("csf_mask", "mask_file")]),
        ])

    elif filters['compcor_wm']:
        wf.connect([
            ## the mask for the compcor filter
            (rest_noise_input, compcor_pars, [("wm_mask", "mask_file")]),
        ])

    return wf
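
As a rough sketch of what `extract_noise_components` computes in the CompCor branch, here is a stripped-down version that only needs nibabel and NumPy (the real function also handles `extra_regressors`; this is an illustration, not the library's code):

import nibabel as nib
import numpy as np

def compcor_components(realigned_file, mask_file, num_components=5):
    # time series of the voxels inside the noise mask, shaped (time, voxels)
    data = nib.load(realigned_file).get_fdata()
    mask = nib.load(mask_file).get_fdata() > 0
    ts = data[mask].T
    # remove each voxel's mean, then keep the top temporal principal components
    ts = ts - ts.mean(axis=0)
    u, _, _ = np.linalg.svd(ts, full_matrices=False)
    return u[:, :num_components]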
Example #5
def spm_mrpet_preprocessing(wf_name="spm_mrpet_preproc"):
    """ Run the PET pre-processing workflow against the
    gunzip_pet.in_file files.
    It depends on the anat_preproc_workflow, so if this
    has not been run, this function will run it too.

    # TODO: organize the anat2pet hack/condition somehow:
    If anat2pet:
    - SPM12 Coregister T1 and tissues to PET
    - PETPVC the PET image in PET space
    - SPM12 Warp PET to MNI
    else:
    - SPM12 Coregister PET to T1
    - PETPVC the PET image in anatomical space
    - SPM12 Warp PET in anatomical space to MNI through the
    `anat_to_mni_warp`.

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pet_input.in_file: traits.File
        The raw NIFTI_GZ PET image file

    pet_input.anat: traits.File
        Path to the high-contrast anatomical image.
        Reference file of the warp_field, i.e., the
        anatomical image in its native space.

    pet_input.anat_to_mni_warp: traits.File
        The warp field from the transformation of the
        anatomical image to the standard MNI space.

    pet_input.atlas_anat: traits.File
        The atlas file in anatomical space.

    pet_input.tissues: list of traits.File
        List of tissues files from the New Segment process.
        At least the first 3 tissues must be present.

    Nipype Outputs
    --------------
    pet_output.pvc_out: existing file
        The results of the PVC process

    pet_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pet_output.coreg_ref: existing file
        The reference image coregistered to PET space.

    pet_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files

    pet_output.pvc_warped: existing file
        Results from PETPVC normalized to MNI.
        The result of every internal pre-processing step
        is normalized to MNI here.

    pet_output.warp_field: existing files
        Spatial normalization parameters .mat files

    pet_output.gm_norm: existing file
        The output of the grey matter intensity
        normalization process.
        This is the last step in the PET signal correction,
        before registration.

    pet_output.atlas_pet: existing file
        Atlas image warped to PET space.
        If the `atlas_file` option is an existing file and
        `normalize_atlas` is True.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields = ["in_file", "anat", "anat_to_mni_warp", "tissues"]

    out_fields = [
        "brain_mask",
        "coreg_others",
        "coreg_ref",
        "pvc_warped",
        "pet_warped",  # 'pet_warped' is a dummy entry to keep the fields pattern.
        "warp_field",
        "pvc_out",
        "pvc_mask",
        "gm_norm"
    ]

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields += ["atlas_anat"]
        out_fields += ["atlas_pet"]

    # input
    pet_input = setup_node(IdentityInterface(fields=in_fields,
                                             mandatory_inputs=True),
                           name="pet_input")

    # workflow to perform partial volume correction
    petpvc = petpvc_workflow(wf_name="petpvc")

    merge_list = setup_node(Merge(4), name='merge_for_unzip')
    gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    warp_pet = setup_node(spm_normalize(), name="warp_pet")

    tpm_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="tpm_bbox")
    tpm_bbox.inputs.in_file = spm_tpm_priors_path()

    # output
    pet_output = setup_node(IdentityInterface(fields=out_fields),
                            name="pet_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # check how to perform the registration, to decide how to build the pipeline
    anat2pet = get_config_setting('registration.anat2pet', False)
    if anat2pet:
        wf.connect([
            # inputs
            (pet_input, petpvc, [("in_file", "pvc_input.in_file"),
                                 ("anat", "pvc_input.reference_file"),
                                 ("tissues", "pvc_input.tissues")]),

            # gunzip some files for SPM Normalize
            (petpvc, merge_list, [("pvc_output.pvc_out", "in1"),
                                  ("pvc_output.brain_mask", "in2"),
                                  ("pvc_output.gm_norm", "in3")]),
            (pet_input, merge_list, [("in_file", "in4")]),
            (merge_list, gunzipper, [("out", "in_file")]),

            # warp the PET PVCed to MNI
            (petpvc, warp_pet, [("pvc_output.coreg_ref", "image_to_align")]),
            (gunzipper, warp_pet, [("out_file", "apply_to_files")]),
            (tpm_bbox, warp_pet, [("bbox", "write_bounding_box")]),

            # output
            (petpvc, pet_output, [("pvc_output.pvc_out", "pvc_out"),
                                  ("pvc_output.brain_mask", "brain_mask"),
                                  ("pvc_output.coreg_ref", "coreg_ref"),
                                  ("pvc_output.coreg_others", "coreg_others"),
                                  ("pvc_output.gm_norm", "gm_norm")]),

            # output
            (warp_pet, pet_output, [("normalized_files", "pvc_warped"),
                                    ("deformation_field", "warp_field")]),
        ])
    else:  # PET 2 ANAT
        collector = setup_node(Merge(2), name='merge_for_warp')
        apply_warp = setup_node(spm_apply_deformations(), name="warp_pet")

        wf.connect([
            # inputs
            (pet_input, petpvc, [("in_file", "pvc_input.in_file"),
                                 ("anat", "pvc_input.reference_file"),
                                 ("tissues", "pvc_input.tissues")]),

            # gunzip some files for SPM Normalize
            (petpvc, merge_list, [("pvc_output.pvc_out", "in1"),
                                  ("pvc_output.brain_mask", "in2"),
                                  ("pvc_output.gm_norm", "in3")]),
            (pet_input, merge_list, [("in_file", "in4")]),
            (merge_list, gunzipper, [("out", "in_file")]),

            # warp the PET PVCed to MNI
            (gunzipper, collector, [("out_file", "in1")]),
            (petpvc, collector, [("pvc_output.coreg_ref", "in2")]),
            (pet_input, apply_warp, [("anat_to_mni_warp", "deformation_file")
                                     ]),
            (collector, apply_warp, [("out", "apply_to_files")]),
            (tpm_bbox, apply_warp, [("bbox", "write_bounding_box")]),

            # output
            (petpvc, pet_output, [("pvc_output.pvc_out", "pvc_out"),
                                  ("pvc_output.brain_mask", "brain_mask"),
                                  ("pvc_output.petpvc_mask", "petpvc_mask"),
                                  ("pvc_output.coreg_ref", "coreg_ref"),
                                  ("pvc_output.coreg_others", "coreg_others"),
                                  ("pvc_output.gm_norm", "gm_norm")]),

            # output
            (apply_warp, pet_output, [("normalized_files", "pvc_warped"),
                                      ("deformation_field", "warp_field")]),
        ])

    if do_atlas:
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"),
                                 name="coreg_atlas")

        # set the registration interpolation to nearest neighbour.
        coreg_atlas.inputs.write_interp = 0
        wf.connect([
            (pet_input, coreg_atlas, [("anat", "source")]),
            (petpvc, coreg_atlas, [("pvc_output.coreg_ref", "target")]),
            (pet_input, coreg_atlas, [("atlas_anat", "apply_to_files")]),
            (coreg_atlas, pet_output, [("coregistered_files", "atlas_pet")]),
        ])

    return wf
Example #6
def spm_mrpet_grouptemplate_preprocessing(
        wf_name="spm_mrpet_grouptemplate_preproc"):
    """ Run the PET pre-processing workflow against the gunzip_pet.in_file files.
    It depends on the anat_preproc_workflow, so if this has not been run, this function
    will run it too.

    This is identical to the workflow defined in `spm_mrpet_preprocessing`,
    the only difference being that we now normalize all subjects against a
    custom template using the SPM Old Normalize interface.

    It does:
    - SPM12 Coregister T1 and tissues to PET
    - PVC the PET image in PET space
    - SPM12 Warp PET to the given template

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pet_input.in_file: traits.File
        The raw NIFTI_GZ PET image file.

    pet_input.atlas_anat: traits.File
        The atlas file in anatomical space.

    pet_input.anat: traits.File
        Path to the high-contrast anatomical image.
        Reference file of the warp_field, i.e., the anatomical image in its native space.

    pet_input.tissues: list of traits.File
        List of tissues files from the New Segment process. At least the first
        3 tissues must be present.

    pet_input.pet_template: traits.File
        The template file for inter-subject registration reference.

    Nipype Outputs
    --------------
    pet_output.pvc_out: existing file
        The results of the PVC process.

    pet_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pet_output.coreg_ref: existing file
        The reference image coregistered to PET space.

    pet_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files.

    pet_output.pet_warped: existing file
        PET image normalized to the group template.

    pet_output.pvc_warped: existing file
        The outputs of the PETPVC workflow normalized to the group template.
        The result of every internal pre-processing step is normalized to the
        group template here.

    pet_output.warp_field: existing files
        Spatial normalization parameters .mat files.

    pet_output.gm_norm: existing file
        The output of the grey matter intensity normalization process.
        This is the last step in the PET signal correction, before registration.

    pet_output.atlas_pet: existing file
        Atlas image warped to PET space.
        If the `atlas_file` option is an existing file and `normalize_atlas` is True.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields = ["in_file", "anat", "tissues", "pet_template"]

    out_fields = [
        "brain_mask", "coreg_others", "coreg_ref", "pvc_warped", "pet_warped",
        "warp_field", "pvc_out", "pvc_mask", "gm_norm"
    ]

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields += ["atlas_anat"]
        out_fields += ["atlas_pet"]

    # input
    pet_input = setup_node(IdentityInterface(fields=in_fields,
                                             mandatory_inputs=True),
                           name="pet_input")

    # workflow to perform partial volume correction
    petpvc = petpvc_workflow(wf_name="petpvc")

    unzip_mrg = setup_node(Merge(4), name='merge_for_unzip')
    gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    # warp each subject to the group template
    gunzip_template = setup_node(Gunzip(), name="gunzip_template")
    gunzip_pet = setup_node(Gunzip(), name="gunzip_pet")

    warp_mrg = setup_node(Merge(2), name='merge_for_warp')
    warp2template = setup_node(spm.Normalize(jobtype="estwrite",
                                             out_prefix="wgrptemplate_"),
                               name="warp2template")

    get_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="get_bbox")

    # output
    pet_output = setup_node(IdentityInterface(fields=out_fields),
                            name="pet_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    wf.connect([
        # inputs
        (pet_input, petpvc, [("in_file", "pvc_input.in_file"),
                             ("anat", "pvc_input.reference_file"),
                             ("tissues", "pvc_input.tissues")]),

        # get template bounding box to apply to results
        (pet_input, get_bbox, [("pet_template", "in_file")]),

        # gunzip some inputs
        (pet_input, gunzip_pet, [("in_file", "in_file")]),
        (pet_input, gunzip_template, [("pet_template", "in_file")]),

        # gunzip some files for SPM Normalize
        (petpvc, unzip_mrg, [("pvc_output.pvc_out", "in1"),
                             ("pvc_output.brain_mask", "in2"),
                             ("pvc_output.gm_norm", "in3")]),
        (pet_input, unzip_mrg, [("in_file", "in4")]),
        (unzip_mrg, gunzipper, [("out", "in_file")]),
        (gunzipper, warp_mrg, [("out_file", "in1")]),
        (warp_mrg, warp2template, [(("out", flatten_list), "apply_to_files")]),

        # prepare the target parameters of the warp to template
        (gunzip_pet, warp2template, [("out_file", "source")]),
        (gunzip_template, warp2template, [("out_file", "template")]),
        (get_bbox, warp2template, [("bbox", "write_bounding_box")]),

        # output
        (warp2template, pet_output, [
            ("normalization_parameters", "warp_field"),
            ("normalized_files", "pvc_warped"),
            ("normalized_source", "pet_warped"),
        ]),

        # output
        (petpvc, pet_output, [("pvc_output.pvc_out", "pvc_out"),
                              ("pvc_output.brain_mask", "brain_mask"),
                              ("pvc_output.coreg_ref", "coreg_ref"),
                              ("pvc_output.coreg_others", "coreg_others"),
                              ("pvc_output.gm_norm", "gm_norm")]),
    ])

    if do_atlas:
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"),
                                 name="coreg_atlas")

        # set the registration interpolation to nearest neighbour.
        coreg_atlas.inputs.write_interp = 0
        wf.connect([
            (pet_input, coreg_atlas, [("anat", "source")]),
            (petpvc, coreg_atlas, [("pvc_output.coreg_ref", "target")]),
            (pet_input, coreg_atlas, [("atlas_anat", "apply_to_files")]),
            (coreg_atlas, pet_output, [("coregistered_files", "atlas_pet")]),
            # warp the atlas to the template space as well
            (coreg_atlas, warp_mrg, [("coregistered_files", "in2")])
        ])

    return wf
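
The `(("out", flatten_list), "apply_to_files")` tuple above passes the merged list through a flattening helper before it reaches SPM; a sketch of such a helper (illustrative, not necessarily the library's version):

def flatten_list(in_list):
    # flatten one level of nesting, leaving non-list items as they are
    out = []
    for item in in_list:
        if isinstance(item, (list, tuple)):
            out.extend(item)
        else:
            out.append(item)
    return out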
Example #7
def dti_artifact_correction(wf_name="dti_artifact_correction"):
    """ Run the diffusion MRI pre-processing workflow against the diff files in `data_dir`.

    It resamples/regrids the diffusion image to have isometric voxels.
    It corrects for head motion and eddy currents.
    It estimates motion outliers and exports motion reports using nipype.algorithms.rapidart.

    Nipype Inputs
    -------------
    dti_art_input.diff: traits.File
        path to the diffusion MRI image

    dti_art_input.bval: traits.File
        path to the bvals file

    dti_art_input.bvec: traits.File
        path to the bvecs file


    Nipype Outputs
    --------------
    dti_art_output.eddy_corr_file: traits.File
        Eddy currents corrected DTI image.

    dti_art_output.bvec_rotated: traits.File
        Rotated bvecs file

    dti_art_output.brain_mask_1: traits.File
        Brain mask extracted using BET on the first B0 image.

    dti_art_output.brain_mask_2: traits.File
        Brain mask extracted using BET on the average B0 image,
        after motion correction.

    dti_art_output.acqp: traits.File
        Text file with acquisition parameters calculated for Eddy.

    dti_art_output.index: traits.File
        Text file with acquisition indices calculated for Eddy.

    dti_art_output.avg_b0: traits.File
        The average b=0 image extracted from the motion and eddy
        currents corrected diffusion MRI.

    dti_art_output.hmc_corr_file: traits.File

    dti_art_output.hmc_corr_bvec: traits.File

    dti_art_output.hmc_corr_xfms: traits.File

    dti_art_output.art_displacement_files: traits.File

    dti_art_output.art_intensity_files: traits.File

    dti_art_output.art_norm_files: traits.File

    dti_art_output.art_outlier_files: traits.File

    dti_art_output.art_plot_files: traits.File

    dti_art_output.art_statistic_files: traits.File

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields = ["diff", "bval", "bvec"]
    out_fields = [
        "eddy_corr_file",
        "bvec_rotated",
        "brain_mask_1",
        "brain_mask_2",
        "acqp",
        "index",
        "avg_b0",
    ]

    do_rapidart = get_config_setting("dmri.artifact_detect", True)
    if do_rapidart:
        out_fields += [
            "hmc_corr_file",
            "hmc_corr_bvec",
            "hmc_corr_xfms",
            "art_displacement_files",
            "art_intensity_files",
            "art_norm_files",
            "art_outlier_files",
            "art_plot_files",
            "art_statistic_files",
        ]

    # input interface
    dti_input = setup_node(IdentityInterface(fields=in_fields,
                                             mandatory_inputs=True),
                           name="dti_art_input")

    # resample
    resample = setup_node(Function(
        function=reslice,
        input_names=['in_file', 'new_zooms', 'order', 'out_file'],
        output_names=['out_file']),
                          name='dti_reslice')

    ## extract first b0 for Eddy and HMC brain mask
    list_b0 = pe.Node(Function(
        function=b0_indices,
        input_names=['in_bval'],
        output_names=['out_idx'],
    ),
                      name='b0_indices')

    extract_b0 = pe.Node(ExtractROI(t_size=1), name="extract_first_b0")

    # For Eddy, the mask is only used for selecting voxels for the estimation of the hyperparameters,
    # so it isn't very critical.
    # Note also that a too conservative (small) mask is better than a too big one.
    bet_dwi0 = setup_node(BET(frac=0.3, mask=True, robust=True),
                          name='bet_dwi_pre')

    def pick_first(lst):
        return lst[0]

    # motion artifacts detection, requires linear co-registration for motion estimation.
    if do_rapidart:
        # head motion correction
        hmc = hmc_pipeline()

        art = setup_node(rapidart_dti_artifact_detection(),
                         name="detect_artifacts")

    # Eddy
    eddy = setup_node(Eddy(method='jac'), name="eddy")

    ## acquisition parameters for Eddy
    write_acqp = setup_node(Function(
        function=dti_acquisition_parameters,
        input_names=["in_file"],
        output_names=["out_acqp", "out_index"],
    ),
                            name="write_acqp")

    ## rotate b-vecs
    rot_bvec = setup_node(Function(
        function=eddy_rotate_bvecs,
        input_names=["in_bvec", "eddy_params"],
        output_names=["out_file"],
    ),
                          name="rot_bvec")

    ## extract all b0s and average them after Eddy correction
    avg_b0_post = pe.Node(Function(
        function=b0_average,
        input_names=['in_dwi', 'in_bval'],
        output_names=['out_file'],
    ),
                          name='b0_avg_post')

    bet_dwi1 = setup_node(BET(frac=0.3, mask=True, robust=True),
                          name='bet_dwi_post')

    # nlmeans denoise
    apply_nlmeans = get_config_setting("dmri.apply_nlmeans", True)
    if apply_nlmeans:
        nlmeans = setup_node(Function(
            function=nlmeans_denoise,
            input_names=['in_file', 'mask_file', 'out_file', 'N'],
            output_names=['out_file']),
                             name='nlmeans_denoise')

    # output interface
    dti_output = setup_node(IdentityInterface(fields=out_fields),
                            name="dti_art_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # Connect the nodes
    wf.connect([
        # resample to iso-voxel
        (dti_input, resample, [
            ("diff", "in_file"),
        ]),

        # read from input file the acquisition parameters for eddy
        (dti_input, write_acqp, [("diff", "in_file")]),

        # reference mask for hmc and eddy
        (dti_input, list_b0, [("bval", "in_bval")]),
        (resample, extract_b0, [("out_file", "in_file")]),
        (list_b0, extract_b0, [(("out_idx", pick_first), "t_min")]),
        (extract_b0, bet_dwi0, [("roi_file", "in_file")]),

        # Eddy
        (resample, eddy, [("out_file", "in_file")]),
        (bet_dwi0, eddy, [("mask_file", "in_mask")]),
        (dti_input, eddy, [("bval", "in_bval"), ("bvec", "in_bvec")]),
        (write_acqp, eddy, [("out_acqp", "in_acqp"),
                            ("out_index", "in_index")]),

        # rotate bvecs
        (dti_input, rot_bvec, [("bvec", "in_bvec")]),
        (eddy, rot_bvec, [("out_parameter", "eddy_params")]),

        # final avg b0
        (dti_input, avg_b0_post, [("bval", "in_bval")]),
        (eddy, avg_b0_post, [("out_corrected", "in_dwi")]),
        (avg_b0_post, bet_dwi1, [("out_file", "in_file")]),

        # output
        (write_acqp, dti_output, [("out_acqp", "acqp"),
                                  ("out_index", "index")]),
        (bet_dwi0, dti_output, [("mask_file", "brain_mask_1")]),
        (bet_dwi1, dti_output, [("mask_file", "brain_mask_2")]),
        (rot_bvec, dti_output, [("out_file", "bvec_rotated")]),
        (avg_b0_post, dti_output, [("out_file", "avg_b0")]),
    ])

    if apply_nlmeans:
        wf.connect([
            # non-local means
            (eddy, nlmeans, [("out_corrected", "in_file")]),
            (bet_dwi1, nlmeans, [("mask_file", "mask_file")]),

            # output
            (nlmeans, dti_output, [("out_file", "eddy_corr_file")]),
        ])
    else:
        wf.connect([
            # output
            (eddy, dti_output, [("out_corrected", "eddy_corr_file")]),
        ])

    if do_rapidart:
        wf.connect([
            # head motion correction
            (dti_input, hmc, [
                ("bval", "inputnode.in_bval"),
                ("bvec", "inputnode.in_bvec"),
            ]),
            (resample, hmc, [("out_file", "inputnode.in_file")]),
            (bet_dwi0, hmc, [("mask_file", "inputnode.in_mask")]),
            (list_b0, hmc, [
                (("out_idx", pick_first), "inputnode.ref_num"),
            ]),

            # artifact detection
            (hmc, art, [
                ("outputnode.out_file", "realigned_files"),
                ("outputnode.out_xfms", "realignment_parameters"),
            ]),
            (bet_dwi1, art, [
                ("mask_file", "mask_file"),
            ]),

            # output
            (hmc, dti_output, [
                ("outputnode.out_file", "hmc_corr_file"),
                ("outputnode.out_bvec", "hmc_corr_bvec"),
                ("outputnode.out_xfms", "hmc_corr_xfms"),
            ]),
            (art, dti_output, [
                ("displacement_files", "art_displacement_files"),
                ("intensity_files", "art_intensity_files"),
                ("norm_files", "art_norm_files"),
                ("outlier_files", "art_outlier_files"),
                ("plot_files", "art_plot_files"),
                ("statistic_files", "art_statistic_files"),
            ]),
        ])

    return wf
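
For reference, a minimal sketch of what the `b0_average` helper wired into `b0_avg_post` can do, assuming the b-values live in a whitespace-separated text file (an illustration, not the library's exact code):

import nibabel as nib
import numpy as np

def b0_average(in_dwi, in_bval, max_b=10.0):
    # average all volumes whose b-value is (close to) zero
    bvals = np.loadtxt(in_bval)
    dwi = nib.load(in_dwi)
    b0_data = dwi.get_fdata()[..., bvals <= max_b]
    return nib.Nifti1Image(b0_data.mean(axis=-1), dwi.affine, dwi.header)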
Example #8
def intensity_norm(wf_name='intensity_norm'):
    """ Workflow that uses a mask against a source from where the mean value will be taken.
    This mean value will be used to demean the whole source and leave it in out_file.

    Parameters
    ----------
    wf_name: str
        The name of the workflow.

    Nipype Inputs
    -------------
    intnorm_input.source: existing file
        The image from where to extract the signal values and normalize.

    intnorm_input.mask: existing file
        The mask to specify which voxels to use to calculate the statistics
        for normalization.

    Nipype Outputs
    --------------
    intnorm_output.out_file: existing file

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields = ["source", "mask"]

    out_fields = ["out_file"]

    # input
    intnorm_input = setup_node(IdentityInterface(fields=in_fields,
                                                 mandatory_inputs=True),
                               name="intnorm_input")

    # resample the mask to the space of the source image (necessary in some cases)
    resample = setup_node(Function(
        function=resample_to_img,
        input_names=["in_file", "target", "interpolation"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                          name="resample_mask")

    resample.inputs.interpolation = "nearest"

    # calculate masked mean value
    mean_val = setup_node(Function(
        function=math_img,
        input_names=["formula", "img", "mask"],
        output_names=["out_value"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                          name='mean_value')
    mean_val.inputs.formula = "np.mean(img[mask > 0])"

    # normalize
    norm_img = setup_node(Function(
        function=math_img,
        input_names=["formula", "out_file", "img", "val"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                          name='norm_img')
    norm_img.inputs.formula = "img / val"

    # output
    intnorm_output = setup_node(IdentityInterface(fields=out_fields),
                                name="intnorm_output")

    # Create the workflow object
    wf = Workflow(name=wf_name)

    wf.connect([
        # resample
        (intnorm_input, resample, [("source", "target"), ("mask", "in_file")]),
        # normalize
        (intnorm_input, mean_val, [("source", "img")]),
        (resample, mean_val, [("out_file", "mask")]),
        (intnorm_input, norm_img, [
            ("source", "img"),
            (("source", rename, "_intnormed"), "out_file"),
        ]),
        (mean_val, norm_img, [("out_value", "val")]),
        (norm_img, intnorm_output, [("out_file", "out_file")]),
    ])

    return wf
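
Outside of nipype, the same normalization can be sketched directly with nibabel and nilearn; the file names here are hypothetical:

import nibabel as nib
from nilearn.image import resample_to_img

src = nib.load("source.nii.gz")                                       # hypothetical inputs
mask = resample_to_img("mask.nii.gz", src, interpolation="nearest")
mean_val = src.get_fdata()[mask.get_fdata() > 0].mean()
normed = nib.Nifti1Image(src.get_fdata() / mean_val, src.affine, src.header)
normed.to_filename("source_intnormed.nii.gz")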
Example #9
def attach_spm_pet_grouptemplate(main_wf, wf_name="spm_pet_template"):
    """ Attach a PET pre-processing workflow that uses SPM12 to `main_wf`.
    This workflow picks all spm_pet_preproc outputs 'pet_output.warped_files' in `main_wf`
    to create a group template.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have `input_files` and `datasink` nodes.

    pet_output.warped_files: input node

    datasink: nipype Node

    spm_pet_preproc: nipype Workflow

    Nipype Outputs
    --------------
    group_template.pet_template: file
        The path to the PET group template.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - spm_pet_preproc
    - spm_anat_preproc if `spm_pet_template.do_petpvc` is True.

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    pet_wf = get_subworkflow(main_wf, "spm_pet_preproc")

    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf, name='datasink')

    # The base name of the 'pet' file for the substitutions
    pet_fbasename = remove_ext(os.path.basename(get_input_file_name(in_files, 'pet')))

    # the group template datasink
    base_outdir = datasink.inputs.base_directory
    grp_datasink = pe.Node(
        DataSink(parameterization=False, base_directory=base_outdir),
        name='{}_grouptemplate_datasink'.format(pet_fbasename)
    )
    grp_datasink.inputs.container = '{}_grouptemplate'.format(pet_fbasename)

    # the list of the raw pet subjects
    warped_pets = pe.JoinNode(
        interface=IdentityInterface(fields=["warped_pets"]),
        joinsource="infosrc",
        joinfield="warped_pets",
        name="warped_pets"
    )

    # the group template workflow
    template_wf = spm_create_group_template_wf(wf_name)

    # output node
    output = setup_node(IdentityInterface(fields=["pet_template"]), name="group_template")

    # group dataSink output substitutions
    regexp_subst = [
        (r"/wgrptemplate{pet}_merged_mean_smooth.nii$", "/{pet}_grouptemplate_mni.nii"),
        (r"/w{pet}_merged_mean_smooth.nii$",            "/{pet}_grouptemplate_mni.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)

    grp_datasink.inputs.regexp_substitutions = extend_trait_list(
        grp_datasink.inputs.regexp_substitutions,
        regexp_subst
    )

    # Connect the nodes
    main_wf.connect([
        # warped pets file list input
        (pet_wf, warped_pets, [("warp_output.warped_files", "warped_pets")]),

        # group template wf
        (warped_pets, template_wf, [(("warped_pets", flatten_list), "grptemplate_input.in_files")]),

        # output node
        (template_wf, output, [("grptemplate_output.template", "pet_template")]),

        # template output
        (output, grp_datasink, [("pet_template", "@pet_grouptemplate")]),
    ])

    # Now we start with the correction and registration of each subject to the group template
    do_petpvc = get_config_setting('spm_pet_template.do_petpvc')
    if do_petpvc:
        get_subworkflow(main_wf, 'spm_anat_preproc')

        preproc_wf_name = "spm_mrpet_grouptemplate_preproc"
        main_wf = attach_spm_mrpet_preprocessing(main_wf, wf_name=preproc_wf_name, do_group_template=True)
        preproc_wf = get_subworkflow(main_wf, preproc_wf_name)

        main_wf.connect([(output, preproc_wf, [
            ("pet_template", "pet_input.pet_template")]),
        ])
    else:
        # add the pet template to the preproc workflow
        reg_wf = spm_register_to_template_wf(wf_name="spm_pet_register_to_grouptemplate")
        main_wf.connect([
            (output,      reg_wf, [("pet_template", "reg_input.template")]),
            (in_files,    reg_wf, [("pet",          "reg_input.in_file")]),

            (reg_wf, datasink, [
                ("reg_output.warped",     "pet.group_template.@warped"),
                ("reg_output.warp_field", "pet.group_template.@warp_field"),
            ]),
        ])

    # per-subject datasink output substitutions
    regexp_subst = [
        (r"group_template/{pet}_sn.mat$",           "group_template/{pet}_grptemplate_params.mat"),
        (r"group_template/wgrptemplate_{pet}.nii$", "group_template/{pet}_grptemplate.nii"),
        (r"group_template/w{pet}.nii",              "group_template/{pet}_grptemplate.nii"),
    ]
    regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    datasink.inputs.regexp_substitutions = extend_trait_list(
        datasink.inputs.regexp_substitutions,
        regexp_subst
    )

    return main_wf
Example #10
def auto_spm_slicetime(in_file=traits.Undefined,
                       out_prefix='stc',
                       num_slices=traits.Undefined,
                       time_repetition=traits.Undefined,
                       time_acquisition=traits.Undefined,
                       ref_slice=traits.Undefined,
                       slice_order=traits.Undefined,
                       wf_name='auto_spm_slicetime'):
    """ A workflow that tries to automatically read the slice timing correction parameters
    from the input file and passes them to a spm.SliceTiming node.

    Parameters
    ----------
    in_file: str
        Path to the input file.

    out_prefix: str
        Prefix for the output file.
        Default: 'stc'

    num_slices: int
        Number of slices of `in_files`.

    time_repetition: int or str
        The time repetition (TR) of the input dataset in seconds.
        Default: 0 (if left at the default, the TR is read from the
        NIfTI image header).

    time_acquisition: int
        Time of volume acquisition, usually calculated as TR - (TR / num_slices).

    ref_slice: int
        Index of the reference slice

    slice_order: list of int
        List of integers with the order in which slices are acquired

    wf_name: str
        Name of the workflow

    Nipype Inputs
    -------------
    ## Mandatory:
    stc_input.in_file:

    ## Optional:
    stc_input.num_slices

    stc_input.slice_order

    stc_input.time_repetition

    stc_input.time_acquisition

    stc_input.ref_slice

    stc_input.slice_mode

    Nipype Outputs
    --------------
    stc_output.timecorrected_files

    stc_output.time_repetition

    Returns
    -------
    auto_spm_stc: nipype Workflow
        SPM slice timing correction workflow with automatic
        parameters detection.
    """

    # helper functions
    def _sum_one_to_each(slice_order):  # SPM starts counting from 1
        return [i + 1 for i in slice_order]

    def _sum_one(num):
        return num + 1

    def _pick_first(sequence):
        return sequence[0]

    input_fields = [
        "in_file",
        "num_slices",
        "slice_order",
        "time_repetition",
        "time_acquisition",
        "ref_slice",
        "slice_mode",
    ]

    # the input and output nodes
    stc_input = setup_node(IdentityInterface(fields=input_fields),
                           name="stc_input")

    stc_output = setup_node(IdentityInterface(fields=[
        "timecorrected_files",
        "time_repetition",
    ]),
                            name="stc_output")

    # Declare the processing nodes
    params = setup_node(STCParametersInterface(in_files=in_file),
                        name='stc_params')
    gunzip = setup_node(Gunzip(), name="gunzip")
    stc = setup_node(spm_slicetime(out_prefix=out_prefix,
                                   num_slices=num_slices,
                                   time_repetition=time_repetition,
                                   time_acquisition=time_acquisition,
                                   ref_slice=ref_slice,
                                   slice_order=slice_order),
                     name='slice_timer')

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # Connect the nodes
    wf.connect([
        (stc_input, params, [
            ("in_file", "in_files"),
            ("num_slices", "num_slices"),
            ("slice_order", "slice_order"),
            ("time_repetition", "time_repetition"),
            ("time_acquisition", "time_acquisition"),
            ("ref_slice", "ref_slice"),
            ("slice_mode", "slice_mode"),
        ]),

        # processing nodes
        (params, gunzip, [(("in_files", _pick_first), "in_file")]),
        (params, stc, [
            (("slice_order", _sum_one_to_each), "slice_order"),
            (("ref_slice", _sum_one), "ref_slice"),
            ("num_slices", "num_slices"),
            ("time_acquisition", "time_acquisition"),
            ("time_repetition", "time_repetition"),
        ]),
        (gunzip, stc, [("out_file", "in_files")]),

        # output node
        (params, stc_output, [("time_repetition", "time_repetition")]),
        (stc, stc_output, [("timecorrected_files", "timecorrected_files")]),
    ])

    return wf
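
The `_sum_one*` helpers exist because the parameters read from the NIfTI header are 0-based while SPM counts slices from 1. For example:

slice_order = [0, 2, 4, 1, 3]                # 0-based interleaved acquisition
spm_order = [i + 1 for i in slice_order]     # -> [1, 3, 5, 2, 4], what SPM expects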
Example #11
def attach_concat_canica(main_wf, wf_name="canica", **kwargs):
    """ Attach a Concat and a nilearn CanICA interface to `main_wf`.

    The Concat node will merge all the files together into one 4D volume before delivering it to CanICA.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    kwargs: dict[str]->str
        input_node: str
            Name of the input node from which to take the source `input_connection`.

        input_connection: str
            Name of the connection to obtain the source files.

    Nipype Inputs for `main_wf`
    ---------------------------
    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    srcwf_name = kwargs['input_node']
    srcconn_name = kwargs['input_connection']

    src_wf = get_subworkflow(main_wf, srcwf_name)
    datasink = get_datasink(main_wf, name='datasink')

    base_outdir = datasink.inputs.base_directory
    ica_datasink = pe.Node(DataSink(
        parameterization=False,
        base_directory=base_outdir,
    ),
                           name="ica_datasink".format(wf_name))
    ica_datasink.inputs.container = 'ica_{}'.format(wf_name)

    # the list of the subject files
    ica_subjs = pe.JoinNode(interface=IdentityInterface(fields=["ica_subjs"]),
                            joinsource="infosrc",
                            joinfield="ica_subjs",
                            name="ica_subjs")

    # concat images
    concat = setup_node(Function(
        function=concat_3D_imgs,
        input_names=["in_files"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                        name="concat")

    # run CanICA on the concatenated images
    ica = setup_node(
        CanICAInterface(),
        name="{}_ica".format(wf_name),
    )
    algorithm = get_config_setting("{}_ica.algorithm".format(wf_name),
                                   default=get_config_setting(
                                       'canica.algorithm', default=''))

    if algorithm:
        ica.inputs.algorithm = algorithm

    # Connect the nodes
    main_wf.connect([
        # file list input
        (src_wf, ica_subjs, [(srcconn_name, "ica_subjs")]),
        # concat images
        (ica_subjs, concat, [("ica_subjs", "in_files")]),
        # canica
        (concat, ica, [(("out_file", _check_list), "in_files")]),

        # canica output
        (ica, ica_datasink, [
            ("components", "@components"),
            ("loadings", "@loadings"),
            ("score", "@score"),
        ]),
    ])

    # plot the ICA results?
    do_plot = get_config_setting('canica_extra.plot', default=True)
    if not do_plot:
        return main_wf

    # get the plot threshold from the ICA node or the config file (in that order).
    plot_thr = get_config_setting('canica_extra.plot_thr', default=0)
    plot_thr = get_trait_value(ica.inputs, 'threshold', default=plot_thr)

    # plot the ICA result images
    plot_ica = setup_node(Function(
        function=plot_ica_results,
        input_names=[
            "ica_result", "application", "mask_file", "zscore", "bg_img"
        ],
        output_names=["all_icc_plot", "iccs_plot", "sliced_ic_plots"],
    ),
                          name="plot_ica")
    plot_ica.inputs.zscore = plot_thr
    plot_ica.inputs.mask_file = get_trait_value(ica.inputs, 'mask')
    plot_ica.inputs.application = 'nilearn'

    # Connect the plotting nodes
    main_wf.connect([
        # canica
        (ica, plot_ica, [("components", "ica_result")]),

        # canica output
        (plot_ica, ica_datasink, [
            ("all_icc_plot", "@all_icc_plot"),
            ("iccs_plot", "@iccs_plot"),
            ("sliced_ic_plots", "@sliced_ic_plots"),
        ]),
    ])
    return main_wf
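
A usage sketch for the attach function, assuming `main_wf` already contains `infosrc` and `datasink` nodes and a source subworkflow named `rest_preproc` that exposes warped files (all names here are hypothetical):

main_wf = attach_concat_canica(
    main_wf,
    wf_name="canica",
    input_node="rest_preproc",
    input_connection="rest_output.warped_files",
)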
Example #12
def attach_canica(main_wf, wf_name="canica", **kwargs):
    """ Attach a nilearn CanICA interface to `main_wf`.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    kwargs: dict[str]->str
        input_node: str
            Name of the input node from which to take the source `input_connection`.

        input_connection: str
            Name of the connection to obtain the source files.

    Nipype Inputs for `main_wf`
    ---------------------------
    datasink: nipype Node

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    srcwf_name = kwargs['input_node']
    srcconn_name = kwargs['input_connection']

    src_wf = get_subworkflow(main_wf, srcwf_name)
    datasink = get_datasink(main_wf, name='datasink')

    base_outdir = datasink.inputs.base_directory
    ica_datasink = pe.Node(DataSink(
        parameterization=False,
        base_directory=base_outdir,
    ),
                           name="{}_datasink".format(wf_name))

    # the list of the subject files
    ica_subjs = pe.JoinNode(interface=IdentityInterface(fields=["ica_subjs"]),
                            joinsource="infosrc",
                            joinfield="ica_subjs",
                            name="ica_subjs")

    # run CanICA on the subject files
    ica = setup_node(
        CanICAInterface(),
        name="{}_ica".format(wf_name),
    )

    # Connect the nodes
    main_wf.connect([
        # file list input
        (src_wf, ica_subjs, [(srcconn_name, "ica_subjs")]),

        # canica
        (ica_subjs, ica, [("ica_subjs", "in_files")]),

        # canica output
        (ica, ica_datasink, [("components", "canica.@components")]),
        (ica, ica_datasink, [("loadings", "canica.@loadings")]),
        (ica, ica_datasink, [("score", "canica.@score")]),
    ])
    return main_wf
Example #13
def spm_register_to_template_wf(wf_name="spm_registration_to_template"):
    """Return a workflow that registers each reg_input.in_file to the file in reg_input.template.
    For now this does not do atlas registration.

    It does:
    - SPM12 Warp input image to the given template

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    reg_input.in_file: traits.File
        The raw NIFTI_GZ subject image file.

    reg_input.template: traits.File
        The template file for inter-subject registration reference.

    Nipype outputs
    --------------
    reg_output.warped: existing file
        Image normalized to the given template.

    reg_output.warp_field: existing file
        Spatial normalization parameters .mat file.

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields = [
        "in_file",
        "template",
    ]

    out_fields = [
        "warped",
        "warp_field",
    ]

    # input
    reg_input = setup_node(IdentityInterface(fields=in_fields,
                                             mandatory_inputs=True),
                           name="reg_input")

    # warp each subject to the group template
    gunzip_template = setup_node(
        Gunzip(),
        name="gunzip_template",
    )
    gunzip_input = setup_node(
        Gunzip(),
        name="gunzip_input",
    )

    warp2template = setup_node(spm.Normalize(jobtype="estwrite",
                                             out_prefix="wgrptemplate_"),
                               name="warp2template")

    get_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="get_bbox")

    # output
    reg_output = setup_node(IdentityInterface(fields=out_fields),
                            name="reg_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    wf.connect([
        # get template bounding box to apply to results
        (reg_input, get_bbox, [("template", "in_file")]),

        # gunzip some inputs
        (reg_input, gunzip_input, [("in_file", "in_file")]),
        (reg_input, gunzip_template, [("template", "in_file")]),

        # prepare the target parameters of the warp to template
        (gunzip_template, warp2template, [("out_file", "template")]),
        (get_bbox, warp2template, [("bbox", "write_bounding_box")]),

        # directly warp pet to the template
        (gunzip_input, warp2template, [("out_file", "source")]),

        # output
        (warp2template, reg_output, [
            ("normalization_parameters", "warp_field"),
            ("normalized_source", "warped"),
        ]),
    ])

    return wf
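A usage sketch, assuming the subject image and the template are NIFTI_GZ files on disk (paths are placeholders):

wf = spm_register_to_template_wf()
wf.inputs.reg_input.in_file = '/data/sub-01/pet.nii.gz'       # hypothetical path
wf.inputs.reg_input.template = '/data/group_template.nii.gz'  # hypothetical path
wf.run()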
Ejemplo n.º 14
0
def spm_create_group_template_wf(wf_name="spm_create_group_template"):
    """ Pick all subject files in `grptemplate_input.in_files`, calculate an average
    image and smooth it with `"{}_smooth".format(wf_name)` node (you can configure the smooth `fwhm` from
    a config file.).

    It does:
    - calculate a mean image (across subjects) and
    - smooth it with a 8x8x8mm^3 gaussian kernel -> the result of this is the template.
    The size of the isometric smoothing gaussian kernel is given by one integer for the
    "{}_smooth.fwhm".format(wf_name) setting.

    You can also avoid calculating the mean image across subjects and setting a specific group template file by
    setting the configuration "{}.template_file".format(wf_name) to the path of the file you want.
    This image will be smoothed and used as a common template for the further pipeline steps.

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    grptemplate_input.in_files: list of traits.File
        The raw NIFTI_GZ PET image files

    Nipype outputs
    --------------
    grptemplate_output.template: existing file
        The common custom PET template file.

    Returns
    -------
    wf: nipype Workflow
    """
    # input
    input = setup_node(
        IdentityInterface(fields=["in_files"]),
        name="grptemplate_input",
    )

    # checking if a template file has been set already
    template_file = get_config_setting("{}.template_file".format(wf_name))

    use_common_template = path.exists(template_file)
    if not use_common_template:
        # merge
        concat = setup_node(Function(
            function=concat_imgs,
            input_names=["in_files"],
            output_names=["out_file"],
            imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                            name='merge_time')

        # average
        average = setup_node(Function(
            function=mean_img,
            input_names=["in_file", "out_file"],
            output_names=["out_file"],
            imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                             name='group_average')
        average.inputs.out_file = 'group_average.nii.gz'

    # TODO: check what is the difference between nilearn.image.smooth_img and FSL IsotropicSmooth
    # smooth
    # smooth = setup_node(Function(function=smooth_img,
    #                             input_names=["in_file", "fwhm"],
    #                             output_names=["out_file"],
    #                             imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
    #                     name="{}_smooth".format(wf_name))
    smooth = setup_node(fsl.IsotropicSmooth(fwhm=8),
                        name="{}_smooth".format(wf_name))

    # output
    output = setup_node(
        IdentityInterface(fields=["template"]),
        name="grptemplate_output",
    )

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # if I have to create the group template
    if not use_common_template:
        wf.connect([
            # input
            (input, concat, [("in_files", "in_files")]),

            # merge, average and smooth
            (concat, average, [("out_file", "in_file")]),
            (average, smooth, [("out_file", "in_file")]),

            # output
            (smooth, output, [("out_file", "template")]),
        ])
    else:  # if the template has been specified in the configuration file
        wf.add_nodes([input])

        smooth.inputs.in_file = template_file

        wf.connect([
            # output
            (smooth, output, [("out_file", "template")]),
        ])

    return wf
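To skip the averaging branch and only smooth a fixed template, the relevant setting can be pointed at an existing file, e.g. (assuming the same `update_config` helper as above; the path is a placeholder):

from neuro_pypes.config import update_config  # assumed helper

update_config({'spm_create_group_template.template_file': '/data/my_template.nii.gz'})
wf = spm_create_group_template_wf()  # will now only smooth the given file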
Ejemplo n.º 15
0
def fmri_cleanup_wf(wf_name="fmri_cleanup"):
    """ Run the resting-state fMRI pre-processing workflow against the rest files in `data_dir`.

    Tasks:
    - Trim first 6 volumes of the rs-fMRI file.
    - Slice Timing correction.
    - Motion and nuisance correction.
    - Calculate brain mask in fMRI space.
    - Bandpass frequency filtering for resting-state fMRI.
    - Smoothing.
    - Tissue maps co-registration to fMRI space.

    Parameters
    ----------
    wf_name: str

    Nipype Inputs
    -------------
    rest_input.in_file: traits.File
        The resting-state fMRI file.

    rest_input.anat: traits.File
        Path to the high-contrast anatomical image.

    rest_input.tissues: list of traits.File
        Paths to the tissue segmentations in anatomical space.
        Expected to have this order: GM, WM and CSF.

    rest_input.highpass_freq: traits.Float
        Band-pass timeseries filter lower bound in Hz.

    rest_input.lowpass_freq: traits.Float
        Band-pass timeseries filter upper bound in Hz.

    Nipype Outputs
    --------------
    rest_output.smooth: traits.File
        The isotropically smoothed time filtered nuisance corrected image.

    rest_output.nuis_corrected: traits.File
        The nuisance corrected fMRI file.

    rest_output.motion_params: traits.File
        The affine transformation file.

    rest_output.time_filtered: traits.File
        The bandpass time filtered fMRI file.

    rest_output.epi_brain_mask: traits.File
        An estimated brain mask from mean EPI volume.

    rest_output.tissues_brain_mask: traits.File
        A brain mask calculated from the addition of coregistered
        GM, WM and CSF segmentation volumes from the anatomical
        segmentation.

    rest_output.tissues: list of traits.File
        The tissues segmentation volume in fMRI space.
        Expected to have this order: GM, WM and CSF.

    rest_output.anat: traits.File
        The T1w image in fMRI space.

    rest_output.avg_epi: traits.File
        The average EPI image in fMRI space after slice-time and motion correction.

    rest_output.motion_regressors: traits.File

    rest_output.compcor_regressors: traits.File

    rest_output.art_displacement_files
        One image file containing the voxel-displacement timeseries.

    rest_output.art_intensity_files
        One file containing the global intensity values determined from the brainmask.

    rest_output.art_norm_files
        One file containing the composite norm.

    rest_output.art_outlier_files
         One file containing a list of 0-based indices corresponding to outlier volumes.

    rest_output.art_plot_files
        One image file containing the detected outliers.

    rest_output.art_statistic_files
        One file containing information about the different types of artifacts and, if design info is provided,
        details of stimulus-correlated motion and a listing of artifacts by event type.

    Returns
    -------
    wf: nipype Workflow
    """
    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # specify input and output fields
    in_fields = [
        "in_file",
        "anat",
        "atlas_anat",
        "coreg_target",
        "tissues",
        "lowpass_freq",
        "highpass_freq",
    ]

    out_fields = [
        "motion_corrected",
        "motion_params",
        "tissues",
        "anat",
        "avg_epi",
        "time_filtered",
        "smooth",
        "tsnr_file",
        "epi_brain_mask",
        "tissues_brain_mask",
        "motion_regressors",
        "compcor_regressors",
        "gsr_regressors",
        "nuis_corrected",
        "art_displacement_files",
        "art_intensity_files",
        "art_norm_files",
        "art_outlier_files",
        "art_plot_files",
        "art_statistic_files",
    ]

    # input identities
    rest_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                            name="rest_input")

    # rs-fMRI preprocessing nodes
    trim = setup_node(Trim(), name="trim")

    stc_wf = auto_spm_slicetime()
    realign = setup_node(nipy_motion_correction(), name='realign')

    # average
    average = setup_node(
        Function(
            function=mean_img,
            input_names=["in_file"],
            output_names=["out_file"],
            imports=['from neuro_pypes.interfaces.nilearn import ni2file']
        ),
        name='average_epi'
    )

    mean_gunzip = setup_node(Gunzip(), name="mean_gunzip")

    # co-registration nodes
    coreg = setup_node(spm_coregister(cost_function="mi"), name="coreg_fmri")
    brain_sel = setup_node(Select(index=[0, 1, 2]), name="brain_sel")

    # brain mask made with EPI
    epi_mask = setup_node(ComputeMask(), name='epi_mask')

    # brain mask made with the merge of the tissue segmentations
    tissue_mask = setup_node(fsl.MultiImageMaths(), name='tissue_mask')
    tissue_mask.inputs.op_string = "-add %s -add %s -abs -kernel gauss 4 -dilM -ero -kernel gauss 1 -dilM -bin"
    tissue_mask.inputs.out_file = "tissue_brain_mask.nii.gz"

    # select tissues
    gm_select = setup_node(Select(index=[0]), name="gm_sel")
    wmcsf_select = setup_node(Select(index=[1, 2]), name="wmcsf_sel")

    # noise filter
    noise_wf = rest_noise_filter_wf()
    wm_select = setup_node(Select(index=[1]), name="wm_sel")
    csf_select = setup_node(Select(index=[2]), name="csf_sel")

    # bandpass filtering
    bandpass = setup_node(
        Function(
            input_names=['files', 'lowpass_freq', 'highpass_freq', 'tr'],
            output_names=['out_files'],
            function=bandpass_filter
        ),
        name='bandpass'
    )

    # smooth
    smooth = setup_node(
        Function(
            function=smooth_img,
            input_names=["in_file", "fwhm"],
            output_names=["out_file"],
            imports=['from neuro_pypes.interfaces.nilearn import ni2file']
        ),
        name="smooth"
    )
    smooth.inputs.fwhm = get_config_setting('fmri_smooth.fwhm', default=8)
    smooth.inputs.out_file = "smooth_std_{}.nii.gz".format(wf_name)

    # output identities
    rest_output = setup_node(IdentityInterface(fields=out_fields), name="rest_output")

    # Connect the nodes
    wf.connect([
        # trim
        (rest_input, trim, [("in_file", "in_file")]),

        # slice time correction
        (trim, stc_wf, [("out_file", "stc_input.in_file")]),

        # motion correction
        (stc_wf, realign, [("stc_output.timecorrected_files", "in_file")]),

        # coregistration target
        (realign, average, [("out_file", "in_file")]),
        (average, mean_gunzip, [("out_file", "in_file")]),
        (mean_gunzip, coreg, [("out_file", "target")]),

        # epi brain mask
        (average, epi_mask, [("out_file", "mean_volume")]),

        # coregistration
        (rest_input, coreg, [("anat", "source")]),
        (rest_input, brain_sel, [("tissues", "inlist")]),
        (brain_sel, coreg, [(("out", flatten_list), "apply_to_files")]),

        # tissue brain mask
        (coreg, gm_select, [("coregistered_files", "inlist")]),
        (coreg, wmcsf_select, [("coregistered_files", "inlist")]),
        (gm_select, tissue_mask, [(("out", flatten_list), "in_file")]),
        (wmcsf_select, tissue_mask, [(("out", flatten_list), "operand_files")]),

        # nuisance correction
        (coreg, wm_select, [("coregistered_files", "inlist",)]),
        (coreg, csf_select, [("coregistered_files", "inlist",)]),
        (realign, noise_wf, [("out_file", "rest_noise_input.in_file",)]),
        (tissue_mask, noise_wf, [("out_file", "rest_noise_input.brain_mask")]),
        (wm_select, noise_wf, [(("out", flatten_list), "rest_noise_input.wm_mask")]),
        (csf_select, noise_wf, [(("out", flatten_list), "rest_noise_input.csf_mask")]),

        (realign, noise_wf, [("par_file", "rest_noise_input.motion_params",)]),

        # temporal filtering
        (noise_wf, bandpass, [("rest_noise_output.nuis_corrected", "files")]),
        # (realign,     bandpass,    [("out_file", "files")]),
        (stc_wf, bandpass, [("stc_output.time_repetition", "tr")]),
        (rest_input, bandpass, [
            ("lowpass_freq", "lowpass_freq"),
            ("highpass_freq", "highpass_freq"),
        ]),
        (bandpass, smooth, [("out_files", "in_file")]),

        # output
        (epi_mask, rest_output, [("brain_mask", "epi_brain_mask")]),
        (tissue_mask, rest_output, [("out_file", "tissues_brain_mask")]),
        (realign, rest_output, [
            ("out_file", "motion_corrected"),
            ("par_file", "motion_params"),
        ]),
        (coreg, rest_output, [
            ("coregistered_files", "tissues"),
            ("coregistered_source", "anat"),
        ]),
        (noise_wf, rest_output, [
            ("rest_noise_output.motion_regressors", "motion_regressors"),
            ("rest_noise_output.compcor_regressors", "compcor_regressors"),
            ("rest_noise_output.gsr_regressors", "gsr_regressors"),
            ("rest_noise_output.nuis_corrected", "nuis_corrected"),
            ("rest_noise_output.tsnr_file", "tsnr_file"),
            ("rest_noise_output.art_displacement_files", "art_displacement_files"),
            ("rest_noise_output.art_intensity_files", "art_intensity_files"),
            ("rest_noise_output.art_norm_files", "art_norm_files"),
            ("rest_noise_output.art_outlier_files", "art_outlier_files"),
            ("rest_noise_output.art_plot_files", "art_plot_files"),
            ("rest_noise_output.art_statistic_files", "art_statistic_files"),
        ]),
        (average, rest_output, [("out_file", "avg_epi")]),
        (bandpass, rest_output, [("out_files", "time_filtered")]),
        (smooth, rest_output, [("out_file", "smooth")]),
    ])

    return wf
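A wiring sketch for this workflow (placeholder paths; the 0.01-0.1 Hz band is a common resting-state choice, not a value mandated by the code):

wf = fmri_cleanup_wf()
wf.inputs.rest_input.in_file = '/data/sub-01/rest.nii.gz'  # hypothetical path
wf.inputs.rest_input.anat = '/data/sub-01/anat.nii.gz'     # hypothetical path
wf.inputs.rest_input.tissues = [
    '/data/sub-01/gm.nii.gz',   # order matters: GM, WM, CSF
    '/data/sub-01/wm.nii.gz',
    '/data/sub-01/csf.nii.gz',
]
wf.inputs.rest_input.highpass_freq = 0.01  # Hz, band lower bound
wf.inputs.rest_input.lowpass_freq = 0.1    # Hz, band upper bound
wf.run()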
Ejemplo n.º 16
0
def petpvc_workflow(wf_name="petpvc"):
    """ Run the PET pre-processing workflow against the gunzip_pet.in_file files.
    It coregisters the reference_file and tissues to PET space, then applies PVC and grey matter normalization.

    It does:
    - SPM12 Coregister T1 and tissues to PET
    - PVC the PET image in PET space

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    Nipype Inputs
    -------------
    pvc_input.in_file: traits.File
        The raw NIFTI_GZ PET image file

    pvc_input.reference_file: traits.File
        The anatomical image in its native space. For registration reference.

    pvc_input.tissues: list of traits.File
        List of tissues files from the New Segment process. At least the first
        3 tissues must be present.

    Nipype outputs
    --------------
    pvc_output.coreg_ref: existing file
        The coregistered reference_file image in PET space.

    pvc_output.coreg_others: list of existing files
        List of coregistered files from coreg_pet.apply_to_files

    pvc_output.pvc_out: existing file
        The output of the PETPVC process.

    pvc_output.petpvc_mask: existing file
        The mask built for the PETPVC.

    pvc_output.brain_mask: existing file
        A brain mask calculated with the tissues file.

    pvc_output.gm_norm: existing file
        The output of the grey matter intensity normalization process.
        This is the last step in the PET signal correction.

    Returns
    -------
    wf: nipype Workflow
    """
    # fixed parameters of the NUK mMR
    psf_fwhm = (4.3, 4.3, 4.3)

    # specify input and output fields
    in_fields = [
        "in_file",
        "reference_file",
        "tissues",
    ]

    out_fields = [
        "coreg_ref",
        "coreg_others",
        "pvc_out",
        "petpvc_mask",
        "brain_mask",
        "gm_norm",
    ]

    # input
    pet_input = setup_node(IdentityInterface(fields=in_fields,
                                             mandatory_inputs=True),
                           name="pvc_input")

    flat_list = pe.Node(Function(input_names=['list_of_lists'],
                                 output_names=['out'],
                                 function=flatten_list),
                        name='flatten_tissue_list')

    # coreg pet
    gunzip_pet = setup_node(Gunzip(), name="gunzip_pet")
    coreg_pet = setup_node(spm_coregister(cost_function="mi"),
                           name="coreg_pet")

    tissues_sel = setup_node(Select(index=[0, 1, 2]), name="tissues")
    select_gm = setup_node(Select(index=[0]), name="select_gm")
    pvc = setup_node(petpvc_cmd(fwhm_mm=psf_fwhm, pvc_method='RBV'),
                     name="pvc")

    # output
    pvc_output = setup_node(IdentityInterface(fields=out_fields),
                            name="pvc_output")

    # workflow to create the mask
    mask_wf = petpvc_mask(wf_name="petpvc_mask")

    # workflow for intensity normalization
    norm_wf = intensity_norm(wf_name="intensity_norm_gm")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    wf.connect([
        # inputs
        (pet_input, gunzip_pet, [("in_file", "in_file")]),
        (pet_input, tissues_sel, [("tissues", "inlist")]),
    ])

    # check how to perform the registration, to decide how to build the pipeline
    anat2pet = get_config_setting('registration.anat2pet', False)
    if anat2pet:
        wf.connect([
            # inputs
            (pet_input, coreg_pet, [("reference_file", "source")]),

            # unzip to coregister the reference file (anatomical image) to PET space.
            (gunzip_pet, coreg_pet, [("out_file", "target")]),
            (tissues_sel, flat_list, [("out", "list_of_lists")]),
            (flat_list, coreg_pet, [("out", "apply_to_files")]),

            # the list of tissues to the mask wf and the GM for PET intensity normalization
            (coreg_pet, select_gm, [("coregistered_files", "inlist")]),
            (coreg_pet, mask_wf, [("coregistered_files",
                                   "pvcmask_input.tissues")]),

            # the PET in native space to PVC correction
            (gunzip_pet, pvc, [("out_file", "in_file")]),

            # the merged file with 4 tissues to PVC correction
            (mask_wf, pvc, [("pvcmask_output.petpvc_mask", "mask_file")]),

            # intensity-normalize the PVC'ed PET image using the GM voxel values
            (pvc, norm_wf, [("out_file", "intnorm_input.source")]),
            (select_gm, norm_wf, [("out", "intnorm_input.mask")]),

            # output
            (coreg_pet, pvc_output, [("coregistered_source", "coreg_ref")]),
            (coreg_pet, pvc_output, [("coregistered_files", "coreg_others")]),
            (pvc, pvc_output, [("out_file", "pvc_out")]),
            (mask_wf, pvc_output, [("pvcmask_output.brain_mask", "brain_mask")
                                   ]),
            (mask_wf, pvc_output, [("pvcmask_output.petpvc_mask",
                                    "petpvc_mask")]),
            (norm_wf, pvc_output, [("intnorm_output.out_file", "gm_norm")]),
        ])
    else:  # PET to ANAT
        wf.connect([
            # inputs
            (pet_input, coreg_pet, [("reference_file", "target")]),

            # unzip PET image and set as a source to register it to anatomical space.
            (gunzip_pet, coreg_pet, [("out_file", "source")]),
            (tissues_sel, flat_list, [("out", "list_of_lists")]),
            (flat_list, coreg_pet, [("out", "apply_to_files")]),

            # the list of tissues to the mask wf and the GM for PET intensity normalization
            (tissues_sel, select_gm, [("out", "inlist")]),
            (flat_list, mask_wf, [("out", "pvcmask_input.tissues")]),

            # the PET in ANAT space to PVC correction
            (coreg_pet, pvc, [("coregistered_source", "in_file")]),

            # the merged file with 4 tissues to PVC correction
            (mask_wf, pvc, [("pvcmask_output.petpvc_mask", "mask_file")]),

            # intensity-normalize the PVC'ed PET image using the GM voxel values
            (pvc, norm_wf, [("out_file", "intnorm_input.source")]),
            (select_gm, norm_wf, [("out", "intnorm_input.mask")]),

            # output
            # TODO: coreg_ref should have a different name in this case
            (coreg_pet, pvc_output, [("coregistered_source", "coreg_ref")]),
            (coreg_pet, pvc_output, [("coregistered_files", "coreg_others")]),
            (pvc, pvc_output, [("out_file", "pvc_out")]),
            (mask_wf, pvc_output, [("pvcmask_output.brain_mask", "brain_mask")
                                   ]),
            (mask_wf, pvc_output, [("pvcmask_output.petpvc_mask",
                                    "petpvc_mask")]),
            (norm_wf, pvc_output, [("intnorm_output.out_file", "gm_norm")]),
        ])

    return wf
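The registration direction is decided once, from configuration, before the graph is wired; a sketch (assuming the same `update_config` helper as above):

from neuro_pypes.config import update_config  # assumed helper

update_config({'registration.anat2pet': True})  # False: PET is moved to anatomical space instead
wf = petpvc_workflow()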
Ejemplo n.º 17
0
def auto_nipy_slicetime(in_files=traits.Undefined,
                        time_repetition=traits.Undefined,
                        slice_order=traits.Undefined,
                        loops=5,
                        wf_name='auto_nipy_slicetime'):
    """ A workflow that tries to automatically read the slice timing correction parameters
    from the input files and passes them to a nipy.fMRIRealign4D node.

    Parameters
    ----------
    in_files: str or list of str
        Path to the input file(s).

    time_repetition: int or str
        The time repetition (TR) of the input dataset in seconds.
        If left undefined, the TR will be read from the NIfTI image header.

    slice_order: list of int
        List of integers with the order in which slices are acquired.

    loops: int
        Number of loops used for the realignment runs.

    wf_name: str
        Name of the workflow

    Nipype Inputs
    -------------
    ## Mandatory:
    stc_params.in_files

    stc_params.time_repetition

    ## Optional:
    stc_params.slice_order

    stc_params.ref_slice

    stc_params.slice_mode

    Nipype Outputs
    --------------
    slice_timer.out_file

    slice_timer.par_file

    Returns
    -------
    auto_nipy_stc: nipype Workflow
        Nipy 4D alignment and slice timing correction workflow with automatic
        parameters detection.
    """
    # Declare the processing nodes
    params = setup_node(STCParametersInterface(in_files=in_files),
                        name='stc_params')
    stc = setup_node(nipy_fmrirealign4d(time_repetition=time_repetition,
                                        slice_order=slice_order,
                                        loops=loops),
                     name='slice_timer')

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # Connect the nodes
    wf.connect([
        (params, stc, [
            ("in_files", "in_files"),
            ("slice_order", "slice_order"),
            ("time_repetition", "tr"),
        ]),
    ])

    return wf
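A usage sketch; when `time_repetition` and `slice_order` are left undefined, the `stc_params` node tries to read them from the image header (the path is a placeholder):

wf = auto_nipy_slicetime(wf_name='stc')
wf.inputs.stc_params.in_files = '/data/sub-01/rest.nii.gz'  # hypothetical path
wf.run()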
Ejemplo n.º 18
0
def motion_power_stats_wf(wf_name='gen_motion_stats'):
    """ The main purpose of this workflow is to get various statistical measures from the
    movement/motion parameters obtained in functional preprocessing.

    These parameters (FD calculations) are also required to carry out scrubbing.

    Order of commands:

    - Calculate Framewise Displacement (FD) as per Power et al., 2012

      Differentiating head realignment parameters across frames yields a six dimensional timeseries that represents
      instantaneous head motion.
      Rotational displacements are converted from degrees to millimeters by calculating displacement on the surface of
      a sphere of radius 50 mm.[R5]

    - Calculate Framewise Displacement (FD) as per Jenkinson et al., 2002

    - Calculate Frames to exclude

      Remove all frames whose FD exceeds the threshold

    - Calculate Frames to include

      Include all frames whose FD is below the threshold

    - Calculate DVARS

      DVARS (D referring to the temporal derivative of timecourses, VARS referring to RMS variance over voxels)
      indexes the rate of change of the BOLD signal across the entire brain at each frame of data. To calculate
      DVARS, the volumetric timeseries is differentiated (by backwards differences) and the RMS signal
      change is calculated over the whole brain. DVARS is thus a measure of how much the intensity
      of a brain image changes in comparison to the previous timepoint (as opposed to the global
      signal, which is the average value of a brain image at a timepoint).[R5]

    - Calculate Power parameters::

        MeanFD : Mean (across time/frames) of the absolute values for Framewise Displacement (FD),
        computed as described in Power et al., NeuroImage, 2012

        rootMeanSquareFD : Root mean square (RMS; across time/frames) of the absolute values for FD

        NumFD >=threshold : Number of frames (time points) where movement (FD) exceeded threshold

        rmsFD : Root mean square (RMS; across time/frames) of the absolute values for FD

        FDquartile(top 1/4th FD) : Mean of the top 25% highest FD values

        PercentFD( > threshold) : Number of frames (time points) where movement (FD) exceeded threshold
                                  expressed as a percentage of the total number of frames (time points)

        MeanDVARS : Mean of voxel DVARS

    - Calculate Motion Parameters

      Following motion parameters are calculated::

        Subject, Scan, Mean Relative RMS Displacement, Max Relative RMS Displacement,
        Movements >threshold, Mean Relative Mean Rotation, Mean Relative Maxdisp,
        Max Relative Maxdisp, Max Abs Maxdisp, Max Relative Roll,Max Relative Pitch,
        Max Relative Yaw, Max Relative dS-I, Max Relative dL-R,Max Relative dP-A,
        Mean Relative Roll, Mean Relative Pitch,Mean Relative Yaw, Mean Relative dS-I,
        Mean Relative dL-R, Mean Relative dP-A, Max Abs Roll, Max Abs Pitch, Max Abs Yaw,
        Max Abs dS-I, Max Abs dL-R, Max Abs dP-A, Mean Abs Roll,Mean Abs Pitch,Mean Abs Yaw,
        Mean Abs dS-I,Mean Abs dL-R,Mean Abs dP-A

    Parameters
    ----------
    wf_name: str
        Workflow name

    Returns
    -------
    param_wf: workflow object
          Workflow object containing various movement/motion and power parameters estimates.

    Nipype inputs
    -------------
    inputspec.motion_correct : string (func/rest file or a list of func/rest nifti file)
        Path to motion corrected functional data

    inputspec.mask : string (nifti file)
        Path to file containing a brain-only mask for the functional data

    inputspec.max_displacement : string (Mat file)
        maximum displacement (in mm) vector for brain voxels in each volume.
        This file is obtained in functional preprocessing step

    inputspec.movement_parameters : string (Mat file)
        1D file containing the six movement/motion parameters (3 translations, 3 rotations)
        in different columns (roll pitch yaw dS dL dP), obtained in the functional preprocessing step

    scrubbing_input.threshold : a float
        scrubbing threshold

    scrubbing_input.remove_frames_before : an integer
        count of preceding frames to the offending time
        frames to be removed (i.e.,those exceeding FD threshold)

    scrubbing_input.remove_frames_after : an integer
        count of subsequent frames to the offending time
        frames to be removed (i.e., those exceeding FD threshold)

    Nipype outputs
    --------------
    outputspec.FD_1D : 1D file
        mean Framewise Displacement (FD)

    outputspec.frames_ex_1D : 1D file
        Number of frames that would be censored ("scrubbed"),
        removing the offending time frames (i.e., those exceeding the threshold),
        the preceding frame, and the two subsequent frames

    outputspec.frames_in_1D : 1D file
        Number of frames left after removal for scrubbing

    outputspec.power_params : txt file
        Text file containing various power parameters for scrubbing.

    outputspec.motion_params : txt file
       Text file containing various movement parameters

    References
    ----------
    .. [1] Power, J. D., Barnes, K. A., Snyder, A. Z., Schlaggar, B. L., & Petersen, S. E. (2012). Spurious
           but systematic correlations in functional connectivity MRI networks arise from subject motion. NeuroImage, 59(3),
           2142-2154. doi:10.1016/j.neuroimage.2011.10.018

    .. [2] Power, J. D., Barnes, K. A., Snyder, A. Z., Schlaggar, B. L., & Petersen, S. E. (2012). Steps
           toward optimizing motion artifact removal in functional connectivity MRI; a reply to Carp.
           NeuroImage. doi:10.1016/j.neuroimage.2012.03.017

    .. [3] Jenkinson, M., Bannister, P., Brady, M., Smith, S., 2002. Improved optimization for the robust
           and accurate linear registration and motion correction of brain images. Neuroimage 17, 825-841.
    """
    wf = pe.Workflow(name=wf_name)

    inputNode = setup_node(IdentityInterface(fields=[
        'subject_id', 'scan_id', 'movement_parameters', 'max_displacement',
        'motion_correct', 'mask', 'oned_matrix_save'
    ]),
                           name='inputspec')

    scrubbing_input = setup_node(IdentityInterface(
        fields=['threshold', 'remove_frames_before', 'remove_frames_after']),
                                 name='scrubbing_input')

    outputNode = setup_node(IdentityInterface(fields=[
        'FD_1D', 'FDJ_1D', 'frames_ex_1D', 'frames_in_1D', 'power_params',
        'motion_params'
    ]),
                            name='outputspec')

    # calculate mean DVARS
    cal_DVARS = setup_node(Function(input_names=['in_file', 'mask'],
                                    output_names=['out_file'],
                                    function=calculate_DVARS),
                           name='cal_DVARS')
    wf.connect(inputNode, 'motion_correct', cal_DVARS, 'rest')
    wf.connect(inputNode, 'mask', cal_DVARS, 'mask')

    # Calculating mean Framewise Displacement as per power et al., 2012
    calculate_FD = setup_node(Function(input_names=['in_file'],
                                       output_names=['out_file'],
                                       function=calculate_FD_P),
                              name='calculate_FD')

    wf.connect(inputNode, 'movement_parameters', calculate_FD, 'in_file')
    wf.connect(calculate_FD, 'out_file', outputNode, 'FD_1D')

    # Calculating mean Framewise Displacement as per jenkinson et al., 2002
    calculate_FDJ = setup_node(Function(input_names=['in_file'],
                                        output_names=['out_file'],
                                        function=calculate_FD_J),
                               name='calculate_FDJ')

    wf.connect(inputNode, 'oned_matrix_save', calculate_FDJ, 'in_file')
    wf.connect(calculate_FDJ, 'out_file', outputNode, 'FDJ_1D')

    # calculate frames to exclude and include after scrubbing
    exclude_frames = setup_node(Function(
        input_names=['in_file', 'threshold', 'frames_before', 'frames_after'],
        output_names=['out_file'],
        function=set_frames_ex),
                                name='exclude_frames')

    wf.connect(calculate_FD, 'out_file', exclude_frames, 'in_file')
    wf.connect(scrubbing_input, 'threshold', exclude_frames, 'threshold')
    wf.connect(scrubbing_input, 'remove_frames_before', exclude_frames,
               'frames_before')
    wf.connect(scrubbing_input, 'remove_frames_after', exclude_frames,
               'frames_after')
    wf.connect(exclude_frames, 'out_file', outputNode, 'frames_ex_1D')

    include_frames = setup_node(Function(
        input_names=['in_file', 'threshold', 'exclude_list'],
        output_names=['out_file'],
        function=set_frames_in),
                                name='include_frames')

    wf.connect(calculate_FD, 'out_file', include_frames, 'in_file')
    wf.connect(scrubbing_input, 'threshold', include_frames, 'threshold')
    wf.connect(exclude_frames, 'out_file', include_frames, 'exclude_list')
    wf.connect(include_frames, 'out_file', outputNode, 'frames_in_1D')

    calc_motion_parameters = setup_node(Function(
        input_names=[
            "subject_id", "scan_id", "movement_parameters", "max_displacement"
        ],
        output_names=['out_file'],
        function=gen_motion_parameters),
                                        name='calc_motion_parameters')
    wf.connect(inputNode, 'subject_id', calc_motion_parameters, 'subject_id')
    wf.connect(inputNode, 'scan_id', calc_motion_parameters, 'scan_id')
    wf.connect(inputNode, 'movement_parameters', calc_motion_parameters,
               'movement_parameters')
    wf.connect(inputNode, 'max_displacement', calc_motion_parameters,
               'max_displacement')
    wf.connect(calc_motion_parameters, 'out_file', outputNode, 'motion_params')

    calc_power_parameters = setup_node(Function(input_names=[
        "subject_id", "scan_id", "FD_1D", "FDJ_1D", "threshold", "DVARS"
    ],
                                                output_names=['out_file'],
                                                function=gen_power_parameters),
                                       name='calc_power_parameters')
    wf.connect(inputNode, 'subject_id', calc_power_parameters, 'subject_id')
    wf.connect(inputNode, 'scan_id', calc_power_parameters, 'scan_id')
    wf.connect(cal_DVARS, 'out_file', calc_power_parameters, 'DVARS')
    wf.connect(calculate_FD, 'out_file', calc_power_parameters, 'FD_1D')
    wf.connect(calculate_FDJ, 'out_file', calc_power_parameters, 'FDJ_1D')
    wf.connect(scrubbing_input, 'threshold', calc_power_parameters,
               'threshold')

    wf.connect(calc_power_parameters, 'out_file', outputNode, 'power_params')

    return wf
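The Power et al. FD computation described in the docstring reduces to a few lines of NumPy. A sketch for illustration only (not the `calculate_FD_P` implementation); it assumes an (n_volumes, 6) parameter array with the three rotations, in radians, in the first columns, as FSL's realignment writes them:

import numpy as np

def framewise_displacement(params, radius=50.0):
    # backward differences of the six realignment parameters
    diffs = np.abs(np.diff(params, axis=0))
    # rotations (radians) -> millimeters of arc on a sphere of `radius` mm
    diffs[:, :3] *= radius
    # FD is the sum of the six absolute displacements; the first frame has no predecessor
    return np.concatenate([[0.0], diffs.sum(axis=1)])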
Ejemplo n.º 19
0
def camino_tractography(wf_name="camino_tract"):
    """ Run the diffusion MRI pre-processing workflow against the diff files in `data_dir`.

    Nipype Inputs
    -------------
    tract_input.diff: traits.File
        path to the diffusion MRI image

    tract_input.bval: traits.File
        path to the bvals file

    tract_input.bvec: traits.File
        path to the bvecs file

    tract_input.mask: traits.File
        path to the brain mask file

    tract_input.atlas: traits.File
        path to the atlas file

    Nipype Outputs
    ---------------
    tract_output.tensor
        The result of fitting the tensor model to the whole image.

    tract_output.tracks
        The tractography result.

    tract_output.connectivity
        The atlas ROIxROI structural connectivity matrix.

    tract_output.mean_fa
        The atlas ROIxROI structural connectivity matrix with average FA values.

    tract_output.fa
        The voxelwise fractional anisotropy image.

    Returns
    -------
    wf: nipype Workflow
    """
    in_fields = ["diff", "bvec", "bval", "mask", "atlas"]
    out_fields = ["tensor", "tracks", "connectivity", "mean_fa", "fa"]

    tract_input = pe.Node(IdentityInterface(fields=in_fields, mandatory_inputs=True), name="tract_input")

    img2vox_diff = setup_node(Image2Voxel(out_type="float"), name="img2vox_diff")
    img2vox_mask = setup_node(Image2Voxel(out_type="short"), name="img2vox_mask")
    fsl2scheme = setup_node(FSL2Scheme(), name="fsl2scheme")
    dtifit = setup_node(DTIFit(), name="dtifit")
    fa = setup_node(ComputeFractionalAnisotropy(), name="fa")

    analyzehdr_fa = setup_node(interface=AnalyzeHeader(), name="analyzeheader_fa")
    analyzehdr_fa.inputs.datatype = "double"

    fa2nii = setup_node(interface=misc.CreateNifti(), name='fa2nii')

    track = setup_node(Track(inputmodel="dt", out_file="tracts.Bfloat"), name="track")
    conmat = setup_node(Conmat(output_root="conmat_"), name="conmat")

    tract_output = pe.Node(IdentityInterface(fields=out_fields), name="tract_output")

    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # Connect the nodes
    wf.connect([
        # convert data to camino format
        (tract_input, img2vox_diff, [("diff", "in_file")]),
        (tract_input, img2vox_mask, [("mask", "in_file")]),

        # convert bvec and bval to camino scheme
        (tract_input, fsl2scheme, [("bvec", "bvec_file"),
                                   ("bval", "bval_file")]),

        # dtifit
        (img2vox_diff, dtifit, [("voxel_order", "in_file")]),
        (img2vox_mask, dtifit, [("voxel_order", "bgmask")]),
        (fsl2scheme, dtifit, [("scheme", "scheme_file")]),

        # calculate FA
        (fsl2scheme, fa, [("scheme", "scheme_file")]),
        (dtifit, fa, [("tensor_fitted", "in_file")]),

        # tractography
        (tract_input, track, [("atlas", "seed_file")]),
        (dtifit, track, [("tensor_fitted", "in_file")]),

        # convert FA data to NifTI
        (fa, analyzehdr_fa, [("fa", "in_file")]),
        (tract_input, analyzehdr_fa, [(('diff', get_vox_dims), "voxel_dims"),
                                      (('diff', get_data_dims), "data_dims")]),

        (tract_input, fa2nii, [(("diff", get_affine), "affine")]),
        (analyzehdr_fa, fa2nii, [("header", "header_file")]),
        (fa, fa2nii, [("fa", "data_file")]),

        # connectivity matrix
        (tract_input, conmat, [("atlas", "target_file")]),
        (track, conmat, [("tracked", "in_file")]),
        (fa2nii, conmat, [("nifti_file", "scalar_file")]),

        # output
        (fa2nii, tract_output, [("nifti_file", "fa")]),
        (dtifit, tract_output, [("tensor_fitted", "tensor")]),
        (track, tract_output, [("tracked", "tracks")]),
        (conmat, tract_output, [("conmat_sc", "connectivity"),
                                ("conmat_ts", "mean_fa")]),
    ])
    return wf
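A wiring sketch (placeholder paths; the atlas must already be in diffusion space, since it seeds the tractography and defines the connectivity ROIs):

wf = camino_tractography()
wf.inputs.tract_input.diff = '/data/sub-01/dwi.nii.gz'         # hypothetical paths
wf.inputs.tract_input.bval = '/data/sub-01/dwi.bval'
wf.inputs.tract_input.bvec = '/data/sub-01/dwi.bvec'
wf.inputs.tract_input.mask = '/data/sub-01/brain_mask.nii.gz'
wf.inputs.tract_input.atlas = '/data/sub-01/atlas_dwi.nii.gz'
wf.run()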
Ejemplo n.º 20
0
def attach_spm_fmri_grouptemplate_wf(main_wf, wf_name='spm_epi_grouptemplate'):
    """ Attach a fMRI pre-processing workflow that uses SPM12 to `main_wf`.
    This workflow picks the 'rest_output.avg_epi' outputs from the 'fmri_cleanup' workflows in `main_wf`,
    warps them to MNI and averages them to create a group template.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow


    Nipype Inputs
    -------------
    rest_input.in_file: traits.File
        The slice time and motion corrected fMRI file.

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have an `input_files` and a `datasink` nodes.

    rest_output.avg_epi_mni: input node

    datasink: nipype Node

    spm_rest_preproc_mni: nipype Workflow

    Nipype Outputs
    --------------
    group_template.fmri_template: file
        The path to the fMRI group template.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - fmri_cleanup: for the `rest_output.avg_epi` output

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    fmri_cleanup_wf = get_subworkflow(main_wf, 'fmri_cleanup')

    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf, name='datasink')

    # The base name of the 'rest' file for the substitutions
    fmri_fbasename = remove_ext(
        os.path.basename(get_input_file_name(in_files, 'rest')))

    # the group template datasink
    base_outdir = datasink.inputs.base_directory
    grp_datasink = pe.Node(
        DataSink(parameterization=False, base_directory=base_outdir),
        name='{}_grouptemplate_datasink'.format(fmri_fbasename))
    grp_datasink.inputs.container = '{}_grouptemplate'.format(fmri_fbasename)

    # the list of the average EPIs from all the subjects
    # avg_epi_map = pe.MapNode(IdentityInterface(fields=['avg_epis']), iterfield=['avg_epis'], name='avg_epi_map')

    avg_epis = pe.JoinNode(IdentityInterface(fields=['avg_epis']),
                           joinsource='infosrc',
                           joinfield='avg_epis',
                           name='avg_epis')

    # directly warp the avg EPI to the SPM standard template
    warp_epis = spm_warp_to_mni("spm_warp_avgepi_to_mni")

    # the group template workflow
    template_wf = spm_create_group_template_wf(wf_name)

    # output node
    output = setup_node(IdentityInterface(fields=['fmri_template']),
                        name='group_template')

    # group dataSink output substitutions
    regexp_subst = [
        (r'/wgrptemplate{fmri}_merged_mean_smooth.nii$',
         '/{fmri}_grouptemplate_mni.nii'),
        (r'/w{fmri}_merged_mean_smooth.nii$', '/{fmri}_grouptemplate_mni.nii'),
    ]
    regexp_subst = format_pair_list(regexp_subst, fmri=fmri_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    grp_datasink.inputs.regexp_substitutions = extend_trait_list(
        grp_datasink.inputs.regexp_substitutions, regexp_subst)

    # Connect the nodes
    main_wf.connect([
        # the avg EPI inputs
        (fmri_cleanup_wf, avg_epis, [('rest_output.avg_epi', 'avg_epis')]),

        # warp avg EPIs to MNI
        (avg_epis, warp_epis, [('avg_epis', 'warp_input.in_files')]),

        # group template wf
        (warp_epis, template_wf, [('warp_output.warped_files',
                                   'grptemplate_input.in_files')]),

        # output node
        (template_wf, output, [('grptemplate_output.template', 'fmri_template')
                               ]),

        # template output
        (output, grp_datasink, [('fmri_template', '@fmri_group_template')]),
        (warp_epis, grp_datasink, [('warp_output.warped_files',
                                    'individuals.@warped')]),
    ])

    return main_wf
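A hedged attachment sketch; it assumes `main_wf` was already assembled with the `fmri_cleanup` sub-workflow, an `infosrc` iterable node, an input-files node and a `datasink`:

main_wf = attach_spm_fmri_grouptemplate_wf(main_wf, wf_name='spm_epi_grouptemplate')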
Ejemplo n.º 21
0
def spm_anat_preprocessing(wf_name="spm_anat_preproc"):
    """ Run the T1 pre-processing workflow against the anat_hc
    files in `data_dir`.

    It does:
    - N4BiasFieldCorrection
    - SPM12 New Segment
    - SPM12 Warp of MPRAGE to MNI

    [Optional: from config]
    - Atlas file warping to MPRAGE
    - Cortical thickness (SPM+DiReCT)

    Nipype Inputs
    -------------
    anat_input.in_file: traits.File
        Path to the anatomical image.

    anat_input.atlas_file: traits.File
        Path to an atlas file in MNI space to be
        warped to the anatomical space.
        Can also be set through the configuration
        setting `atlas_file`.

    Nipype Outputs
    --------------
    anat_output.anat_mni: traits.File
        The bias-field normalized to MNI anatomical image.

    anat_output.tissues_warped: traits.File
        The tissue segmentation in MNI space from SPM.

    anat_output.tissues_native: traits.File
        The tissue segmentation in native space from SPM.

    anat_output.affine_transform: traits.File
        The affine transformation file.

    anat_output.warp_forward: traits.File
        The forward (anat to MNI) warp field from SPM.

    anat_output.warp_inverse: traits.File
        The inverse (MNI to anat) warp field from SPM.

    anat_output.anat_biascorr: traits.File
        The bias-field corrected anatomical image.

    anat_output.brain_mask: traits.File
        A brain mask file in anatomical space.
        This is calculated by summing up the maps of
        segmented tissues (CSF, WM, GM) and then binarised.

    anat_output.atlas_anat: traits.File
        If `atlas_file` is an existing file in MNI space.
        The atlas file warped to anatomical space,
        if do_atlas and the atlas file is set in configuration.

    anat_output.cortical_thickness: traits.File
        If `anat_preproc.do_cortical_thickness` is True.
        The cortical thickness estimations calculated with the
        SPM+DiReCT method (KellyKapowski).

    anat_output.warped_white_matter: traits.File
        If `anat_preproc.do_cortical_thickness` is True.
        The warped white matter image calculated with the
        SPM+DiReCT method (KellyKapowski).

    Returns
    -------
    wf: nipype Workflow
    """
    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # specify input and output fields
    in_fields = ["in_file"]
    out_fields = [
        "anat_mni",
        "tissues_warped",
        "tissues_native",
        "affine_transform",
        "warp_forward",
        "warp_inverse",
        "anat_biascorr",
        "brain_mask",
    ]

    # check if we have to warp an atlas files too.
    do_atlas, atlas_file = check_atlas_file()
    if do_atlas:
        in_fields += ["atlas_file"]
        out_fields += ["atlas_anat"]

    # check if we have to do cortical thickness (SPM+DiReCT) method.
    do_cortical_thickness = get_config_setting(
        'anat_preproc.do_cortical_thickness', False)
    if do_cortical_thickness:
        out_fields += [
            "cortical_thickness",
            "warped_white_matter",
        ]

    # input node
    anat_input = pe.Node(IdentityInterface(fields=in_fields,
                                           mandatory_inputs=True),
                         name="anat_input")

    # atlas registration
    if do_atlas and not isdefined(anat_input.inputs.atlas_file):
        anat_input.inputs.set(atlas_file=atlas_file)

    # T1 preprocessing nodes
    biascor = setup_node(biasfield_correct(), name="bias_correction")
    gunzip_anat = setup_node(Gunzip(), name="gunzip_anat")
    segment = setup_node(spm_segment(), name="new_segment")
    warp_anat = setup_node(spm_apply_deformations(), name="warp_anat")

    tpm_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="tpm_bbox")
    tpm_bbox.inputs.in_file = spm_tpm_priors_path()

    # calculate brain mask from tissue maps
    tissues = setup_node(IdentityInterface(fields=["gm", "wm", "csf"],
                                           mandatory_inputs=True),
                         name="tissues")

    brain_mask = setup_node(Function(
        function=math_img,
        input_names=["formula", "out_file", "gm", "wm", "csf"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                            name='brain_mask')
    brain_mask.inputs.out_file = "tissues_brain_mask.nii.gz"
    brain_mask.inputs.formula = "np.abs(gm + wm + csf) > 0"

    # output node
    anat_output = pe.Node(IdentityInterface(fields=out_fields),
                          name="anat_output")

    # Connect the nodes
    wf.connect([
        # input to biasfieldcorrection
        (anat_input, biascor, [("in_file", "input_image")]),

        # new segment
        (biascor, gunzip_anat, [("output_image", "in_file")]),
        (gunzip_anat, segment, [("out_file", "channel_files")]),

        # Normalize12
        (segment, warp_anat, [("forward_deformation_field", "deformation_file")
                              ]),
        (segment, warp_anat, [("bias_corrected_images", "apply_to_files")]),
        (tpm_bbox, warp_anat, [("bbox", "write_bounding_box")]),

        # brain mask from tissues
        (segment, tissues, [
            (("native_class_images", selectindex, 0), "gm"),
            (("native_class_images", selectindex, 1), "wm"),
            (("native_class_images", selectindex, 2), "csf"),
        ]),
        (tissues, brain_mask, [
            ("gm", "gm"),
            ("wm", "wm"),
            ("csf", "csf"),
        ]),

        # output
        (warp_anat, anat_output, [("normalized_files", "anat_mni")]),
        (segment, anat_output, [("modulated_class_images", "tissues_warped"),
                                ("native_class_images", "tissues_native"),
                                ("transformation_mat", "affine_transform"),
                                ("forward_deformation_field", "warp_forward"),
                                ("inverse_deformation_field", "warp_inverse"),
                                ("bias_corrected_images", "anat_biascorr")]),
        (brain_mask, anat_output, [("out_file", "brain_mask")]),
    ])

    # atlas warping nodes
    if do_atlas:
        gunzip_atlas = pe.Node(Gunzip(), name="gunzip_atlas")
        warp_atlas = setup_node(spm_apply_deformations(), name="warp_atlas")
        anat_bbox = setup_node(Function(function=get_bounding_box,
                                        input_names=["in_file"],
                                        output_names=["bbox"]),
                               name="anat_bbox")

        # set the warping interpolation to nearest neighbour.
        warp_atlas.inputs.write_interp = 0

        # connect the atlas registration nodes
        wf.connect([
            (anat_input, gunzip_atlas, [("atlas_file", "in_file")]),
            (gunzip_anat, anat_bbox, [("out_file", "in_file")]),
            (gunzip_atlas, warp_atlas, [("out_file", "apply_to_files")]),
            (segment, warp_atlas, [("inverse_deformation_field",
                                    "deformation_file")]),
            (anat_bbox, warp_atlas, [("bbox", "write_bounding_box")]),
            (warp_atlas, anat_output, [("normalized_files", "atlas_anat")]),
        ])

    # cortical thickness (SPM+DiReCT) method
    if do_cortical_thickness:
        from ..interfaces.ants import KellyKapowski

        segm_img = setup_node(Function(
            function=math_img,
            input_names=["formula", "out_file", "gm", "wm"],
            output_names=["out_file"],
            imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                              name='gm-wm_image')
        segm_img.inputs.out_file = "gm_wm.nii.gz"
        segm_img.inputs.formula = '((gm >= 0.5)*2 + (wm > 0.5)*3).astype(np.uint8)'

        # copy the header from the GM tissue image to the result from `gm-wm_image`.
        # this is necessary because the `gm-wm_image` operation sometimes modifies the
        # offset of the image, which will provoke an ANTs exception due to
        # ITK tolerance in ImageToImageFilter
        # https://github.com/stnava/ANTs/issues/74
        cp_hdr = setup_node(Function(
            function=copy_header,
            input_names=["in_file", "data_file"],
            output_names=["out_file"],
            imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                            name='copy_header')

        kk = setup_node(KellyKapowski(), name='direct')
        kk.inputs.cortical_thickness = 'direct_cortical_thickness.nii.gz'
        kk.inputs.warped_white_matter = 'direct_warped_white_matter.nii.gz'

        # connect the cortical thickness (SPM+DiReCT) method
        wf.connect([
            # create segmentation GM+WM file
            (tissues, segm_img, [("gm", "gm"), ("wm", "wm")]),
            (segm_img, cp_hdr, [("out_file", "data_file")]),
            (tissues, cp_hdr, [("gm", "in_file")]),

            # kellykapowski
            (cp_hdr, kk, [("out_file", "segmentation_image")]),
            (tissues, kk, [("gm", "gray_matter_prob_image"),
                           ("wm", "white_matter_prob_image")]),
            (kk, anat_output, [("cortical_thickness", "cortical_thickness"),
                               ("warped_white_matter", "warped_white_matter")
                               ]),
        ])
    return wf
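The brain-mask node above evaluates the formula `np.abs(gm + wm + csf) > 0` through a nilearn-based helper; stand-alone, the same operation can be sketched with the public nilearn API (the tissue image variables are placeholders):

from nilearn.image import math_img

# gm_img, wm_img, csf_img: the three native-space tissue maps from the segmentation
mask = math_img('np.abs(gm + wm + csf) > 0', gm=gm_img, wm=wm_img, csf=csf_img)
mask.to_filename('tissues_brain_mask.nii.gz')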
Ejemplo n.º 22
0
def petpvc_mask(wf_name="petpvc_mask"):
    """ A Workflow that returns a 4D merge of 4 volumes for PETPVC: GM, WM, CSF and background.

    Parameters
    ----------
    wf_name: str
        The name of the workflow.

    Nipype.Inputs
    -------------
    pvcmask_input.tissues: list of existing files
        List of tissue files in anatomical space, the 3 file
        paths must be in this order: GM, WM, CSF

    Nipype.Outputs
    --------------
    pvcmask_output.petpvc_mask: existing file
        A 4D volume file with these maps in order: GM, WM, CSF, background

    pvcmask_output.brain_mask: existing file
        A mask that is a binarised sum of the tissues file with fslmaths.
        Can be used as brain mask in anatomical space for the PET image.

    Returns
    -------
    wf: nipype Workflow
    """
    # define nodes
    # specify input and output fields
    in_fields = ["tissues"]

    out_fields = [
        "petpvc_mask",
        "brain_mask",
    ]

    # input
    pvcmask_input = setup_node(IdentityInterface(fields=in_fields,
                                                 mandatory_inputs=True),
                               name="pvcmask_input")

    tissues = setup_node(IdentityInterface(fields=["gm", "wm", "csf"],
                                           mandatory_inputs=True),
                         name="tissues")

    merge_list = setup_node(Merge(4), name="merge_list")

    # maths for background
    img_bkg = setup_node(Function(
        function=math_img,
        input_names=["formula", "out_file", "gm", "wm", "csf"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                         name='background')
    img_bkg.inputs.out_file = "tissue_bkg.nii.gz"
    img_bkg.inputs.formula = "np.maximum((-((gm + wm + csf) - 1)), 0)"

    # maths for brain mask
    brain_mask = setup_node(Function(
        function=math_img,
        input_names=["formula", "out_file", "gm", "wm", "csf"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                            name='brain_mask')
    brain_mask.inputs.out_file = "tissues_brain_mask.nii.gz"
    brain_mask.inputs.formula = "np.abs(gm + wm + csf) > 0"

    # concat the tissues images and the background for PETPVC
    merge_tissues = setup_node(Function(
        function=concat_imgs,
        input_names=["in_files"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                               name='merge_tissues')
    merge_tissues.inputs.out_file = "petpvc_mask.nii.gz"

    # output
    pvcmask_output = setup_node(IdentityInterface(fields=out_fields),
                                name="pvcmask_output")

    # Create the workflow object
    wf = Workflow(name=wf_name)

    # Connect the nodes
    wf.connect([
        # split the tissues list into gm, wm and csf fields
        (pvcmask_input, tissues, [(("tissues", selectindex, 0), "gm"),
                                  (("tissues", selectindex, 1), "wm"),
                                  (("tissues", selectindex, 2), "csf")]),
        (tissues, img_bkg, [("gm", "gm"), ("wm", "wm"), ("csf", "csf")]),
        (tissues, brain_mask, [("gm", "gm"), ("wm", "wm"), ("csf", "csf")]),
        (tissues, merge_list, [("gm", "in1"), ("wm", "in2"), ("csf", "in3")]),

        # create a list of [GM, WM, CSF, BKG]
        (img_bkg, merge_list, [("out_file", "in4")]),

        # merge into 4D: [GM, WM, CSF, BKG]
        (merge_list, merge_tissues, [("out", "in_files")]),

        # output
        (merge_tissues, pvcmask_output, [("out_file", "petpvc_mask")]),
        (brain_mask, pvcmask_output, [("out_file", "brain_mask")]),
    ])

    return wf
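
A minimal usage sketch for petpvc_mask, assuming the workflow runs as-is in a
nipype pipeline; the tissue map paths and the working directory below are
hypothetical placeholders.

wf = petpvc_mask(wf_name="petpvc_mask")
wf.inputs.pvcmask_input.tissues = [
    "tissue_gm.nii.gz",   # hypothetical GM probability map
    "tissue_wm.nii.gz",   # hypothetical WM probability map
    "tissue_csf.nii.gz",  # hypothetical CSF probability map
]
wf.base_dir = "/tmp/petpvc_mask_work"  # hypothetical working directory
wf.run()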
Example No. 23
def spm_warp_fmri_wf(wf_name="spm_warp_fmri", register_to_grptemplate=False):
    """ Run SPM to warp resting-state fMRI pre-processed data to MNI or a given
    template.

    Tasks:
    - Warp the inputs to MNI, or to a group template if
      `register_to_grptemplate` is True.

    Parameters
    ----------
    wf_name: str
        Name of the workflow.

    register_to_grptemplate: bool
        If True, the workflow expects the wfmri_input.epi_template input and
        uses it as a group template for inter-subject registration.

    Nipype Inputs
    -------------
    wfmri_input.in_file: traits.File
        The slice time and motion corrected fMRI file.

    wfmri_input.reference_file: traits.File
        The anatomical image in its native space
        for registration reference.

    wfmri_input.anat_fmri: traits.File
        The anatomical image in fMRI space.

    wfmri_input.anat_to_mni_warp: traits.File
        The warp field from the transformation of the
        anatomical image to the standard MNI space.

    wfmri_input.time_filtered: traits.File
        The bandpass time filtered fMRI file.

    wfmri_input.avg_epi: traits.File
        The average EPI from the fMRI file.

    wfmri_input.epi_template: traits.File
        Reference EPI template file for inter subject registration.
        If `register_to_grptemplate` is True you must specify this input.

    wfmri_input.brain_mask: traits.File
        Brain mask in fMRI space.

    wfmri_input.atlas_anat: traits.File
        Atlas in subject anatomical space.

    Nipype Outputs
    --------------
    wfmri_output.warped_fmri: traits.File
        The slice time, motion, and nuisance corrected fMRI
        file registered to the template.

    wfmri_output.wtime_filtered: traits.File
        The bandpass time filtered fMRI file
        registered to the template.

    wfmri_output.smooth: traits.File
        The smooth bandpass time filtered fMRI file
        registered to the template.

    wfmri_output.wavg_epi: traits.File
        The average EPI from the fMRI file
        registered to the template.

    wfmri_output.warp_field: traits.File
        The fMRI to template warp field.

    wfmri_output.coreg_avg_epi: traits.File
        The average EPI image in anatomical space.

        Only if registration.fmri2mni is false.

    wfmri_output.coreg_others: traits.File
        Other mid-preprocessing fmri images registered to
        anatomical space:

        - wfmri_input.in_file,

        - wfmri_input.brain_mask,

        - wfmri_input.time_filtered.

        Only if registration.fmri2mni is false.

    wfmri_output.wbrain_mask: traits.File
        Brain mask in fMRI space warped to MNI.

    Returns
    -------
    wf: nipype Workflow
    """
    # Create the workflow object
    wf = pe.Workflow(name=wf_name)

    # specify input and output fields
    in_fields = [
        "in_file",
        "anat_fmri",
        "anat_to_mni_warp",
        "brain_mask",
        "reference_file",
        "time_filtered",
        "avg_epi",
    ]

    out_fields = [
        "warped_fmri", "wtime_filtered", "smooth", "wavg_epi", "wbrain_mask",
        "warp_field", "coreg_avg_epi", "coreg_others"
    ]

    if register_to_grptemplate:
        in_fields += ['epi_template']

    do_atlas, _ = check_atlas_file()
    if do_atlas:
        in_fields += ["atlas_anat"]
        out_fields += ["atlas_fmri"]

    # input identities
    wfmri_input = setup_node(IdentityInterface(fields=in_fields,
                                               mandatory_inputs=True),
                             name="wfmri_input")

    # in file unzipper
    in_gunzip = pe.Node(Gunzip(), name="in_gunzip")

    # merge list for normalization input
    merge_list = pe.Node(Merge(2), name='merge_for_warp')
    gunzipper = pe.MapNode(Gunzip(), name="gunzip", iterfield=['in_file'])

    # the template bounding box
    tpm_bbox = setup_node(Function(function=get_bounding_box,
                                   input_names=["in_file"],
                                   output_names=["bbox"]),
                          name="tpm_bbox")

    # smooth the final result
    smooth = setup_node(fsl.IsotropicSmooth(fwhm=8, output_type='NIFTI'),
                        name="smooth_fmri")

    # output identities
    rest_output = setup_node(IdentityInterface(fields=out_fields),
                             name="wfmri_output")

    # check how to perform the registration, to decide how to build the pipeline
    fmri2mni = get_config_setting('registration.fmri2mni', False)
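    # three mutually exclusive registration paths are built below:
    # - register_to_grptemplate: SPM Normalize (estwrite) of the average EPI
    #   to a group EPI template,
    # - fmri2mni: SPM normalization of the average EPI directly to MNI,
    # - otherwise: coregister the fMRI images to the anatomical image and
    #   apply the precomputed anat-to-MNI deformation field.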
    # register to group template
    if register_to_grptemplate:
        gunzip_template = pe.Node(
            Gunzip(),
            name="gunzip_template",
        )
        warp = setup_node(spm.Normalize(jobtype="estwrite",
                                        out_prefix="wgrptmpl_"),
                          name="fmri_grptemplate_warp")
        warp_source_arg = "source"
        warp_outsource_arg = "normalized_source"
        warp_field_arg = "normalization_parameters"

    elif fmri2mni:
        # register to standard template
        warp = setup_node(spm_normalize(), name="fmri_warp")
        tpm_bbox.inputs.in_file = spm_tpm_priors_path()
        warp_source_arg = "image_to_align"
        warp_outsource_arg = "normalized_image"
        warp_field_arg = "deformation_field"

    else:  # fmri2mni is False
        coreg = setup_node(spm_coregister(cost_function="mi"),
                           name="coreg_fmri")
        warp = setup_node(spm_apply_deformations(), name="fmri_warp")
        coreg_files = pe.Node(Merge(3), name='merge_for_coreg')
        warp_files = pe.Node(Merge(2), name='merge_for_warp')
        tpm_bbox.inputs.in_file = spm_tpm_priors_path()

    # make the connections
    if register_to_grptemplate:
        wf.connect([
            # get template bounding box to apply to results
            (wfmri_input, tpm_bbox, [("epi_template", "in_file")]),

            # unzip and forward the template file
            (wfmri_input, gunzip_template, [("epi_template", "in_file")]),
            (gunzip_template, warp, [("out_file", "template")]),
        ])

    if fmri2mni or register_to_grptemplate:
        # prepare the inputs
        wf.connect([
            # unzip the in_file input file
            (wfmri_input, in_gunzip, [("avg_epi", "in_file")]),

            # warp source file
            (in_gunzip, warp, [("out_file", warp_source_arg)]),

            # bounding box
            (tpm_bbox, warp, [("bbox", "write_bounding_box")]),

            # merge the other input files into a list
            (wfmri_input, merge_list, [
                ("in_file", "in1"),
                ("time_filtered", "in2"),
            ]),

            # gunzip them for SPM
            (merge_list, gunzipper, [("out", "in_file")]),

            # apply to files
            (gunzipper, warp, [("out_file", "apply_to_files")]),

            # outputs
            (warp, rest_output, [
                (warp_field_arg, "warp_field"),
                (warp_outsource_arg, "wavg_epi"),
            ]),
        ])

    else:  # FMRI to ANAT
        wf.connect([
            (wfmri_input, coreg, [("reference_file", "target")]),

            # unzip the in_file input file
            (wfmri_input, in_gunzip, [("avg_epi", "in_file")]),
            (in_gunzip, coreg, [("out_file", "source")]),

            # merge the other input files into a list
            (wfmri_input, coreg_files, [
                ("in_file", "in1"),
                ("time_filtered", "in2"),
                ("brain_mask", "in3"),
            ]),

            # gunzip them for SPM
            (coreg_files, gunzipper, [("out", "in_file")]),

            # coregister fmri to anat
            (gunzipper, coreg, [("out_file", "apply_to_files")]),

            # anat to mni warp field
            (wfmri_input, warp, [("anat_to_mni_warp", "deformation_file")]),

            # bounding box
            (tpm_bbox, warp, [("bbox", "write_bounding_box")]),

            # apply to files
            (coreg, warp_files, [("coregistered_source", "in1")]),
            (coreg, warp_files, [("coregistered_files", "in2")]),
            (warp_files, warp, [("out", "apply_to_files")]),

            # outputs
            (warp, rest_output, [
                (("normalized_files", selectindex, 0), "wavg_epi"),
            ]),
            (coreg, rest_output, [("coregistered_source", "coreg_avg_epi")]),
            (coreg, rest_output, [("coregistered_files", "coreg_others")]),
        ])

    # atlas file in fMRI space
    if do_atlas:
        coreg_atlas = setup_node(spm_coregister(cost_function="mi"),
                                 name="coreg_atlas2fmri")

        # set the registration interpolation to nearest neighbour.
        coreg_atlas.inputs.write_interp = 0
        wf.connect([
            (wfmri_input, coreg_atlas, [
                ("reference_file", "source"),
                ("atlas_anat", "apply_to_files"),
            ]),
            (in_gunzip, coreg_atlas, [("out_file", "target")]),
            (coreg_atlas, rest_output, [("coregistered_files", "atlas_fmri")]),
        ])

    # smooth and sink
    # the position of each image in `normalized_files` depends on the branch:
    # [in_file, time_filtered] when warping directly to MNI or to a group
    # template; [avg_epi, in_file, time_filtered, brain_mask] when going
    # through the anatomical coregistration.
    if fmri2mni or register_to_grptemplate:
        fmri_idx, filt_idx = 0, 1
    else:
        fmri_idx, filt_idx = 1, 2

    wf.connect([
        # smooth the final bandpassed image
        (warp, smooth, [(("normalized_files", selectindex, filt_idx),
                         "in_file")]),

        # output
        (smooth, rest_output, [("out_file", "smooth")]),
        (warp, rest_output, [
            (("normalized_files", selectindex, fmri_idx), "warped_fmri"),
            (("normalized_files", selectindex, filt_idx), "wtime_filtered"),
        ]),
    ])

    return wf
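
A minimal usage sketch for spm_warp_fmri_wf using the default
fMRI-to-anatomical registration path; every file path below is a
hypothetical placeholder for the corresponding pre-processed image.

wf = spm_warp_fmri_wf(wf_name="spm_warp_fmri")
wf.inputs.wfmri_input.in_file = "rest_stc_mc.nii.gz"          # slice-time and motion corrected fMRI
wf.inputs.wfmri_input.anat_fmri = "anat_in_fmri.nii.gz"       # anatomical image in fMRI space
wf.inputs.wfmri_input.anat_to_mni_warp = "y_anat_to_mni.nii"  # anat-to-MNI deformation field
wf.inputs.wfmri_input.brain_mask = "brain_mask_fmri.nii.gz"   # brain mask in fMRI space
wf.inputs.wfmri_input.reference_file = "anat.nii.gz"          # anatomical image in native space
wf.inputs.wfmri_input.time_filtered = "rest_filtered.nii.gz"  # bandpass time filtered fMRI
wf.inputs.wfmri_input.avg_epi = "avg_epi.nii.gz"              # average EPI volume
wf.base_dir = "/tmp/spm_warp_fmri_work"                       # hypothetical working directory
wf.run()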