Example No. 1
    def _run_interface(self, runtime):
        self._results['out_tmp'] = fname_presuffix(self.inputs.in_boldmask,
                                                   suffix='_tempmask',
                                                   newpath=runtime.cwd)
        self._results['out_mask'] = fname_presuffix(self.inputs.in_boldmask,
                                                    suffix='_refinemask',
                                                    newpath=runtime.cwd)
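        # Resample the T1w mask into BOLD space (nearest-neighbour keeps it binary)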
        b1 = ApplyTransforms()
        b1.inputs.dimension = 3
        b1.inputs.float = True
        b1.inputs.input_image = self.inputs.in_t1mask
        b1.inputs.interpolation = 'NearestNeighbor'
        b1.inputs.reference_image = self.inputs.in_boldmask
        b1.inputs.transforms = self.inputs.transforms
        b1.inputs.input_image_type = 3
        b1.inputs.output_image = self._results['out_tmp']
        b1.run()

        from nipype.interfaces.fsl import MultiImageMaths
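        # Intersect the resampled T1w mask with the BOLD mask to produce the refined mask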
        mat1 = MultiImageMaths()
        mat1.inputs.in_file = self._results['out_tmp']
        mat1.inputs.op_string = " -mul  %s -bin"
        mat1.inputs.operand_files = self.inputs.in_boldmask
        mat1.inputs.out_file = self._results['out_mask']
        mat1.run()
        self.inputs.out_mask = os.path.abspath(self._results['out_mask'])
        return runtime
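A minimal, hedged sketch of how a _run_interface like the one above is typically wrapped in a nipype SimpleInterface; the class and trait names below are hypothetical, not taken from the original source.

from nipype.interfaces.base import (BaseInterfaceInputSpec, File, InputMultiPath,
                                    SimpleInterface, TraitedSpec)


class _RefineMaskInputSpec(BaseInterfaceInputSpec):
    in_boldmask = File(exists=True, mandatory=True, desc='BOLD brain mask')
    in_t1mask = File(exists=True, mandatory=True, desc='T1w brain mask')
    transforms = InputMultiPath(File(exists=True), mandatory=True,
                                desc='T1w-to-BOLD transform(s)')
    out_mask = File(desc='refined mask path (the method above also assigns this input)')


class _RefineMaskOutputSpec(TraitedSpec):
    out_tmp = File(desc='T1w mask resampled into BOLD space')
    out_mask = File(desc='BOLD mask intersected with the resampled T1w mask')


class RefineMask(SimpleInterface):
    """Hypothetical wrapper; _run_interface would be the method shown above."""
    input_spec = _RefineMaskInputSpec
    output_spec = _RefineMaskOutputSpec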
def main(subject, session, mask, bids_folder):

    if session is None:
        target_dir = op.join(bids_folder, 'derivatives', 'masks', f'sub-{subject}', 'anat')
    else:
        target_dir = op.join(bids_folder, 'derivatives', 'masks', f'sub-{subject}', f'ses-{session}', 'anat')

        if session.endswith('2'):
            task = 'task'
        else:
            task = 'mapper'

    if not op.exists(target_dir):
        os.makedirs(target_dir)

    applier = ApplyTransforms(interpolation='Linear')
    applier.inputs.input_image = op.join(bids_folder, 'derivatives', 'masks', f'group_space-MNI152NLin2009cAsym_desc-{mask}_mask.nii.gz')
    applier.inputs.transforms = op.join(bids_folder, 'derivatives', 'fmriprep', f'sub-{subject}', 'anat',
                                        f'sub-{subject}_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5')

    if session is None:
        applier.inputs.reference_image = op.join(bids_folder, 'derivatives', 'fmriprep', f'sub-{subject}', 'anat',
                                        f'sub-{subject}_desc-preproc_T1w.nii.gz')
        applier.inputs.output_image = op.join(target_dir, f'sub-{subject}_space-T1w_desc-{mask}_mask.nii.gz')
    else:
        applier.inputs.reference_image = op.join(bids_folder, 'derivatives', 'fmriprep', f'sub-{subject}', f'ses-{session}',
                                        'func', f'sub-{subject}_ses-{session}_task-{task}_run-1_space-T1w_boldref.nii.gz')

        applier.inputs.output_image = op.join(target_dir, f'sub-{subject}_ses-{session}_space-T1w_desc-{mask}_mask.nii.gz')


    r = applier.run()
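A hedged usage sketch for main(); the subject ID, mask label, and BIDS folder below are hypothetical placeholders.

# Assumes fMRIPrep derivatives and the group-level MNI-space mask already exist
# under <bids_folder>/derivatives/.
main(subject='01', session=None, mask='insula', bids_folder='/data/my_bids')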
def alignCompartments(fixedImg, movingImgs, transform):
    """
    Given a precalculated linking transform and a fixed image (required by ANTS, not 
    sure why), align each image in the movingImgs list to the fixed image.

    Inputs:
    - fixedImg: the path to the fixed image
    - movingImgs: a list of paths to the moving images
    - transform: either a list of paths or a single path to a transform file

    Returns:
    - None

    Effects:
    - Overwrites the specified images with a more aligned version of the same images

    *** Note: this version assumes the same fixed image. Could also be implemented 
              so that the fixed image is the previous moving image.
    """
    # for each image
    for m in movingImgs:
        # set up the transform application
        at = ApplyTransforms()
        at.inputs.input_image = m
        at.inputs.reference_image = fixedImg
        at.inputs.output_image = m
        at.inputs.transforms = transform
        at.inputs.interpolation = 'NearestNeighbor'
        at.inputs.invert_transform_flags = [True]
        # run the transform application
        at.run()
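A hedged usage sketch for alignCompartments(); all paths below are hypothetical placeholders.

fixed = '/data/subj01/fixed_T1w.nii.gz'
moving = ['/data/subj01/wm.nii.gz', '/data/subj01/gm.nii.gz', '/data/subj01/csf.nii.gz']
xfm = '/data/subj01/linking_affine.mat'
# Each moving image is overwritten in place with its aligned version.
alignCompartments(fixed, moving, xfm)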
Example No. 4
def coregister(data_dir, subject, modality, output_dir):
    print(subject)
    with tempfile.TemporaryDirectory() as temp_dir:
        # register with different modality
        if modality == 'T2':
            if not os.path.exists(
                    os.path.join(output_dir, subject,
                                 'ANTS_T2_r_final.nii.gz')):
                if os.path.exists(os.path.join(data_dir, f'crossmoda_{subject}_hrT2.nii.gz')) \
                        and os.path.exists(os.path.join(output_dir, subject, 'ceT1_r_transform.matComposite.h5')):
                    print('T2 coregistration starts...')
                    reorient = fsl.utils.Reorient2Std()
                    reorient.inputs.in_file = os.path.join(
                        data_dir, f'crossmoda_{subject}_hrT2.nii.gz')
                    reorient.inputs.out_file = os.path.join(
                        temp_dir, 'T2_reorient.nii.gz')
                    reorient.run()

                    # apply the precomputed registration transform to the reoriented hrT2
                    at = ApplyTransforms()
                    at.inputs.dimension = 3
                    at.inputs.input_image_type = 3
                    at.inputs.input_image = os.path.join(
                        temp_dir, 'T2_reorient.nii.gz')
                    at.inputs.reference_image = os.path.join(
                        output_dir, subject, 'ANTS_T1_r.nii.gz')
                    at.inputs.output_image = os.path.join(
                        output_dir, subject, 'ANTS_T2_r.nii.gz')
                    at.inputs.interpolation = 'Linear'
                    at.inputs.default_value = 0
                    at.inputs.transforms = os.path.join(
                        output_dir, subject,
                        'ceT1_r_transform.matComposite.h5')
                    at.run()

                    # apply skull stripping mask
                    am = fsl.maths.ApplyMask()
                    am.inputs.in_file = os.path.join(output_dir, subject,
                                                     'ANTS_T2_r.nii.gz')
                    am.inputs.mask_file = os.path.join(
                        output_dir, subject, 'ANTS_T2_r_mask.nii.gz')
                    am.inputs.out_file = os.path.join(
                        output_dir, subject, 'ANTS_T2_r_final.nii.gz')
                    am.run()
                else:
                    pass
            else:
                pass
Example No. 5
def warp_segments(name="warp_segments"):
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import IdentityInterface
    wf = pe.Workflow(name=name)
    seg = fs_segment()
    inputspec = pe.Node(IdentityInterface(fields=[
        'subject_id', 'subjects_dir', 'warp_file', 'ants_affine',
        'warped_brain'
    ]),
                        name="inputspec")
    from nipype.interfaces.ants import ApplyTransforms
    ap = pe.MapNode(ApplyTransforms(interpolation='NearestNeighbor'),
                    name="apply_transforms",
                    iterfield=["input_image"])
    wf.connect(inputspec, "subject_id", seg, "inputspec.subject_id")
    wf.connect(inputspec, "subjects_dir", seg, "inputspec.subjects_dir")
    from nipype.interfaces.utility import Merge
    merge = pe.Node(Merge(3), name="merge")
    wf.connect(seg, "outputspec.wm", merge, 'in1')
    wf.connect(seg, "outputspec.gm", merge, "in2")
    wf.connect(seg, "outputspec.csf", merge, "in3")
    wf.connect(merge, "out", ap, "input_image")
    wf.connect(inputspec, "warped_brain", ap, "reference_image")
    merge1 = pe.Node(Merge(2), name="get_transformations")
    wf.connect(inputspec, "warp_file", merge1, "in1")
    wf.connect(inputspec, "ants_affine", merge1, "in2")
    wf.connect(merge1, "out", ap, "transforms")
    outputspec = pe.Node(IdentityInterface(fields=["out_files"]),
                         name='outputspec')
    wf.connect(ap, "output_image", outputspec, "out_files")
    return wf
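A hedged usage sketch for warp_segments(); the FreeSurfer subjects directory and transform paths are hypothetical placeholders.

wf = warp_segments()
wf.inputs.inputspec.subject_id = 'sub-01'
wf.inputs.inputspec.subjects_dir = '/data/freesurfer'
wf.inputs.inputspec.warp_file = '/data/ants/sub-01_Warp.nii.gz'
wf.inputs.inputspec.ants_affine = '/data/ants/sub-01_Affine.txt'
wf.inputs.inputspec.warped_brain = '/data/ants/sub-01_warped_brain.nii.gz'
wf.base_dir = '/tmp/work'
wf.run()  # warped WM/GM/CSF masks are collected on outputspec.out_files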
Example No. 6
 def final_apply_transform(self, in_file, out_file):
     """
     Uses ANTs registration tools through Nipype to warp the split channel RGB volumes into 
     an MRI reference space.
     """
     at = ApplyTransforms()
     at.inputs.dimension = 3
     at.inputs.input_image = in_file
     at.inputs.reference_image = self.orig_bf_loc + '/volume/aligned_to_MRI/blockface_to_MRI_alignment.nii.gz'  #self.MRI#self.MRI_path + os.path.split(self.orig_MRI)[1]
     at.inputs.transforms = self.orig_bf_loc + '/volume/aligned_to_MRI/composite_transform_blockface_to_MRI_alignmentComposite.h5'
     at.inputs.interpolation = 'BSpline'
     at.inputs.output_image = out_file
     at.inputs.invert_transform_flags = [False]
     at.inputs.interpolation_parameters = (5, )
     print(at.cmdline)
     at.run()
Example No. 7
    def _run_interface(self, runtime):
        transforms = []
        invert_transform_flags = []
        if isdefined(self.inputs.transform_1):
            transforms.append(self.inputs.transform_1)
            invert_transform_flags.append(self.inputs.invert_1)

        if isdefined(self.inputs.transform_2):
            transforms.append(self.inputs.transform_2)
            invert_transform_flags.append(self.inputs.invert_2)

        if isdefined(self.inputs.transform_3):
            transforms.append(self.inputs.transform_3)
            invert_transform_flags.append(self.inputs.invert_3)

        cmd = ApplyTransforms()
        cmd.inputs.transforms = transforms
        cmd.inputs.invert_transform_flags = invert_transform_flags
        cmd.inputs.reference_image = self.inputs.reference_image
        cmd.inputs.input_image = self.inputs.input_image
        cmd.inputs.interpolation = self.inputs.interpolation
        cmd.run()
        split = splitext(os.path.basename(self.inputs.input_image))
        self.inputs.output_image = os.path.join(
            os.getcwd(), split[0] + '_trans' + split[1])
        print(os.listdir(os.getcwd()))
        return runtime
Example No. 8
 def apply_transform_slice(self, i, col_vol, out_suf):
     """
     Applies a previously calculated transformation (self.hist_transform) to a given color channel.
     """
     # Define input variables for ANTs' antsApplyTransforms command through Nipype
     at = ApplyTransforms()
     at.inputs.dimension = 2
     at.inputs.input_image = col_vol.slices[i].path
     at.inputs.reference_image = self.BF_NIFTI.slices[i].path
     at.inputs.transforms = self.hist_transform.slices[i].path
     at.inputs.interpolation = 'BSpline'
     at.inputs.output_image = self.out_dir + out_suf + '/' + col_vol.slices[
         i].name
     at.inputs.invert_transform_flags = [False]
     at.inputs.interpolation_parameters = (5, )
     print(at.cmdline)
     at.run()
Example No. 9
def epi_mni_align(name='SpatialNormalization', ants_nthreads=6, testing=False, resolution=2):
    """
    Uses niworkflows' RobustMNINormalization (ANTs-based) to find the transform that
    maps the EPI space into the MNI152 nonlinear asymmetric (ICBM 2009c) template.

    The input epi_mean is the averaged and brain-masked EPI timeseries.

    Returns the EPI mean resampled in MNI space (to check the registration) and
    the associated "lobe" parcellation in EPI space.

    """
    from nipype.interfaces.ants import ApplyTransforms, N4BiasFieldCorrection
    from niworkflows.data import get_mni_icbm152_nlin_asym_09c as get_template
    from niworkflows.interfaces.registration import RobustMNINormalizationRPT as RobustMNINormalization
    mni_template = get_template()

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['epi_mean', 'epi_mask']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(
        fields=['epi_mni', 'epi_parc', 'report']), name='outputnode')

    epimask = pe.Node(fsl.ApplyMask(), name='EPIApplyMask')

    n4itk = pe.Node(N4BiasFieldCorrection(dimension=3), name='SharpenEPI')
    # Mask T2 template image
    brainmask = pe.Node(fsl.ApplyMask(
        in_file=op.join(mni_template, '%dmm_T2.nii.gz' % resolution),
        mask_file=op.join(mni_template, '%dmm_brainmask.nii.gz' % resolution)),
        name='MNIApplyMask')

    norm = pe.Node(RobustMNINormalization(
        num_threads=ants_nthreads, template='mni_icbm152_nlin_asym_09c',
        testing=testing, moving='EPI', generate_report=True),
                   name='EPI2MNI')

    # Warp segmentation into EPI space
    invt = pe.Node(ApplyTransforms(
        input_image=op.join(mni_template, '%dmm_parc.nii.gz' % resolution),
        dimension=3, default_value=0, interpolation='NearestNeighbor'),
                   name='ResampleSegmentation')

    workflow.connect([
        (inputnode, invt, [('epi_mean', 'reference_image')]),
        (inputnode, n4itk, [('epi_mean', 'input_image')]),
        (inputnode, epimask, [('epi_mask', 'mask_file')]),
        (n4itk, epimask, [('output_image', 'in_file')]),
        (brainmask, norm, [('out_file', 'reference_image')]),
        (epimask, norm, [('out_file', 'moving_image')]),
        (norm, invt, [
            ('reverse_transforms', 'transforms'),
            ('reverse_invert_flags', 'invert_transform_flags')]),
        (invt, outputnode, [('output_image', 'epi_parc')]),
        (norm, outputnode, [('warped_image', 'epi_mni'),
                            ('out_report', 'report')]),

    ])
    return workflow
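A hedged usage sketch for epi_mni_align(); the input file paths are hypothetical placeholders.

wf = epi_mni_align(ants_nthreads=4, resolution=2)
wf.inputs.inputnode.epi_mean = '/data/sub-01_task-rest_boldref.nii.gz'
wf.inputs.inputnode.epi_mask = '/data/sub-01_task-rest_brainmask.nii.gz'
wf.base_dir = '/tmp/work'
wf.run()  # epi_mni, epi_parc and the report are exposed on outputnode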
Example No. 10
def make_workflow_roi(region):
    """
    Benson_ROI_Names = {'V1', 'V2', 'V3', 'hV4', 'VO1', 'VO2', 'LO1', 'LO2', 'TO1', 'TO2', 'V3B', 'V3A'};

    Wang_ROI_Names = [
        'V1v', 'V1d', 'V2v', 'V2d', 'V3v', 'V3d', 'hV4', 'VO1', 'VO2', 'PHC1', 'PHC2',
        'TO2', 'TO1', 'LO2', 'LO1', 'V3B', 'V3A', 'IPS0', 'IPS1', 'IPS2', 'IPS3', 'IPS4' ,
        'IPS5', 'SPL1', 'FEF'];
    """

    w = Workflow(f'roi_{region}')

    n_in = Node(IdentityInterface(fields=[
        'atlas',
        'func_to_struct',
        'struct_to_freesurfer',
        'ref',
    ]),
                name='input')

    n_out = Node(IdentityInterface(fields=[
        'mask_file',
    ]), name='output')

    n_m = Node(Merge(2), 'merge')

    n_v = Node(MathsCommand(), region)
    n_v.inputs.out_file = 'roi.nii.gz'
    n_v.inputs.nan2zeros = True

    if region == 'V1':
        n_v.inputs.args = '-uthr 1 -bin'
    elif region == 'V2':
        n_v.inputs.args = '-thr 2 -uthr 3 -bin'
    elif region == 'V3':
        n_v.inputs.args = '-thr 4 -uthr 5 -bin'
    else:
        raise ValueError(f'Unknown region {region}. It should be V1, V2, V3')

    at = Node(ApplyTransforms(), 'applytransform')
    at.inputs.dimension = 3
    at.inputs.output_image = 'roi_func.nii.gz'
    at.inputs.interpolation = 'Linear'
    at.inputs.default_value = 0
    at.inputs.invert_transform_flags = [True, True]

    w.connect(n_in, 'atlas', n_v, 'in_file')
    w.connect(n_v, 'out_file', at, 'input_image')
    w.connect(n_in, 'ref', at, 'reference_image')
    w.connect(n_in, 'struct_to_freesurfer', n_m, 'in1')
    w.connect(n_in, 'func_to_struct', n_m, 'in2')
    w.connect(n_m, 'out', at, 'transforms')
    w.connect(at, 'output_image', n_out, 'mask_file')

    return w
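A hedged usage sketch for make_workflow_roi(); the atlas, transform, and reference paths are hypothetical placeholders.

w = make_workflow_roi('V1')
w.inputs.input.atlas = '/data/sub-01/benson_varea.nii.gz'
w.inputs.input.func_to_struct = '/data/sub-01/func2struct.h5'
w.inputs.input.struct_to_freesurfer = '/data/sub-01/struct2fs.h5'
w.inputs.input.ref = '/data/sub-01/mean_func.nii.gz'
w.base_dir = '/tmp/work'
w.run()  # the binary V1 mask in functional space appears on output.mask_file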
Example No. 11
def apply_transforms(input_filename,
                     reference_filename,
                     transforms,
                     output_filename,
                     interpolation="NearestNeighbor",
                     args="-u uchar",
                     num_threads=1,
                     invert_transform_flags=None):
    if invert_transform_flags is None:
        invert_transform_flags = [False for t in transforms]
    cmd = ApplyTransforms(transforms=transforms,
                          input_image=input_filename,
                          output_image=output_filename,
                          reference_image=reference_filename,
                          interpolation=interpolation,
                          args=args,
                          num_threads=num_threads,
                          invert_transform_flags=invert_transform_flags)
    print(cmd.cmdline)
    result = cmd.run()
    return result.outputs.output_image
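A hedged usage sketch for apply_transforms(); all paths below are hypothetical placeholders.

warped = apply_transforms(
    input_filename='/data/atlas_labels.nii.gz',
    reference_filename='/data/sub-01_T1w.nii.gz',
    transforms=['/data/sub-01_1InverseWarp.nii.gz', '/data/sub-01_0GenericAffine.mat'],
    output_filename='/data/sub-01_space-T1w_labels.nii.gz',
    invert_transform_flags=[False, True])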
def ANTs_Apply_Transform(subject_list, base_directory, reference):
    #==============================================================
    # Loading required packages
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype import SelectFiles
    from nipype.interfaces.ants import ApplyTransforms
    import os

    #====================================
    # Defining the nodes for the workflow

    # Getting the subject ID
    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_id']),
        name='infosource')
    infosource.iterables = ('subject_id', subject_list)

    # Getting the relevant diffusion-weighted data
    templates = dict(in_file='antsTMPL_{subject_id}repaired.nii.gz',
                     warp_field='antsTMPL_{subject_id}Warp.nii.gz',
                     transformation_matrix='antsTMPL_{subject_id}Affine.txt')

    selectfiles = pe.Node(SelectFiles(templates), name="selectfiles")
    selectfiles.inputs.base_directory = os.path.abspath(base_directory)

    at = pe.Node(interface=ApplyTransforms(), name='at')
    at.inputs.dimension = 3
    at.inputs.reference_image = reference
    at.inputs.interpolation = 'Linear'
    at.inputs.default_value = 0
    at.inputs.invert_transform_flags = False

    #====================================
    # Setting up the workflow
    apply_ants_transform = pe.Workflow(name='apply_ants_transform')

    apply_ants_transform.connect(infosource, 'subject_id', selectfiles,
                                 'subject_id')
    apply_ants_transform.connect(selectfiles, 'in_file', at, 'input_image')
    apply_ants_transform.connect(selectfiles, 'warp_field', at, 'transforms')

    #====================================
    # Running the workflow
    apply_ants_transform.base_dir = os.path.abspath(base_directory)
    apply_ants_transform.write_graph()
    apply_ants_transform.run('PBSGraph')
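A hedged usage sketch for ANTs_Apply_Transform(); the subject IDs, base directory, and reference image are hypothetical, and the hard-coded 'PBSGraph' plugin assumes a PBS cluster is available.

ANTs_Apply_Transform(subject_list=['subj01', 'subj02'],
                     base_directory='/data/ants_template_build',
                     reference='/data/template/template_brain.nii.gz')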
Example No. 13
def applyTransformNode(name, transform, **kwargs):
    """ input fields are kwargs e.g. 'interpolation', 'invert_transform_flags', etc. """
    kwargs.setdefault('interpolation', 'Linear')
    if transform == 'fmri2nac':
        kwargs['invert_transform_flags'] = [False, False]
    elif transform == 't12fmri':
        kwargs['invert_transform_flags'] = [True]
    elif transform == 'nac2fmri':
        kwargs['invert_transform_flags'] = [True, False]
    else:
        pass
    # NOTE: antsApplyTransforms takes transforms in REVERSE order!!!
    node = pipe.Node(interface=ApplyTransforms(),
                     iterfield=['input_image'],
                     name=name)
    for k, v in kwargs.items():
        setattr(node.inputs, k, v)
    return node
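A hedged usage sketch for applyTransformNode(); the node name, reference image, and transform file are hypothetical, and pipe is assumed to be nipype.pipeline.engine as in the original module.

at_node = applyTransformNode('warp_t1_to_fmri', 't12fmri',
                             interpolation='NearestNeighbor')
at_node.inputs.reference_image = '/data/sub-01/fmri_ref.nii.gz'
at_node.inputs.transforms = ['/data/sub-01/t1_to_fmri_0GenericAffine.mat']
# input_image is typically supplied through a workflow connection.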
Example No. 14
def builder(subject_id,
            subId,
            project_dir,
            data_dir,
            output_dir,
            output_final_dir,
            output_interm_dir,
            layout,
            anat=None,
            funcs=None,
            fmaps=None,
            task_name='',
            session=None,
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False,
            write_logs=True):
    """
    Core function that returns a workflow. See wfmaker for more details.

    Args:
        subject_id: name of subject folder for final outputted sub-folder name
        subId: abbreviate name of subject for intermediate outputted sub-folder name
        project_dir: full path to root of project
        data_dir: full path to raw data files
        output_dir: upper level output dir (others will be nested within this)
        output_final_dir: final preprocessed sub-dir name
        output_interm_dir: intermediate preprocessing sub-dir name
        layout: BIDS layout instance
    """

    ##################
    ### PATH SETUP ###
    ##################
    if session is not None:
        session = int(session)
        if session < 10:
            session = '0' + str(session)
        else:
            session = str(session)

    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update the nipype global config because workflow.config[] = ... doesn't seem to work
    # We can't store a nipype config/rc file in the container anyway, so set options globally before importing and setting up the workflow, as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file

    # Create subject's intermediate directory before configuring nipype and the workflow because that's where we'll save log files in addition to intermediate files
    if not os.path.exists(os.path.join(output_interm_dir, subId, 'logs')):
        os.makedirs(os.path.join(output_interm_dir, subId, 'logs'))
    log_dir = os.path.join(output_interm_dir, subId, 'logs')
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': write_logs
        },
        'execution': {
            'crashdump_dir': log_dir
        }
    })
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################

    # Turn the functional file list into an iterable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################

    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []

        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])

            # Grab measurements (for some reason pyBIDS doesn't grab dcm_meta... fields from the side-car json file, and json.load doesn't either; so instead just read the header using nibabel to determine the number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])

            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  infields=['in1', 'in2'],
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, ), (0.1, )]
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # Old settings [[3,2,1,0]]*3
    # shrink_factors
    # The coarseness with which to do registration
    # Old settings [[8,4,2,1]] * 3
    # >= 8 may result in problems where big chunks of cortex with little fine-grained spatial structure get moved to other parts of cortex
    # Other settings
    # transform_parameters:
    # how much regularization to do for fitting that transformation
    # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    # Use image intensity distribution to guide registration
    # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
    # convergence_threshold
    # threshold for optimizer
    # convergence_window_size
    # how many samples should the optimizer average to compute the threshold?
    # sampling_strategy
    # what sampling strategy ANTs should use when evaluating the similarity metric. Regular here refers to approximately random sampling around the center of the image mass

    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[8, 4, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True

    # NEW SETTINGS (need to be adjusted; specifically shink_factors and smoothing_sigmas need to be the same length)
    # normalization = Node(Registration(), name='normalization')
    # normalization.inputs.float = False
    # normalization.inputs.collapse_output_transforms = True
    # normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    # normalization.inputs.convergence_window_size = [10]
    # normalization.inputs.dimension = 3
    # normalization.inputs.fixed_image = MNItemplate
    # normalization.inputs.initial_moving_transform_com = True
    # normalization.inputs.metric = ['MI', 'MI', 'CC']
    # normalization.inputs.metric_weight = [1.0]*3
    # normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
    #                                              [1000, 500, 250, 100],
    #                                              [100, 70, 50, 20]]
    # normalization.inputs.num_threads = ants_threads
    # normalization.inputs.output_transform_prefix = 'anat2template'
    # normalization.inputs.output_inverse_warped_image = True
    # normalization.inputs.output_warped_image = True
    # normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    # normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    # normalization.inputs.sampling_strategy = ['Regular',
    #                                           'Regular',
    #                                           'None']
    # normalization.inputs.shrink_factors = [[4, 3, 2, 1]]*3
    # normalization.inputs.sigma_units = ['vox']*3
    # normalization.inputs.smoothing_sigmas = [[2, 1], [2, 1], [3, 2, 1, 0]]
    # normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    # normalization.inputs.transform_parameters = [(0.1,),
    #                                              (0.1,),
    #                                              (0.1, 3.0, 0.0)]
    # normalization.inputs.use_histogram_matching = True
    # normalization.inputs.winsorize_lower_quantile = 0.005
    # normalization.inputs.winsorize_upper_quantile = 0.995
    # normalization.inputs.write_composite_transform = True

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2),
                            iterfield=['in2'],
                            name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(),
                            iterfield=['input_image'],
                            name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = 12
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = 12
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    if session:
        datasink.inputs.base_directory = os.path.join(output_final_dir,
                                                      subject_id)
        datasink.inputs.container = 'ses-' + session
    else:
        datasink.inputs.base_directory = output_final_dir
        datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    if session:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + [
            'ses-' + session
        ] + ['func']
    else:
        prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    # If we have sessions provide the full path to the subject's intermediate directory
    # and only rely on workflow init to create the session container *within* that directory
    # Otherwise just point to the intermediate directory and let the workflow init create the subject container within the intermediate directory
    if session:
        workflow = Workflow(name='ses_' + session)
        workflow.base_dir = os.path.join(output_interm_dir, subId)
    else:
        workflow = Workflow(name=subId)
        workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup, [('encoding_file',
                                                          'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################

    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################

    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])

        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    # realignment parms -> save
    ##########################################

    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')]),
        (realign_fsl, datasink, [('par_file', 'functional.@motionparams')])
    ])

    if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
        workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'),
                             format='png')

    print(f"Creating workflow for subject: {subject_id}")
    if ants_threads != 8:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
Example No. 15
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline."""
        import os

        import nipype.interfaces.fsl as fsl
        import nipype.interfaces.mrtrix as mrtrix
        import nipype.interfaces.utility as nutil
        import nipype.pipeline.engine as npe
        from nipype.interfaces.ants import ApplyTransforms, RegistrationSynQuick
        from nipype.interfaces.mrtrix.preprocess import DWI2Tensor

        from clinica.lib.nipype.interfaces.mrtrix3.utils import TensorMetrics
        from clinica.utils.check_dependency import check_environment_variable

        from .dwi_dti_utils import (
            extract_bids_identifier_from_caps_filename,
            get_ants_transforms,
            get_caps_filenames,
            print_begin_pipeline,
            print_end_pipeline,
            statistics_on_atlases,
        )

        # Nodes creation
        # ==============
        get_bids_identifier = npe.Node(
            interface=nutil.Function(
                input_names=["caps_dwi_filename"],
                output_names=["bids_identifier"],
                function=extract_bids_identifier_from_caps_filename,
            ),
            name="0-Get_BIDS_Identifier",
        )

        get_caps_filenames = npe.Node(
            interface=nutil.Function(
                input_names=["caps_dwi_filename"],
                output_names=[
                    "bids_source",
                    "out_dti",
                    "out_fa",
                    "out_md",
                    "out_ad",
                    "out_rd",
                    "out_evec",
                ],
                function=get_caps_filenames,
            ),
            name="0-CAPS_Filenames",
        )

        convert_gradients = npe.Node(interface=mrtrix.FSL2MRTrix(),
                                     name="0-Convert_FSL_Gradient")

        dwi_to_dti = npe.Node(interface=DWI2Tensor(), name="1-Compute_DTI")

        dti_to_metrics = npe.Node(interface=TensorMetrics(),
                                  name="2-DTI-based_Metrics")

        register_fa = npe.Node(interface=RegistrationSynQuick(),
                               name="3a-Register_FA")
        fsl_dir = check_environment_variable("FSLDIR", "FSL")
        fa_map = os.path.join(fsl_dir, "data", "atlases", "JHU",
                              "JHU-ICBM-FA-1mm.nii.gz")
        register_fa.inputs.fixed_image = fa_map

        ants_transforms = npe.Node(
            interface=nutil.Function(
                input_names=[
                    "in_affine_transformation", "in_bspline_transformation"
                ],
                output_names=["transforms"],
                function=get_ants_transforms,
            ),
            name="combine_ants_transforms",
        )

        apply_ants_registration = npe.Node(interface=ApplyTransforms(),
                                           name="apply_ants_registration")
        apply_ants_registration.inputs.dimension = 3
        apply_ants_registration.inputs.input_image_type = 0
        apply_ants_registration.inputs.interpolation = "Linear"
        apply_ants_registration.inputs.reference_image = fa_map

        apply_ants_registration_for_md = apply_ants_registration.clone(
            "3b-Apply_ANTs_Registration_MD")
        apply_ants_registration_for_ad = apply_ants_registration.clone(
            "3b-Apply_ANTs_Registration_AD")
        apply_ants_registration_for_rd = apply_ants_registration.clone(
            "3b-Apply_ANTs_Registration_RD")

        thres_map = npe.Node(fsl.Threshold(thresh=0.0),
                             iterfield=["in_file"],
                             name="RemoveNegative")
        thres_norm_fa = thres_map.clone("3c-RemoveNegative_FA")
        thres_norm_md = thres_map.clone("3c-RemoveNegative_MD")
        thres_norm_ad = thres_map.clone("3c-RemoveNegative_AD")
        thres_norm_rd = thres_map.clone("3c-RemoveNegative_RD")

        scalar_analysis = npe.Node(
            interface=nutil.Function(
                input_names=["in_registered_map", "name_map", "prefix_file"],
                output_names=["atlas_statistics_list"],
                function=statistics_on_atlases,
            ),
            name="4-Scalar_Analysis",
        )
        scalar_analysis_fa = scalar_analysis.clone("4-Scalar_Analysis_FA")
        scalar_analysis_fa.inputs.name_map = "FA"
        scalar_analysis_md = scalar_analysis.clone("4-Scalar_Analysis_MD")
        scalar_analysis_md.inputs.name_map = "MD"
        scalar_analysis_ad = scalar_analysis.clone("4-Scalar_Analysis_AD")
        scalar_analysis_ad.inputs.name_map = "AD"
        scalar_analysis_rd = scalar_analysis.clone("4-Scalar_Analysis_RD")
        scalar_analysis_rd.inputs.name_map = "RD"

        thres_map = npe.Node(fsl.Threshold(thresh=0.0),
                             iterfield=["in_file"],
                             name="5-Remove_Negative")
        thres_fa = thres_map.clone("5-Remove_Negative_FA")
        thres_md = thres_map.clone("5-Remove_Negative_MD")
        thres_ad = thres_map.clone("5-Remove_Negative_AD")
        thres_rd = thres_map.clone("5-Remove_Negative_RD")
        thres_decfa = thres_map.clone("5-Remove_Negative_DECFA")

        print_begin_message = npe.Node(
            interface=nutil.Function(input_names=["in_bids_or_caps_file"],
                                     function=print_begin_pipeline),
            name="Write-Begin_Message",
        )

        print_end_message = npe.Node(
            interface=nutil.Function(
                input_names=[
                    "in_bids_or_caps_file", "final_file_1", "final_file_2"
                ],
                function=print_end_pipeline,
            ),
            name="Write-End_Message",
        )

        # Connection
        # ==========
        # fmt: off
        self.connect([
            (self.input_node, get_caps_filenames, [("preproc_dwi",
                                                    "caps_dwi_filename")]),
            # Print begin message
            (self.input_node, print_begin_message, [("preproc_dwi",
                                                     "in_bids_or_caps_file")]),
            # Get BIDS/CAPS identifier from filename
            (self.input_node, get_bids_identifier, [("preproc_dwi",
                                                     "caps_dwi_filename")]),
            # Convert FSL gradient files (bval/bvec) to MRtrix format
            (self.input_node, convert_gradients,
             [("preproc_bval", "bval_file"), ("preproc_bvec", "bvec_file")]),
            # Computation of the DTI model
            (self.input_node, dwi_to_dti, [("b0_mask", "mask"),
                                           ("preproc_dwi", "in_file")]),
            (convert_gradients, dwi_to_dti, [("encoding_file", "encoding_file")
                                             ]),
            (get_caps_filenames, dwi_to_dti, [("out_dti", "out_filename")]),
            # Computation of the different metrics from the DTI
            (get_caps_filenames, dti_to_metrics, [("out_fa", "out_fa")]),
            (get_caps_filenames, dti_to_metrics, [("out_md", "out_adc")]),
            (get_caps_filenames, dti_to_metrics, [("out_ad", "out_ad")]),
            (get_caps_filenames, dti_to_metrics, [("out_rd", "out_rd")]),
            (get_caps_filenames, dti_to_metrics, [("out_evec", "out_evec")]),
            (self.input_node, dti_to_metrics, [("b0_mask", "in_mask")]),
            (dwi_to_dti, dti_to_metrics, [("tensor", "in_file")]),
            # Registration of FA-map onto the atlas:
            (dti_to_metrics, register_fa, [("out_fa", "moving_image")]),
            # Apply deformation field on MD, AD & RD:
            (register_fa, ants_transforms, [("out_matrix",
                                             "in_affine_transformation")]),
            (register_fa, ants_transforms, [("forward_warp_field",
                                             "in_bspline_transformation")]),
            (dti_to_metrics, apply_ants_registration_for_md,
             [("out_adc", "input_image")]),
            (ants_transforms, apply_ants_registration_for_md,
             [("transforms", "transforms")]),
            (dti_to_metrics, apply_ants_registration_for_ad,
             [("out_ad", "input_image")]),
            (ants_transforms, apply_ants_registration_for_ad,
             [("transforms", "transforms")]),
            (dti_to_metrics, apply_ants_registration_for_rd,
             [("out_rd", "input_image")]),
            (ants_transforms, apply_ants_registration_for_rd,
             [("transforms", "transforms")]),
            # Remove negative values from the registered DTI maps:
            (register_fa, thres_norm_fa, [("warped_image", "in_file")]),
            (apply_ants_registration_for_md, thres_norm_md, [("output_image",
                                                              "in_file")]),
            (apply_ants_registration_for_rd, thres_norm_rd, [("output_image",
                                                              "in_file")]),
            (apply_ants_registration_for_ad, thres_norm_ad, [("output_image",
                                                              "in_file")]),
            # Generate regional TSV files
            (get_bids_identifier, scalar_analysis_fa, [("bids_identifier",
                                                        "prefix_file")]),
            (thres_norm_fa, scalar_analysis_fa, [("out_file",
                                                  "in_registered_map")]),
            (get_bids_identifier, scalar_analysis_md, [("bids_identifier",
                                                        "prefix_file")]),
            (thres_norm_md, scalar_analysis_md, [("out_file",
                                                  "in_registered_map")]),
            (get_bids_identifier, scalar_analysis_ad, [("bids_identifier",
                                                        "prefix_file")]),
            (thres_norm_ad, scalar_analysis_ad, [("out_file",
                                                  "in_registered_map")]),
            (get_bids_identifier, scalar_analysis_rd, [("bids_identifier",
                                                        "prefix_file")]),
            (thres_norm_rd, scalar_analysis_rd, [("out_file",
                                                  "in_registered_map")]),
            # Remove negative values from the native-space DTI maps:
            (get_caps_filenames, thres_fa, [("out_fa", "out_file")]),
            (dti_to_metrics, thres_fa, [("out_fa", "in_file")]),
            (get_caps_filenames, thres_md, [("out_md", "out_file")]),
            (dti_to_metrics, thres_md, [("out_adc", "in_file")]),
            (get_caps_filenames, thres_ad, [("out_ad", "out_file")]),
            (dti_to_metrics, thres_ad, [("out_ad", "in_file")]),
            (get_caps_filenames, thres_rd, [("out_rd", "out_file")]),
            (dti_to_metrics, thres_rd, [("out_rd", "in_file")]),
            (get_caps_filenames, thres_decfa, [("out_evec", "out_file")]),
            (dti_to_metrics, thres_decfa, [("out_evec", "in_file")]),
            # Outputnode
            (dwi_to_dti, self.output_node, [("tensor", "dti")]),
            (thres_fa, self.output_node, [("out_file", "fa")]),
            (thres_md, self.output_node, [("out_file", "md")]),
            (thres_ad, self.output_node, [("out_file", "ad")]),
            (thres_rd, self.output_node, [("out_file", "rd")]),
            (thres_decfa, self.output_node, [("out_file", "decfa")]),
            (register_fa, self.output_node, [("out_matrix", "affine_matrix")]),
            (register_fa, self.output_node, [("forward_warp_field",
                                              "b_spline_transform")]),
            (thres_norm_fa, self.output_node, [("out_file", "registered_fa")]),
            (thres_norm_md, self.output_node, [("out_file", "registered_md")]),
            (thres_norm_ad, self.output_node, [("out_file", "registered_ad")]),
            (thres_norm_rd, self.output_node, [("out_file", "registered_rd")]),
            (scalar_analysis_fa, self.output_node, [("atlas_statistics_list",
                                                     "statistics_fa")]),
            (scalar_analysis_md, self.output_node, [("atlas_statistics_list",
                                                     "statistics_md")]),
            (scalar_analysis_ad, self.output_node, [("atlas_statistics_list",
                                                     "statistics_ad")]),
            (scalar_analysis_rd, self.output_node, [("atlas_statistics_list",
                                                     "statistics_rd")]),
            # Print end message
            (self.input_node, print_end_message, [("preproc_dwi",
                                                   "in_bids_or_caps_file")]),
            (thres_rd, print_end_message, [("out_file", "final_file_1")]),
            (scalar_analysis_rd, print_end_message, [("atlas_statistics_list",
                                                      "final_file_2")]),
        ])
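The connection list above follows nipype's (source_node, destination_node, [(output_field, input_field)]) convention. A minimal, self-contained sketch of the same pattern, using toy nodes that are not part of the original pipeline:

import nipype.pipeline.engine as npe
import nipype.interfaces.utility as nutil

def _add_one(x):
    return x + 1

toy_wf = npe.Workflow(name="toy_connect_demo")
source = npe.Node(nutil.IdentityInterface(fields=["x"]), name="source")
source.inputs.x = 1
adder = npe.Node(
    nutil.Function(input_names=["x"], output_names=["y"], function=_add_one),
    name="adder",
)
# Same (source, destination, [(output_field, input_field)]) tuples as above.
toy_wf.connect([(source, adder, [("x", "x")])])
# toy_wf.run()  # would execute the two-node pipeline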
Example No. 16
def apply_xforms(in_file, out_file, xforms, temp_dir):
    """
    Build a mini-workflow to split a 4D series into 3D volumes, apply xforms to each volume, and merge the results.

    Split:
    fslsplit /scratch/tsalo006/work/fmriprep_wf/single_subject_ltd_wf/\
        func_preproc_task_checkerboard_echo_1_wf/bold_stc_wf/\
        _bold_file_..scratch..tsalo006..ltd_dset..sub-ltd..func..\
        sub-ltd_task-checkerboard_echo-1_bold.nii.gz/copy_xform/\
        sub-ltd_task-checkerboard_echo-1_bold_tshift_xform.nii.gz -t

    Apply xforms:
    antsApplyTransforms --default-value 0 --float 1 \
        --input /scratch/tsalo006/work/fmriprep_wf/single_subject_ltd_wf/\
        func_preproc_task_checkerboard_echo_1_wf/split_opt_comb/\
        vol0000.nii.gz \
        --interpolation LanczosWindowedSinc \
        --output /scratch/tsalo006/work/fmriprep_wf/single_subject_ltd_wf/\
        func_preproc_task_checkerboard_echo_1_wf/bold_mni_trans_wf/\
        bold_to_mni_transform/vol0000_xform-00000.nii.gz \
        --reference-image /scratch/tsalo006/work/fmriprep_wf/\
        single_subject_ltd_wf/func_preproc_task_checkerboard_echo_1_wf/\
        bold_mni_trans_wf/gen_ref/\
        tpl-MNI152NLin2009cAsym_res-01_T1w_reference.nii.gz \
        --transform /scratch/tsalo006/work/fmriprep_wf/single_subject_ltd_wf/\
        anat_preproc_wf/t1_2_mni/ants_t1_to_mniComposite.h5 \
        --transform /scratch/tsalo006/work/fmriprep_wf/single_subject_ltd_wf/\
        func_preproc_task_checkerboard_echo_1_wf/bold_reg_wf/bbreg_wf/\
        fsl2itk_fwd/affine.txt \
        --transform /scratch/tsalo006/work/fmriprep_wf/single_subject_ltd_wf/\
        func_preproc_task_checkerboard_echo_1_wf/bold_mni_trans_wf/\
        bold_to_mni_transform/tmp-h62vznik/mat2itk_pos-002_xfm-00000.txt

    Merge:
    nilearn
    """
    assert op.isfile(in_file)
    assert not op.isdir(temp_dir)
    assert all([op.isfile(xform) for xform in xforms])

    # Split 4D input file into 3D temporary files
    temp_files = split_4d(in_file, temp_dir)

    # Apply transforms
    ref_file = in_file.replace(
        'native_desc-partialPreproc_bold',
        'MNI152NLin2009cAsym_desc-preproc_bold')
    echo_regex = re.compile('_echo-[0-9]+_')
    ref_file = re.sub(echo_regex, '_', ref_file)
    assert op.isfile(ref_file)

    print('Applying transforms...')
    at = ApplyTransforms(
        default_value=0, float=True, interpolation='LanczosWindowedSinc',
        transforms=xforms,
        reference_image=ref_file)

    temp_xformed_files = []
    for f in temp_files:
        temp_xformed_file = op.join(temp_dir, 'xformed_{0}'.format(op.basename(f)))
        at.inputs.input_image = f
        at.inputs.output_image = temp_xformed_file
        at.run()
        temp_xformed_files.append(temp_xformed_file)

    # Merge transformed 3D files into output 4D file
    img_4d = image.concat_imgs(temp_xformed_files)
    img_4d.to_filename(out_file)

    # Remove temp_dir
    print('Cleaning up temporary directory...')
    rmtree(temp_dir)
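A hypothetical usage sketch for apply_xforms; the file names below are placeholders that follow the fMRIPrep-style naming the function expects and do not come from the original run:

in_file = "sub-01_task-rest_echo-1_space-native_desc-partialPreproc_bold.nii.gz"
out_file = "sub-01_task-rest_space-MNI152NLin2009cAsym_desc-optcom_bold.nii.gz"
xforms = [
    "ants_t1_to_mniComposite.h5",  # T1w -> MNI composite transform (placeholder)
    "affine.txt",                  # BOLD -> T1w ITK affine (placeholder)
]
apply_xforms(in_file, out_file, xforms, temp_dir="tmp_xforms")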
Example No. 17
def preproc_workflow(input_dir,
                     output_dir,
                     subject_list,
                     ses_list,
                     anat_file,
                     func_file,
                     scan_size=477,
                     bet_frac=0.37):
    """
    The preprocessing workflow used in the preparation of the psilocybin vs escitalopram rsFMRI scans.
    Workflows and notes are defined throughout. Inputs are designed to be general and masks/default MNI space is provided

    :param input_dir: The input file directory containing all scans in BIDS format
    :param output_dir: The output file directory
    :param subject_list: a list of subject numbers
    :param ses_list: a list of scan numbers (session numbers)
    :param anat_file: The format of the anatomical scan within the input directory
    :param func_file: The format of the functional scan within the input directory
    :param scan_size: The length of the scan by number of images, most 10 minutes scans are around 400-500 depending
    upon scanner defaults and parameters - confirm by looking at your data
    :param bet_frac: brain extraction fractional intensity threshold
    :return: the preprocessing workflow
    """
    preproc = Workflow(name='preproc')
    preproc.base_dir = output_dir

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'ses']),
                      name="infosource")

    infosource.iterables = [('subject_id', subject_list), ('ses', ses_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)
    templates = {
        'anat': anat_file,
        'func': func_file
    }  # define the template of each file input

    selectfiles = Node(SelectFiles(templates, base_directory=input_dir),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=output_dir, container=output_dir),
                    name="datasink")

    preproc.connect([(infosource, selectfiles, [('subject_id', 'subject_id'),
                                                ('ses', 'ses')])])
    ''' 
    This is your functional processing workflow, used to trim scans, despike the signal, slice-time correct, 
    and motion correct your data 
    '''

    fproc = Workflow(name='fproc')  # the functional processing workflow

    # ExtractROI - skip dummy scans at the beginning of the recording by removing the first three
    trim = Node(ExtractROI(t_min=3, t_size=scan_size, output_type='NIFTI_GZ'),
                name="trim")

    # 3dDespike - despike
    despike = Node(Despike(outputtype='NIFTI_GZ', args='-NEW'), name="despike")
    fproc.connect([(trim, despike, [('roi_file', 'in_file')])])
    preproc.connect([(selectfiles, fproc, [('func', 'trim.in_file')])])

    # 3dTshift - slice time correction
    slicetime = Node(TShift(outputtype='NIFTI_GZ', tpattern='alt+z2'),
                     name="slicetime")
    fproc.connect([(despike, slicetime, [('out_file', 'in_file')])])

    # 3dVolreg - correct motion and output 1d matrix
    moco = Node(Volreg(outputtype='NIFTI_GZ',
                       interp='Fourier',
                       zpad=4,
                       args='-twopass'),
                name="moco")
    fproc.connect([(slicetime, moco, [('out_file', 'in_file')])])

    moco_bpfdt = Node(
        MOCObpfdt(), name='moco_bpfdt'
    )  # use the matlab function to correct the motion regressor
    fproc.connect([(moco, moco_bpfdt, [('oned_file', 'in_file')])])
    '''
    This is the co-registration workflow using FSL and ANTs
    '''

    coreg = Workflow(name='coreg')

    # BET - structural data brain extraction
    bet_anat = Node(BET(output_type='NIFTI_GZ', frac=bet_frac, robust=True),
                    name="bet_anat")

    # FSL segmentation process to get WM map
    seg = Node(FAST(bias_iters=6,
                    img_type=1,
                    output_biascorrected=True,
                    output_type='NIFTI_GZ'),
               name="seg")
    coreg.connect([(bet_anat, seg, [('out_file', 'in_files')])])

    # functional to structural registration
    mean = Node(MCFLIRT(mean_vol=True, output_type='NIFTI_GZ'), name="mean")

    # BBR using linear methods for initial transform fit
    func2struc = Node(FLIRT(cost='bbr', dof=6, output_type='NIFTI_GZ'),
                      name='func2struc')
    coreg.connect([(seg, func2struc, [('restored_image', 'reference')])])
    coreg.connect([(mean, func2struc, [('mean_img', 'in_file')])])
    coreg.connect([(seg, func2struc, [(('tissue_class_files', pickindex, 2),
                                       'wm_seg')])])

    # convert the FSL linear transform into a C3d format for AFNI
    f2s_c3d = Node(C3dAffineTool(itk_transform=True, fsl2ras=True),
                   name='f2s_c3d')
    coreg.connect([(func2struc, f2s_c3d, [('out_matrix_file', 'transform_file')
                                          ])])
    coreg.connect([(mean, f2s_c3d, [('mean_img', 'source_file')])])
    coreg.connect([(seg, f2s_c3d, [('restored_image', 'reference_file')])])

    # Functional to structural registration via ANTs non-linear registration
    reg = Node(Registration(
        fixed_image='default_images/MNI152_T1_2mm_brain.nii.gz',
        transforms=['Affine', 'SyN'],
        transform_parameters=[(0.1, ), (0.1, 3.0, 0.0)],
        number_of_iterations=[[1500, 1000, 1000], [100, 70, 50, 20]],
        dimension=3,
        write_composite_transform=True,
        collapse_output_transforms=True,
        metric=['MI'] + ['CC'],
        metric_weight=[1] * 2,
        radius_or_number_of_bins=[32] + [4],
        convergence_threshold=[1.e-8, 1.e-9],
        convergence_window_size=[20] + [10],
        smoothing_sigmas=[[2, 1, 0], [4, 2, 1, 0]],
        sigma_units=['vox'] * 2,
        shrink_factors=[[4, 2, 1], [6, 4, 2, 1]],
        use_histogram_matching=[False] + [True],
        use_estimate_learning_rate_once=[True, True],
        output_warped_image=True),
               name='reg')

    coreg.connect([(seg, reg, [('restored_image', 'moving_image')])
                   ])  # connect segmentation node to registration node

    merge1 = Node(niu.Merge(2), iterfield=['in2'],
                  name='merge1')  # merge the linear and nonlinear transforms
    coreg.connect([(f2s_c3d, merge1, [('itk_transform', 'in2')])])
    coreg.connect([(reg, merge1, [('composite_transform', 'in1')])])

    # warp the functional images into MNI space using the transforms from FLIRT and SYN
    warp = Node(ApplyTransforms(
        reference_image='default_images/MNI152_T1_2mm_brain.nii.gz',
        input_image_type=3),
                name='warp')
    coreg.connect([(moco, warp, [('out_file', 'input_image')])])
    coreg.connect([(merge1, warp, [('out', 'transforms')])])

    preproc.connect([(selectfiles, coreg, [('anat', 'bet_anat.in_file')])])
    preproc.connect([(fproc, coreg, [('moco.out_file', 'mean.in_file')])])
    '''
    Scrubbing workflow - find the motion outliers, bandpass filter, re-mean the data after bpf
    '''

    scrub = Workflow(name='scrub')

    # Generate the Scrubbing Regressor
    scrub_metrics = Node(MotionOutliers(dummy=4,
                                        out_file='FD_outliers.1D',
                                        metric='fd',
                                        threshold=0.4),
                         name="scrub_metrics")

    # regress out timepoints
    scrub_frames = Node(Bandpass(highpass=0,
                                 lowpass=99999,
                                 outputtype='NIFTI_GZ'),
                        name='scrub_frames')
    scrub.connect([(scrub_metrics, scrub_frames, [('out_file',
                                                   'orthogonalize_file')])])
    preproc.connect([(coreg, scrub, [('warp.output_image',
                                      'scrub_frames.in_file')])])
    preproc.connect([(selectfiles, scrub, [('func', 'scrub_metrics.in_file')])
                     ])

    # mean image for remeaning after bandpass
    premean = Node(TStat(args='-mean', outputtype='NIFTI_GZ'), name='premean')
    # remean the image
    remean2 = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean2')
    scrub.connect([(scrub_frames, remean2, [('out_file', 'in_file_a')])])
    scrub.connect([(premean, remean2, [('out_file', 'in_file_b')])])
    preproc.connect([(coreg, scrub, [('warp.output_image', 'premean.in_file')])
                     ])
    '''
    Regressors for final cleaning steps
    '''

    regressors = Workflow(name='regressors')

    # Using registered structural image to create the masks for both WM and CSF
    regbet = Node(BET(robust=True, frac=0.37, output_type='NIFTI_GZ'),
                  name='regbet')

    regseg = Node(FAST(img_type=1,
                       output_type='NIFTI_GZ',
                       no_pve=True,
                       no_bias=True,
                       segments=True),
                  name='regseg')
    regressors.connect([(regbet, regseg, [('out_file', 'in_files')])])
    preproc.connect([(coreg, regressors, [('reg.warped_image',
                                           'regbet.in_file')])])
    '''
    Create a cerebrospinal fluid (CSF) regressor 
    '''

    # subtract subcortical GM from the CSF mask
    subcortgm = Node(BinaryMaths(
        operation='sub',
        operand_file='default_images/subcortical_gm_mask_bin.nii.gz',
        output_type='NIFTI_GZ',
        args='-bin'),
                     name='subcortgm')
    regressors.connect([(regseg, subcortgm, [(('tissue_class_files', pickindex,
                                               0), 'in_file')])])

    # Fill the mask holes

    fillcsf = Node(MaskTool(fill_holes=True, outputtype='NIFTI_GZ'),
                   name='fillcsf')
    regressors.connect([(subcortgm, fillcsf, [('out_file', 'in_file')])])

    # Erode the mask

    erocsf = Node(MaskTool(outputtype='NIFTI_GZ', dilate_inputs='-1'),
                  name='erocsf')
    regressors.connect([(fillcsf, erocsf, [('out_file', 'in_file')])])

    # Take mean csf signal from functional image
    meancsf = Node(ImageMeants(output_type='NIFTI_GZ'), name='meancsf')
    regressors.connect([(erocsf, meancsf, [('out_file', 'mask')])])
    preproc.connect([(coreg, regressors, [('warp.output_image',
                                           'meancsf.in_file')])])

    bpf_dt_csf = Node(CSFbpfdt(), name='bpf_dt_csf')
    regressors.connect([(meancsf, bpf_dt_csf, [('out_file', 'in_file')])])
    '''
    Creates a local white matter regressor
    '''

    # subtract subcortical gm
    subcortgm2 = Node(BinaryMaths(
        operation='sub',
        operand_file='default_images/subcortical_gm_mask_bin.nii.gz',
        output_type='NIFTI_GZ',
        args='-bin'),
                      name='subcortgm2')
    regressors.connect([(regseg, subcortgm2, [(('tissue_class_files',
                                                pickindex, 2), 'in_file')])])

    # fill mask
    fillwm = Node(MaskTool(fill_holes=True, outputtype='NIFTI_GZ'),
                  name='fillwm')
    regressors.connect([(subcortgm2, fillwm, [('out_file', 'in_file')])])

    # erode the mask
    erowm = Node(MaskTool(outputtype='NIFTI_GZ', dilate_inputs='-1'),
                 name='erowm')
    regressors.connect([(fillwm, erowm, [('out_file', 'in_file')])])

    # generate local wm
    localwm = Node(Localstat(neighborhood=('SPHERE', 25),
                             stat='mean',
                             nonmask=True,
                             outputtype='NIFTI_GZ'),
                   name='localwm')
    regressors.connect([(erowm, localwm, [('out_file', 'mask_file')])])
    preproc.connect([(coreg, regressors, [('warp.output_image',
                                           'localwm.in_file')])])

    # bandpass filter the local wm regressor
    localwm_bpf = Node(Fourier(highpass=0.01,
                               lowpass=0.08,
                               args='-retrend',
                               outputtype='NIFTI_GZ'),
                       name='localwm_bpf')
    regressors.connect([(localwm, localwm_bpf, [('out_file', 'in_file')])])

    # detrend the local wm regressor

    localwm_bpf_dt = Node(Detrend(args='-polort 2', outputtype='NIFTI_GZ'),
                          name='localwm_bpf_dt')
    regressors.connect([(localwm_bpf, localwm_bpf_dt, [('out_file', 'in_file')
                                                       ])])
    '''
    Clean up your functional image with the regressors you have created above
    '''

    # create a mask for blurring, filtering, and detrending

    clean = Workflow(name='clean')

    mask = Node(BET(mask=True, functional=True), name='mask')

    mean_mask = Node(MCFLIRT(mean_vol=True, output_type='NIFTI_GZ'),
                     name="mean_mask")

    dilf = Node(DilateImage(operation='max', output_type='NIFTI_GZ'),
                name='dilf')
    clean.connect([(mask, dilf, [('mask_file', 'in_file')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'mask.in_file')])])

    fill = Node(MaskTool(in_file='default_images/MNI152_T1_2mm_brain.nii.gz',
                         fill_holes=True,
                         outputtype='NIFTI_GZ'),
                name='fill')

    axb = Node(Calc(expr='a*b', outputtype='NIFTI_GZ'), name='axb')
    clean.connect([(dilf, axb, [('out_file', 'in_file_a')])])
    clean.connect([(fill, axb, [('out_file', 'in_file_b')])])

    bxc = Node(Calc(expr='ispositive(a)*b', outputtype='NIFTI_GZ'), name='bxc')
    clean.connect([(mean_mask, bxc, [('mean_img', 'in_file_a')])])
    clean.connect([(axb, bxc, [('out_file', 'in_file_b')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'mean_mask.in_file')
                                     ])])

    #### BLUR, FOURIER BPF, and DETREND

    blurinmask = Node(BlurInMask(fwhm=6, outputtype='NIFTI_GZ'),
                      name='blurinmask')
    clean.connect([(bxc, blurinmask, [('out_file', 'mask')])])
    preproc.connect([(scrub, clean, [('remean2.out_file', 'blurinmask.in_file')
                                     ])])

    fourier = Node(Fourier(highpass=0.01,
                           lowpass=0.08,
                           retrend=True,
                           outputtype='NIFTI_GZ'),
                   name='fourier')
    clean.connect([(blurinmask, fourier, [('out_file', 'in_file')])])

    tstat = Node(TStat(args='-mean', outputtype='NIFTI_GZ'), name='tstat')
    clean.connect([(fourier, tstat, [('out_file', 'in_file')])])

    detrend = Node(Detrend(args='-polort 2', outputtype='NIFTI_GZ'),
                   name='detrend')
    clean.connect([(fourier, detrend, [('out_file', 'in_file')])])

    remean = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean')
    clean.connect([(detrend, remean, [('out_file', 'in_file_a')])])
    clean.connect([(tstat, remean, [('out_file', 'in_file_b')])])

    concat = Node(ConcatModel(), name='concat')

    # Removes nuisance regressors via regression function
    clean_rs = Node(Bandpass(highpass=0, lowpass=99999, outputtype='NIFTI_GZ'),
                    name='clean_rs')

    clean.connect([(concat, clean_rs, [('out_file', 'orthogonalize_file')])])

    remean1 = Node(Calc(expr='a+b', outputtype='NIFTI_GZ'), name='remean1')
    clean.connect([(clean_rs, remean1, [('out_file', 'in_file_a')])])
    clean.connect([(tstat, remean1, [('out_file', 'in_file_b')])])

    preproc.connect([(regressors, clean, [('bpf_dt_csf.out_file',
                                           'concat.in_file_a')])])
    preproc.connect([(fproc, clean, [('moco_bpfdt.out_file',
                                      'concat.in_file_b')])])

    preproc.connect([(regressors, clean, [('localwm_bpf_dt.out_file',
                                           'clean_rs.orthogonalize_dset')])])
    clean.connect([(remean, clean_rs, [('out_file', 'in_file')])])
    '''
    Write graphical output detailing the workflows and nodes 
    '''

    fproc.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./fproc.dot')
    fproc.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./fproc_color.dot')

    coreg.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./coreg.dot')
    coreg.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./coreg_color.dot')

    scrub.write_graph(graph2use='flat',
                      format='png',
                      simple_form=True,
                      dotfilename='./scrub.dot')
    scrub.write_graph(graph2use='colored',
                      format='png',
                      simple_form=True,
                      dotfilename='./scrub_color.dot')

    regressors.write_graph(graph2use='flat',
                           format='png',
                           simple_form=True,
                           dotfilename='./reg.dot')
    regressors.write_graph(graph2use='colored',
                           format='png',
                           simple_form=True,
                           dotfilename='./reg_color.dot')

    preproc.write_graph(graph2use='flat',
                        format='png',
                        simple_form=True,
                        dotfilename='./preproc.dot')
    preproc.write_graph(graph2use='colored',
                        format='png',
                        simple_form=True,
                        dotfilename='./preproc_color.dot')

    return preproc
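A hypothetical usage sketch for building and running this workflow; the directories, subject/session labels, and SelectFiles templates below are placeholders:

wf = preproc_workflow(
    input_dir='/data/bids',
    output_dir='/data/derivatives/preproc',
    subject_list=['01', '02'],
    ses_list=['1'],
    anat_file='sub-{subject_id}/ses-{ses}/anat/sub-{subject_id}_ses-{ses}_T1w.nii.gz',
    func_file='sub-{subject_id}/ses-{ses}/func/sub-{subject_id}_ses-{ses}_task-rest_bold.nii.gz',
    scan_size=477,
    bet_frac=0.37)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})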
Example No. 18
def epi_mni_align(name='SpatialNormalization'):
    """
    Estimate the transform that maps the EPI space into MNI152NLin2009cAsym.

    The input epi_mean is the averaged and brain-masked EPI timeseries.

    Returns the EPI mean resampled in MNI space (for checking the registration) and
    the associated "lobe" parcellation in EPI space.

    .. workflow::

        from mriqc.workflows.functional import epi_mni_align
        from mriqc.testing import mock_config
        with mock_config():
            wf = epi_mni_align()

    """
    from nipype.interfaces.ants import ApplyTransforms, N4BiasFieldCorrection
    from templateflow.api import get as get_template
    from niworkflows.interfaces.registration import (RobustMNINormalizationRPT
                                                     as RobustMNINormalization)

    # Get settings
    testing = config.execution.debug
    n_procs = config.nipype.nprocs
    ants_nthreads = config.nipype.omp_nthreads

    workflow = pe.Workflow(name=name)
    inputnode = pe.Node(niu.IdentityInterface(fields=['epi_mean', 'epi_mask']),
                        name='inputnode')
    outputnode = pe.Node(
        niu.IdentityInterface(fields=['epi_mni', 'epi_parc', 'report']),
        name='outputnode')

    n4itk = pe.Node(N4BiasFieldCorrection(dimension=3, copy_header=True),
                    name='SharpenEPI')

    norm = pe.Node(RobustMNINormalization(
        explicit_masking=False,
        flavor='testing' if testing else 'precise',
        float=config.execution.ants_float,
        generate_report=True,
        moving='boldref',
        num_threads=ants_nthreads,
        reference='boldref',
        reference_image=str(
            get_template('MNI152NLin2009cAsym', resolution=2,
                         suffix='boldref')),
        reference_mask=str(
            get_template('MNI152NLin2009cAsym',
                         resolution=2,
                         desc='brain',
                         suffix='mask')),
        template='MNI152NLin2009cAsym',
        template_resolution=2,
    ),
                   name='EPI2MNI',
                   num_threads=n_procs,
                   mem_gb=3)

    # Warp segmentation into EPI space
    invt = pe.Node(ApplyTransforms(float=True,
                                   input_image=str(
                                       get_template('MNI152NLin2009cAsym',
                                                    resolution=1,
                                                    desc='carpet',
                                                    suffix='dseg')),
                                   dimension=3,
                                   default_value=0,
                                   interpolation='MultiLabel'),
                   name='ResampleSegmentation')

    workflow.connect([
        (inputnode, invt, [('epi_mean', 'reference_image')]),
        (inputnode, n4itk, [('epi_mean', 'input_image')]),
        (inputnode, norm, [('epi_mask', 'moving_mask')]),
        (n4itk, norm, [('output_image', 'moving_image')]),
        (norm, invt, [('inverse_composite_transform', 'transforms')]),
        (invt, outputnode, [('output_image', 'epi_parc')]),
        (norm, outputnode, [('warped_image', 'epi_mni'),
                            ('out_report', 'report')]),
    ])
    return workflow
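A hypothetical usage sketch, reusing the mock_config context shown in the docstring; the EPI mean and mask file names are placeholders:

from mriqc.testing import mock_config

with mock_config():
    wf = epi_mni_align()
    wf.inputs.inputnode.epi_mean = 'sub-01_task-rest_boldref.nii.gz'
    wf.inputs.inputnode.epi_mask = 'sub-01_task-rest_desc-brain_mask.nii.gz'
    # wf.run()  # would estimate the EPI->MNI transform and resample the parcellation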
Example No. 19
def build_coordinate_mapping(source_image,
                             target_image,
                             h5_forward,
                             h5_inverse,
                             output_dir='./',
                             file_name=None,
                             verbose=False,
                             save_data=True):
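    """
    Build voxel-coordinate mapping images between a source and a target space
    by warping integer index grids through precomputed ANTs forward and
    inverse transforms (e.g. composite .h5 files). Returns a dict with
    'mapping' (defined on the target grid) and 'inverse' (defined on the
    source grid).
    """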
    from nipype.interfaces.ants import ApplyTransforms
    import nibabel as nb
    from nighres.io import load_volume, save_volume
    from nighres.utils import _output_dir_4saving, _fname_4saving, _check_topology_lut_dir

    X = 0
    Y = 1
    Z = 2
    T = 3

    # load
    if verbose:
        print('Loading source & target...')
    source = load_volume(source_image)
    src_affine = source.affine
    src_header = source.header
    nsx = source.header.get_data_shape()[X]
    nsy = source.header.get_data_shape()[Y]
    nsz = source.header.get_data_shape()[Z]
    rsx = source.header.get_zooms()[X]
    rsy = source.header.get_zooms()[Y]
    rsz = source.header.get_zooms()[Z]

    target = load_volume(target_image)
    trg_affine = target.affine
    trg_header = target.header
    ntx = target.header.get_data_shape()[X]
    nty = target.header.get_data_shape()[Y]
    ntz = target.header.get_data_shape()[Z]
    rtx = target.header.get_zooms()[X]
    rty = target.header.get_zooms()[Y]
    rtz = target.header.get_zooms()[Z]

    if verbose:
        print('Building coordinate mappings...')
    # build coordinate mappings
    src_coord = np.zeros((nsx, nsy, nsz, 3))
    trg_coord = np.zeros((ntx, nty, ntz, 3))
    for x in range(nsx):
        for y in range(nsy):
            for z in range(nsz):
                src_coord[x, y, z, X] = x
                src_coord[x, y, z, Y] = y
                src_coord[x, y, z, Z] = z
    src_map = nb.Nifti1Image(src_coord, source.affine, source.header)
    src_map_file = os.path.join(
        output_dir,
        _fname_4saving(file_name=file_name,
                       rootfile=source_image,
                       suffix='tmp_srccoord'))
    save_volume(src_map_file, src_map)
    for x in range(ntx):
        for y in range(nty):
            for z in range(ntz):
                trg_coord[x, y, z, X] = x
                trg_coord[x, y, z, Y] = y
                trg_coord[x, y, z, Z] = z
    trg_map = nb.Nifti1Image(trg_coord, target.affine, target.header)
    trg_map_file = os.path.join(
        output_dir,
        _fname_4saving(file_name=file_name,
                       rootfile=source_image,
                       suffix='tmp_trgcoord'))
    save_volume(trg_map_file, trg_map)

    #     if verbose:
    #         print('Applying transforms to source...')
    #     at = ApplyTransforms()
    #     at.inputs.dimension = 2
    #     at.inputs.input_image = source.get_filename()
    #     at.inputs.reference_image = target.get_filename()
    #     at.inputs.interpolation = 'NearestNeighbor'
    #     at.inputs.transforms = h5_forward
    # #    at.inputs.invert_transform_flags = result.outputs.forward_invert_flags
    #     print(at.cmdline)
    #     transformed = at.run()

    if verbose:
        print('Applying transforms to forward...')
    # Create coordinate mappings
    src_at = ApplyTransforms()
    src_at.inputs.dimension = 3
    src_at.inputs.input_image_type = 3
    src_at.inputs.input_image = src_map.get_filename()
    src_at.inputs.reference_image = target.get_filename()
    src_at.inputs.interpolation = 'Linear'
    src_at.inputs.transforms = h5_forward
    #    src_at.inputs.invert_transform_flags = result.outputs.forward_invert_flags
    mapping = src_at.run()

    if verbose:
        print('Applying transforms to inverse...')
    trg_at = ApplyTransforms()
    trg_at.inputs.dimension = 3
    trg_at.inputs.input_image_type = 3
    trg_at.inputs.input_image = trg_map.get_filename()
    trg_at.inputs.reference_image = source.get_filename()
    trg_at.inputs.interpolation = 'Linear'
    trg_at.inputs.transforms = h5_inverse
    #    trg_at.inputs.invert_transform_flags = result.outputs.reverse_invert_flags
    inverse = trg_at.run()

    # wrap the warped coordinate maps in the target/source geometry
    if verbose:
        print('Creating niftis...')
    mapping_img = nb.Nifti1Image(
        nb.load(mapping.outputs.output_image).get_data(), target.affine,
        target.header)
    inverse_img = nb.Nifti1Image(
        nb.load(inverse.outputs.output_image).get_data(), source.affine,
        source.header)

    outputs = {'mapping': mapping_img, 'inverse': inverse_img}

    if verbose:
        print('Clean-up & save...')
    # clean-up intermediate files
    os.remove(src_map_file)
    os.remove(trg_map_file)

    os.remove(mapping.outputs.output_image)
    os.remove(inverse.outputs.output_image)

    if save_data:
        mapping_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-map'))

        inverse_mapping_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-invmap'))
        #         save_volume(transformed_source_file, transformed_img)
        save_volume(mapping_file, mapping_img)
        save_volume(inverse_mapping_file, inverse_img)

    return outputs
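A hypothetical usage sketch; the image and transform file names below are placeholders for an existing ANTs registration:

maps = build_coordinate_mapping(
    source_image='sub-01_T1w.nii.gz',
    target_image='tpl-MNI152NLin2009cAsym_T1w.nii.gz',
    h5_forward='sub-01_from-T1w_to-MNI_Composite.h5',
    h5_inverse='sub-01_from-MNI_to-T1w_Composite.h5',
    output_dir='coordmaps',
    verbose=True)
# maps['mapping'] and maps['inverse'] are nibabel images holding voxel
# coordinates defined on the target and source grids, respectively.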
Example No. 20
    reg.inputs.convergence_window_size = [10]*2
    reg.inputs.smoothing_sigmas = [[3, 1, 0]]*2
    reg.inputs.sigma_units = ['vox']*2
    reg.inputs.shrink_factors = [[4, 2, 1]]*2  # shrink factors must be >= 1
    reg.inputs.use_estimate_learning_rate_once = [True]*2
    reg.inputs.use_histogram_matching = [True]*2
    reg.terminal_output = 'none'
    reg.inputs.num_threads = 4  # ITK threads used by antsRegistration
    reg.inputs.winsorize_lower_quantile = 0.025
    reg.inputs.winsorize_upper_quantile = 0.95
    #reg.inputs.collapse_linear_transforms_to_fixed_image_header = False
    reg.inputs.output_warped_image = folder + 'registeredAtlas_' + fileName
    print(reg.cmdline)
    reg.run()
     
    at = ApplyTransforms()
#    os.chdir(newPath)
    fileNames = ['pbmap_GM.nii', 'pbmap_WM.nii', 'pbmap_CSF.nii', 'tissues.nii']
    for j in range(len(fileNames)):
        at.inputs.dimension = 3
        at.inputs.input_image = atlasPath+fileNames[j]
        at.inputs.reference_image = input_fixed
        at.inputs.output_image = 'deformed_'+fileNames[j]
        at.inputs.interpolation = 'BSpline'
        at.inputs.default_value = 0
        at.inputs.transforms = ['0GenericAffine.mat','1Warp.nii.gz']
        at.inputs.invert_transform_flags = [False, False]
        print(at.cmdline)
        at.run()
def antsRegistrationTemplateBuildSingleIterationWF(iterationPhasePrefix=''):
    """

    Inputs::

           inputspec.images :
           inputspec.fixed_image :
           inputspec.ListOfPassiveImagesDictionaries :
           inputspec.interpolationMapping :

    Outputs::

           outputspec.template :
           outputspec.transforms_list :
           outputspec.passive_deformed_templates :
    """
    TemplateBuildSingleIterationWF = pe.Workflow(
        name='antsRegistrationTemplateBuildSingleIterationWF_' +
        str(iterationPhasePrefix))

    inputSpec = pe.Node(interface=util.IdentityInterface(fields=[
        'ListOfImagesDictionaries', 'registrationImageTypes',
        'interpolationMapping', 'fixed_image'
    ]),
                        run_without_submitting=True,
                        name='inputspec')
    ## HACK: TODO: Need to move all local functions to a common utility file, or to the top of the file so that
    ##             they do not change due to re-indenting.  Otherwise re-indenting for flow control will trigger
    ##             their hash to change.
    ## HACK: TODO: REMOVE 'transforms_list' it is not used.  That will change all the hashes
    ## HACK: TODO: Need to run all python files through the code beautifiers.  It has gotten pretty ugly.
    outputSpec = pe.Node(interface=util.IdentityInterface(
        fields=['template', 'transforms_list', 'passive_deformed_templates']),
                         run_without_submitting=True,
                         name='outputspec')

    ### NOTE MAP NODE! warp each of the original images to the provided fixed_image as the template
    BeginANTS = pe.MapNode(interface=Registration(),
                           name='BeginANTS',
                           iterfield=['moving_image'])
    BeginANTS.inputs.dimension = 3
    BeginANTS.inputs.output_transform_prefix = str(
        iterationPhasePrefix) + '_tfm'
    BeginANTS.inputs.transforms = ["Affine", "SyN"]
    BeginANTS.inputs.transform_parameters = [[0.9], [0.25, 3.0, 0.0]]
    BeginANTS.inputs.metric = ['Mattes', 'CC']
    BeginANTS.inputs.metric_weight = [1.0, 1.0]
    BeginANTS.inputs.radius_or_number_of_bins = [32, 5]
    BeginANTS.inputs.number_of_iterations = [[1000, 1000, 1000], [50, 35, 15]]
    BeginANTS.inputs.use_histogram_matching = [True, True]
    BeginANTS.inputs.use_estimate_learning_rate_once = [False, False]
    BeginANTS.inputs.shrink_factors = [[3, 2, 1], [3, 2, 1]]
    BeginANTS.inputs.smoothing_sigmas = [[3, 2, 0], [3, 2, 0]]

    GetMovingImagesNode = pe.Node(interface=util.Function(
        function=GetMovingImages,
        input_names=[
            'ListOfImagesDictionaries', 'registrationImageTypes',
            'interpolationMapping'
        ],
        output_names=['moving_images', 'moving_interpolation_type']),
                                  run_without_submitting=True,
                                  name='99_GetMovingImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec,
                                           'ListOfImagesDictionaries',
                                           GetMovingImagesNode,
                                           'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes',
                                           GetMovingImagesNode,
                                           'registrationImageTypes')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           GetMovingImagesNode,
                                           'interpolationMapping')

    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_images', BeginANTS,
                                           'moving_image')
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_interpolation_type',
                                           BeginANTS, 'interpolation')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image', BeginANTS,
                                           'fixed_image')

    ## Now warp all the input_images images
    wimtdeformed = pe.MapNode(
        interface=ApplyTransforms(),
        iterfield=['transforms', 'invert_transform_flags', 'input_image'],
        name='wimtdeformed')
    wimtdeformed.inputs.interpolation = 'Linear'
    wimtdeformed.default_value = 0
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms',
                                           wimtdeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags',
                                           wimtdeformed,
                                           'invert_transform_flags')
    TemplateBuildSingleIterationWF.connect(GetMovingImagesNode,
                                           'moving_images', wimtdeformed,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'fixed_image',
                                           wimtdeformed, 'reference_image')

    ##  Shape Update Next =====
    ## Now  Average All input_images deformed images together to create an updated template average
    AvgDeformedImages = pe.Node(interface=AverageImages(),
                                name='AvgDeformedImages')
    AvgDeformedImages.inputs.dimension = 3
    AvgDeformedImages.inputs.output_average_image = str(
        iterationPhasePrefix) + '.nii.gz'
    AvgDeformedImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(wimtdeformed, "output_image",
                                           AvgDeformedImages, 'images')

    ## Now average all affine transforms together
    AvgAffineTransform = pe.Node(interface=AverageAffineTransform(),
                                 name='AvgAffineTransform')
    AvgAffineTransform.inputs.dimension = 3
    AvgAffineTransform.inputs.output_affine_transform = 'Average_' + str(
        iterationPhasePrefix) + '_Affine.mat'

    SplitAffineAndWarpsNode = pe.Node(interface=util.Function(
        function=SplitAffineAndWarpComponents,
        input_names=['list_of_transforms_lists'],
        output_names=['affine_component_list', 'warp_component_list']),
                                      run_without_submitting=True,
                                      name='99_SplitAffineAndWarpsNode')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms',
                                           SplitAffineAndWarpsNode,
                                           'list_of_transforms_lists')
    TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode,
                                           'affine_component_list',
                                           AvgAffineTransform, 'transforms')

    ## Now average the warp fields together
    AvgWarpImages = pe.Node(interface=AverageImages(), name='AvgWarpImages')
    AvgWarpImages.inputs.dimension = 3
    AvgWarpImages.inputs.output_average_image = str(
        iterationPhasePrefix) + 'warp.nii.gz'
    AvgWarpImages.inputs.normalize = True
    TemplateBuildSingleIterationWF.connect(SplitAffineAndWarpsNode,
                                           'warp_component_list',
                                           AvgWarpImages, 'images')

    ## Now scale the average warp field by the (negative) gradient step
    ## TODO:  For now GradientStep is set to 0.25 as a hard coded default value.
    GradientStep = 0.25
    GradientStepWarpImage = pe.Node(interface=MultiplyImages(),
                                    name='GradientStepWarpImage')
    GradientStepWarpImage.inputs.dimension = 3
    GradientStepWarpImage.inputs.second_input = -1.0 * GradientStep
    GradientStepWarpImage.inputs.output_product_image = 'GradientStep0.25_' + str(
        iterationPhasePrefix) + '_warp.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgWarpImages,
                                           'output_average_image',
                                           GradientStepWarpImage,
                                           'first_input')

    ## Now create the new template shape based on the average of all deformed images
    UpdateTemplateShape = pe.Node(interface=ApplyTransforms(),
                                  name='UpdateTemplateShape')
    UpdateTemplateShape.inputs.invert_transform_flags = [True]
    UpdateTemplateShape.inputs.interpolation = 'Linear'
    UpdateTemplateShape.default_value = 0

    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           UpdateTemplateShape,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect([
        (AvgAffineTransform, UpdateTemplateShape,
         [(('affine_transform', makeListOfOneElement), 'transforms')]),
    ])
    TemplateBuildSingleIterationWF.connect(GradientStepWarpImage,
                                           'output_product_image',
                                           UpdateTemplateShape, 'input_image')

    ApplyInvAverageAndFourTimesGradientStepWarpImage = pe.Node(
        interface=util.Function(
            function=MakeTransformListWithGradientWarps,
            input_names=['averageAffineTranform', 'gradientStepWarp'],
            output_names=['TransformListWithGradientWarps']),
        run_without_submitting=True,
        name='99_MakeTransformListWithGradientWarps')
    ApplyInvAverageAndFourTimesGradientStepWarpImage.inputs.ignore_exception = True

    TemplateBuildSingleIterationWF.connect(
        AvgAffineTransform, 'affine_transform',
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'averageAffineTranform')
    TemplateBuildSingleIterationWF.connect(
        UpdateTemplateShape, 'output_image',
        ApplyInvAverageAndFourTimesGradientStepWarpImage, 'gradientStepWarp')

    ReshapeAverageImageWithShapeUpdate = pe.Node(
        interface=ApplyTransforms(), name='ReshapeAverageImageWithShapeUpdate')
    ReshapeAverageImageWithShapeUpdate.inputs.invert_transform_flags = [
        True, False, False, False, False
    ]
    ReshapeAverageImageWithShapeUpdate.inputs.interpolation = 'Linear'
    ReshapeAverageImageWithShapeUpdate.default_value = 0
    ReshapeAverageImageWithShapeUpdate.inputs.output_image = 'ReshapeAverageImageWithShapeUpdate.nii.gz'
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'input_image')
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           ReshapeAverageImageWithShapeUpdate,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps', ReshapeAverageImageWithShapeUpdate,
        'transforms')
    TemplateBuildSingleIterationWF.connect(ReshapeAverageImageWithShapeUpdate,
                                           'output_image', outputSpec,
                                           'template')

    ##############################################
    ## Process all the passive deformed images in a way similar to the
    ## main image used for registration
    ##############################################
    ## Now warp all the ListOfPassiveImagesDictionaries images
    FlattenTransformAndImagesListNode = pe.Node(
        Function(function=FlattenTransformAndImagesList,
                 input_names=[
                     'ListOfPassiveImagesDictionaries', 'transforms',
                     'invert_transform_flags', 'interpolationMapping'
                 ],
                 output_names=[
                     'flattened_images', 'flattened_transforms',
                     'flattened_invert_transform_flags',
                     'flattened_image_nametypes',
                     'flattened_interpolation_type'
                 ]),
        run_without_submitting=True,
        name="99_FlattenTransformAndImagesList")

    GetPassiveImagesNode = pe.Node(interface=util.Function(
        function=GetPassiveImages,
        input_names=['ListOfImagesDictionaries', 'registrationImageTypes'],
        output_names=['ListOfPassiveImagesDictionaries']),
                                   run_without_submitting=True,
                                   name='99_GetPassiveImagesNode')
    TemplateBuildSingleIterationWF.connect(inputSpec,
                                           'ListOfImagesDictionaries',
                                           GetPassiveImagesNode,
                                           'ListOfImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'registrationImageTypes',
                                           GetPassiveImagesNode,
                                           'registrationImageTypes')

    TemplateBuildSingleIterationWF.connect(GetPassiveImagesNode,
                                           'ListOfPassiveImagesDictionaries',
                                           FlattenTransformAndImagesListNode,
                                           'ListOfPassiveImagesDictionaries')
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           FlattenTransformAndImagesListNode,
                                           'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_transforms',
                                           FlattenTransformAndImagesListNode,
                                           'transforms')
    TemplateBuildSingleIterationWF.connect(BeginANTS, 'forward_invert_flags',
                                           FlattenTransformAndImagesListNode,
                                           'invert_transform_flags')
    wimtPassivedeformed = pe.MapNode(interface=ApplyTransforms(),
                                     iterfield=[
                                         'transforms',
                                         'invert_transform_flags',
                                         'input_image', 'interpolation'
                                     ],
                                     name='wimtPassivedeformed')
    wimtPassivedeformed.default_value = 0
    TemplateBuildSingleIterationWF.connect(AvgDeformedImages,
                                           'output_average_image',
                                           wimtPassivedeformed,
                                           'reference_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_interpolation_type',
                                           wimtPassivedeformed,
                                           'interpolation')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_images',
                                           wimtPassivedeformed, 'input_image')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_transforms',
                                           wimtPassivedeformed, 'transforms')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_invert_transform_flags',
                                           wimtPassivedeformed,
                                           'invert_transform_flags')

    RenestDeformedPassiveImagesNode = pe.Node(
        Function(function=RenestDeformedPassiveImages,
                 input_names=[
                     'deformedPassiveImages', 'flattened_image_nametypes',
                     'interpolationMapping'
                 ],
                 output_names=[
                     'nested_imagetype_list', 'outputAverageImageName_list',
                     'image_type_list', 'nested_interpolation_type'
                 ]),
        run_without_submitting=True,
        name="99_RenestDeformedPassiveImages")
    TemplateBuildSingleIterationWF.connect(inputSpec, 'interpolationMapping',
                                           RenestDeformedPassiveImagesNode,
                                           'interpolationMapping')
    TemplateBuildSingleIterationWF.connect(wimtPassivedeformed, 'output_image',
                                           RenestDeformedPassiveImagesNode,
                                           'deformedPassiveImages')
    TemplateBuildSingleIterationWF.connect(FlattenTransformAndImagesListNode,
                                           'flattened_image_nametypes',
                                           RenestDeformedPassiveImagesNode,
                                           'flattened_image_nametypes')
    ## Now  Average All passive input_images deformed images together to create an updated template average
    AvgDeformedPassiveImages = pe.MapNode(
        interface=AverageImages(),
        iterfield=['images', 'output_average_image'],
        name='AvgDeformedPassiveImages')
    AvgDeformedPassiveImages.inputs.dimension = 3
    AvgDeformedPassiveImages.inputs.normalize = False
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "nested_imagetype_list",
                                           AvgDeformedPassiveImages, 'images')
    TemplateBuildSingleIterationWF.connect(RenestDeformedPassiveImagesNode,
                                           "outputAverageImageName_list",
                                           AvgDeformedPassiveImages,
                                           'output_average_image')

    ## -- TODO:  Now need to reshape all the passive images as well
    ReshapeAveragePassiveImageWithShapeUpdate = pe.MapNode(
        interface=ApplyTransforms(),
        iterfield=[
            'input_image', 'reference_image', 'output_image', 'interpolation'
        ],
        name='ReshapeAveragePassiveImageWithShapeUpdate')
    ReshapeAveragePassiveImageWithShapeUpdate.inputs.invert_transform_flags = [
        True, False, False, False, False
    ]
    ReshapeAveragePassiveImageWithShapeUpdate.default_value = 0
    TemplateBuildSingleIterationWF.connect(
        RenestDeformedPassiveImagesNode, 'nested_interpolation_type',
        ReshapeAveragePassiveImageWithShapeUpdate, 'interpolation')
    TemplateBuildSingleIterationWF.connect(
        RenestDeformedPassiveImagesNode, 'outputAverageImageName_list',
        ReshapeAveragePassiveImageWithShapeUpdate, 'output_image')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'input_image')
    TemplateBuildSingleIterationWF.connect(
        AvgDeformedPassiveImages, 'output_average_image',
        ReshapeAveragePassiveImageWithShapeUpdate, 'reference_image')
    TemplateBuildSingleIterationWF.connect(
        ApplyInvAverageAndFourTimesGradientStepWarpImage,
        'TransformListWithGradientWarps',
        ReshapeAveragePassiveImageWithShapeUpdate, 'transforms')
    TemplateBuildSingleIterationWF.connect(
        ReshapeAveragePassiveImageWithShapeUpdate, 'output_image', outputSpec,
        'passive_deformed_templates')

    return TemplateBuildSingleIterationWF
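A hypothetical usage sketch showing how one iteration workflow could be wired into a parent template-building workflow; the datasource node and its fields below are placeholders, not from the original pipeline:

iteration_wf = antsRegistrationTemplateBuildSingleIterationWF('iter0')
parent_wf = pe.Workflow(name='templateBuilder')
datasource = pe.Node(
    util.IdentityInterface(fields=[
        'ListOfImagesDictionaries', 'registrationImageTypes',
        'interpolationMapping', 'initial_template']),
    run_without_submitting=True,
    name='datasource')
parent_wf.connect([
    (datasource, iteration_wf,
     [('ListOfImagesDictionaries', 'inputspec.ListOfImagesDictionaries'),
      ('registrationImageTypes', 'inputspec.registrationImageTypes'),
      ('interpolationMapping', 'inputspec.interpolationMapping'),
      ('initial_template', 'inputspec.fixed_image')]),
])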
binarize_pt2pp = binarize_post2ant.clone('binarize_pt2pp')

# FreeSurferSource - Data grabber specific for FreeSurfer data
fssource_lh = Node(FreeSurferSource(subjects_dir=fs_dir, hemi='lh'),
                   run_without_submitting=True,
                   name='fssource_lh')

fssource_rh = Node(FreeSurferSource(subjects_dir=fs_dir, hemi='rh'),
                   run_without_submitting=True,
                   name='fssource_rh')

# Transform the volumetric ROIs to the target space
inverse_transform_mni_volume_post2ant = MapNode(
    ApplyTransforms(args='--float',
                    input_image_type=3,
                    interpolation='Linear',
                    invert_transform_flags=[False, False],
                    num_threads=1,
                    terminal_output='file'),
    name='inverse_transform_mni_volume_post2ants',
    iterfield=['input_image'])

inverse_transform_mni_volume_pt2pp = inverse_transform_mni_volume_post2ant.clone(
    'inverse_transform_mni_volume_pt2pp')


# Set the Label2Label output file name. There might be a bug in nipype's Label2Label interface; check periodically and adapt the workflow if it gets fixed.
def set_output_name(label):
    output_name = label + '_converted.label'

    return output_name
def main(args=None):

    args = arg_parser().parse_args(args)
    FLAIR = args.FLAIR
    MPRAGE = args.T1
    
    prefix=args.prefix + '.'

    if args.mask is None:
        args.temp_mask = os.path.abspath(args.temp_mask)
        args.brain_template = os.path.abspath(args.brain_template)
        args.temp_prob = os.path.abspath(args.temp_prob)
        if not os.path.isfile(args.temp_mask):
            raise Exception("template mask not found")
        if not os.path.isfile(args.brain_template):
            raise Exception("brain template not found")
        if not os.path.isfile(args.temp_prob):
            raise Exception("template probability mask not found")
    elif not os.path.isfile(args.mask):
        raise Exception("T1 mask file not found")

    if not os.path.isfile(MPRAGE):
        raise Exception("Input T1 file not found")
    if not os.path.isfile(FLAIR):
        raise Exception("Input FLAIR file not found")

    if args.outfolder is not None:
        abs_out = os.path.abspath(args.outfolder)
        #print(abs_out)
        if not os.path.exists(abs_out):
            os.mkdir(abs_out)
        # copy the input files into the output folder (not obvious how to point
        # nipype at a different working folder when it runs the ANTs shell scripts)
        copyfile(os.path.abspath(MPRAGE), os.path.join(abs_out, os.path.basename(MPRAGE)))
        copyfile(os.path.abspath(FLAIR), os.path.join(abs_out, os.path.basename(FLAIR)))
        if args.mask is not None:
            if os.path.isfile(args.mask):
                copyfile(os.path.abspath(args.mask),os.path.join(abs_out, prefix + 'MPRAGE.mask.nii.gz'))
        os.chdir(args.outfolder)
    elif args.mask is not None:
        # no output folder given: copy the mask into the current working directory
        copyfile(os.path.abspath(args.mask), os.path.join(os.getcwd(), prefix + 'MPRAGE.mask.nii.gz'))

    if args.mask is None:
        # T1 brain extraction
        brainextraction = BrainExtraction()
        brainextraction.inputs.dimension = 3
        brainextraction.inputs.anatomical_image = MPRAGE
        brainextraction.inputs.brain_template = args.brain_template
        brainextraction.inputs.brain_probability_mask = args.temp_prob
        brainextraction.inputs.extraction_registration_mask = args.temp_mask
        brainextraction.inputs.debug = True
        print("brain extraction")
        print(' ')
        print(brainextraction.cmdline)
        print('-'*30)
        brainextraction.run()
        os.rename('highres001_BrainExtractionMask.nii.gz',prefix +'MPRAGE.mask.nii.gz')
        os.rename('highres001_BrainExtractionBrain.nii.gz',prefix +'MPRAGE.brain.nii.gz')
        os.remove('highres001_BrainExtractionPrior0GenericAffine.mat')
        os.rmdir('highres001_')

    # two-step registration with ANTs (step 1)

    reg = Registration()
    reg.inputs.fixed_image = FLAIR
    reg.inputs.moving_image = MPRAGE
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.output_warped_image = prefix + 'output_warped_image.nii.gz'
    reg.inputs.dimension = 3
    reg.inputs.transforms = ['Rigid']
    reg.inputs.transform_parameters = [[0.1]]
    reg.inputs.radius_or_number_of_bins = [32]
    reg.inputs.metric = ['MI']
    reg.inputs.sampling_percentage = [0.1]
    reg.inputs.sampling_strategy = ['Regular']
    reg.inputs.shrink_factors = [[4,3,2,1]]
    reg.inputs.smoothing_sigmas = [[3,2,1,0]]
    reg.inputs.sigma_units = ['vox']
    reg.inputs.use_histogram_matching = [False]
    reg.inputs.number_of_iterations = [[1000,500,250,100]]
    reg.inputs.winsorize_lower_quantile = 0.025
    reg.inputs.winsorize_upper_quantile = 0.975
    print("first pass registration")
    print(' ')
    print(reg.cmdline)
    print('-'*30)
    reg.run()

    os.rename('output_0GenericAffine.mat',prefix + 'MPRAGE_to_FLAIR.firstpass.mat')

    # apply transform: MPRAGE mask to FLAIR space

    at = ApplyTransforms()
    at.inputs.dimension = 3
    at.inputs.input_image = prefix + 'MPRAGE.mask.nii.gz'
    at.inputs.reference_image = FLAIR
    at.inputs.output_image = prefix + 'FLAIR.mask.nii.gz'
    at.inputs.interpolation = 'MultiLabel'
    at.inputs.default_value = 0
    at.inputs.transforms = [ prefix + 'MPRAGE_to_FLAIR.firstpass.mat']
    at.inputs.invert_transform_flags = [False]
    print("apply stranform to T1 maks")
    print(' ')
    print(at.cmdline)
    print('-'*30)    
    at.run()

    # bias correct FLAIR and MPRAGE

    n4m = N4BiasFieldCorrection()
    n4m.inputs.dimension = 3
    n4m.inputs.input_image = MPRAGE
    n4m.inputs.mask_image = prefix + 'MPRAGE.mask.nii.gz'
    n4m.inputs.bspline_fitting_distance = 300
    n4m.inputs.shrink_factor = 3
    n4m.inputs.n_iterations = [50,50,30,20]
    n4m.inputs.output_image = prefix + 'MPRAGE.N4.nii.gz'
    print("bias correcting T1")
    print(' ')
    print(n4m.cmdline)
    print('-'*30)
    n4m.run()

    n4f = copy.deepcopy(n4m)
    n4f.inputs.input_image = FLAIR
    n4f.inputs.mask_image = prefix + 'FLAIR.mask.nii.gz'
    n4f.inputs.output_image = prefix + 'FLAIR.N4.nii.gz'
    print("bias correcting FLAIR")
    print(' ')
    print(n4f.cmdline)
    print('-'*30)
    n4f.run()

    # mask bias corrected FLAIR and MPRAGE

    calc = afni.Calc()
    calc.inputs.in_file_a = prefix + 'FLAIR.N4.nii.gz'
    calc.inputs.in_file_b = prefix + 'FLAIR.mask.nii.gz'
    calc.inputs.expr='a*b'
    calc.inputs.out_file = prefix +  'FLAIR.N4.masked.nii.gz'
    calc.inputs.outputtype = 'NIFTI'
    calc.inputs.overwrite = True
    calc.run()

    calc1= copy.deepcopy(calc)
    calc1.inputs.in_file_a = prefix + 'MPRAGE.N4.nii.gz'
    calc1.inputs.in_file_b = prefix + 'MPRAGE.mask.nii.gz'
    calc1.inputs.out_file = prefix +  'MPRAGE.N4.masked.nii.gz'
    calc1.inputs.overwrite = True
    calc1.run()

    # register the bias-corrected, skull-stripped images (second pass refinement)

    reg1 = copy.deepcopy(reg)
    reg1.inputs.fixed_image = prefix + 'FLAIR.N4.masked.nii.gz'
    reg1.inputs.moving_image = prefix + 'MPRAGE.N4.masked.nii.gz'
    reg1.inputs.output_transform_prefix = "output_"
    reg1.inputs.output_warped_image = prefix + 'output_warped_image.nii.gz'
    reg1.inputs.initial_moving_transform = prefix + 'MPRAGE_to_FLAIR.firstpass.mat'
    print("second pass registration")
    print(' ')
    print(reg1.cmdline)
    print('-'*30)
    reg1.run()
    os.rename('output_0GenericAffine.mat',prefix +'MPRAGE_to_FLAIR.secondpass.mat')
    
    
    # resample the bias-corrected MPRAGE into FLAIR space using the second-pass transform

    atf = ApplyTransforms()
    atf.inputs.dimension = 3
    atf.inputs.input_image = prefix + 'MPRAGE.N4.nii.gz'
    atf.inputs.reference_image = FLAIR
    atf.inputs.output_image = prefix + 'MPRAGE.N4.toFLAIR.nii.gz'
    atf.inputs.interpolation = 'BSpline'
    atf.inputs.interpolation_parameters = (3,)
    atf.inputs.default_value = 0
    atf.inputs.transforms = [prefix +  'MPRAGE_to_FLAIR.secondpass.mat']
    atf.inputs.invert_transform_flags = [False]
    print("final apply transform")
    print(' ')
    print(atf.cmdline)
    print('-'*30)
    atf.run()


    #cleanup

    os.remove(prefix + 'output_warped_image.nii.gz')

    if args.outfolder is not None:
        os.remove(os.path.join(abs_out,os.path.basename(MPRAGE)))
        os.remove(os.path.join(abs_out,os.path.basename(FLAIR)))
        
    if args.mask is None:
        os.remove(prefix + 'MPRAGE.brain.nii.gz')
        
    if not args.storetemp:
        os.remove(prefix + 'MPRAGE.mask.nii.gz')
        os.remove(prefix + 'MPRAGE_to_FLAIR.firstpass.mat')
        os.remove(prefix + 'FLAIR.N4.masked.nii.gz')
        os.remove(prefix + 'MPRAGE.N4.masked.nii.gz')
        os.remove(prefix + 'MPRAGE.N4.nii.gz')


    return
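The arg_parser() called at the top of main() is not part of this excerpt. A minimal sketch of a parser exposing the attributes the function reads could look like the following; the flag spellings and defaults are assumptions, only the attribute names (FLAIR, T1, prefix, mask, temp_mask, brain_template, temp_prob, outfolder, storetemp) come from the code above.

# Hypothetical parser sketch; dest names mirror the attributes used in main(),
# flag spellings and defaults are assumptions.
import argparse

def arg_parser_sketch():
    p = argparse.ArgumentParser(description='MPRAGE/FLAIR preprocessing (sketch)')
    p.add_argument('--FLAIR', required=True, help='input FLAIR image')
    p.add_argument('--T1', required=True, help='input MPRAGE/T1 image')
    p.add_argument('--prefix', default='sub', help='prefix for output file names')
    p.add_argument('--mask', default=None, help='precomputed T1 brain mask (skips brain extraction)')
    p.add_argument('--temp_mask', help='template registration mask (needed when no --mask is given)')
    p.add_argument('--brain_template', help='whole-head template for brain extraction')
    p.add_argument('--temp_prob', help='template brain probability mask')
    p.add_argument('--outfolder', default=None, help='optional output folder')
    p.add_argument('--storetemp', action='store_true', help='keep intermediate files')
    return p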
Exemplo n.º 24
    def distortion_correction_workflow(self):
        # The initial script created by Vinit Srivastava was:
        # antsIntermodalityIntrasubject.sh -d 3 -i eddy_corr_brain_b0.nii.gz -r
        # T1-nonGdE_brain_N4bfc_masked.nii.gz -x T1-nonGdE_brain_mask.nii.gz -w
        # template -o B0toT1SmallWarp -t 2
        #
        # Note: the script antsIntermodalityIntrasubject.sh returns an error regarding a missing template file:
        #  template1Warp.nii.gz does not exist - please specify in order to proceed to steps that map to the template
        # This is expected and means that the second half of the script is not executed nor necessary for this step.
        # https://github.com/ANTsX/ANTs/blob/master/Scripts/antsIntermodalityIntrasubject.sh
        #
        # Additionally, the anatomical T1 brain mask is used in the second part of the script and is not useful in our
        # case.
        #
        # The ants interface from nipype doesn't wrap the antsIntermodalityIntrasubject.sh script
        #
        # antsIntermodalityIntrasubject.sh Script breakdown:
        # Usage: `basename $0`
        #        -d imageDimension
        #        -r anatomicalT1image (brain or whole-head, depending on modality) to align to
        #        -R anatomicalReference image to warp to (often higher resolution than anatomicalT1image)
        #        -i scalarImageToMatch (such as average bold, average dwi, etc.)
        #        -x anatomicalT1brainmask (should mask out regions that do not appear in scalarImageToMatch)
        #        -t transformType(0 = rigid, 1 = affine, 2 = rigid + small_def, 3 = affine + small_def)
        #        -w prefix of T1 to template transform
        #        -T template space
        #        < OPTARGS >
        #        -o outputPrefix
        #        -l labels in template space
        #        -a auxiliary scalar image/s to warp to template
        #        -b auxiliary dt image to warp to template

        # Initial command runs:
        #       /opt/ants-2.3.1/antsRegistration -d 3 -m MI[anatomicalImage(-r), scalarImage(-i),1,32,Regular,0.25]
        #        -c [1000x500x250x0,1e-7,5] -t Rigid[0.1] -f 8x4x2x1 -s 4x2x1x0
        #        -u 1 -m mattes[anatomicalImage(-r), scalarImage(-i),1,32] -c [50x50x0,1e-7,5] -t SyN[0.1,3,0] -f 4x2x1
        #        -s 2x1x0mm -u 1 -z 1 --winsorize-image-intensities [0.005, 0.995] -o B0toT1Warp
        # -d: dimensionality
        # -m: metric
        #       "MI[fixedImage,movingImage,metricWeight,numberOfBins,<samplingStrategy={None,Regular,Random}>,<samplingPercentage=[0,1]>]" );
        #       "Mattes[fixedImage,movingImage,metricWeight,numberOfBins,<samplingStrategy={None,Regular,Random}>,<samplingPercentage=[0,1]>]" );
        # -c: convergence
        #       "MxNxO"
        #       "[MxNxO,<convergenceThreshold=1e-6>,<convergenceWindowSize=10>]"
        # -t: transform
        #       0:rigid[GradientStep], 1:affine[], 2:composite affine[], 3:similarity[], 4:translation[], 5:BSpline[]
        #       "SyN[gradientStep,<updateFieldVarianceInVoxelSpace=3>,<totalFieldVarianceInVoxelSpace=0>]"
        # -f: shrink-factors
        #       "MxNxO..."
        # -s: smoothing-sigmas
        #       "MxNxO..."
        # -u: use-histogram-matching
        # -z: collapse-output-transforms
        # -o: output transform prefix

        b0_T1w_Reg = Node(Registration(), name="b0_T1w_Reg")
        b0_T1w_Reg.btn_string = 'dwi b0 to T1w Registration'
        # -r, -i, -x will get set via workflow implementation
        # -d
        b0_T1w_Reg.inputs.dimension = 3
        # -m
        b0_T1w_Reg.inputs.metric = ['MI', 'Mattes']
        b0_T1w_Reg.inputs.metric_weight = [1, 1]
        b0_T1w_Reg.inputs.radius_or_number_of_bins = [32, 32]
        b0_T1w_Reg.inputs.sampling_strategy = ['Regular', None]
        b0_T1w_Reg.inputs.sampling_percentage = [0.25, None]
        # -c
        b0_T1w_Reg.inputs.number_of_iterations = [[1000, 500, 250, 0],
                                                  [50, 50, 0]]
        b0_T1w_Reg.inputs.convergence_threshold = [1e-7, 1e-7]
        b0_T1w_Reg.inputs.convergence_window_size = [5, 5]
        # -t
        b0_T1w_Reg.inputs.transforms = ['Rigid', 'SyN']
        b0_T1w_Reg.inputs.transform_parameters = [(0.1, ), (0.1, 3, 0.0)]
        # -f
        b0_T1w_Reg.inputs.shrink_factors = [[8, 4, 2, 1], [4, 2, 1]]
        # -s
        b0_T1w_Reg.inputs.smoothing_sigmas = [[4, 2, 1, 0], [2, 1, 0]]
        b0_T1w_Reg.inputs.sigma_units = ['vox', 'mm']
        # -u
        b0_T1w_Reg.inputs.use_histogram_matching = [True, True]
        # -z
        b0_T1w_Reg.inputs.collapse_output_transforms = True
        # winsorize
        b0_T1w_Reg.inputs.winsorize_lower_quantile = 0.005
        b0_T1w_Reg.inputs.winsorize_upper_quantile = 0.995
        # -o
        b0_T1w_Reg.inputs.output_transform_prefix = 'dwiToT1Warp'

        # Since the ApplyTransforms interface in nipype expects the transform list in the reverse order (i.e.
        # the output from the antsRegistration step needs to be flipped), we save the transform files as a single
        # composite file.
        b0_T1w_Reg.inputs.write_composite_transform = True
        self.interfaces.append(b0_T1w_Reg)

        # Although the antsRegistration interface can output a warped image, we keep the ApplyTransforms node to
        # replicate the original (i.e. non-nipype) pipeline and to add the input_image_type parameter.

        # second script: antsApplyTransforms
        # antsApplyTransforms -d 3 -e 3 -i data.nii.gz -o data_distcorr.nii.gz -r
        # eddy_corr_brain_b0.nii.gz -t B0toT1SmallWarp1Warp.nii.gz -t
        # B0toT1SmallWarp0GenericAffine.mat -v
        dwi_T1w_Tran = Node(ApplyTransforms(), name="dwi_T1w_Tran")
        dwi_T1w_Tran.btn_string = 'dwi to T1w Transformation'
        # -d: dimension
        dwi_T1w_Tran.inputs.dimension = 3
        # -e: input image type
        dwi_T1w_Tran.inputs.input_image_type = 3
        # the -i, -o, -r, -t options are from workflow
        self.interfaces.append(dwi_T1w_Tran)
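The -r/-i inputs of b0_T1w_Reg and the -i/-o/-r/-t options of dwi_T1w_Tran are said to be set via the workflow. A minimal wiring sketch, using a placeholder IdentityInterface for the upstream images (an assumption; only b0_T1w_Reg and dwi_T1w_Tran come from the code above), could look like:

# Hypothetical wiring; inputnode stands in for the real eddy-corrected b0,
# T1 brain and 4D DWI sources of the pipeline.
from nipype import Node, Workflow
from nipype.interfaces.utility import IdentityInterface

inputnode = Node(IdentityInterface(fields=['b0', 't1_brain', 'dwi']), name='inputnode')

wf = Workflow(name='distortion_correction_sketch')
wf.connect(inputnode, 't1_brain', b0_T1w_Reg, 'fixed_image')                # -r
wf.connect(inputnode, 'b0', b0_T1w_Reg, 'moving_image')                     # -i
wf.connect(inputnode, 'dwi', dwi_T1w_Tran, 'input_image')                   # -i
wf.connect(inputnode, 'b0', dwi_T1w_Tran, 'reference_image')                # -r
wf.connect(b0_T1w_Reg, 'composite_transform', dwi_T1w_Tran, 'transforms')   # -t (single composite file)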
Exemplo n.º 25
# skullstrip the T1 structural image
skullstrip_structural_node = Node(SkullStrip(outputtype='NIFTI'),
                                  name='skullstrip')

# coreg_to_struct_space = Node(FLIRT(apply_xfm=True, reference=struct_image, interp="sinc"), name="coreg")
coreg_to_struct_space_node = Node(FLIRT(apply_xfm=True,
                                        interp="sinc",
                                        cost='mutualinfo'),
                                  name="coreg_to_struct_space")

# Warp whole head T1 Structural Image to MNI 152 template
warp_to_152_node = Node(legacy.GenWarpFields(similarity_metric="CC"),
                        name="warp152")

# coreg_to_template_space_node = Node(ApplyTransforms(reference_image=template, interpolation='BSpline'), name="coreg_to_template_space")
coreg_to_template_space_node = Node(ApplyTransforms(interpolation='BSpline'),
                                    name="coreg_to_template_space")

merge_transforms_node = Node(Merge(2), iterfield=['in2'], name="merge")

# Spatial smoothing
iso_smooth_node = Node(IsotropicSmooth(fwhm=4, output_type="NIFTI"),
                       name='isoSmooth')

#TODO: Use the data sink node in the pipeline
data_sink_node = Node(nio.DataSink(base_directory="results_dir",
                                   container='warp152_output',
                                   infields=['tt']),
                      name='dataSink')
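coreg_to_template_space_node is created above without reference_image or transforms. A minimal sketch of how the warp and affine from warp_to_152_node might be merged and fed in (the Workflow object, the template path and the warp-then-affine ordering are assumptions; the node names come from the code above):

# Hypothetical wiring; `template` is the MNI152 reference mentioned in the
# commented-out node definition above.
from nipype import Workflow

normwf = Workflow(name='warp152_sketch')
coreg_to_template_space_node.inputs.reference_image = template
normwf.connect(warp_to_152_node, 'warp_field', merge_transforms_node, 'in1')
normwf.connect(warp_to_152_node, 'affine_transformation', merge_transforms_node, 'in2')
normwf.connect(merge_transforms_node, 'out', coreg_to_template_space_node, 'transforms')
normwf.connect(coreg_to_struct_space_node, 'out_file', coreg_to_template_space_node, 'input_image')
normwf.connect(coreg_to_template_space_node, 'output_image', iso_smooth_node, 'in_file')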

Exemplo n.º 26
    def _run_interface(self, runtime):
        nii = nb.load(self.inputs.in_file)
        zooms = nii.header.get_zooms()
        size_diff = np.array(zooms[:3]) - (self.inputs.pixel_size - 0.1)
        if np.all(size_diff >= -1e-3):
            IFLOGGER.info('Voxel size is large enough')
            self._results['out_file'] = self.inputs.in_file
            if isdefined(self.inputs.in_mask):
                self._results['out_mask'] = self.inputs.in_mask
            return runtime

        IFLOGGER.info(
            'One or more voxel dimensions (%f, %f, %f) are smaller than '
            'the requested voxel size (%f) - diff=(%f, %f, %f)', zooms[0],
            zooms[1], zooms[2], self.inputs.pixel_size, size_diff[0],
            size_diff[1], size_diff[2])

        # Figure out new matrix
        # 1) Get base affine
        aff_base = nii.header.get_base_affine()
        aff_base_inv = np.linalg.inv(aff_base)

        # 2) Find center pixel in mm
        center_idx = (np.array(nii.shape[:3]) - 1) * 0.5
        center_mm = aff_base.dot(center_idx.tolist() + [1])

        # 3) Find extent of each dimension
        min_mm = aff_base.dot([-0.5, -0.5, -0.5, 1])
        max_mm = aff_base.dot((np.array(nii.shape[:3]) - 0.5).tolist() + [1])
        extent_mm = np.abs(max_mm - min_mm)[:3]

        # 4) Find new matrix size
        new_size = np.array(extent_mm / self.inputs.pixel_size, dtype=int)

        # 5) Initialize new base affine
        new_base = aff_base[:3, :3] * np.abs(
            aff_base_inv[:3, :3]) * self.inputs.pixel_size

        # 6) Find new center
        new_center_idx = (new_size - 1) * 0.5
        new_affine_base = np.eye(4)
        new_affine_base[:3, :3] = new_base
        new_affine_base[:3, 3] = center_mm[:3] - new_base.dot(new_center_idx)

        # 7) Rotate new matrix
        rotation = nii.affine.dot(aff_base_inv)
        new_affine = rotation.dot(new_affine_base)

        # 8) Generate new reference image
        hdr = nii.header.copy()
        hdr.set_data_shape(new_size)
        ref_file = 'resample_ref.nii.gz'
        nb.Nifti1Image(np.zeros(new_size, dtype=nii.get_data_dtype()),
                       new_affine, hdr).to_filename(ref_file)

        out_prefix, ext = op.splitext(op.basename(self.inputs.in_file))
        if ext == '.gz':
            out_prefix, ext2 = op.splitext(out_prefix)
            ext = ext2 + ext

        out_file = op.abspath('%s_resampled%s' % (out_prefix, ext))

        # 9) Resample new image
        ApplyTransforms(
            dimension=3,
            input_image=self.inputs.in_file,
            reference_image=ref_file,
            interpolation='LanczosWindowedSinc',
            transforms=[pkgrf('mriqc', 'data/itk_identity.tfm')],
            output_image=out_file,
        ).run()

        self._results['out_file'] = out_file

        if isdefined(self.inputs.in_mask):
            hdr = nii.header.copy()
            hdr.set_data_shape(new_size)
            hdr.set_data_dtype(np.uint8)
            ref_mask = 'mask_ref.nii.gz'
            nb.Nifti1Image(np.zeros(new_size, dtype=np.uint8), new_affine,
                           hdr).to_filename(ref_mask)

            out_mask = op.abspath('%s_resmask%s' % (out_prefix, ext))
            ApplyTransforms(
                dimension=3,
                input_image=self.inputs.in_mask,
                reference_image=ref_mask,
                interpolation='NearestNeighbor',
                transforms=[pkgrf('mriqc', 'data/itk_identity.tfm')],
                output_image=out_mask,
            ).run()

            self._results['out_mask'] = out_mask

        return runtime
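The same identity-transform resampling trick can be used outside the interface. A minimal standalone sketch (the input file name, the 2 mm target voxel size and the simplified axis-aligned reference grid are assumptions for illustration; the packaged itk_identity.tfm is replaced by an inline identity transform):

import nibabel as nb
import numpy as np
from nipype.interfaces.ants import ApplyTransforms

nii = nb.load('input.nii.gz')   # hypothetical input image
new_zoom = 2.0                  # target isotropic voxel size in mm (example value)

# Build a reference grid with the requested voxel size (a simplified version of
# steps 1-8 above: axis-aligned, no rotation or exact center preservation).
extent = np.array(nii.shape[:3]) * np.array(nii.header.get_zooms()[:3])
new_size = np.ceil(extent / new_zoom).astype(int)
new_affine = np.diag([new_zoom, new_zoom, new_zoom, 1.0])
new_affine[:3, 3] = nii.affine[:3, 3]
nb.Nifti1Image(np.zeros(new_size, dtype=np.float32), new_affine).to_filename('ref.nii.gz')

# Plain ITK identity transform written inline, so antsApplyTransforms only
# resamples onto the new grid.
with open('itk_identity.tfm', 'w') as f:
    f.write('#Insight Transform File V1.0\n'
            '#Transform 0\n'
            'Transform: MatrixOffsetTransformBase_double_3_3\n'
            'Parameters: 1 0 0 0 1 0 0 0 1 0 0 0\n'
            'FixedParameters: 0 0 0\n')

ApplyTransforms(dimension=3,
                input_image='input.nii.gz',
                reference_image='ref.nii.gz',
                interpolation='LanczosWindowedSinc',
                transforms=['itk_identity.tfm'],
                output_image='input_resampled.nii.gz').run()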
Exemplo n.º 27
def embedded_antsreg_2d(source_image,
                        target_image,
                        run_rigid=False,
                        rigid_iterations=1000,
                        run_affine=False,
                        affine_iterations=1000,
                        run_syn=True,
                        coarse_iterations=40,
                        medium_iterations=50,
                        fine_iterations=40,
                        cost_function='MutualInformation',
                        interpolation='NearestNeighbor',
                        convergence=1e-6,
                        ignore_affine=False,
                        ignore_header=False,
                        save_data=False,
                        overwrite=False,
                        output_dir=None,
                        file_name=None):
    """ Embedded ANTS Registration 2D

    Runs the rigid and/or Symmetric Normalization (SyN) algorithm of ANTs and
    formats the output deformations into voxel coordinate mappings as used in
    CBSTools registration and transformation routines.

    Parameters
    ----------
    source_image: niimg
        Image to register
    target_image: niimg
        Reference image to match
    run_rigid: bool
        Whether or not to run a rigid registration first (default is False)
    rigid_iterations: float
        Number of iterations in the rigid step (default is 1000)
    run_affine: bool
        Whether or not to run an affine registration first (default is False)
    affine_iterations: float
        Number of iterations in the affine step (default is 1000)
    run_syn: bool
        Whether or not to run a SyN registration (default is True)
    coarse_iterations: float
        Number of iterations at the coarse level (default is 40)
    medium_iterations: float
        Number of iterations at the medium level (default is 50)
    fine_iterations: float
        Number of iterations at the fine level (default is 40)
    cost_function: {'CrossCorrelation', 'MutualInformation'}
        Cost function for the registration (default is 'MutualInformation')
    interpolation: {'NearestNeighbor', 'Linear'}
        Interpolation for the registration result (default is 'NearestNeighbor')
    convergence: float
        Threshold for convergence; very strict values can make the algorithm
        very slow (default is 1e-6)
    ignore_affine: bool
        Ignore the affine matrix information extracted from the image header
        (default is False)
    ignore_header: bool
        Ignore the orientation information and affine matrix information
        extracted from the image header (default is False)
    save_data: bool
        Save output data to file (default is False)
    overwrite: bool
        Overwrite existing results (default is False)
    output_dir: str, optional
        Path to desired output directory, will be created if it doesn't exist
    file_name: str, optional
        Desired base name for output files with file extension
        (suffixes will be added)

    Returns
    ----------
    dict
        Dictionary collecting outputs under the following keys
        (suffix of output files in brackets)

        * transformed_source (niimg): Deformed source image (_ants-def)
        * mapping (niimg): Coordinate mapping from source to target (_ants-map)
        * inverse (niimg): Inverse coordinate mapping from target to source
          (_ants-invmap)

    Notes
    ----------
    Port of the CBSTools Java module by Pierre-Louis Bazin. The main algorithm
    is part of the ANTs software by Brian Avants and colleagues [1]_. The
    interfacing with ANTs is performed through Nipype [2]_. Parameters have been
    set to values commonly found in neuroimaging scripts online, but not
    necessarily optimal.

    References
    ----------
    .. [1] Avants et al (2008), Symmetric diffeomorphic
       image registration with cross-correlation: evaluating automated labeling
       of elderly and neurodegenerative brain, Med Image Anal. 12(1):26-41
    .. [2] Gorgolewski et al (2011) Nipype: a flexible, lightweight and
       extensible neuroimaging data processing framework in python.
       Front Neuroinform 5. doi:10.3389/fninf.2011.00013
    """

    print('\nEmbedded ANTs Registration')

    # for external tools: nipype
    try:
        from nipype.interfaces.ants import Registration
        from nipype.interfaces.ants import ApplyTransforms
    except ImportError:
        print(
            'Error: Nipype and/or ANTS could not be imported, they are required'
            + ' in order to run this module. \n (aborting)')
        return None

    # make sure that saving related parameters are correct
    output_dir = _output_dir_4saving(
        output_dir, source_image)  # needed for intermediate results
    if save_data:
        transformed_source_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-def'))

        mapping_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-map'))

        inverse_mapping_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='ants-invmap'))
        if overwrite is False \
            and os.path.isfile(transformed_source_file) \
            and os.path.isfile(mapping_file) \
            and os.path.isfile(inverse_mapping_file) :

            print("skip computation (use existing results)")
            output = {
                'transformed_source': load_volume(transformed_source_file),
                'mapping': load_volume(mapping_file),
                'inverse': load_volume(inverse_mapping_file)
            }
            return output

    # load and get dimensions and resolution from input images
    source = load_volume(source_image)
    src_affine = source.affine
    src_header = source.header
    nsx = source.header.get_data_shape()[X]
    nsy = source.header.get_data_shape()[Y]
    nsz = 1
    rsx = source.header.get_zooms()[X]
    rsy = source.header.get_zooms()[Y]
    rsz = 1

    target = load_volume(target_image)
    trg_affine = target.affine
    trg_header = target.header
    ntx = target.header.get_data_shape()[X]
    nty = target.header.get_data_shape()[Y]
    ntz = 1
    rtx = target.header.get_zooms()[X]
    rty = target.header.get_zooms()[Y]
    rtz = 1

    # in case the affine transformations are not to be trusted: make them equal
    if ignore_affine or ignore_header:
        mx = np.argmax(np.abs(src_affine[0][0:3]))
        my = np.argmax(np.abs(src_affine[1][0:3]))
        mz = np.argmax(np.abs(src_affine[2][0:3]))
        new_affine = np.zeros((4, 4))
        if ignore_header:
            new_affine[0][0] = rsx
            new_affine[1][1] = rsy
            new_affine[2][2] = rsz
            new_affine[0][3] = -rsx * nsx / 2.0
            new_affine[1][3] = -rsy * nsy / 2.0
            new_affine[2][3] = -rsz * nsz / 2.0
        else:
            new_affine[0][mx] = rsx * np.sign(src_affine[0][mx])
            new_affine[1][my] = rsy * np.sign(src_affine[1][my])
            new_affine[2][mz] = rsz * np.sign(src_affine[2][mz])
            if (np.sign(src_affine[0][mx]) < 0):
                new_affine[0][3] = rsx * nsx / 2.0
            else:
                new_affine[0][3] = -rsx * nsx / 2.0

            if (np.sign(src_affine[1][my]) < 0):
                new_affine[1][3] = rsy * nsy / 2.0
            else:
                new_affine[1][3] = -rsy * nsy / 2.0

            if (np.sign(src_affine[2][mz]) < 0):
                new_affine[2][3] = rsz * nsz / 2.0
            else:
                new_affine[2][3] = -rsz * nsz / 2.0
        #if (np.sign(src_affine[0][mx])<0): new_affine[mx][3] = rsx*nsx
        #if (np.sign(src_affine[1][my])<0): new_affine[my][3] = rsy*nsy
        #if (np.sign(src_affine[2][mz])<0): new_affine[mz][3] = rsz*nsz
        #new_affine[0][3] = nsx/2.0
        #new_affine[1][3] = nsy/2.0
        #new_affine[2][3] = nsz/2.0
        new_affine[3][3] = 1.0

        src_img = nb.Nifti1Image(source.get_data(), new_affine, source.header)
        src_img.update_header()
        src_img_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='tmp_srcimg'))
        save_volume(src_img_file, src_img)
        source = load_volume(src_img_file)
        src_affine = source.affine
        src_header = source.header

        # create generic affine aligned with the orientation for the target
        mx = np.argmax(np.abs(trg_affine[0][0:3]))
        my = np.argmax(np.abs(trg_affine[1][0:3]))
        mz = np.argmax(np.abs(trg_affine[2][0:3]))
        new_affine = np.zeros((4, 4))
        if ignore_header:
            new_affine[0][0] = rtx
            new_affine[1][1] = rty
            new_affine[2][2] = rtz
            new_affine[0][3] = -rtx * ntx / 2.0
            new_affine[1][3] = -rty * nty / 2.0
            new_affine[2][3] = -rtz * ntz / 2.0
        else:
            new_affine[0][mx] = rtx * np.sign(trg_affine[0][mx])
            new_affine[1][my] = rty * np.sign(trg_affine[1][my])
            new_affine[2][mz] = rtz * np.sign(trg_affine[2][mz])
            if (np.sign(trg_affine[0][mx]) < 0):
                new_affine[0][3] = rtx * ntx / 2.0
            else:
                new_affine[0][3] = -rtx * ntx / 2.0

            if (np.sign(trg_affine[1][my]) < 0):
                new_affine[1][3] = rty * nty / 2.0
            else:
                new_affine[1][3] = -rty * nty / 2.0

            if (np.sign(trg_affine[2][mz]) < 0):
                new_affine[2][3] = rtz * ntz / 2.0
            else:
                new_affine[2][3] = -rtz * ntz / 2.0
        #if (np.sign(trg_affine[0][mx])<0): new_affine[mx][3] = rtx*ntx
        #if (np.sign(trg_affine[1][my])<0): new_affine[my][3] = rty*nty
        #if (np.sign(trg_affine[2][mz])<0): new_affine[mz][3] = rtz*ntz
        #new_affine[0][3] = ntx/2.0
        #new_affine[1][3] = nty/2.0
        #new_affine[2][3] = ntz/2.0
        new_affine[3][3] = 1.0

        trg_img = nb.Nifti1Image(target.get_data(), new_affine, target.header)
        trg_img.update_header()
        trg_img_file = os.path.join(
            output_dir,
            _fname_4saving(file_name=file_name,
                           rootfile=source_image,
                           suffix='tmp_trgimg'))
        save_volume(trg_img_file, trg_img)
        target = load_volume(trg_img_file)
        trg_affine = target.affine
        trg_header = target.header

    # build coordinate mapping matrices and save them to disk
    src_coord = np.zeros((nsx, nsy, 2))
    trg_coord = np.zeros((ntx, nty, 2))
    for x in range(nsx):
        for y in range(nsy):
            src_coord[x, y, X] = x
            src_coord[x, y, Y] = y
    src_map = nb.Nifti1Image(src_coord, source.affine, source.header)
    src_map_file = os.path.join(
        output_dir,
        _fname_4saving(file_name=file_name,
                       rootfile=source_image,
                       suffix='tmp_srccoord'))
    save_volume(src_map_file, src_map)
    for x in range(ntx):
        for y in range(nty):
            trg_coord[x, y, X] = x
            trg_coord[x, y, Y] = y
    trg_map = nb.Nifti1Image(trg_coord, target.affine, target.header)
    trg_map_file = os.path.join(
        output_dir,
        _fname_4saving(file_name=file_name,
                       rootfile=source_image,
                       suffix='tmp_trgcoord'))
    save_volume(trg_map_file, trg_map)

    # run the main ANTS software
    reg = Registration()
    reg.inputs.dimension = 2

    # add a prefix to avoid multiple names?
    prefix = _fname_4saving(file_name=file_name,
                            rootfile=source_image,
                            suffix='tmp_syn')
    prefix = os.path.basename(prefix)
    prefix = prefix.split(".")[0]
    reg.inputs.output_transform_prefix = prefix
    reg.inputs.fixed_image = [target.get_filename()]
    reg.inputs.moving_image = [source.get_filename()]

    print("registering " + source.get_filename() + "\n to " +
          target.get_filename())

    if run_rigid is True and run_affine is True and run_syn is True:
        reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
        reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [
            [rigid_iterations, rigid_iterations, rigid_iterations],
            [affine_iterations, affine_iterations, affine_iterations],
            [
                coarse_iterations, coarse_iterations, medium_iterations,
                fine_iterations
            ]
        ]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [32, 32, 32]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[4, 2, 1]] + [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[3, 2, 1]
                                                     ] + [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence
                                                            ] + [convergence]
        reg.inputs.convergence_window_size = [10] + [10] + [5]
        reg.inputs.use_histogram_matching = [False] + [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is True and run_affine is False and run_syn is True:
        reg.inputs.transforms = ['Rigid', 'SyN']
        reg.inputs.transform_parameters = [(0.1, ), (0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [
            [rigid_iterations, rigid_iterations, rigid_iterations],
            [
                coarse_iterations, coarse_iterations, medium_iterations,
                fine_iterations
            ]
        ]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence]
        reg.inputs.convergence_window_size = [10] + [5]
        reg.inputs.use_histogram_matching = [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is True and run_syn is True:
        reg.inputs.transforms = ['Affine', 'SyN']
        reg.inputs.transform_parameters = [(0.1, ), (0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [
            [affine_iterations, affine_iterations, affine_iterations],
            [
                coarse_iterations, coarse_iterations, medium_iterations,
                fine_iterations
            ]
        ]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [64, 64]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence]
        reg.inputs.convergence_window_size = [10] + [5]
        reg.inputs.use_histogram_matching = [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    if run_rigid is True and run_affine is True and run_syn is False:
        reg.inputs.transforms = ['Rigid', 'Affine']
        reg.inputs.transform_parameters = [(0.1, ), (0.1, )]
        reg.inputs.number_of_iterations = [[
            rigid_iterations, rigid_iterations, rigid_iterations
        ], [affine_iterations, affine_iterations, affine_iterations]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC', 'CC']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [5, 5]
        else:
            reg.inputs.metric = ['MI', 'MI']
            reg.inputs.metric_weight = [1.0, 1.0]
            reg.inputs.radius_or_number_of_bins = [32, 32]
        reg.inputs.shrink_factors = [[4, 2, 1]] + [[4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]] + [[3, 2, 1]]
        reg.inputs.sampling_strategy = ['Random'] + ['Random']
        reg.inputs.sampling_percentage = [0.3] + [0.3]
        reg.inputs.convergence_threshold = [convergence] + [convergence]
        reg.inputs.convergence_window_size = [10] + [10]
        reg.inputs.use_histogram_matching = [False] + [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is True and run_affine is False and run_syn is False:
        reg.inputs.transforms = ['Rigid']
        reg.inputs.transform_parameters = [(0.1, )]
        reg.inputs.number_of_iterations = [[
            rigid_iterations, rigid_iterations, rigid_iterations
        ]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [5]
        else:
            reg.inputs.metric = ['MI']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.shrink_factors = [[4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]]
        reg.inputs.sampling_strategy = ['Random']
        reg.inputs.sampling_percentage = [0.3]
        reg.inputs.convergence_threshold = [convergence]
        reg.inputs.convergence_window_size = [10]
        reg.inputs.use_histogram_matching = [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is True and run_syn is False:
        reg.inputs.transforms = ['Affine']
        reg.inputs.transform_parameters = [(0.1, )]
        reg.inputs.number_of_iterations = [[
            affine_iterations, affine_iterations, affine_iterations
        ]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [5]
        else:
            reg.inputs.metric = ['MI']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.shrink_factors = [[4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[3, 2, 1]]
        reg.inputs.sampling_strategy = ['Random']
        reg.inputs.sampling_percentage = [0.3]
        reg.inputs.convergence_threshold = [convergence]
        reg.inputs.convergence_window_size = [10]
        reg.inputs.use_histogram_matching = [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is False and run_syn is True:
        reg.inputs.transforms = ['SyN']
        reg.inputs.transform_parameters = [(0.2, 3.0, 0.0)]
        reg.inputs.number_of_iterations = [[
            coarse_iterations, coarse_iterations, medium_iterations,
            fine_iterations
        ]]
        if (cost_function == 'CrossCorrelation'):
            reg.inputs.metric = ['CC']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [5]
        else:
            reg.inputs.metric = ['MI']
            reg.inputs.metric_weight = [1.0]
            reg.inputs.radius_or_number_of_bins = [32]
        reg.inputs.shrink_factors = [[8, 4, 2, 1]]
        reg.inputs.smoothing_sigmas = [[2, 1, 0.5, 0]]
        reg.inputs.sampling_strategy = ['Random']
        reg.inputs.sampling_percentage = [0.3]
        reg.inputs.convergence_threshold = [convergence]
        reg.inputs.convergence_window_size = [10]
        reg.inputs.use_histogram_matching = [False]
        reg.inputs.winsorize_lower_quantile = 0.001
        reg.inputs.winsorize_upper_quantile = 0.999

    elif run_rigid is False and run_affine is False and run_syn is False:
        reg.inputs.transforms = ['Rigid']
        reg.inputs.transform_parameters = [(0.1, )]
        reg.inputs.number_of_iterations = [[0]]
        reg.inputs.metric = ['CC']
        reg.inputs.metric_weight = [1.0]
        reg.inputs.radius_or_number_of_bins = [5]
        reg.inputs.shrink_factors = [[1]]
        reg.inputs.smoothing_sigmas = [[1]]

    print(reg.cmdline)
    result = reg.run()

    # Transforms the moving image
    at = ApplyTransforms()
    at.inputs.dimension = 2
    at.inputs.input_image = source.get_filename()
    at.inputs.reference_image = target.get_filename()
    at.inputs.interpolation = interpolation
    at.inputs.transforms = result.outputs.forward_transforms
    at.inputs.invert_transform_flags = result.outputs.forward_invert_flags
    print(at.cmdline)
    transformed = at.run()

    # Create coordinate mappings
    src_at = ApplyTransforms()
    src_at.inputs.dimension = 2
    src_at.inputs.input_image_type = 3
    src_at.inputs.input_image = src_map.get_filename()
    src_at.inputs.reference_image = target.get_filename()
    src_at.inputs.interpolation = 'Linear'
    src_at.inputs.transforms = result.outputs.forward_transforms
    src_at.inputs.invert_transform_flags = result.outputs.forward_invert_flags
    mapping = src_at.run()

    trg_at = ApplyTransforms()
    trg_at.inputs.dimension = 2
    trg_at.inputs.input_image_type = 3
    trg_at.inputs.input_image = trg_map.get_filename()
    trg_at.inputs.reference_image = source.get_filename()
    trg_at.inputs.interpolation = 'Linear'
    trg_at.inputs.transforms = result.outputs.reverse_transforms
    trg_at.inputs.invert_transform_flags = result.outputs.reverse_invert_flags
    inverse = trg_at.run()

    # pad coordinate mapping outside the image? hopefully not needed...

    # collect outputs and potentially save
    transformed_img = nb.Nifti1Image(
        nb.load(transformed.outputs.output_image).get_data(), target.affine,
        target.header)
    mapping_img = nb.Nifti1Image(
        nb.load(mapping.outputs.output_image).get_data(), target.affine,
        target.header)
    inverse_img = nb.Nifti1Image(
        nb.load(inverse.outputs.output_image).get_data(), source.affine,
        source.header)

    outputs = {
        'transformed_source': transformed_img,
        'mapping': mapping_img,
        'inverse': inverse_img
    }

    # clean-up intermediate files
    os.remove(src_map_file)
    os.remove(trg_map_file)
    if ignore_affine or ignore_header:
        os.remove(src_img_file)
        os.remove(trg_img_file)

    for name in result.outputs.forward_transforms:
        if os.path.exists(name): os.remove(name)
    for name in result.outputs.reverse_transforms:
        if os.path.exists(name): os.remove(name)
    os.remove(transformed.outputs.output_image)
    os.remove(mapping.outputs.output_image)
    os.remove(inverse.outputs.output_image)

    if save_data:
        save_volume(transformed_source_file, transformed_img)
        save_volume(mapping_file, mapping_img)
        save_volume(inverse_mapping_file, inverse_img)

    return outputs
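A minimal usage sketch of the function above (the file names and output directory are assumptions):

# Hypothetical call on two single-slice NIfTI files.
outputs = embedded_antsreg_2d('slice_moving.nii.gz', 'slice_fixed.nii.gz',
                              run_rigid=True, run_syn=True,
                              cost_function='MutualInformation',
                              interpolation='Linear',
                              save_data=True, output_dir='/tmp/ants2d')
deformed = outputs['transformed_source']   # moving image resampled into target space
fwd_map = outputs['mapping']               # source-to-target coordinate mapping
inv_map = outputs['inverse']               # target-to-source coordinate mapping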
Exemplo n.º 28
template = abspath(args['template'])
bias_correction_node = Node(N4BiasFieldCorrection(), name='bias_correction')
generate_transforms_node = Node(legacy.GenWarpFields(reference_image=template),
                                name='generate_transforms')
merge_transforms_node = Node(Merge(2),
                             iterfield='in2',
                             name='merge_transforms')

if args['derivatives'] is not None:
    merge_input_files_node = Node(Merge(len(derivatives_names)),
                                  name='merge_input_files')

map_apply_node = MapNode(
    interface=ApplyTransforms(reference_image=template,
                              interpolation='BSpline',
                              dimension=3,
                              input_image_type=3),
    iterfield=['input_image'],
    name='map_apply_node'
)  # When applying 3D transforms to a 4D image, set input_image_type to 3 (setting dimension to 3 was recommended as well)

transform_images = Workflow(name='cpac_epi_reg', base_dir=args['results_dir'])

if args['derivatives'] is not None:
    if 'bias_mask' in temps_dict:
        transform_images.connect([(data_grabber_node, bias_correction_node,
                                   [('bias_mask', 'mask_image')])])
        transform_images.connect([(data_grabber_node, bias_correction_node,
                                   [('mean', 'input_image')])])
        transform_images.connect([
            (bias_correction_node, generate_transforms_node, [('output_image',
Exemplo n.º 29
bbregister = Node(BBRegister(init='fsl', contrast_type='t2',
                             out_fsl_file=True),
                  name='bbregister')

# Convert the BBRegister transformation to ANTS ITK format
convert2itk = Node(C3dAffineTool(fsl2ras=True, itk_transform=True),
                   name='convert2itk')

# Concatenate BBRegister's and ANTS' transforms into a list
merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')

# Transform the contrast images. First to anatomical and then to the target
warpall = MapNode(ApplyTransforms(args='--float',
                                  input_image_type=3,
                                  interpolation='Linear',
                                  invert_transform_flags=[False, False],
                                  num_threads=1,
                                  reference_image=template,
                                  terminal_output='file'),
                  name='warpall',
                  iterfield=['input_image'])

# Transform the mean image. First to anatomical and then to the target
warpmean = Node(ApplyTransforms(args='--float',
                                input_image_type=3,
                                interpolation='Linear',
                                invert_transform_flags=[False, False],
                                num_threads=1,
                                reference_image=template,
                                terminal_output='file'),
                name='warpmean')
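The nodes above still have to be wired together. A minimal sketch, using a placeholder IdentityInterface for the functional/anatomical inputs and a hypothetical antsreg node for the anatomical-to-template registration (assumptions; only bbregister, convert2itk, merge, warpall and warpmean come from the code above):

from nipype import Node, Workflow
from nipype.interfaces.ants import Registration
from nipype.interfaces.utility import IdentityInterface

inputnode = Node(IdentityInterface(fields=['mean_func', 'anat', 'contrasts']),
                 name='inputnode')
# anat -> template registration (settings omitted in this sketch)
antsreg = Node(Registration(write_composite_transform=True), name='antsreg')

normflow = Workflow(name='normalization_sketch')
normflow.connect(inputnode, 'mean_func', bbregister, 'source_file')
normflow.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
normflow.connect(inputnode, 'mean_func', convert2itk, 'source_file')
normflow.connect(inputnode, 'anat', convert2itk, 'reference_file')
normflow.connect(antsreg, 'composite_transform', merge, 'in1')  # anat -> template
normflow.connect(convert2itk, 'itk_transform', merge, 'in2')    # func -> anat
normflow.connect(merge, 'out', warpall, 'transforms')
normflow.connect(merge, 'out', warpmean, 'transforms')
normflow.connect(inputnode, 'contrasts', warpall, 'input_image')
normflow.connect(inputnode, 'mean_func', warpmean, 'input_image')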
Exemplo n.º 30
fwhm = [4, 8]

# MNI template
template = '/home/zohyos7/fmri/mni_icbm152_nlin_asym_09c/1mm_T1.nii.gz'

if __name__ == '__main__':
    if len(sys.argv) == 1:
        raise RuntimeError('Should pass subject IDs.')

    subject_list = sys.argv[1:]

    # Apply Transformation Node
    apply_norm_bold = MapNode(ApplyTransforms(args='--float',
                                              input_image_type=3,
                                              interpolation='BSpline',
                                              invert_transform_flags=[False],
                                              num_threads=8,
                                              reference_image=template,
                                              terminal_output='file'),
                              name='apply_norm_bold',
                              iterfield=['input_image'])

    apply_norm_anat = MapNode(ApplyTransforms(args='--float',
                                              input_image_type=3,
                                              interpolation='BSpline',
                                              invert_transform_flags=[False],
                                              num_threads=8,
                                              reference_image=template,
                                              terminal_output='file'),
                              name='apply_norm_anat',
                              iterfield=['input_image'])
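The two MapNodes above still need their input_image and transforms set per subject. A minimal sketch of driving apply_norm_bold directly (the file-name patterns are assumptions about the data layout; only the node, template and subject_list come from the code above):

    # Hypothetical per-subject inputs; paths are assumptions about the layout.
    for subj in subject_list:
        apply_norm_bold.inputs.input_image = [
            f'/home/zohyos7/fmri/output/{subj}/run-{run}_bold_space-T1w_preproc.nii.gz'
            for run in (1, 2)]
        apply_norm_bold.inputs.transforms = [
            f'/home/zohyos7/fmri/output/{subj}/anat2mni_Composite.h5']
        apply_norm_bold.run()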
Exemplo n.º 31
    def workflow(self):

        datasource = self.data_source
        dict_sequences = self.dict_sequences
        nipype_cache = self.nipype_cache
        result_dir = self.result_dir
        sub_id = self.sub_id

        toreg = {**dict_sequences['MR-RT'], **dict_sequences['OT']}
        workflow = nipype.Workflow('registration_workflow',
                                   base_dir=nipype_cache)
        datasink = nipype.Node(nipype.DataSink(base_directory=result_dir),
                               "datasink")
        substitutions = [('subid', sub_id)]
        substitutions += [('results/', '{}/'.format(self.workflow_name))]

        mr_rt_ref = None
        rtct = None

        if dict_sequences['MR-RT'] and self.normilize_mr_rt:
            ref_session = list(dict_sequences['MR-RT'].keys())[0]
            ref_scans = dict_sequences['MR-RT'][ref_session]['scans']
            for pr in POSSIBLE_REF:
                for scan in ref_scans:
                    if pr in scan.split('_')[0]:
                        mr_rt_ref = '{0}_{1}_preproc'.format(
                            ref_session,
                            scan.split('_')[0])
                        mr_rt_ref_name = '{}_preproc'.format(
                            scan.split('_')[0])
                        break
                else:
                    continue
                break
        if dict_sequences['RT'] and self.normilize_rtct:
            rt_session = list(dict_sequences['RT'].keys())[0]
            ct_name = dict_sequences['RT'][rt_session]['rtct']
            if ct_name is not None and mr_rt_ref is not None:
                rtct = '{0}_rtct'.format(rt_session, ct_name)
                reg_mr2ct = nipype.Node(interface=AntsRegSyn(),
                                        name='{}_lin_reg'.format(rt_session))
                reg_mr2ct.inputs.transformation = 'r'
                reg_mr2ct.inputs.num_dimensions = 3
                reg_mr2ct.inputs.num_threads = 4
                reg_mr2ct.inputs.out_prefix = '{}_reg2RTCT'.format(
                    mr_rt_ref_name)
                reg_mr2ct.inputs.interpolation = 'BSpline'
                workflow.connect(datasource, mr_rt_ref, reg_mr2ct,
                                 'input_file')
                workflow.connect(datasource, rtct, reg_mr2ct, 'ref_file')
                workflow.connect(
                    reg_mr2ct, 'regmat', datasink,
                    'results.subid.{0}.@{1}_reg2RTCT_mat'.format(
                        ref_session, mr_rt_ref_name))
                workflow.connect(
                    reg_mr2ct, 'reg_file', datasink,
                    'results.subid.{0}.@{1}_reg2RTCT'.format(
                        ref_session, mr_rt_ref_name))
                substitutions += [
                    ('{}_reg2RTCTWarped.nii.gz'.format(mr_rt_ref_name),
                     '{}_reg2RTCT.nii.gz'.format(mr_rt_ref_name))
                ]
                substitutions += [
                    ('{}_reg2RTCT0GenericAffine.mat'.format(mr_rt_ref_name),
                     '{}_reg2RTCT_linear_mat.mat'.format(mr_rt_ref_name))
                ]

        for key in toreg:
            session = toreg[key]
            if session['scans'] is not None:
                scans = session['scans']
                scans = [x for x in scans if 'mask' not in x]
                ref = None
                for pr in POSSIBLE_REF:
                    for scan in scans:
                        if pr in scan:
                            ref = '{0}_{1}_preproc'.format(
                                key,
                                scan.split('_')[0])
                            scans.remove('{}_preproc'.format(
                                scan.split('_')[0]))
                            ref_name = scan.split('_')[0]
                            workflow.connect(
                                datasource, ref, datasink,
                                'results.subid.{0}.@{1}_reg'.format(
                                    key, ref_name))
                            substitutions += [
                                ('{}_preproc'.format(scan.split('_')[0]),
                                 '{}_reg'.format(scan.split('_')[0]))
                            ]
                            break
                    else:
                        continue
                    break
                if ref is not None:
                    if mr_rt_ref is not None and key != ref_session:
                        reg_mr_rt = nipype.Node(interface=AntsRegSyn(),
                                                name='{}_def_reg'.format(key))
                        reg_mr_rt.inputs.transformation = 's'
                        reg_mr_rt.inputs.num_dimensions = 3
                        reg_mr_rt.inputs.num_threads = 6
                        reg_mr_rt.inputs.out_prefix = '{}_reg2MR_RT'.format(
                            ref_name)
                        workflow.connect(datasource, ref, reg_mr_rt,
                                         'input_file')
                        workflow.connect(datasource, mr_rt_ref, reg_mr_rt,
                                         'ref_file')
                        workflow.connect(
                            reg_mr_rt, 'regmat', datasink,
                            'results.subid.{0}.@{1}_reg2MR_RT_linear_mat'.
                            format(key, ref_name))
                        workflow.connect(
                            reg_mr_rt, 'reg_file', datasink,
                            'results.subid.{0}.@{1}_reg2MR_RT'.format(
                                key, ref_name))
                        workflow.connect(
                            reg_mr_rt, 'warp_file', datasink,
                            'results.subid.{0}.@{1}_reg2MR_RT_warp'.format(
                                key, ref_name))
                        substitutions += [
                            ('{}_reg2MR_RT0GenericAffine.mat'.format(ref_name),
                             '{}_reg2MR_RT_linear_mat.mat'.format(ref_name))
                        ]
                        substitutions += [
                            ('{}_reg2MR_RT1Warp.nii.gz'.format(ref_name),
                             '{}_reg2MR_RT_warp.nii.gz'.format(ref_name))
                        ]
                        substitutions += [
                            ('{}_reg2MR_RTWarped.nii.gz'.format(ref_name),
                             '{}_reg2MR_RT.nii.gz'.format(ref_name))
                        ]
                    if rtct is not None and key != ref_session:
                        apply_ts_rt_ref = nipype.Node(
                            interface=ApplyTransforms(),
                            name='{}_norm2RT'.format(ref_name))
                        apply_ts_rt_ref.inputs.output_image = (
                            '{}_reg2RTCT.nii.gz'.format(ref_name))
                        workflow.connect(datasource, ref, apply_ts_rt_ref,
                                         'input_image')
                        workflow.connect(datasource, rtct, apply_ts_rt_ref,
                                         'reference_image')
                        workflow.connect(
                            apply_ts_rt_ref, 'output_image', datasink,
                            'results.subid.{0}.@{1}_reg2RTCT'.format(
                                key, ref_name))
                        merge_rt_ref = nipype.Node(
                            interface=Merge(4),
                            name='{}_merge_rt'.format(ref_name))
                        merge_rt_ref.inputs.ravel_inputs = True
                        workflow.connect(reg_mr2ct, 'regmat', merge_rt_ref,
                                         'in1')
                        workflow.connect(reg_mr_rt, 'regmat', merge_rt_ref,
                                         'in3')
                        workflow.connect(reg_mr_rt, 'warp_file', merge_rt_ref,
                                         'in2')
                        workflow.connect(merge_rt_ref, 'out', apply_ts_rt_ref,
                                         'transforms')

                    for el in scans:
                        el = el.strip(self.extention)
                        el_name = el.split('_')[0]
                        node_name = '{0}_{1}'.format(key, el)
                        reg = nipype.Node(interface=AntsRegSyn(),
                                          name='{}_lin_reg'.format(node_name))
                        reg.inputs.transformation = 'r'
                        reg.inputs.num_dimensions = 3
                        reg.inputs.num_threads = 4
                        reg.inputs.interpolation = 'BSpline'
                        reg.inputs.out_prefix = '{}_reg'.format(el_name)
                        workflow.connect(datasource, node_name, reg,
                                         'input_file')
                        workflow.connect(datasource, ref, reg, 'ref_file')
                        workflow.connect(
                            reg, 'reg_file', datasink,
                            'results.subid.{0}.@{1}_reg'.format(key, el_name))
                        workflow.connect(
                            reg, 'regmat', datasink,
                            'results.subid.{0}.@{1}_regmat'.format(
                                key, el_name))
                        substitutions += [
                            ('{}_regWarped.nii.gz'.format(el_name),
                             '{}_reg.nii.gz'.format(el_name))
                        ]
                        substitutions += [
                            ('{}_reg0GenericAffine.mat'.format(el_name),
                             '{}_linear_regmat.mat'.format(el_name))
                        ]
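                        # If an MR reference for the RT session exists, push the
                        # scan into that space by stacking the inter-session warp
                        # and affine on top of the scan-to-reference affine
                        # computed above.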
                        if mr_rt_ref is not None and key != ref_session:
                            merge = nipype.Node(
                                interface=Merge(3),
                                name='{}_merge_MR_RT'.format(node_name))
                            merge.inputs.ravel_inputs = True
                            workflow.connect(reg, 'regmat', merge, 'in3')
                            workflow.connect(reg_mr_rt, 'regmat', merge, 'in2')
                            workflow.connect(reg_mr_rt, 'warp_file', merge,
                                             'in1')
                            apply_ts = nipype.Node(
                                interface=ApplyTransforms(),
                                name='{}_norm2MR_RT'.format(node_name))
                            apply_ts.inputs.output_image = '{}_reg2MR_RT.nii.gz'.format(
                                el_name)
                            workflow.connect(merge, 'out', apply_ts,
                                             'transforms')
                            workflow.connect(datasource, node_name, apply_ts,
                                             'input_image')
                            workflow.connect(datasource, mr_rt_ref, apply_ts,
                                             'reference_image')
                            workflow.connect(
                                apply_ts, 'output_image', datasink,
                                'results.subid.{0}.@{1}_reg2MR_RT'.format(
                                    key, el_name))
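                        # Resample the scan into the RT CT space as well; the
                        # transform stack depends on whether this is the reference
                        # session (see the two merge nodes below).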
                        if rtct is not None:
                            apply_ts_rt = nipype.Node(
                                interface=ApplyTransforms(),
                                name='{}_norm2RT'.format(node_name))
                            apply_ts_rt.inputs.output_image = '{}_reg2RTCT.nii.gz'.format(
                                el_name)
                            workflow.connect(datasource, node_name,
                                             apply_ts_rt, 'input_image')
                            workflow.connect(datasource, rtct, apply_ts_rt,
                                             'reference_image')
                            workflow.connect(
                                apply_ts_rt, 'output_image', datasink,
                                'results.subid.{0}.@{1}_reg2RTCT'.format(
                                    key, el_name))
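                            # Non-reference sessions need the full chain (MR-to-CT
                            # affine, inter-session warp + affine, scan-to-reference
                            # affine); the reference session only needs the MR-to-CT
                            # and scan-to-reference affines. ANTs applies the stack
                            # in reverse order: the last transform in the list is
                            # applied to the input image first.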
                            if key != ref_session:
                                merge_rt = nipype.Node(
                                    interface=Merge(4),
                                    name='{}_merge_rt'.format(node_name))
                                merge_rt.inputs.ravel_inputs = True
                                workflow.connect(reg_mr2ct, 'regmat', merge_rt,
                                                 'in1')
                                workflow.connect(reg, 'regmat', merge_rt,
                                                 'in4')
                                workflow.connect(reg_mr_rt, 'regmat', merge_rt,
                                                 'in3')
                                workflow.connect(reg_mr_rt, 'warp_file',
                                                 merge_rt, 'in2')
                                workflow.connect(merge_rt, 'out', apply_ts_rt,
                                                 'transforms')
                            else:
                                merge_rt = nipype.Node(
                                    interface=Merge(2),
                                    name='{}_merge_rt'.format(node_name))
                                merge_rt.inputs.ravel_inputs = True
                                workflow.connect(reg_mr2ct, 'regmat', merge_rt,
                                                 'in1')
                                workflow.connect(reg, 'regmat', merge_rt,
                                                 'in2')
                                workflow.connect(merge_rt, 'out', apply_ts_rt,
                                                 'transforms')

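        # The accumulated (old_name, new_name) pairs are applied by the DataSink
        # when files are copied to the results folder, replacing the raw ANTs
        # output names with the *_reg / *_regmat / *_reg2* names used above.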
        datasink.inputs.substitutions = substitutions

        return workflow