Example #1
def preprocessing_t1w(bids_directory,
                      caps_directory,
                      tsv,
                      working_directory=None):
    """
     This preprocessing pipeline includes globally three steps:
     1) N4 bias correction (performed with ANTS).
     2) Linear registration to MNI (MNI icbm152 nlinear sym template)
        (performed with ANTS) - RegistrationSynQuick.
     3) Cropping the background (in order to save computational power).
     4) Histogram-based intensity normalization. This is a custom function
        performed by the binary ImageMath included with ANTS.

     Parameters
     ----------
     bids_directory: str
        Folder with BIDS structure.
     caps_directory: str
        Folder where CAPS structure will be stored.
     working_directory: str
        Folder containing a temporary space to save intermediate results.
    """

    from os.path import dirname, join, abspath, split, exists
    from os import pardir
    from clinica.utils.inputs import check_bids_folder
    from clinica.utils.participant import get_subject_session_list
    from clinica.utils.filemanip import get_subject_id
    from clinica.utils.exceptions import ClinicaBIDSError, ClinicaException
    from clinica.utils.inputs import clinica_file_reader
    from clinica.utils.input_files import T1W_NII
    from clinicadl.tools.inputs.input import fetch_file
    from clinicadl.tools.inputs.input import RemoteFileStructure
    import nipype.pipeline.engine as npe
    import nipype.interfaces.utility as nutil
    from nipype.interfaces import ants

    check_bids_folder(bids_directory)
    input_dir = bids_directory
    is_bids_dir = True
    base_dir = working_directory

    root = dirname(abspath(join(abspath(__file__), pardir)))
    path_to_mask = join(root, 'resources', 'masks')
    url_aramis = 'https://aramislab.paris.inria.fr/files/data/img_t1_linear/'
    FILE1 = RemoteFileStructure(
            filename='ref_cropped_template.nii.gz',
            url=url_aramis,
            checksum='67e1e7861805a8fd35f7fcf2bdf9d2a39d7bcb2fd5a201016c4d2acdd715f5b3'
            )
    FILE2 = RemoteFileStructure(
            filename='mni_icbm152_t1_tal_nlin_sym_09c.nii',
            url=url_aramis,
            checksum='93359ab97c1c027376397612a9b6c30e95406c15bf8695bd4a8efcb2064eaa34'
            )

    ref_template = join(path_to_mask, FILE2.filename)
    ref_crop = join(path_to_mask, FILE1.filename)

    if not exists(ref_template):
        try:
            ref_template = fetch_file(FILE2, path_to_mask)
        except IOError as err:
            print('Unable to download required template (mni_icbm152) for processing:', err)

    if not exists(ref_crop):
        try:
            ref_crop = fetch_file(FILE1, path_to_mask)
        except IOError as err:
            print('Unable to download required template (ref_crop) for processing:', err)

    sessions, subjects = get_subject_session_list(
            input_dir,
            tsv,
            is_bids_dir,
            False,
            base_dir
            )

    # Use hash instead of parameters for iterables folder names
    # Otherwise path will be too long and generate OSError
    from nipype import config
    cfg = dict(execution={'parameterize_dirs': False})
    config.update_config(cfg)

    # Inputs from anat/ folder
    # ========================
    # T1w file:
    try:
        t1w_files = clinica_file_reader(subjects,
                                        sessions,
                                        bids_directory,
                                        T1W_NII)
    except ClinicaException as e:
        err = 'Clinica faced error(s) while trying to read files in your BIDS directory.\n' + str(e)
        raise ClinicaBIDSError(err)

    def get_input_fields():
        """Specify the list of possible inputs of this pipeline.

        Returns:
            A list of (string) input field names.
        """
        return ['t1w']

    read_node = npe.Node(
            name="ReadingFiles",
            iterables=[
                ('t1w', t1w_files),
                ],
            synchronize=True,
            interface=nutil.IdentityInterface(
                fields=get_input_fields())
            )

    image_id_node = npe.Node(
            interface=nutil.Function(
                input_names=['bids_or_caps_file'],
                output_names=['image_id'],
                function=get_subject_id),
            name='ImageID'
            )

    # The core (processing) nodes

    # 1. N4biascorrection by ANTS. It uses nipype interface.
    n4biascorrection = npe.Node(
            name='n4biascorrection',
            interface=ants.N4BiasFieldCorrection(
                dimension=3,
                save_bias=True,
                bspline_fitting_distance=600
                )
            )

    # 2. `RegistrationSynQuick` by *ANTS*. It uses nipype interface.
    ants_registration_node = npe.Node(
            name='antsRegistrationSynQuick',
            interface=ants.RegistrationSynQuick()
            )
    ants_registration_node.inputs.fixed_image = ref_template
    ants_registration_node.inputs.transform_type = 'a'
    ants_registration_node.inputs.dimension = 3

    # 3. Crop image (using nifti). It uses custom interface, from utils file
    from .T1_linear_utils import crop_nifti

    cropnifti = npe.Node(
            name='cropnifti',
            interface=nutil.Function(
                function=crop_nifti,
                input_names=['input_img', 'ref_crop'],
                output_names=['output_img', 'crop_template']
                )
            )
    cropnifti.inputs.ref_crop = ref_crop

    # ********* Deprecated ********** #
    # ** This step was not used in the final version ** #
    # 4. Histogram-based intensity normalization. This is a custom function
    #    performed by the binary `ImageMath` included with *ANTS*.

#   from .T1_linear_utils import ants_histogram_intensity_normalization
#
#   # histogram-based intensity normalization
#   intensitynorm = npe.Node(
#           name='intensitynormalization',
#           interface=nutil.Function(
#               input_names=['image_dimension', 'crop_template', 'input_img'],
#               output_names=['output_img'],
#               function=ants_histogram_intensity_normalization
#               )
#           )
#   intensitynorm.inputs.image_dimension = 3

    # DataSink and the output node

    from .T1_linear_utils import (container_from_filename, get_data_datasink)
    # Create node to write selected files into the CAPS
    from nipype.interfaces.io import DataSink

    get_ids = npe.Node(
            interface=nutil.Function(
                input_names=['image_id'],
                output_names=['image_id_out', 'subst_ls'],
                function=get_data_datasink),
            name="GetIDs")

    # Find container path from t1w filename
    # =====================================
    container_path = npe.Node(
            nutil.Function(
                input_names=['bids_or_caps_filename'],
                output_names=['container'],
                function=container_from_filename),
            name='ContainerPath')

    write_node = npe.Node(
                name="WriteCaps",
                interface=DataSink()
                )
    write_node.inputs.base_directory = caps_directory
    write_node.inputs.parameterization = False

    # Connecting the workflow
    from clinica.utils.nipype import fix_join

    wf = npe.Workflow(name='t1_linear_dl', base_dir=working_directory)

    wf.connect([
        (read_node, image_id_node, [('t1w', 'bids_or_caps_file')]),
        (read_node, container_path, [('t1w', 'bids_or_caps_filename')]),
        (image_id_node, ants_registration_node, [('image_id', 'output_prefix')]),
        (read_node, n4biascorrection, [("t1w", "input_image")]),

        (n4biascorrection, ants_registration_node, [('output_image', 'moving_image')]),

        (ants_registration_node, cropnifti, [('warped_image', 'input_img')]),
        (ants_registration_node, write_node, [('out_matrix', '@affine_mat')]),
        # Connect to DataSink
        (container_path, write_node, [(('container', fix_join, 't1_linear'), 'container')]),
        (image_id_node, get_ids, [('image_id', 'image_id')]),
        (get_ids, write_node, [('image_id_out', '@image_id')]),
        (get_ids, write_node, [('subst_ls', 'substitutions')]),
        # (get_ids, write_node, [('regexp_subst_ls', 'regexp_substitutions')]),
        (n4biascorrection, write_node, [('output_image', '@outfile_corr')]),
        (ants_registration_node, write_node, [('warped_image', '@outfile_reg')]),
        (cropnifti, write_node, [('output_img', '@outfile_crop')]),
        ])

    return wf
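
# A minimal usage sketch (hypothetical paths; the TSV lists the participant
# and session pairs to process):
wf = preprocessing_t1w('/data/bids', '/data/caps', '/data/participants.tsv',
                       working_directory='/tmp/t1_linear_wd')
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})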
Example #2
        ishanat.connect(struct, ("outputNode.bias_corrected_images", getElementFromList, 0), myelin, "inputNode.coregT2w")
        
    ### Perform cross check with FS segmentation
        
    SpmFsCrossCheck = genVbmFsCrossQCWorkflow(name='SpmFsCrossCheck', fsHipp=False)
    ishanat.connect(fsReconAll, "subject_id", SpmFsCrossCheck, "inputNode.subject_id")
    ishanat.connect(struct, ("outputNode.bias_corrected_images", getElementFromList, 0), SpmFsCrossCheck, "inputNode.ref_main")
    ishanat.connect(struct, ("outputNode.native_class_images", getElementFromList, 0), SpmFsCrossCheck, "inputNode.vbm_native_gm")
    ishanat.connect(struct, ("outputNode.native_class_images", getElementFromList, 1), SpmFsCrossCheck, "inputNode.vbm_native_wm")
    ishanat.connect(struct, ("outputNode.native_class_images", getElementFromList, 2), SpmFsCrossCheck, "inputNode.vbm_native_csf")
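    # getElementFromList is not shown in this listing; nipype applies it to the
    # source output at connect time. A minimal sketch of the assumed helper:
    # def getElementFromList(in_list, index):
    #     return in_list[index]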
 
    if sinks:
        from nipype.interfaces.io import DataSink
        
        # XNAT assessor sinks
        assessorSink = Node(DataSink(), name='assessorSink')  
        assessorSink.inputs.container = 'data'            
        assessorSink.inputs.parameterization = False 
        ishanat.connect([
                     (struct, assessorSink,[('outputNode.xnat_assessor', 'vbmAssessorData')]),
                     #(fsconv, assessorSink,[('outputNode.xnat_assessor', 'fsAssessorData')]),
                    ]) 

        # Structural results sink: VBM
        anatSink = Node(DataSink(), name='anatSink')
        anatSink.inputs.container = 'data'
        anatSink.inputs.parameterization = False
    ishanat.connect([(struct, anatSink, [("outputNode.native_class_images", "native_class_images"),
                                         ("outputNode.bias_corrected_images", "bias_corrected_images"),
                                         ("outputNode.dartel_input_images", "dartel_input_images"),
                                         ("outputNode.forward_deformation_field", "forward_deformation_field"),
                                         # ... (the remainder of this connect list is truncated in the source)
                                         ])])
Example #3
def build_correlation_wf(Registration=True,
                         use_Ankita_Function=False,
                         name='pearsonCorrcalc'):
    """Build a workflow computing ROI-wise Pearson correlation maps,
    optionally transforming them to standard space when Registration is True."""
    corr_wf = Workflow(name=name)
    if Registration:
        inputnode = Node(interface=util.IdentityInterface(fields=[
            'in_files', 'atlas_files', 'func2std', 'reference', 'mask_file',
            'WorkingDir'
        ]),
                         name='inputspec')
        outputnode = Node(
            interface=util.IdentityInterface(fields=['pearsonCorr_files']),
            name='outputspec')

        if use_Ankita_Function:
            Geomcpy = MapNode(interface=fsl.utils.CopyGeom(ignore_dims=True),
                              iterfield=['in_file', 'dest_file'],
                              name='Geomcpy')
            coff_matrix = MapNode(util.Function(
                function=pearson_corr_Ankita,
                input_names=['in_file', 'atlas_file', 'WorkingDir'],
                output_names=['coff_matrix_file']),
                                  iterfield=['in_file', 'atlas_file'],
                                  name='coff_matrix')
            transform_corr = MapNode(interface=fsl.ApplyXFM(interp='spline'),
                                     iterfield=['in_file', 'in_matrix_file'],
                                     name='transform_corr')
            maskCorrFile = MapNode(interface=fsl.ImageMaths(suffix='_masked',
                                                            op_string='-mas'),
                                   iterfield=['in_file'],
                                   name='maskWarpFile')
            make_npy_from_Corr = MapNode(util.Function(
                function=make_npy_from_CorrFile,
                input_names=['Corr_file', 'mask_file'],
                output_names=['coff_matrix_file']),
                                         iterfield=['Corr_file'],
                                         name='coff_matrix_in_npy')
            corr_wf.connect(inputnode, 'in_files', Geomcpy, 'in_file')
            corr_wf.connect(inputnode, 'atlas_files', Geomcpy, 'dest_file')
            corr_wf.connect(Geomcpy, 'out_file', coff_matrix, 'atlas_file')
        else:
            coff_matrix = MapNode(util.Function(
                function=pearsonr_with_roi_mean_w_reg,
                input_names=['in_file', 'atlas_file', 'WorkingDir'],
                output_names=['coff_matrix_file']),
                                  iterfield=['in_file', 'atlas_file'],
                                  name='coff_matrix')
            transform_corr = MapNode(interface=fsl.ApplyXFM(interp='spline'),
                                     iterfield=['in_file', 'in_matrix_file'],
                                     name='transform_corr')
            maskCorrFile = MapNode(interface=fsl.ImageMaths(suffix='_masked',
                                                            op_string='-mas'),
                                   iterfield=['in_file'],
                                   name='maskWarpFile')
            make_npy_from_Corr = MapNode(util.Function(
                function=make_npy_from_CorrFile,
                input_names=['Corr_file', 'mask_file'],
                output_names=['coff_matrix_file']),
                                         iterfield=['Corr_file'],
                                         name='coff_matrix_in_npy')
            corr_wf.connect(inputnode, 'atlas_files', coff_matrix,
                            'atlas_file')
        datasink = Node(interface=DataSink(), name='datasink')

        corr_wf.connect(inputnode, 'in_files', coff_matrix, 'in_file')

        corr_wf.connect(inputnode, 'WorkingDir', coff_matrix, 'WorkingDir')

        corr_wf.connect(coff_matrix, 'coff_matrix_file', transform_corr,
                        'in_file')
        corr_wf.connect(inputnode, 'func2std', transform_corr,
                        'in_matrix_file')
        corr_wf.connect(inputnode, 'reference', transform_corr, 'reference')
        corr_wf.connect(transform_corr, 'out_file', maskCorrFile, 'in_file')
        corr_wf.connect(inputnode, 'mask_file', maskCorrFile, 'in_file2')

        corr_wf.connect(maskCorrFile, 'out_file', make_npy_from_Corr,
                        'Corr_file')
        corr_wf.connect(inputnode, 'mask_file', make_npy_from_Corr,
                        'mask_file')
        corr_wf.connect(make_npy_from_Corr, 'coff_matrix_file', outputnode,
                        'pearsonCorr_files')
        corr_wf.connect(outputnode, 'pearsonCorr_files', datasink, 'out_file')

    else:

        inputnode = Node(interface=util.IdentityInterface(
            fields=['in_files', 'atlas_file', 'mask_file', 'WorkingDir']),
                         name='inputspec')
        outputnode = Node(interface=util.IdentityInterface(
            fields=['pearsonCorr_files', 'pearsonCorr_files_in_nii']),
                          name='outputspec')
        if use_Ankita_Function:
            Geomcpy = MapNode(interface=fsl.utils.CopyGeom(ignore_dims=True),
                              iterfield=['in_file'],
                              name='Geomcpy')
            coff_matrix = MapNode(util.Function(
                function=pearson_corr_Ankita,
                input_names=['in_file', 'atlas_file', 'WorkingDir'],
                output_names=['coff_matrix_file']),
                                  iterfield=['in_file'],
                                  name='coff_matrix')
            maskCorrFile = MapNode(interface=fsl.ImageMaths(suffix='_masked',
                                                            op_string='-mas'),
                                   iterfield=['in_file'],
                                   name='maskWarpFile')
            make_npy_from_Corr = MapNode(util.Function(
                function=make_npy_from_CorrFile,
                input_names=['Corr_file', 'mask_file'],
                output_names=['coff_matrix_file']),
                                         iterfield=['Corr_file'],
                                         name='coff_matrix_in_npy')
            datasink = Node(interface=DataSink(), name='datasink')

            corr_wf.connect(inputnode, 'in_files', coff_matrix, 'in_file')
            corr_wf.connect(inputnode, 'in_files', Geomcpy, 'in_file')
            corr_wf.connect(inputnode, 'atlas_file', Geomcpy, 'dest_file')
            corr_wf.connect(Geomcpy, 'out_file', coff_matrix, 'atlas_file')
            corr_wf.connect(inputnode, 'WorkingDir', coff_matrix, 'WorkingDir')
            corr_wf.connect(coff_matrix, 'coff_matrix_file', maskCorrFile,
                            'in_file')
            corr_wf.connect(inputnode, 'mask_file', maskCorrFile, 'in_file2')

            corr_wf.connect(maskCorrFile, 'out_file', make_npy_from_Corr,
                            'Corr_file')
            corr_wf.connect(inputnode, 'mask_file', make_npy_from_Corr,
                            'mask_file')
            corr_wf.connect(make_npy_from_Corr, 'coff_matrix_file', outputnode,
                            'pearsonCorr_files')
            corr_wf.connect(outputnode, 'pearsonCorr_files', datasink,
                            'out_file')
        else:
            coff_matrix = MapNode(util.Function(
                function=pearsonr_with_roi_mean,
                input_names=['in_file', 'atlas_file', 'mask_file'],
                output_names=['coff_matrix_file', 'coff_matrix_file_in_nii']),
                                  iterfield=['in_file'],
                                  name='coff_matrix')
            datasink = Node(interface=DataSink(), name='datasink')
            # selectfile = MapNode(interface=util.Select(index=[0]), iterfield = ['inlist'],name='select')
            corr_wf.connect(inputnode, 'in_files', coff_matrix, 'in_file')
            corr_wf.connect(inputnode, 'atlas_file', coff_matrix, 'atlas_file')
            corr_wf.connect(inputnode, 'mask_file', coff_matrix, 'mask_file')

            corr_wf.connect(coff_matrix, 'coff_matrix_file', outputnode,
                            'pearsonCorr_files')
            corr_wf.connect(coff_matrix, 'coff_matrix_file_in_nii', outputnode,
                            'pearsonCorr_files_in_nii')
            corr_wf.connect(outputnode, 'pearsonCorr_files', datasink,
                            'out_file')
        # coff_matrix = MapNode(util.Function(function=pearsonr_with_roi_mean_w_reg,
        #                             input_names=['in_file','atlas_file'],
        #                             output_names=['coff_matrix_file']),
        #                   iterfield=['in_file'],
        #                   name = 'coff_matrix')
        # maskCorrFile = MapNode(interface=fsl.ImageMaths(suffix='_masked',
        #                                        op_string='-mas'),
        #               iterfield=['in_file'],
        #               name = 'maskWarpFile')
        # make_npy_from_Corr = MapNode(util.Function(function=make_npy_from_CorrFile,
        #                             input_names=['Corr_file','mask_file'],
        #                             output_names=['coff_matrix_file']),
        #                   iterfield=['Corr_file'],
        #                   name = 'coff_matrix_in_npy')
        # datasink = Node(interface=DataSink(), name='datasink')

        # corr_wf.connect(inputnode, 'in_files', coff_matrix, 'in_file')
        # corr_wf.connect(inputnode, 'atlas_file', coff_matrix, 'atlas_file')
        # corr_wf.connect(coff_matrix,'coff_matrix_file', maskCorrFile, 'in_file')
        # corr_wf.connect(inputnode, 'mask_file', maskCorrFile, 'in_file2')

        # corr_wf.connect(maskCorrFile,'out_file', make_npy_from_Corr, 'Corr_file')
        # corr_wf.connect(inputnode,'mask_file', make_npy_from_Corr, 'mask_file')
        # corr_wf.connect(make_npy_from_Corr, 'coff_matrix_file', outputnode, 'pearsonCorr_files')
        # corr_wf.connect(outputnode, 'pearsonCorr_files', datasink, 'out_file')

    return corr_wf
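
# A minimal usage sketch (hypothetical inputs; assumes the helper functions
# such as pearsonr_with_roi_mean are importable in this module):
wf = build_correlation_wf(Registration=False)
wf.inputs.inputspec.in_files = ['/data/sub-01_bold.nii.gz']
wf.inputs.inputspec.atlas_file = '/data/atlas.nii.gz'
wf.inputs.inputspec.mask_file = '/data/brain_mask.nii.gz'
wf.inputs.inputspec.WorkingDir = '/tmp/corr_wd'
wf.run()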
Example #4
import os
from nipype import Node, Workflow
from nipype.interfaces import fsl
from nipype.interfaces.io import DataSink

# node to skip dummy scans
extract = Node(
    fsl.ExtractROI(
        in_file=imagefMRI,  # input image (imagefMRI is assumed to be defined earlier)
        t_min=4,  # the first 4 volumes are discarded
        t_size=-1),
    name="extract")

# creating motion correction node
mcflirt = Node(
    fsl.MCFLIRT(save_rms=True,
                save_plots=True),  # saving displacement parameters
    name="mcflirt")

# creating datasink to collect outputs
datasink = Node(DataSink(base_directory=os.path.join(outDir, 'Results')),
                name='datasink')

# creating a workflow
moCor = Workflow(name="MoCor", base_dir=outDir)

# connecting the nodes
moCor.connect(extract, 'roi_file', mcflirt, 'in_file')

# output to datasink
moCor.connect(mcflirt, 'out_file', datasink, 'out_file')  # corrected fMRI
moCor.connect(mcflirt, 'par_file', datasink, 'par_file')  # motion parameter
moCor.connect(mcflirt, 'rms_files', datasink, 'rms_files')  # absolute and relative RMS displacement

# writing out graph
moCor.write_graph(graph2use='orig', dotfilename='graph_orig.dot')
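
# To execute the workflow (assuming imagefMRI and outDir point at real data):
# moCor.run()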
Example #5
    def run(self):
        matlab_cmd = self.paths['spm_path'] + ' ' + self.paths[
            'mcr_path'] + '/ script'
        spm.SPMCommand.set_mlab_paths(matlab_cmd=matlab_cmd, use_mcr=True)
        print(matlab_cmd)
        print('SPM version: ' + str(spm.SPMCommand().version))

        experiment_dir = opj(self.paths['input_path'], 'output/')
        output_dir = 'datasink'
        working_dir = 'workingdir'

        # list of subject identifiers
        subject_list = self.subject_list

        fwhm = self.parameters['fwhm']  # smoothing widths to apply (Gaussian kernel FWHM)
        tr = self.parameters['tr']  # repetition time
        init_volume = self.parameters[
            'init_volume']  # first volume to keep; earlier (dummy) volumes are dropped
        iso_size = self.parameters[
            'iso_size']  # isotropic voxel size (in mm) to which functional images are resampled
        low_pass = self.parameters['low_pass']
        high_pass = self.parameters['high_pass']
        t1_relative_path = self.paths['t1_relative_path']
        fmri_relative_path = self.paths['fmri_relative_path']

        # ExtractROI - skip dummy scans
        extract = Node(ExtractROI(t_min=init_volume,
                                  t_size=-1,
                                  output_type='NIFTI'),
                       name="extract")  #FSL

        # MCFLIRT - motion correction
        mcflirt = Node(MCFLIRT(mean_vol=True,
                               save_plots=True,
                               output_type='NIFTI'),
                       name="motion_correction")  #FSL

        # SliceTimer - correct for slice wise acquisition
        slicetimer = Node(SliceTimer(index_dir=False,
                                     interleaved=True,
                                     output_type='NIFTI',
                                     time_repetition=tr),
                          name="slice_timing_correction")  #FSL

        # Denoise - non-local means denoising of the anatomical image
        denoise = Node(Denoise(), name="denoising")  #Interfaces with dipy

        # Smooth - image smoothing
        smooth = Node(spm.Smooth(fwhm=fwhm), name="smooth")  #SPM

        n4bias = Node(N4Bias(out_file='t1_n4bias.nii.gz'),
                      name='n4bias')  #Interface with SimpleITK

        descomposition = Node(Descomposition(n_components=20,
                                             low_pass=0.1,
                                             high_pass=0.01,
                                             tr=tr),
                              name='descomposition')  #Interface with nilearn

        # Artifact Detection - determines outliers in functional images
        art = Node(ArtifactDetect(norm_threshold=2,
                                  zintensity_threshold=3,
                                  mask_type='spm_global',
                                  parameter_source='FSL',
                                  use_differences=[True, False],
                                  plot_type='svg'),
                   name="artifact_detection")  #Rapidart

        extract_confounds_ws_csf = Node(
            ExtractConfounds(out_file='ev_without_gs.csv'),
            name='extract_confounds_ws_csf')  #Custom interface

        extract_confounds_gs = Node(ExtractConfounds(out_file='ev_with_gs.csv',
                                                     delimiter=','),
                                    name='extract_confounds_global_signal')

        signal_extraction = Node(SignalExtraction(
            time_series_out_file='time_series.csv',
            correlation_matrix_out_file='correlation_matrix.png',
            labels_parcellation_path=self.paths['labels_parcellation_path'],
            mask_mni_path=self.paths['mask_mni_path'],
            tr=tr,
            low_pass=low_pass,
            high_pass=high_pass,
            plot=False),
                                 name='signal_extraction')
        signal_extraction.iterables = [('image_parcellation_path',
                                        self.paths['image_parcellation_path'])]

        art_remotion = Node(
            ArtifacRemotion(out_file='fmri_art_removed.nii'),
            name='artifact_remotion')  #This interface requires implementation

        # BET - skull-strip the anatomical and functional images
        bet_t1 = Node(BET(frac=0.5,
                          robust=True,
                          mask=True,
                          output_type='NIFTI_GZ'),
                      name="bet_t1")  #FSL

        # FAST - Image Segmentation
        segmentation = Node(FAST(output_type='NIFTI'),
                            name="segmentation")  #FSL

        # Normalize - normalizes functional and structural images to the MNI template
        normalize_fmri = Node(Normalize12(
            jobtype='estwrite',
            tpm=self.paths['template_spm_path'],
            write_voxel_sizes=[iso_size, iso_size, iso_size],
            write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                              name="normalize_fmri")  #SPM

        gunzip = Node(Gunzip(), name="gunzip")

        normalize_t1 = Node(Normalize12(
            jobtype='estwrite',
            tpm=self.paths['template_spm_path'],
            write_voxel_sizes=[iso_size, iso_size, iso_size],
            write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                            name="normalize_t1")

        normalize_masks = Node(Normalize12(
            jobtype='estwrite',
            tpm=self.paths['template_spm_path'],
            write_voxel_sizes=[iso_size, iso_size, iso_size],
            write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                               name="normalize_masks")

        # Threshold - Threshold WM probability image
        threshold = Node(Threshold(thresh=0.5,
                                   args='-bin',
                                   output_type='NIFTI_GZ'),
                         name="wm_mask_threshold")

        # FLIRT - pre-alignment of functional images to anatomical images
        coreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'),
                         name="linear_warp_estimation")

        # FLIRT - coregistration of functional images to anatomical images with BBR
        coreg_bbr = Node(FLIRT(dof=6,
                               cost='bbr',
                               schedule=opj(os.getenv('FSLDIR'),
                                            'etc/flirtsch/bbr.sch'),
                               output_type='NIFTI_GZ'),
                         name="nonlinear_warp_estimation")

        # Apply coregistration warp to functional images
        applywarp = Node(FLIRT(interp='spline',
                               apply_isoxfm=iso_size,
                               output_type='NIFTI'),
                         name="registration_fmri")

        # Apply coregistration warp to mean file
        applywarp_mean = Node(FLIRT(interp='spline',
                                    apply_isoxfm=iso_size,
                                    output_type='NIFTI_GZ'),
                              name="registration_mean_fmri")

        # Infosource - a function-free node to iterate over the list of subject names
        infosource = Node(IdentityInterface(fields=['subject_id']),
                          name="infosource")
        infosource.iterables = [('subject_id', subject_list)]

        # SelectFiles - to grab the data (alternative to DataGrabber)
        anat_file = opj('{subject_id}', t1_relative_path)
        func_file = opj('{subject_id}', fmri_relative_path)

        #anat_file = opj('{subject_id}/anat/', 'data.nii')
        #func_file = opj('{subject_id}/func/', 'data.nii')

        templates = {'anat': anat_file, 'func': func_file}

        selectfiles = Node(SelectFiles(
            templates, base_directory=self.paths['input_path']),
                           name="selectfiles")

        # Datasink - creates output folder for important outputs
        datasink = Node(DataSink(base_directory=experiment_dir,
                                 container=output_dir),
                        name="datasink")

        # Create a coregistration workflow
        coregwf = Workflow(name='coreg_fmri_to_t1')
        coregwf.base_dir = opj(experiment_dir, working_dir)

        # Create a preprocessing workflow
        preproc = Workflow(name='preproc')
        preproc.base_dir = opj(experiment_dir, working_dir)

        # Connect all components of the coregistration workflow

        coregwf.connect([
            (bet_t1, n4bias, [('out_file', 'in_file')]),
            (n4bias, segmentation, [('out_file', 'in_files')]),
            (segmentation, threshold, [(('partial_volume_files', get_latest),
                                        'in_file')]),
            (n4bias, coreg_pre, [('out_file', 'reference')]),
            (threshold, coreg_bbr, [('out_file', 'wm_seg')]),
            (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),
            (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),
            (n4bias, applywarp, [('out_file', 'reference')]),
            (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')
                                         ]),
            (n4bias, applywarp_mean, [('out_file', 'reference')]),
        ])

        ## Use the following DataSink output substitutions
        substitutions = [('_subject_id_', 'sub-')]
        #                 ('_fwhm_', 'fwhm-'),
        #                 ('_roi', ''),
        #                 ('_mcf', ''),
        #                 ('_st', ''),
        #                 ('_flirt', ''),
        #                 ('.nii_mean_reg', '_mean'),
        #                 ('.nii.par', '.par'),
        #                 ]
        # subjFolders = [('fwhm-%s/' % f, 'fwhm-%s_' % f) for f in fwhm]

        # substitutions.extend(subjFolders)
        datasink.inputs.substitutions = substitutions
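        # e.g. a sink folder named '_subject_id_01' becomes 'sub-01' in the output tree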

        # Connect all components of the preprocessing workflow
        preproc.connect([
            (infosource, selectfiles, [('subject_id', 'subject_id')]),
            (selectfiles, extract, [('func', 'in_file')]),
            (extract, mcflirt, [('roi_file', 'in_file')]),
            (mcflirt, slicetimer, [('out_file', 'in_file')]),
            (selectfiles, denoise, [('anat', 'in_file')]),
            (denoise, coregwf, [('out_file', 'bet_t1.in_file'),
                                ('out_file',
                                 'nonlinear_warp_estimation.reference')]),
            (mcflirt, coregwf,
             [('mean_img', 'linear_warp_estimation.in_file'),
              ('mean_img', 'nonlinear_warp_estimation.in_file'),
              ('mean_img', 'registration_mean_fmri.in_file')]),
            (slicetimer, coregwf, [('slice_time_corrected_file',
                                    'registration_fmri.in_file')]),
            (coregwf, art, [('registration_fmri.out_file', 'realigned_files')
                            ]),
            (mcflirt, art, [('par_file', 'realignment_parameters')]),
            (art, art_remotion, [('outlier_files', 'outlier_files')]),
            (coregwf, art_remotion, [('registration_fmri.out_file', 'in_file')
                                     ]),
            (coregwf, gunzip, [('n4bias.out_file', 'in_file')]),
            (selectfiles, normalize_fmri, [('anat', 'image_to_align')]),
            (art_remotion, normalize_fmri, [('out_file', 'apply_to_files')]),
            (selectfiles, normalize_t1, [('anat', 'image_to_align')]),
            (gunzip, normalize_t1, [('out_file', 'apply_to_files')]),
            (selectfiles, normalize_masks, [('anat', 'image_to_align')]),
            (coregwf, normalize_masks, [(('segmentation.partial_volume_files',
                                          get_wm_csf), 'apply_to_files')]),
            (normalize_fmri, smooth, [('normalized_files', 'in_files')]),
            (smooth, extract_confounds_ws_csf, [('smoothed_files', 'in_file')
                                                ]),
            (normalize_masks, extract_confounds_ws_csf, [('normalized_files',
                                                          'list_mask')]),
            (mcflirt, extract_confounds_ws_csf, [('par_file', 'file_concat')]),
            (art, extract_confounds_ws_csf, [('outlier_files', 'outlier_files')
                                             ]),

            # (smooth, extract_confounds_gs, [('smoothed_files', 'in_file')]),
            # (normalize_t1, extract_confounds_gs, [(('normalized_files',change_to_list), 'list_mask')]),
            # (extract_confounds_ws_csf, extract_confounds_gs, [('out_file', 'file_concat')]),
            (smooth, signal_extraction, [('smoothed_files', 'in_file')]),
            # (extract_confounds_gs, signal_extraction, [('out_file', 'confounds_file')]),
            (extract_confounds_ws_csf, signal_extraction,
             [('out_file', 'confounds_file')]),

            #(smooth, descomposition, [('smoothed_files', 'in_file')]),
            #(extract_confounds_ws_csf, descomposition, [('out_file', 'confounds_file')]),

            # (extract_confounds_gs, datasink, [('out_file', 'preprocessing.@confounds_with_gs')]),
            (denoise, datasink, [('out_file', 'preprocessing.@t1_denoised')]),
            (extract_confounds_ws_csf, datasink,
             [('out_file', 'preprocessing.@confounds_without_gs')]),
            (smooth, datasink, [('smoothed_files', 'preprocessing.@smoothed')
                                ]),
            (normalize_fmri, datasink, [('normalized_files',
                                         'preprocessing.@fmri_normalized')]),
            (normalize_t1, datasink, [('normalized_files',
                                       'preprocessing.@t1_normalized')]),
            (normalize_masks, datasink, [('normalized_files',
                                          'preprocessing.@masks_normalized')]),
            (signal_extraction, datasink, [('time_series_out_file',
                                            'preprocessing.@time_serie')]),
            (signal_extraction, datasink,
             [('correlation_matrix_out_file',
               'preprocessing.@correlation_matrix')])
        ])
        #(signal_extraction, datasink,
        # [('fmri_cleaned_out_file', 'preprocessing.@fmri_cleaned_out_file')])])
        #,
        #(descomposition, datasink, [('out_file', 'preprocessing.@descomposition')]),
        #(descomposition, datasink, [('plot_files', 'preprocessing.@descomposition_plot_files')])
        #])

        preproc.write_graph(graph2use='colored',
                            format='png',
                            simple_form=True)
        preproc.run()
Example #6
def create_workflow(files,
                    target_file,
                    subject_id,
                    TR,
                    slice_times,
                    norm_threshold=1,
                    num_components=5,
                    vol_fwhm=None,
                    surf_fwhm=None,
                    lowpass_freq=-1,
                    highpass_freq=-1,
                    subjects_dir=None,
                    sink_directory=os.getcwd(),
                    target_subject=['fsaverage3', 'fsaverage4'],
                    name='resting'):

    wf = Workflow(name=name)

    # Rename files in case they are named identically
    name_unique = MapNode(Rename(format_string='rest_%(run)02d'),
                          iterfield=['in_file', 'run'],
                          name='rename')
    name_unique.inputs.keep_ext = True
    name_unique.inputs.run = list(range(1, len(files) + 1))
    name_unique.inputs.in_file = files

    realign = Node(nipy.SpaceTimeRealigner(), name="spacetime_realign")
    realign.inputs.slice_times = slice_times
    realign.inputs.tr = TR
    realign.inputs.slice_info = 2
    realign.plugin_args = {'sbatch_args': '-c%d' % 4}

    # Compute TSNR on realigned data regressing polynomials up to order 2
    tsnr = MapNode(TSNR(regress_poly=2), iterfield=['in_file'], name='tsnr')
    wf.connect(realign, "out_file", tsnr, "in_file")

    # Compute the median image across runs
    calc_median = Node(CalculateMedian(), name='median')
    wf.connect(tsnr, 'detrended_file', calc_median, 'in_files')
    """Segment and Register
    """

    registration = create_reg_workflow(name='registration')
    wf.connect(calc_median, 'median_file', registration,
               'inputspec.mean_image')
    registration.inputs.inputspec.subject_id = subject_id
    registration.inputs.inputspec.subjects_dir = subjects_dir
    registration.inputs.inputspec.target_image = target_file
    """Quantify TSNR in each freesurfer ROI
    """

    get_roi_tsnr = MapNode(fs.SegStats(default_color_table=True),
                           iterfield=['in_file'],
                           name='get_aparc_tsnr')
    get_roi_tsnr.inputs.avgwf_txt_file = True
    wf.connect(tsnr, 'tsnr_file', get_roi_tsnr, 'in_file')
    wf.connect(registration, 'outputspec.aparc', get_roi_tsnr,
               'segmentation_file')
    """Use :class:`nipype.algorithms.rapidart` to determine which of the
    images in the functional series are outliers based on deviations in
    intensity or movement.
    """

    art = Node(interface=ArtifactDetect(), name="art")
    art.inputs.use_differences = [True, True]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = norm_threshold
    art.inputs.zintensity_threshold = 9
    art.inputs.mask_type = 'spm_global'
    art.inputs.parameter_source = 'NiPy'
    """Here we are connecting all the nodes together. Notice that we add the merge node only if you choose
    to use 4D. Also `get_vox_dims` function is passed along the input volume of normalise to set the optimal
    voxel sizes.
    """

    wf.connect([
        (name_unique, realign, [('out_file', 'in_file')]),
        (realign, art, [('out_file', 'realigned_files')]),
        (realign, art, [('par_file', 'realignment_parameters')]),
    ])

    def selectindex(files, idx):
        import numpy as np
        from nipype.utils.filemanip import filename_to_list, list_to_filename
        return list_to_filename(
            np.array(filename_to_list(files))[idx].tolist())

    mask = Node(fsl.BET(), name='getmask')
    mask.inputs.mask = True
    wf.connect(calc_median, 'median_file', mask, 'in_file')

    # get segmentation in normalized functional space

    def merge_files(in1, in2):
        out_files = filename_to_list(in1)
        out_files.extend(filename_to_list(in2))
        return out_files

    # filter some noise

    # Compute motion regressors
    motreg = Node(Function(
        input_names=['motion_params', 'order', 'derivatives'],
        output_names=['out_files'],
        function=motion_regressors,
        imports=imports),
                  name='getmotionregress')
    wf.connect(realign, 'par_file', motreg, 'motion_params')

    # Create a filter to remove motion and art confounds
    createfilter1 = Node(Function(
        input_names=['motion_params', 'comp_norm', 'outliers', 'detrend_poly'],
        output_names=['out_files'],
        function=build_filter1,
        imports=imports),
                         name='makemotionbasedfilter')
    createfilter1.inputs.detrend_poly = 2
    wf.connect(motreg, 'out_files', createfilter1, 'motion_params')
    wf.connect(art, 'norm_files', createfilter1, 'comp_norm')
    wf.connect(art, 'outlier_files', createfilter1, 'outliers')

    filter1 = MapNode(fsl.GLM(out_f_name='F_mcart.nii.gz',
                              out_pf_name='pF_mcart.nii.gz',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filtermotion')

    wf.connect(realign, 'out_file', filter1, 'in_file')
    wf.connect(realign, ('out_file', rename, '_filtermotart'), filter1,
               'out_res_name')
    wf.connect(createfilter1, 'out_files', filter1, 'design')

    createfilter2 = MapNode(ACompCor(),
                            iterfield=['realigned_file', 'extra_regressors'],
                            name='makecompcorrfilter')
    createfilter2.inputs.components_file = 'noise_components.txt'
    createfilter2.inputs.num_components = num_components

    wf.connect(createfilter1, 'out_files', createfilter2, 'extra_regressors')
    wf.connect(filter1, 'out_res', createfilter2, 'realigned_file')
    wf.connect(registration,
               ('outputspec.segmentation_files', selectindex, [0, 2]),
               createfilter2, 'mask_file')

    filter2 = MapNode(fsl.GLM(out_f_name='F.nii.gz',
                              out_pf_name='pF.nii.gz',
                              demean=True),
                      iterfield=['in_file', 'design', 'out_res_name'],
                      name='filter_noise_nosmooth')
    wf.connect(filter1, 'out_res', filter2, 'in_file')
    wf.connect(filter1, ('out_res', rename, '_cleaned'), filter2,
               'out_res_name')
    wf.connect(createfilter2, 'components_file', filter2, 'design')
    wf.connect(mask, 'mask_file', filter2, 'mask')

    bandpass = Node(Function(
        input_names=['files', 'lowpass_freq', 'highpass_freq', 'fs'],
        output_names=['out_files'],
        function=bandpass_filter,
        imports=imports),
                    name='bandpass_unsmooth')
    bandpass.inputs.fs = 1. / TR
    bandpass.inputs.highpass_freq = highpass_freq
    bandpass.inputs.lowpass_freq = lowpass_freq
    wf.connect(filter2, 'out_res', bandpass, 'files')
    """Smooth the functional data using
    :class:`nipype.interfaces.fsl.IsotropicSmooth`.
    """

    smooth = MapNode(interface=fsl.IsotropicSmooth(),
                     name="smooth",
                     iterfield=["in_file"])
    smooth.inputs.fwhm = vol_fwhm

    wf.connect(bandpass, 'out_files', smooth, 'in_file')

    collector = Node(Merge(2), name='collect_streams')
    wf.connect(smooth, 'out_file', collector, 'in1')
    wf.connect(bandpass, 'out_files', collector, 'in2')
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = MapNode(ants.ApplyTransforms(),
                      iterfield=['input_image'],
                      name='warpall')
    warpall.inputs.input_image_type = 3
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.terminal_output = 'file'
    warpall.inputs.reference_image = target_file
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 2
    warpall.plugin_args = {'sbatch_args': '-c%d' % 2}

    # transform to target
    wf.connect(collector, 'out', warpall, 'input_image')
    wf.connect(registration, 'outputspec.transforms', warpall, 'transforms')

    mask_target = Node(fsl.ImageMaths(op_string='-bin'), name='target_mask')

    wf.connect(registration, 'outputspec.anat2target', mask_target, 'in_file')

    maskts = MapNode(fsl.ApplyMask(), iterfield=['in_file'], name='ts_masker')
    wf.connect(warpall, 'output_image', maskts, 'in_file')
    wf.connect(mask_target, 'out_file', maskts, 'mask_file')

    # map to surface
    # extract aparc+aseg ROIs
    # extract subcortical ROIs
    # extract target space ROIs
    # combine subcortical and cortical rois into a single cifti file

    #######
    # Convert aparc to subject functional space

    # Sample the average time series in aparc ROIs
    sampleaparc = MapNode(
        freesurfer.SegStats(default_color_table=True),
        iterfield=['in_file', 'summary_file', 'avgwf_txt_file'],
        name='aparc_ts')
    sampleaparc.inputs.segment_id = ([8] + list(range(10, 14)) +
                                     [17, 18, 26, 47] + list(range(49, 55)) +
                                     [58] + list(range(1001, 1036)) +
                                     list(range(2001, 2036)))

    wf.connect(registration, 'outputspec.aparc', sampleaparc,
               'segmentation_file')
    wf.connect(collector, 'out', sampleaparc, 'in_file')

    def get_names(files, suffix):
        """Generate appropriate names for output files
        """
        from nipype.utils.filemanip import (split_filename, filename_to_list,
                                            list_to_filename)
        import os
        out_names = []
        for filename in files:
            path, name, _ = split_filename(filename)
            out_names.append(os.path.join(path, name + suffix))
        return list_to_filename(out_names)

    wf.connect(collector, ('out', get_names, '_avgwf.txt'), sampleaparc,
               'avgwf_txt_file')
    wf.connect(collector, ('out', get_names, '_summary.stats'), sampleaparc,
               'summary_file')

    # Sample the time series onto the surface of the target surface. Performs
    # sampling into left and right hemisphere
    target = Node(IdentityInterface(fields=['target_subject']), name='target')
    target.iterables = ('target_subject', filename_to_list(target_subject))

    samplerlh = MapNode(freesurfer.SampleToSurface(),
                        iterfield=['source_file'],
                        name='sampler_lh')
    samplerlh.inputs.sampling_method = "average"
    samplerlh.inputs.sampling_range = (0.1, 0.9, 0.1)
    samplerlh.inputs.sampling_units = "frac"
    samplerlh.inputs.interp_method = "trilinear"
    samplerlh.inputs.smooth_surf = surf_fwhm
    # samplerlh.inputs.cortex_mask = True
    samplerlh.inputs.out_type = 'niigz'
    samplerlh.inputs.subjects_dir = subjects_dir

    samplerrh = samplerlh.clone('sampler_rh')

    samplerlh.inputs.hemi = 'lh'
    wf.connect(collector, 'out', samplerlh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerlh, 'reg_file')
    wf.connect(target, 'target_subject', samplerlh, 'target_subject')

    samplerrh.set_input('hemi', 'rh')
    wf.connect(collector, 'out', samplerrh, 'source_file')
    wf.connect(registration, 'outputspec.out_reg_file', samplerrh, 'reg_file')
    wf.connect(target, 'target_subject', samplerrh, 'target_subject')

    # Combine left and right hemisphere to text file
    combiner = MapNode(Function(input_names=['left', 'right'],
                                output_names=['out_file'],
                                function=combine_hemi,
                                imports=imports),
                       iterfield=['left', 'right'],
                       name="combiner")
    wf.connect(samplerlh, 'out_file', combiner, 'left')
    wf.connect(samplerrh, 'out_file', combiner, 'right')

    # Sample the time series file for each subcortical roi
    ts2txt = MapNode(Function(
        input_names=['timeseries_file', 'label_file', 'indices'],
        output_names=['out_file'],
        function=extract_subrois,
        imports=imports),
                     iterfield=['timeseries_file'],
                     name='getsubcortts')
    ts2txt.inputs.indices = [8] + list(range(10, 14)) + [17, 18, 26, 47] +\
        list(range(49, 55)) + [58]
    ts2txt.inputs.label_file = \
        os.path.abspath(('OASIS-TRT-20_jointfusion_DKT31_CMA_labels_in_MNI152_'
                         '2mm_v2.nii.gz'))
    wf.connect(maskts, 'out_file', ts2txt, 'timeseries_file')

    ######

    substitutions = [
        ('_target_subject_', ''),
        ('_filtermotart_cleaned_bp_trans_masked', ''),
        ('_filtermotart_cleaned_bp', ''),
    ]
    substitutions += [("_smooth%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_ts_masker%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_getsubcortts%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_combiner%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_filtermotion%d" % i, "") for i in range(11)[::-1]]
    substitutions += [("_filter_noise_nosmooth%d" % i, "")
                      for i in range(11)[::-1]]
    substitutions += [("_makecompcorfilter%d" % i, "")
                      for i in range(11)[::-1]]
    substitutions += [("_get_aparc_tsnr%d/" % i, "run%d_" % (i + 1))
                      for i in range(11)[::-1]]

    substitutions += [("T1_out_brain_pve_0_maths_warped", "compcor_csf"),
                      ("T1_out_brain_pve_1_maths_warped", "compcor_gm"),
                      ("T1_out_brain_pve_2_maths_warped", "compcor_wm"),
                      ("output_warped_image_maths", "target_brain_mask"),
                      ("median_brain_mask", "native_brain_mask"),
                      ("corr_", "")]

    regex_subs = [
        ('_combiner.*/sar', '/smooth/'),
        ('_combiner.*/ar', '/unsmooth/'),
        ('_aparc_ts.*/sar', '/smooth/'),
        ('_aparc_ts.*/ar', '/unsmooth/'),
        ('_getsubcortts.*/sar', '/smooth/'),
        ('_getsubcortts.*/ar', '/unsmooth/'),
        ('series/sar', 'series/smooth/'),
        ('series/ar', 'series/unsmooth/'),
        ('_inverse_transform./', ''),
    ]
    # Save the relevant data into an output directory
    datasink = Node(interface=DataSink(), name="datasink")
    datasink.inputs.base_directory = sink_directory
    datasink.inputs.container = subject_id
    datasink.inputs.substitutions = substitutions
    datasink.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(realign, 'par_file', datasink, 'resting.qa.motion')
    wf.connect(art, 'norm_files', datasink, 'resting.qa.art.@norm')
    wf.connect(art, 'intensity_files', datasink, 'resting.qa.art.@intensity')
    wf.connect(art, 'outlier_files', datasink, 'resting.qa.art.@outlier_files')
    wf.connect(registration, 'outputspec.segmentation_files', datasink,
               'resting.mask_files')
    wf.connect(registration, 'outputspec.anat2target', datasink,
               'resting.qa.ants')
    wf.connect(mask, 'mask_file', datasink, 'resting.mask_files.@brainmask')
    wf.connect(mask_target, 'out_file', datasink, 'resting.mask_files.target')
    wf.connect(filter1, 'out_f', datasink, 'resting.qa.compmaps.@mc_F')
    wf.connect(filter1, 'out_pf', datasink, 'resting.qa.compmaps.@mc_pF')
    wf.connect(filter2, 'out_f', datasink, 'resting.qa.compmaps')
    wf.connect(filter2, 'out_pf', datasink, 'resting.qa.compmaps.@p')
    wf.connect(registration, 'outputspec.min_cost_file', datasink,
               'resting.qa.mincost')
    wf.connect(tsnr, 'tsnr_file', datasink, 'resting.qa.tsnr.@map')
    wf.connect([(get_roi_tsnr, datasink,
                 [('avgwf_txt_file', 'resting.qa.tsnr'),
                  ('summary_file', 'resting.qa.tsnr.@summary')])])

    wf.connect(bandpass, 'out_files', datasink,
               'resting.timeseries.@bandpassed')
    wf.connect(smooth, 'out_file', datasink, 'resting.timeseries.@smoothed')
    wf.connect(createfilter1, 'out_files', datasink,
               'resting.regress.@regressors')
    wf.connect(createfilter2, 'components_file', datasink,
               'resting.regress.@compcorr')
    wf.connect(maskts, 'out_file', datasink, 'resting.timeseries.target')
    wf.connect(sampleaparc, 'summary_file', datasink,
               'resting.parcellations.aparc')
    wf.connect(sampleaparc, 'avgwf_txt_file', datasink,
               'resting.parcellations.aparc.@avgwf')
    wf.connect(ts2txt, 'out_file', datasink,
               'resting.parcellations.grayo.@subcortical')

    datasink2 = Node(interface=DataSink(), name="datasink2")
    datasink2.inputs.base_directory = sink_directory
    datasink2.inputs.container = subject_id
    datasink2.inputs.substitutions = substitutions
    datasink2.inputs.regexp_substitutions = regex_subs  # (r'(/_.*(\d+/))', r'/run\2')
    wf.connect(combiner, 'out_file', datasink2,
               'resting.parcellations.grayo.@surface')
    return wf
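
# A minimal usage sketch (hypothetical paths and timing values):
# wf = create_workflow(files=['/data/rest_run1.nii.gz', '/data/rest_run2.nii.gz'],
#                      target_file='/data/target_brain.nii.gz',
#                      subject_id='sub-01', TR=2.0,
#                      slice_times=[0.0, 1.0, 0.5, 1.5],
#                      subjects_dir='/data/freesurfer')
# wf.base_dir = '/tmp/resting_wd'
# wf.run(plugin='MultiProc')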
Example #7
templates = {
    'zstat': '/media/amr/Amr_4TB/Work/stimulation/stimulation_3rd_level_CA3/{frequencies}/flameo/{zstats}.nii.gz',
}



selectfiles = Node(SelectFiles(templates,
                               base_directory=experiment_dir),
                   name="selectfiles")
#==========================================================================================================================================================

datasink = Node(DataSink(), name='datasink')
datasink.inputs.container = output_dir
datasink.inputs.base_directory = experiment_dir

substitutions = [('_frequencies_', ''), ('_zstats_', '_')]

datasink.inputs.substitutions = substitutions

#==========================================================================================================================================================
#Smooth estimation
def smooth_est(zstat):
    import nipype.interfaces.fsl as fsl
    template_mask = '/media/amr/Amr_4TB/Work/October_Acquistion/Anat_Template_Enhanced_Mask.nii.gz'

    smooth_est = fsl.SmoothEstimate()
    # The listing is truncated here in the source; a minimal completion
    # (an assumption, not the author's original code) would be:
    smooth_est.inputs.zstat_file = zstat
    smooth_est.inputs.mask_file = template_mask
    est = smooth_est.run()
    return est.outputs.dlh, est.outputs.volume
Example #8
# (apply2epiNC is defined earlier in the original script; listing truncated)
apply2epiNC.inputs.terminal_output = 'file'

#Apply transform to non-filtered EPIs (for FALFF ETC)
apply2epiNF = MapNode(ants.ApplyTransforms(),
                      iterfield='input_image',
                      name='apply2epiNF')
apply2epiNF.inputs.default_value = 0
apply2epiNF.inputs.input_image_type = 3
apply2epiNF.inputs.interpolation = 'Linear'
apply2epiNF.inputs.invert_transform_flags = [True, False]
apply2epiNF.inputs.num_threads = 1
apply2epiNF.inputs.terminal_output = 'file'

#Datasink
substitutions = [('_subject_id_', '')]
sink = Node(DataSink(), name='sink')
sink.inputs.base_directory = out_dir
sink.inputs.substitutions = substitutions

preproc = Workflow(name='healthy_preproc')
preproc.base_dir = work_dir

####POPULATE INPUTS, GET DATA, DROP EPI VOLS, GENERAL HOUSEKEEPING###
preproc.connect([
    (infosource, selectfiles, [('subject_id', 'subject_id')]),
    (selectfiles, dropvols, [('epi', 'epi_list')]),
    (dropvols, epi_stack, [('epi_list', 'dicom_files')]),
    (epi_stack, metadata, [('out_file', 'nifti')]),
    (epi_stack, despike, [('out_file', 'in_file')]),

    ###HERE BE SLICE TIMING###
    # ... (the remainder of this connect list is truncated in the source)
])
Example #9
# TSE normalization magdeburg
MAG_normalize_TSE_n = MapNode(C3d(interp="Sinc",
                                  pix_type='float',
                                  args='-histmatch 5',
                                  out_file='MAG_normalise_TSE.nii.gz'),
                              name='MAG_normalize_TSE_n',
                              iterfield=['in_file', 'opt_in_file'])
wf.connect([(MAG_reslice_TSE_n, MAG_normalize_TSE_n, [('out_files',
                                                       'opt_in_file')])])
wf.connect([(selecttemplates, MAG_normalize_TSE_n, [('tse_inthist_template',
                                                     'in_file')])])

################
## DATA SINK  ##
################
datasink = Node(DataSink(base_directory=src_path + working_dir,
                         container=output_dir),
                name="datasink")

wf.connect([(MAG_reslice_labels_SEG_n, datasink,
             [('out_files', 'MAG_reslice_labels_SEG')])])  #Step 14
wf.connect([(MAG_normalize_TSE_n, datasink,
             [('out_files', 'MAG_normalized_TSE')])])  #Step 14
wf.connect([(MAG_normalize_MPRAGE_n, datasink,
             [('out_files', 'MAG_normalized_MPRAGE')])])  #Step 14

###################
## Run the thing ##
###################

wf.write_graph(graph2use='flat', format='png', simple_form=False)
###########
#
# NODES FOR THE MERGING IMAGES
#
###########
# merging cope files
copemerge = Node(fsl.Merge(dimension='t', in_files=listCopeFiles),
                 name="copemerge")

# merging varcope files
varcopemerge = Node(fsl.Merge(dimension='t', in_files=listVarcopeFiles),
                    name="varcopemerge")

# creating datasink to collect outputs
datasink = Node(
    DataSink(base_directory=os.path.join(outDir, 'Flanker_Cope4_Level3')),
    name='datasink')

###########
#
# SETTING UP THE WORKFLOW NODES
#
###########

# creating the workflow
thirdLevel = Workflow(name="thirdLevel", base_dir=outDir)

# connecting nodes
thirdLevel.connect(level2design, 'design_mat', flameo, 'design_file')
thirdLevel.connect(level2design, 'design_con', flameo, 't_con_file')
thirdLevel.connect(level2design, 'design_grp', flameo, 'cov_split_file')
Example #11
def group_randomise_wf(
    input_dir,
    output_dir,
    subject_list,
    regressors_path,
    contrast_path,
    selected_cope=None,
    roi=None,
    oneSampleT=False,
    analysis_name="oneSampleT_PPI",
):
    """Group level non parametric test work flow

    Parameters
    ----------
    input_dir:
        BIDS derivative
    subject_list:
        subjects entering group level analysis
    roi:
        mask or coordinate (default: whole brain)
    """
    def wf_prep_files():
        prep_files = pe.Workflow(name="prep_files")
        prep_files.base_dir = input_dir + os.sep + "group_level"

        template = {"mask": "sub-{subject}/sub-{subject}.feat/mask.nii.gz"}
        whole_brain_mask = pe.MapNode(
            SelectFiles(templates=template),
            iterfield="subject",
            name="whole_brain_mask",
        )
        whole_brain_mask.inputs.base_directory = input_dir
        whole_brain_mask.inputs.subject = subject_list

        gen_groupmask = pe.Node(
            Function(
                function=_create_group_mask,
                input_names=["brain_masks", "base_dir"],
                output_names=["groupmask_path"],
            ),
            name="gen_groupmask",
        )
        gen_groupmask.inputs.base_dir = input_dir + os.sep + "group_level" + os.sep

        designs = pe.Node(
            Function(
                function=_groupmean_contrast,
                input_names=[
                    "subject_list", "regressors_path", "contrast_path"
                ],
                output_names=["groups", "regressors", "contrasts"],
            ),
            name="designs",
        )
        designs.inputs.subject_list = subject_list
        designs.inputs.regressors_path = regressors_path
        designs.inputs.contrast_path = contrast_path

        model = pe.Node(fsl.MultipleRegressDesign(), name="model")

        outputnode = pe.Node(
            interface=niu.IdentityInterface(
                fields=["mask", "group", "regressors", "contrasts"]),
            name="outputnode",
        )

        prep_files.connect([
            (whole_brain_mask, gen_groupmask, [("mask", "brain_masks")]),
            (
                designs,
                model,
                [
                    ("groups", "groups"),
                    ("regressors", "regressors"),
                    ("contrasts", "contrasts"),
                ],
            ),
            (gen_groupmask, outputnode, [("groupmask_path", "mask")]),
            (
                model,
                outputnode,
                [
                    ("design_grp", "group"),
                    ("design_mat", "regressors"),
                    ("design_con", "contrasts"),
                ],
            ),
        ])
        return prep_files

    meta_workflow = pe.Workflow(name=analysis_name)
    meta_workflow.base_dir = input_dir + os.sep + "group_level"
    prep_files = wf_prep_files()
    # now run randomise...
    contrast_names = _cope_names(input_dir, selected_cope)
    for cope_id, contrast in contrast_names:
        node_name = contrast.replace(">", "_wrt_")
        wk = pe.Workflow(name=f"contrast_{node_name}")
        template = {
            "cope_file":
            "sub-{subject}/sub-{subject}.feat/stats/cope{cope}.nii.gz"
        }
        file_grabber = pe.MapNode(
            SelectFiles(template, base_directory=input_dir),
            iterfield="subject",
            name="file_grabber",
        )
        file_grabber.inputs.cope = cope_id
        file_grabber.inputs.subject = subject_list

        concat_copes = pe.Node(
            Function(
                function=_concat_copes,
                input_names=["cope_file", "mm", "output_dir"],
                output_names=["output_dir"],
            ),
            name="concat_copes",
        )
        concat_copes.inputs.mm = 6
        concat_copes.inputs.output_dir = (input_dir + os.sep + "group_level" +
                                          os.sep + f"cope_{node_name}.nii.gz")
        prep_files = wf_prep_files()

        # non-parametric permutation testing with randomise
        randomise = pe.Node(fsl.Randomise(), name="stats_randomise")
        randomise.inputs.num_perm = 1000
        randomise.inputs.vox_p_values = True
        randomise.inputs.tfce = True

        import pandas as pd

        group_contrast_names = pd.read_csv(contrast_path,
                                           sep="\t",
                                           index_col=0).index
        group_contrast_names = group_contrast_names.tolist()

        # Create DataSink object
        sinker = pe.Node(DataSink(), name=f"sinker_{node_name}")
        sinker.inputs.base_directory = output_dir + os.sep + analysis_name
        t_test_new_name, p_new_name = [], []
        for i, name in enumerate(group_contrast_names):
            t_test_new_name.append(
                (f"randomise_tstat{i + 1}", f"{name}_tstat"))
            p_new_name.append((f"randomise_tfce_corrp_tstat{i + 1}",
                               f"{name}_tfce_corrp_tstat"))
        sinker.inputs.substitutions = t_test_new_name + p_new_name

        # connect the nodes
        wk.connect([
            (file_grabber, concat_copes, [("cope_file", "cope_file")]),
            (concat_copes, randomise, [("output_dir", "in_file")]),
            (
                prep_files,
                randomise,
                [
                    ("outputnode.mask", "mask"),
                    ("outputnode.contrasts", "tcon"),
                    ("outputnode.regressors", "design_mat"),
                ],
            ),
            (
                randomise,
                sinker,
                [
                    ("tstat_files", f"contrast_{node_name}.@tstat_files"),
                    (
                        "t_corrected_p_files",
                        f"contrast_{node_name}.@t_corrected_p_files",
                    ),
                ],
            ),
        ])

        if oneSampleT:
            # one sample T test
            onesampleT_randomise = pe.Node(fsl.Randomise(),
                                           name="onesampleT_randomise")
            onesampleT_randomise.inputs.num_perm = 1000
            onesampleT_randomise.inputs.vox_p_values = True
            onesampleT_randomise.inputs.tfce = True
            onesampleT_randomise.inputs.one_sample_group_mean = True

            # Create DataSink object
            gsinker = pe.Node(DataSink(), name=f"sinker_{node_name}_group")
            gsinker.inputs.base_directory = output_dir + os.sep + analysis_name
            gsinker.inputs.substitutions = [
                ("tstat1", "tstat"),
                ("randomise", "fullsample"),
            ]
            wk.connect([
                (concat_copes, onesampleT_randomise, [("output_dir", "in_file")
                                                      ]),
                (prep_files, onesampleT_randomise, [("outputnode.mask", "mask")
                                                    ]),
                (
                    onesampleT_randomise,
                    gsinker,
                    [
                        ("tstat_files",
                         f"contrast_{node_name}.@group_tstat_files"),
                        (
                            "t_corrected_p_files",
                            f"contrast_{node_name}.@group_t_corrected_p_files",
                        ),
                    ],
                ),
            ])

        meta_workflow.add_nodes([wk])
    return meta_workflow
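# A minimal, hypothetical invocation (all paths and subject IDs below are
# placeholders); the returned meta-workflow runs like any other nipype workflow:
wf = group_randomise_wf(
    input_dir='/data/derivatives/feat',
    output_dir='/data/derivatives/randomise',
    subject_list=['001', '002', '003'],
    regressors_path='/data/design/regressors.tsv',
    contrast_path='/data/design/contrasts.tsv',
    oneSampleT=True,
)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})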
Example #12
def preproc_func(subject_list,
                 task_list,
                 slice_order,
                 experiment_dir,
                 base_directory,
                 fwhm_list,
                 TR,
                 dummy_scans=0,
                 iso_size=4,
                 multiple_scans=False):

    output_dir = 'datasink'
    working_dir = 'workingdir'

    response = input('Does your data follow the BIDS format? Enter yes or no: ')
    if response == 'yes':
        print('Great! You saved me a lot of hassle.')
        anat_file = 'sub-{subject_id}/anat/sub-{subject_id}_T1w.nii.gz'
        if multiple_scans:
            func_file = 'sub-{subject_id}/func/sub-{subject_id}_task-{task_name}_run-*_bold.nii.gz'
        else:
            func_file = 'sub-{subject_id}/func/sub-{subject_id}_task-{task_name}_bold.nii.gz'
    elif response == 'no':
        print('You have to manually set the path templates to the data.')
        print(
            ' - anat_example: sub-{subject_id}/anat/sub-{subject_id}_T1w.nii.gz'
        )
        print(
            ' - func_example: sub-{subject_id}/func/sub-{subject_id}_task-{task_name}_run-*_bold.nii.gz'
        )
        anat_file = input('Enter the path template for the anatomical image: ')
        func_file = input('Enter the path template for the functional image: ')
    else:
        raise RuntimeError('Should enter either yes or no')

    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=dummy_scans,
                              t_size=-1,
                              output_type='NIFTI'),
                   name="extract")

    slicetime = Node(SliceTiming(num_slices=len(slice_order),
                                 ref_slice=int(median(slice_order)),
                                 slice_order=slice_order,
                                 time_repetition=TR,
                                 time_acquisition=TR -
                                 (TR / len(slice_order))),
                     name="slicetime")

    mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True,
                           output_type='NIFTI'),
                   name="mcflirt")

    # Smooth - image smoothing
    smooth = Node(Smooth(), name="smooth")
    smooth.iterables = ("fwhm", fwhm_list)

    # Artifact Detection - determines outliers in functional images
    art = Node(ArtifactDetect(norm_threshold=2,
                              zintensity_threshold=3,
                              mask_type='spm_global',
                              parameter_source='FSL',
                              use_differences=[True, False],
                              plot_type='svg'),
               name="art")

    # BET - Skullstrip anatomical Image
    bet_anat = Node(BET(frac=0.5, robust=True, output_type='NIFTI_GZ'),
                    name="bet_anat")

    # FAST - Image Segmentation
    segmentation = Node(FAST(output_type='NIFTI_GZ'),
                        name="segmentation",
                        mem_gb=4)

    # Select WM segmentation file from segmentation output
    def get_wm(files):
        return files[-1]

    # Threshold - Threshold WM probability image
    threshold = Node(Threshold(thresh=0.5, args='-bin',
                               output_type='NIFTI_GZ'),
                     name="threshold")

    # FLIRT - pre-alignment of functional images to anatomical images
    coreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'), name="coreg_pre")

    # FLIRT - coregistration of functional images to anatomical images with BBR
    coreg_bbr = Node(FLIRT(dof=6,
                           cost='bbr',
                           schedule=opj(os.getenv('FSLDIR'),
                                        'etc/flirtsch/bbr.sch'),
                           output_type='NIFTI_GZ'),
                     name="coreg_bbr")

    # Apply coregistration warp to functional images
    applywarp = Node(FLIRT(interp='spline',
                           apply_isoxfm=iso_size,
                           output_type='NIFTI'),
                     name="applywarp")

    # Apply coregistration warp to mean file
    applywarp_mean = Node(FLIRT(interp='spline',
                                apply_isoxfm=iso_size,
                                output_type='NIFTI_GZ'),
                          name="applywarp_mean")

    # Create a coregistration workflow
    coregwf = Workflow(name='coregwf')
    coregwf.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the coregistration workflow
    coregwf.connect([
        (bet_anat, segmentation, [('out_file', 'in_files')]),
        (segmentation, threshold, [(('partial_volume_files', get_wm),
                                    'in_file')]),
        (bet_anat, coreg_pre, [('out_file', 'reference')]),
        (threshold, coreg_bbr, [('out_file', 'wm_seg')]),
        (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),
        (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),
        (bet_anat, applywarp, [('out_file', 'reference')]),
        (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')]),
        (bet_anat, applywarp_mean, [('out_file', 'reference')]),
    ])

    # Infosource - a function free node to iterate over the list of subject names
    infosource = Node(IdentityInterface(fields=['subject_id', 'task_name']),
                      name="infosource")
    infosource.iterables = [('subject_id', subject_list),
                            ('task_name', task_list)]

    # SelectFiles - to grab the data (alternative to DataGrabber)

    templates = {'anat': anat_file, 'func': func_file}
    selectfiles = Node(SelectFiles(templates, base_directory=base_directory),
                       name="selectfiles")

    # Datasink - creates output folder for important outputs
    datasink = Node(DataSink(base_directory=experiment_dir,
                             container=output_dir),
                    name="datasink")

    ## Use the following DataSink output substitutions
    substitutions = [
        ('_subject_id_', 'sub-'),
        ('_task_name_', '/task-'),
        ('_fwhm_', 'fwhm-'),
        ('_roi', ''),
        ('_mcf', ''),
        ('_st', ''),
        ('_flirt', ''),
        ('.nii_mean_reg', '_mean'),
        ('.nii.par', '.par'),
    ]
    subjFolders = [('fwhm-%s/' % f, 'fwhm-%s_' % f) for f in fwhm_list]
    substitutions.extend(subjFolders)
    datasink.inputs.substitutions = substitutions
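    # DataSink substitutions are ordered, plain string replacements applied to
    # each output path; a quick illustration with a made-up path:
    #   path = '_subject_id_01/_fwhm_4/bold_roi_st_mcf_flirt.nii'
    #   for old, new in substitutions:
    #       path = path.replace(old, new)
    #   # -> 'sub-01/fwhm-4_bold.nii'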

    # Create a preprocessing workflow
    preproc = Workflow(name='preproc')
    preproc.base_dir = opj(experiment_dir, working_dir)

    # Connect all components of the preprocessing workflow
    preproc.connect([
        (infosource, selectfiles, [('subject_id', 'subject_id'),
                                   ('task_name', 'task_name')]),
        (selectfiles, extract, [('func', 'in_file')]),
        (extract, slicetime, [('roi_file', 'in_files')]),
        (slicetime, mcflirt, [('timecorrected_files', 'in_file')]),
        (selectfiles, coregwf, [('anat', 'bet_anat.in_file'),
                                ('anat', 'coreg_bbr.reference')]),
        (mcflirt, coregwf, [('mean_img', 'coreg_pre.in_file'),
                            ('mean_img', 'coreg_bbr.in_file'),
                            ('mean_img', 'applywarp_mean.in_file')]),
        (mcflirt, coregwf, [('out_file', 'applywarp.in_file')]),
        (coregwf, smooth, [('applywarp.out_file', 'in_files')]),
        (mcflirt, datasink, [('par_file', 'preproc.@par')]),
        (smooth, datasink, [('smoothed_files', 'preproc.@smooth')]),
        (coregwf, datasink, [('applywarp_mean.out_file', 'preproc.@mean')]),
        (coregwf, art, [('applywarp.out_file', 'realigned_files')]),
        (mcflirt, art, [('par_file', 'realignment_parameters')]),
        (coregwf, datasink, [('coreg_bbr.out_matrix_file',
                              'preproc.@mat_file'),
                             ('bet_anat.out_file', 'preproc.@brain')]),
        (art, datasink, [('outlier_files', 'preproc.@outlier_files'),
                         ('plot_files', 'preproc.@plot_files')]),
    ])
    # Create preproc output graph
    preproc.write_graph(graph2use='colored', format='png', simple_form=True)

    # Visualize the graph
    img1 = imread(opj(preproc.base_dir, 'preproc', 'graph.png'))
    plt.imshow(img1)
    plt.xticks([]), plt.yticks([])
    plt.show()

    # Visualize the detailed graph
    preproc.write_graph(graph2use='flat', format='png', simple_form=True)
    img2 = imread(opj(preproc.base_dir, 'preproc', 'graph_detailed.png'))
    plt.imshow(img2)
    plt.xticks([]), plt.yticks([])
    plt.show()

    print("Workflow all set. Check the workflow image :)")

    response = input('Run the workflow now? Enter yes or no: ')

    if response == 'yes':
        preproc.run('MultiProc', plugin_args={'n_procs': 10})
    elif response == 'no':
        print('Exiting since you entered no.')
    else:
        raise RuntimeError('Should enter either yes or no')
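# Hypothetical call (every value below is a placeholder): two subjects, one
# task, 30 interleaved slices, a TR of 2 s, and two smoothing kernels to compare.
preproc_func(subject_list=['01', '02'],
             task_list=['flanker'],
             slice_order=list(range(1, 31, 2)) + list(range(2, 31, 2)),
             experiment_dir='/output',
             base_directory='/data/bids',
             fwhm_list=[4, 8],
             TR=2.0,
             dummy_scans=4)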
Example #13
def builder(subject_id,
            subId,
            project_dir,
            data_dir,
            output_dir,
            output_final_dir,
            output_interm_dir,
            log_dir,
            layout,
            anat=None,
            funcs=None,
            fmaps=None,
            task_name='',
            session=None,
            apply_trim=False,
            apply_dist_corr=False,
            apply_smooth=False,
            apply_filter=False,
            mni_template='2mm',
            apply_n4=True,
            ants_threads=8,
            readable_crash_files=False):
    """
    Core function that returns a workflow. See wfmaker for more details.

    Args:
        subject_id: name of subject folder for final outputted sub-folder name
        subId: abbreviate name of subject for intermediate outputted sub-folder name
        project_dir: full path to root of project
        data_dir: full path to raw data files
        output_dir: upper level output dir (others will be nested within this)
        output_final_dir: final preprocessed sub-dir name
        output_interm_dir: intermediate preprocess sub-dir name
        log_dir: directory for nipype log files
        layout: BIDS layout instance
    """

    ##################
    ### PATH SETUP ###
    ##################
    # Set MNI template
    MNItemplate = os.path.join(get_resource_path(),
                               'MNI152_T1_' + mni_template + '_brain.nii.gz')
    MNImask = os.path.join(get_resource_path(),
                           'MNI152_T1_' + mni_template + '_brain_mask.nii.gz')
    MNItemplatehasskull = os.path.join(get_resource_path(),
                                       'MNI152_T1_' + mni_template + '.nii.gz')

    # Set ANTs files
    bet_ants_template = os.path.join(get_resource_path(),
                                     'OASIS_template.nii.gz')
    bet_ants_prob_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumProbabilityMask.nii.gz')
    bet_ants_registration_mask = os.path.join(
        get_resource_path(), 'OASIS_BrainCerebellumRegistrationMask.nii.gz')

    #################################
    ### NIPYPE IMPORTS AND CONFIG ###
    #################################
    # Update nipype global config because workflow.config[...] = ... doesn't seem to work
    # Can't store a nipype config/rc file in the container anyway, so set them globally before importing and setting up the workflow as suggested here: http://nipype.readthedocs.io/en/latest/users/config_file.html#config-file
    from nipype import config
    if readable_crash_files:
        cfg = dict(execution={'crashfile_format': 'txt'})
        config.update_config(cfg)
    config.update_config(
        {'logging': {
            'log_directory': log_dir,
            'log_to_file': True
        }})
    from nipype import logging
    logging.update_logging(config)

    # Now import everything else
    from nipype.interfaces.io import DataSink
    from nipype.interfaces.utility import Merge, IdentityInterface
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.nipy.preprocess import ComputeMask
    from nipype.algorithms.rapidart import ArtifactDetect
    from nipype.interfaces.ants.segmentation import BrainExtraction, N4BiasFieldCorrection
    from nipype.interfaces.ants import Registration, ApplyTransforms
    from nipype.interfaces.fsl import MCFLIRT, TOPUP, ApplyTOPUP
    from nipype.interfaces.fsl.maths import MeanImage
    from nipype.interfaces.fsl import Merge as MERGE
    from nipype.interfaces.fsl.utils import Smooth
    from nipype.interfaces.nipy.preprocess import Trim
    from .interfaces import Plot_Coregistration_Montage, Plot_Quality_Control, Plot_Realignment_Parameters, Create_Covariates, Down_Sample_Precision, Create_Encoding_File, Filter_In_Mask

    ##################
    ### INPUT NODE ###
    ##################

    # Turn functional file list into an iterable Node
    func_scans = Node(IdentityInterface(fields=['scan']), name='func_scans')
    func_scans.iterables = ('scan', funcs)

    # Get TR for use in filtering below; we're assuming all BOLD runs have the same TR
    tr_length = layout.get_metadata(funcs[0])['RepetitionTime']

    #####################################
    ## TRIM ##
    #####################################
    if apply_trim:
        trim = Node(Trim(), name='trim')
        trim.inputs.begin_index = apply_trim

    #####################################
    ## DISTORTION CORRECTION ##
    #####################################

    if apply_dist_corr:
        # Get fmap file locations
        fmaps = [
            f.filename for f in layout.get(
                subject=subId, modality='fmap', extensions='.nii.gz')
        ]
        if not fmaps:
            raise IOError(
                "Distortion Correction requested but field map scans not found..."
            )

        # Get fmap metadata
        totalReadoutTimes, measurements, fmap_pes = [], [], []

        for i, fmap in enumerate(fmaps):
            # Grab total readout time for each fmap
            totalReadoutTimes.append(
                layout.get_metadata(fmap)['TotalReadoutTime'])

            # Grab measurements (for some reason pyBIDS doesn't grab dcm_meta... fields from the side-car json file, and json.load doesn't either; so instead just read the header using nibabel to determine the number of scans)
            measurements.append(nib.load(fmap).header['dim'][4])

            # Get phase encoding direction
            fmap_pe = layout.get_metadata(fmap)["PhaseEncodingDirection"]
            fmap_pes.append(fmap_pe)

        encoding_file_writer = Node(interface=Create_Encoding_File(),
                                    name='create_encoding')
        encoding_file_writer.inputs.totalReadoutTimes = totalReadoutTimes
        encoding_file_writer.inputs.fmaps = fmaps
        encoding_file_writer.inputs.fmap_pes = fmap_pes
        encoding_file_writer.inputs.measurements = measurements
        encoding_file_writer.inputs.file_name = 'encoding_file.txt'

        merge_to_file_list = Node(interface=Merge(2),
                                  name='merge_to_file_list')
        merge_to_file_list.inputs.in1 = fmaps[0]
        merge_to_file_list.inputs.in2 = fmaps[1]

        # Merge AP and PA distortion correction scans
        merger = Node(interface=MERGE(dimension='t'), name='merger')
        merger.inputs.output_type = 'NIFTI_GZ'
        merger.inputs.in_files = fmaps
        merger.inputs.merged_file = 'merged_epi.nii.gz'

        # Create distortion correction map
        topup = Node(interface=TOPUP(), name='topup')
        topup.inputs.output_type = 'NIFTI_GZ'

        # Apply distortion correction to other scans
        apply_topup = Node(interface=ApplyTOPUP(), name='apply_topup')
        apply_topup.inputs.output_type = 'NIFTI_GZ'
        apply_topup.inputs.method = 'jac'
        apply_topup.inputs.interp = 'spline'

    ###################################
    ### REALIGN ###
    ###################################
    realign_fsl = Node(MCFLIRT(), name="realign")
    realign_fsl.inputs.cost = 'mutualinfo'
    realign_fsl.inputs.mean_vol = True
    realign_fsl.inputs.output_type = 'NIFTI_GZ'
    realign_fsl.inputs.save_mats = True
    realign_fsl.inputs.save_rms = True
    realign_fsl.inputs.save_plots = True

    ###################################
    ### MEAN EPIs ###
    ###################################
    # For coregistration after realignment
    mean_epi = Node(MeanImage(), name='mean_epi')
    mean_epi.inputs.dimension = 'T'

    # For after normalization is done to plot checks
    mean_norm_epi = Node(MeanImage(), name='mean_norm_epi')
    mean_norm_epi.inputs.dimension = 'T'

    ###################################
    ### MASK, ART, COV CREATION ###
    ###################################
    compute_mask = Node(ComputeMask(), name='compute_mask')
    compute_mask.inputs.m = .05

    art = Node(ArtifactDetect(), name='art')
    art.inputs.use_differences = [True, False]
    art.inputs.use_norm = True
    art.inputs.norm_threshold = 1
    art.inputs.zintensity_threshold = 3
    art.inputs.mask_type = 'file'
    art.inputs.parameter_source = 'FSL'

    make_cov = Node(Create_Covariates(), name='make_cov')

    ################################
    ### N4 BIAS FIELD CORRECTION ###
    ################################
    if apply_n4:
        n4_correction = Node(N4BiasFieldCorrection(), name='n4_correction')
        n4_correction.inputs.copy_header = True
        n4_correction.inputs.save_bias = False
        n4_correction.inputs.num_threads = ants_threads
        n4_correction.inputs.input_image = anat

    ###################################
    ### BRAIN EXTRACTION ###
    ###################################
    brain_extraction_ants = Node(BrainExtraction(), name='brain_extraction')
    brain_extraction_ants.inputs.dimension = 3
    brain_extraction_ants.inputs.use_floatingpoint_precision = 1
    brain_extraction_ants.inputs.num_threads = ants_threads
    brain_extraction_ants.inputs.brain_probability_mask = bet_ants_prob_mask
    brain_extraction_ants.inputs.keep_temporary_files = 1
    brain_extraction_ants.inputs.brain_template = bet_ants_template
    brain_extraction_ants.inputs.extraction_registration_mask = bet_ants_registration_mask
    brain_extraction_ants.inputs.out_prefix = 'bet'

    ###################################
    ### COREGISTRATION ###
    ###################################
    coregistration = Node(Registration(), name='coregistration')
    coregistration.inputs.float = False
    coregistration.inputs.output_transform_prefix = "meanEpi2highres"
    coregistration.inputs.transforms = ['Rigid']
    coregistration.inputs.transform_parameters = [(0.1, )]  # one tuple per transform
    coregistration.inputs.number_of_iterations = [[1000, 500, 250, 100]]
    coregistration.inputs.dimension = 3
    coregistration.inputs.num_threads = ants_threads
    coregistration.inputs.write_composite_transform = True
    coregistration.inputs.collapse_output_transforms = True
    coregistration.inputs.metric = ['MI']
    coregistration.inputs.metric_weight = [1]
    coregistration.inputs.radius_or_number_of_bins = [32]
    coregistration.inputs.sampling_strategy = ['Regular']
    coregistration.inputs.sampling_percentage = [0.25]
    coregistration.inputs.convergence_threshold = [1e-08]
    coregistration.inputs.convergence_window_size = [10]
    coregistration.inputs.smoothing_sigmas = [[3, 2, 1, 0]]
    coregistration.inputs.sigma_units = ['mm']
    coregistration.inputs.shrink_factors = [[4, 3, 2, 1]]
    coregistration.inputs.use_estimate_learning_rate_once = [True]
    coregistration.inputs.use_histogram_matching = [False]
    coregistration.inputs.initial_moving_transform_com = True
    coregistration.inputs.output_warped_image = True
    coregistration.inputs.winsorize_lower_quantile = 0.01
    coregistration.inputs.winsorize_upper_quantile = 0.99

    ###################################
    ### NORMALIZATION ###
    ###################################
    # Settings Explanations
    # Only a few key settings are worth adjusting and most others relate to how ANTs optimizer starts or iterates and won't make a ton of difference
    # Brian Avants referred to these settings as the last "best tested" when he was aligning fMRI data: https://github.com/ANTsX/ANTsRCore/blob/master/R/antsRegistration.R#L275
    # Things that matter the most:
    # smoothing_sigmas:
    # how much gaussian smoothing to apply when performing registration, probably want the upper limit of this to match the resolution that the data is collected at e.g. 3mm
    # Old settings [[3,2,1,0]]*3
    # shrink_factors
    # The coarseness with which to do registration
    # Old settings [[8,4,2,1]] * 3
    # >= 8 may result is some problems causing big chunks of cortex with little fine grain spatial structure to be moved to other parts of cortex
    # Other settings
    # transform_parameters:
    # how much regularization to do for fitting that transformation
    # for syn this pertains to both the gradient regularization term, and the flow, and elastic terms. Leave the syn settings alone as they seem to be the most well tested across published data sets
    # radius_or_number_of_bins
    # This is the bin size for MI metrics and 32 is probably adequate for most use cases. Increasing this might increase precision (e.g. to 64) but takes exponentially longer
    # use_histogram_matching
    # Use image intensity distribution to guide registration
    # Leave it on for within modality registration (e.g. T1 -> MNI), but off for between modality registration (e.g. EPI -> T1)
    # convergence_threshold
    # threshold for optimizer
    # convergence_window_size
    # how many samples should optimizer average to compute threshold?
    # sampling_strategy
    # what strategy should ANTs use to initialize the transform. Regular here refers to approximately random sampling around the center of the image mass
    normalization = Node(Registration(), name='normalization')
    normalization.inputs.float = False
    normalization.inputs.collapse_output_transforms = True
    normalization.inputs.convergence_threshold = [1e-06, 1e-06, 1e-07]
    normalization.inputs.convergence_window_size = [10]
    normalization.inputs.dimension = 3
    normalization.inputs.fixed_image = MNItemplate
    normalization.inputs.initial_moving_transform_com = True
    normalization.inputs.metric = ['MI', 'MI', 'CC']
    normalization.inputs.metric_weight = [1.0] * 3
    normalization.inputs.number_of_iterations = [[1000, 500, 250, 100],
                                                 [1000, 500, 250, 100],
                                                 [100, 70, 50, 20]]
    normalization.inputs.num_threads = ants_threads
    normalization.inputs.output_transform_prefix = 'anat2template'
    normalization.inputs.output_inverse_warped_image = True
    normalization.inputs.output_warped_image = True
    normalization.inputs.radius_or_number_of_bins = [32, 32, 4]
    normalization.inputs.sampling_percentage = [0.25, 0.25, 1]
    normalization.inputs.sampling_strategy = ['Regular', 'Regular', 'None']
    normalization.inputs.shrink_factors = [[4, 3, 2, 1]] * 3
    normalization.inputs.sigma_units = ['vox'] * 3
    normalization.inputs.smoothing_sigmas = [[3, 2, 1, 0]] * 3  # one sigma per level, matching number_of_iterations
    normalization.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    normalization.inputs.transform_parameters = [(0.1, ), (0.1, ),
                                                 (0.1, 3.0, 0.0)]
    normalization.inputs.use_histogram_matching = True
    normalization.inputs.winsorize_lower_quantile = 0.005
    normalization.inputs.winsorize_upper_quantile = 0.995
    normalization.inputs.write_composite_transform = True
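    # To sanity-check these settings without running the node, nipype
    # command-line interfaces expose the generated call; a sketch (the moving
    # image path is a placeholder):
    #
    #   normalization.inputs.moving_image = 'anat_brain.nii.gz'
    #   print(normalization.interface.cmdline)  # full antsRegistration command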

    ###################################
    ### APPLY TRANSFORMS AND SMOOTH ###
    ###################################
    merge_transforms = Node(Merge(2), name='merge_transforms')

    # Used for epi -> mni, via (coreg + norm)
    apply_transforms = Node(ApplyTransforms(), name='apply_transforms')
    apply_transforms.inputs.input_image_type = 3
    apply_transforms.inputs.float = False
    apply_transforms.inputs.num_threads = ants_threads
    apply_transforms.inputs.environ = {}
    apply_transforms.inputs.interpolation = 'BSpline'
    apply_transforms.inputs.invert_transform_flags = [False, False]
    apply_transforms.inputs.reference_image = MNItemplate

    # Used for t1 segmented -> mni, via (norm)
    apply_transform_seg = Node(ApplyTransforms(), name='apply_transform_seg')
    apply_transform_seg.inputs.input_image_type = 3
    apply_transform_seg.inputs.float = False
    apply_transform_seg.inputs.num_threads = ants_threads
    apply_transform_seg.inputs.environ = {}
    apply_transform_seg.inputs.interpolation = 'MultiLabel'
    apply_transform_seg.inputs.invert_transform_flags = [False]
    apply_transform_seg.inputs.reference_image = MNItemplate

    ###################################
    ### PLOTS ###
    ###################################
    plot_realign = Node(Plot_Realignment_Parameters(), name="plot_realign")
    plot_qa = Node(Plot_Quality_Control(), name="plot_qa")
    plot_normalization_check = Node(Plot_Coregistration_Montage(),
                                    name="plot_normalization_check")
    plot_normalization_check.inputs.canonical_img = MNItemplatehasskull

    ############################################
    ### FILTER, SMOOTH, DOWNSAMPLE PRECISION ###
    ############################################
    # Use cosanlab_preproc for down sampling
    down_samp = Node(Down_Sample_Precision(), name="down_samp")

    # Use FSL for smoothing
    if apply_smooth:
        smooth = Node(Smooth(), name='smooth')
        if isinstance(apply_smooth, list):
            smooth.iterables = ("fwhm", apply_smooth)
        elif isinstance(apply_smooth, int) or isinstance(apply_smooth, float):
            smooth.inputs.fwhm = apply_smooth
        else:
            raise ValueError("apply_smooth must be a list or int/float")

    # Use cosanlab_preproc for low-pass filtering
    if apply_filter:
        lp_filter = Node(Filter_In_Mask(), name='lp_filter')
        lp_filter.inputs.mask = MNImask
        lp_filter.inputs.sampling_rate = tr_length
        lp_filter.inputs.high_pass_cutoff = 0
        if isinstance(apply_filter, list):
            lp_filter.iterables = ("low_pass_cutoff", apply_filter)
        elif isinstance(apply_filter, int) or isinstance(apply_filter, float):
            lp_filter.inputs.low_pass_cutoff = apply_filter
        else:
            raise ValueError("apply_filter must be a list or int/float")

    ###################
    ### OUTPUT NODE ###
    ###################
    # Collect all final outputs in the output dir and get rid of file name additions
    datasink = Node(DataSink(), name='datasink')
    if session:
        datasink.inputs.base_directory = os.path.join(output_final_dir,
                                                      subject_id)
        datasink.inputs.container = 'ses-' + session
    else:
        datasink.inputs.base_directory = output_final_dir
        datasink.inputs.container = subject_id

    # Remove substitutions
    data_dir_parts = data_dir.split('/')[1:]
    prefix = ['_scan_'] + data_dir_parts + [subject_id] + ['func']
    func_scan_names = [os.path.split(elem)[-1] for elem in funcs]
    to_replace = []
    for elem in func_scan_names:
        bold_name = elem.split(subject_id + '_')[-1]
        bold_name = bold_name.split('.nii.gz')[0]
        to_replace.append(('..'.join(prefix + [elem]), bold_name))
    datasink.inputs.substitutions = to_replace

    #####################
    ### INIT WORKFLOW ###
    #####################
    if session:
        workflow = Workflow(name='ses-' + session)
        workflow.base_dir = os.path.join(output_interm_dir, subId)
    else:
        workflow = Workflow(name=subId)
        workflow.base_dir = output_interm_dir

    ############################
    ######### PART (1a) #########
    # func -> discorr -> trim -> realign
    # OR
    # func -> trim -> realign
    # OR
    # func -> discorr -> realign
    # OR
    # func -> realign
    ############################
    if apply_dist_corr:
        workflow.connect([(encoding_file_writer, topup, [('encoding_file',
                                                          'encoding_file')]),
                          (encoding_file_writer, apply_topup,
                           [('encoding_file', 'encoding_file')]),
                          (merger, topup, [('merged_file', 'in_file')]),
                          (func_scans, apply_topup, [('scan', 'in_files')]),
                          (topup, apply_topup,
                           [('out_fieldcoef', 'in_topup_fieldcoef'),
                            ('out_movpar', 'in_topup_movpar')])])
        if apply_trim:
            # Dist Corr + Trim
            workflow.connect([(apply_topup, trim, [('out_corrected', 'in_file')
                                                   ]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # Dist Corr + No Trim
            workflow.connect([(apply_topup, realign_fsl, [('out_corrected',
                                                           'in_file')])])
    else:
        if apply_trim:
            # No Dist Corr + Trim
            workflow.connect([(func_scans, trim, [('scan', 'in_file')]),
                              (trim, realign_fsl, [('out_file', 'in_file')])])
        else:
            # No Dist Corr + No Trim
            workflow.connect([
                (func_scans, realign_fsl, [('scan', 'in_file')]),
            ])

    ############################
    ######### PART (1n) #########
    # anat -> N4 -> bet
    # OR
    # anat -> bet
    ############################
    if apply_n4:
        workflow.connect([(n4_correction, brain_extraction_ants,
                           [('output_image', 'anatomical_image')])])
    else:
        brain_extraction_ants.inputs.anatomical_image = anat

    ##########################################
    ############### PART (2) #################
    # realign -> coreg -> mni (via t1)
    # t1 -> mni
    # covariate creation
    # plot creation
    ###########################################

    workflow.connect([
        (realign_fsl, plot_realign, [('par_file', 'realignment_parameters')]),
        (realign_fsl, plot_qa, [('out_file', 'dat_img')]),
        (realign_fsl, art, [('out_file', 'realigned_files'),
                            ('par_file', 'realignment_parameters')]),
        (realign_fsl, mean_epi, [('out_file', 'in_file')]),
        (realign_fsl, make_cov, [('par_file', 'realignment_parameters')]),
        (mean_epi, compute_mask, [('out_file', 'mean_volume')]),
        (compute_mask, art, [('brain_mask', 'mask_file')]),
        (art, make_cov, [('outlier_files', 'spike_id')]),
        (art, plot_realign, [('outlier_files', 'outliers')]),
        (plot_qa, make_cov, [('fd_outliers', 'fd_outliers')]),
        (brain_extraction_ants, coregistration, [('BrainExtractionBrain',
                                                  'fixed_image')]),
        (mean_epi, coregistration, [('out_file', 'moving_image')]),
        (brain_extraction_ants, normalization, [('BrainExtractionBrain',
                                                 'moving_image')]),
        (coregistration, merge_transforms, [('composite_transform', 'in2')]),
        (normalization, merge_transforms, [('composite_transform', 'in1')]),
        (merge_transforms, apply_transforms, [('out', 'transforms')]),
        (realign_fsl, apply_transforms, [('out_file', 'input_image')]),
        (apply_transforms, mean_norm_epi, [('output_image', 'in_file')]),
        (normalization, apply_transform_seg, [('composite_transform',
                                               'transforms')]),
        (brain_extraction_ants, apply_transform_seg,
         [('BrainExtractionSegmentation', 'input_image')]),
        (mean_norm_epi, plot_normalization_check, [('out_file', 'wra_img')])
    ])

    ##################################################
    ################### PART (3) #####################
    # epi (in mni) -> filter -> smooth -> down sample
    # OR
    # epi (in mni) -> filter -> down sample
    # OR
    # epi (in mni) -> smooth -> down sample
    # OR
    # epi (in mni) -> down sample
    ###################################################

    if apply_filter:
        workflow.connect([(apply_transforms, lp_filter, [('output_image',
                                                          'in_file')])])

        if apply_smooth:
            # Filtering + Smoothing
            workflow.connect([(lp_filter, smooth, [('out_file', 'in_file')]),
                              (smooth, down_samp, [('smoothed_file', 'in_file')
                                                   ])])
        else:
            # Filtering + No Smoothing
            workflow.connect([(lp_filter, down_samp, [('out_file', 'in_file')])
                              ])
    else:
        if apply_smooth:
            # No Filtering + Smoothing
            workflow.connect([
                (apply_transforms, smooth, [('output_image', 'in_file')]),
                (smooth, down_samp, [('smoothed_file', 'in_file')])
            ])
        else:
            # No Filtering + No Smoothing
            workflow.connect([(apply_transforms, down_samp, [('output_image',
                                                              'in_file')])])

    ##########################################
    ############### PART (4) #################
    # down sample -> save
    # plots -> save
    # covs -> save
    # t1 (in mni) -> save
    # t1 segmented masks (in mni) -> save
    ##########################################

    workflow.connect([
        (down_samp, datasink, [('out_file', 'functional.@down_samp')]),
        (plot_realign, datasink, [('plot', 'functional.@plot_realign')]),
        (plot_qa, datasink, [('plot', 'functional.@plot_qa')]),
        (plot_normalization_check, datasink,
         [('plot', 'functional.@plot_normalization')]),
        (make_cov, datasink, [('covariates', 'functional.@covariates')]),
        (normalization, datasink, [('warped_image', 'structural.@normanat')]),
        (apply_transform_seg, datasink, [('output_image',
                                          'structural.@normanatseg')]),
        (realign_fsl, datasink, [('par_file', 'functional.@motionparams')])
    ])

    # if not os.path.exists(os.path.join(output_dir, 'pipeline.png')):
    #     workflow.write_graph(dotfilename=os.path.join(output_dir, 'pipeline'), format='png')
    if session:
        print(
            f"Creating workflow for subject: {subject_id} session: {session}")
    else:
        print(f"Creating workflow for subject: {subject_id}")
    if ants_threads == 8:
        print(
            f"ANTs will utilize the default of {ants_threads} threads for parallel processing."
        )
    else:
        print(
            f"ANTs will utilize the user-requested {ants_threads} threads for parallel processing."
        )
    return workflow
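# Hypothetical direct use (the docstring says builder() is normally driven
# through wfmaker; every value below is a placeholder, and the layout call
# sketched here assumes a recent pybids API, which may differ from the version
# this script targets):
from bids import BIDSLayout

layout = BIDSLayout('/data/bids')
funcs = layout.get(subject='01', suffix='bold', return_type='file')
wf = builder(subject_id='sub-01', subId='01', project_dir='/project',
             data_dir='/data/bids', output_dir='/project/out',
             output_final_dir='/project/out/final',
             output_interm_dir='/project/out/interm',
             log_dir='/project/logs', layout=layout,
             anat='/data/bids/sub-01/anat/sub-01_T1w.nii.gz', funcs=funcs,
             apply_smooth=6.0)
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})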
Example #14
    def run(self):
        experiment_dir = opj(self.paths['input_path'], 'output/')
        output_dir = 'datasink'
        working_dir = 'workingdir'

        subject_list = self.subject_list
        iso_size = self.parameters['iso_size']

        t1_relative_path = self.paths['t1_relative_path']
        dwi_relative_path = self.paths['dwi_relative_path']
        bvec_relative_path = self.paths['bvec_relative_path']
        bval_relative_path = self.paths['bval_relative_path']

        # Infosource - a function free node to iterate over the list of subject names
        infosource = Node(IdentityInterface(fields=['subject_id']),
                          name="infosource")
        infosource.iterables = [('subject_id', subject_list)]

        # SelectFiles - to grab the data (alternative to DataGrabber)
        anat_file = opj('{subject_id}', t1_relative_path)
        dwi_file = opj('{subject_id}', dwi_relative_path)
        bvec_file = opj('{subject_id}', bvec_relative_path)
        bval_file = opj('{subject_id}', bval_relative_path)

        templates = {
            'anat': anat_file,
            'dwi': dwi_file,
            'bvec': bvec_file,
            'bval': bval_file
        }

        selectfiles = Node(SelectFiles(
            templates, base_directory=self.paths['input_path']),
                           name="selectfiles")

        # Datasink - creates output folder for important outputs
        datasink = Node(DataSink(base_directory=experiment_dir,
                                 container=output_dir),
                        name="datasink")

        substitutions = [('_subject_id_', 'sub-')]

        datasink.inputs.substitutions = substitutions

        preproc = Workflow(name='preproc')
        preproc.base_dir = opj(experiment_dir, working_dir)

        # BET - Skullstrip anatomical and functional images
        bet_t1 = Node(BET(frac=0.5,
                          robust=True,
                          mask=True,
                          output_type='NIFTI_GZ'),
                      name="bet_t1")  # FSL

        denoise_t1 = Node(Denoise(), name="denoising_t1")  # Dipy

        reslicing = Node(Reslicing(vox_sz=iso_size), name="reslicing")  #Dipy

        #registration_atlas = Node(RegistrationAtlas(reference=self.paths['reference'], atlas_to_apply=self.paths['image_parcellation_path']), name="registration_atlas")
        registration_atlas = Node(
            RegistrationAtlas(reference=self.paths['reference']),
            name="registration_atlas")
        registration_atlas.iterables = [
            ('atlas_to_apply', self.paths['image_parcellation_path'])
        ]

        #registration_t1 = Node(Registration(reference=self.paths['reference']), name="registration_t1")

        #registration_dwi = Node(Registration(reference='/home/brainlab/Desktop/Rudas/Data/Parcellation/MNI152_T2_2mm.nii.gz'), name="registration_dwi")

        tractography = Node(Tractography(), name='tractography')  # Dipy

        model_dti = Node(ModelDTI(), name="model_dti")  # Dipy

        denoise_dwi = Node(Denoise(), name="denoising_dwi")  # Dipy

        extract_b0 = Node(ExtractB0(), name="extract_b0")

        n4bias = Node(N4Bias(out_file='t1_n4bias.nii.gz'),
                      name='n4bias')  # SimpleITK

        eddycorrection = Node(EddyCorrect(ref_num=0), 'eddycorrection')  # FSL

        median_otsu = Node(MedianOtsu(), 'median_otsu')  # Dipy
        '''
        normalize_t1 = Node(Normalize12(jobtype='estwrite',
                                        tpm=self.paths['template_spm_path'],
                                        write_voxel_sizes=[iso_size, iso_size, iso_size],
                                        write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                            name="normalize_t1")

        normalize_masks = Node(Normalize12(jobtype='estwrite',
                                           tpm=self.paths['template_spm_path'],
                                           write_voxel_sizes=[iso_size, iso_size, iso_size],
                                           write_bounding_box=[[-90, -126, -72], [90, 90, 108]]),
                               name="normalize_masks")
        
        # FAST - Image Segmentation
        segmentation = Node(FAST(output_type='NIFTI'), name="segmentation")

        # FLIRT - pre-alignment of functional images to anatomical images
        coreg_pre = Node(FLIRT(dof=6, output_type='NIFTI_GZ'), name="linear_warp_estimation")

        # Threshold - Threshold WM probability image
        threshold = Node(Threshold(thresh=0.5, args='-bin', output_type='NIFTI_GZ'), name="wm_mask_threshold")

        gunzip1 = Node(Gunzip(), name="gunzip1")
        gunzip2 = Node(Gunzip(), name="gunzip2")
        '''

        # Create a coregistration workflow
        coregwf = Workflow(name='coreg_fmri_to_t1')
        coregwf.base_dir = opj(experiment_dir, working_dir)

        # FLIRT - coregistration of functional images to anatomical images with BBR
        coreg_bbr = Node(FLIRT(dof=6,
                               cost='bbr',
                               schedule=opj(os.getenv('FSLDIR'),
                                            'etc/flirtsch/bbr.sch'),
                               output_type='NIFTI_GZ'),
                         name="nonlinear_warp_estimation")

        # Apply coregistration warp to functional images
        applywarp = Node(FLIRT(interp='spline',
                               apply_isoxfm=iso_size,
                               output_type='NIFTI'),
                         name="registration_dwi")

        applywarp_mean = Node(FLIRT(interp='spline',
                                    apply_isoxfm=iso_size,
                                    output_type='NIFTI_GZ'),
                              name="registration_mean_b0")

        # Connect all components of the coregistration workflow
        '''
        coregwf.connect([(denoise_t1, bet_t1, [('out_file', 'in_file')]),
                         (bet_t1, n4bias, [('out_file', 'in_file')]),
                         (n4bias, segmentation, [('out_file', 'in_files')]),
                         (segmentation, threshold, [(('partial_volume_files', get_latest), 'in_file')]),
                         (n4bias, coreg_pre, [('out_file', 'reference')]),
                         (threshold, coreg_bbr, [('out_file', 'wm_seg')]),
                         (coreg_pre, coreg_bbr, [('out_matrix_file', 'in_matrix_file')]),
                         (coreg_bbr, applywarp, [('out_matrix_file', 'in_matrix_file')]),
                         (n4bias, applywarp, [('out_file', 'reference')]),
                         (coreg_bbr, applywarp_mean, [('out_matrix_file', 'in_matrix_file')]),
                         (n4bias, applywarp_mean, [('out_file', 'reference')]),
                         ])
        '''

        # Connect all components of the preprocessing workflow
        preproc.connect([
            (infosource, selectfiles, [('subject_id', 'subject_id')]),
            #(selectfiles, coregwf, [('anat', 'denoising_t1.in_file'),
            #                        ('anat', 'nonlinear_warp_estimation.reference')]),
            #(selectfiles, extract_b0, [('dwi', 'dwi_path'), ('bval', 'bval_path'), ('bvec', 'bvec_path')]),
            #(extract_b0, coregwf, [('out_file', 'linear_warp_estimation.in_file'),
            #                    ('out_file', 'nonlinear_warp_estimation.in_file'),
            #                    ('out_file', 'registration_mean_b0.in_file')]),
            #(selectfiles, coregwf, [('dwi', 'registration_dwi.in_file')]),
            #(coregwf, eddycorrection, [('registration_dwi.out_file', 'in_file')]),
            #(eddycorrection, denoise_dwi, [('eddy_corrected', 'in_file')]),
            #(denoise_dwi, median_otsu, [('out_file', 'in_file')]),
            (selectfiles, denoise_t1, [('anat', 'in_file')]),
            (denoise_t1, bet_t1, [('out_file', 'in_file')]),
            (bet_t1, n4bias, [('out_file', 'in_file')]),
            (selectfiles, eddycorrection, [('dwi', 'in_file')]),
            (eddycorrection, reslicing, [('eddy_corrected', 'in_file')]),
            (reslicing, denoise_dwi, [('out_file', 'in_file')]),
            (denoise_dwi, median_otsu, [('out_file', 'in_file')]),
            (median_otsu, extract_b0, [(('out_file', get_first), 'in_file')]),
            (selectfiles, extract_b0, [('bval', 'bval_path'),
                                       ('bvec', 'bvec_path')]),
            (extract_b0, registration_atlas, [('out_file', 'image_to_align')]),
            (median_otsu, model_dti, [(('out_file', get_first), 'in_file'),
                                      (('out_file', get_latest), 'mask_file')
                                      ]),
            (selectfiles, model_dti, [('bval', 'bval_path'),
                                      ('bvec', 'bvec_path')]),
            (median_otsu, tractography, [(('out_file', get_first), 'in_file'),
                                         (('out_file', get_latest),
                                          'mask_file')]),
            (registration_atlas, tractography, [('out_file',
                                                 'image_parcellation_path')]),
            (selectfiles, tractography, [('bval', 'bval_path'),
                                         ('bvec', 'bvec_path')])
        ])
        preproc.run()
level1design = Node(fsl.Level1Design(bases={'dgamma': {
    'derivs': True
}},
                                     interscan_interval=TR,
                                     model_serial_correlations=True,
                                     contrasts=contrast_list),
                    name="level1design")

# creating all the other files necessary to run the model
modelgen = Node(fsl.FEATModel(), name='modelgen')

# then running through FEAT
feat = Node(fsl.FEAT(), name="feat")

# creating datasink to collect outputs
datasink = Node(DataSink(base_directory=outDir), name='datasink')

## Use the following DataSink output substitutions
substitutions = [('_subject_id_', 'sub-'), ('_subsession_id_', '/ses-')]

datasink.inputs.substitutions = substitutions

###########
#
# SETTING UP THE WORKFLOW NODES
#
###########

# creating the workflow
firstLevel = Workflow(name="Level1_FingerFootLips", base_dir=outDir)
                                  iterfield=["in_file"],
                                  name='convert')

        get_stats_node = Node(Function(input_names=["subjects_dir", "subject"],
                                       output_names=["output_dict"],
                                       function=parse_stats), name="get_freesurfer_stats")

        write_mindcontrol_entries = Node(Function(input_names=["output_dir",
                                                               "subject",
                                                               "stats",
                                                               "startup_json_path"],
                                                  output_names=["output_json"],
                                                  function=create_mindcontrol_entries),
                                         name="get_mindcontrol_entries")

        datasink_node = Node(DataSink(),
                             name='datasink')
        subst = [('out_file', ''),
                 ('_subject_id_', ''),
                 ('_out', '')]
        subst += [("_convert%d" % index, "mri") for index in range(len(volumes))]
        datasink_node.inputs.substitutions = subst
        workflow_working_dir = scratch_dir.absolute()

        wf = Workflow(name="MindPrepFS")
        wf.base_dir = workflow_working_dir
        wf.connect(input_node, "subject_id", dg_node, "subject")
        wf.connect(input_node, "subjects_dir", dg_node, "subjects_dir")
        wf.connect(input_node, "subject_id", get_stats_node, "subject")
        wf.connect(input_node, "subjects_dir", get_stats_node, "subjects_dir")
        wf.connect(input_node, "subject_id", write_mindcontrol_entries, "subject")
Example #17
def secondlevel_wf(subject_id, sink_directory, name='wmaze_scndlvl_wf'):
    scndlvl_wf = Workflow(name=name)
    base_dir = os.path.abspath('/home/data/madlab/data/mri/wmaze/')

    all_contrasts = [
        'F_C_corr', 'F_C_incorr', 'f_BL_C', 'AllVsBase', 'all_remaining'
    ]
    contrasts = []
    dof_runs = []

    for i, curr_cont in enumerate(all_contrasts):
        cont_files = glob(
            os.path.join(
                base_dir,
                'frstlvl/model_RSA/{0}/modelfit/contrasts/_estimate_model*/cope??_{1}.nii.gz'
                .format(subject_id, curr_cont)))
        if len(cont_files) > 1:
            contrasts.append(curr_cont)
            dof_runs.append([])

    cnt_file_list = []
    for curr_contrast in contrasts:
        cnt_file_list.append(
            glob(
                os.path.join(
                    base_dir,
                    'frstlvl/model_RSA/{0}/modelfit/contrasts/_estimate_model*/cope??_{1}.nii.gz'
                    .format(subject_id, curr_contrast))))

    for i, curr_file_list in enumerate(cnt_file_list):
        if not isinstance(curr_file_list, list):
            curr_file_list = [curr_file_list]
        for curr_file in curr_file_list:
            dof_runs[i].append(
                curr_file.split('/')[-2][-1])  # grabs the _estimate_model number

    info = dict(copes=[['subject_id', contrasts]],
                varcopes=[['subject_id', contrasts]],
                mask_file=[['subject_id', 'aparc+aseg_thresh']],
                dof_files=[['subject_id', dof_runs, 'dof']])

    #datasource node to get the contrast (cope/varcope), mask, and DOF files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=info.keys()),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    datasource.inputs.base_directory = os.path.abspath(
        '/home/data/madlab/data/mri/wmaze/')
    datasource.inputs.field_template = dict(
        copes=
        'frstlvl/model_RSA/%s/modelfit/contrasts/_estimate_model*/cope*_%s.nii.gz',
        varcopes=
        'frstlvl/model_RSA/%s/modelfit/contrasts/_estimate_model*/varcope*_%s.nii.gz',
        mask_file='preproc/%s/ref/_fs_threshold20/%s*_thresh.nii',
        dof_files='frstlvl/model_RSA/%s/modelfit/dofs/_estimate_model%s/%s')
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True
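
    # NOTE (explanatory aside, not in the original): for each output field,
    # DataGrabber substitutes the template_args entries into the matching
    # field_template, so 'copes' expands roughly to
    #   glob('frstlvl/model_RSA/<subject_id>/modelfit/contrasts/'
    #        '_estimate_model*/cope*_<contrast>.nii.gz')
    # evaluated once per contrast name in ``contrasts``.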

    #Inputspec node to deal with copes and varcopes doublelist issues
    fixedfx_inputspec = Node(IdentityInterface(
        fields=['copes', 'varcopes', 'dof_files'], mandatory_inputs=True),
                             name='fixedfx_inputspec')
    scndlvl_wf.connect(datasource, ('copes', doublelist), fixedfx_inputspec,
                       'copes')
    scndlvl_wf.connect(datasource, ('varcopes', doublelist), fixedfx_inputspec,
                       'varcopes')
    scndlvl_wf.connect(datasource, ('dof_files', doublelist),
                       fixedfx_inputspec, 'dof_files')
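
    # A minimal sketch of what the ``doublelist`` connect-function presumably
    # does (it is defined elsewhere in the original project): wrap each element
    # in its own list so the downstream MapNodes receive a list of lists, e.g.
    #
    #   def doublelist(x):
    #       return x if isinstance(x[0], list) else [[elem] for elem in x]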

    #MapNode to merge all copes into a single matrix across subject runs
    copemerge = MapNode(Merge(), iterfield=['in_files'], name='copemerge')
    copemerge.inputs.dimension = 't'
    copemerge.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    copemerge.inputs.ignore_exception = False
    copemerge.inputs.output_type = 'NIFTI_GZ'
    copemerge.inputs.terminal_output = 'stream'
    scndlvl_wf.connect(fixedfx_inputspec, 'copes', copemerge, 'in_files')

    #node to generate DOF volume for second level
    gendofvolume = Node(Function(input_names=['dof_files', 'cope_files'],
                                 output_names=['dof_volumes'],
                                 function=get_dofvolumes),
                        name='gendofvolume')
    gendofvolume.inputs.ignore_exception = False
    scndlvl_wf.connect(fixedfx_inputspec, 'dof_files', gendofvolume,
                       'dof_files')
    scndlvl_wf.connect(copemerge, 'merged_file', gendofvolume, 'cope_files')

    #MapNode to merge all of the varcopes into a single matrix across subject runs
    varcopemerge = MapNode(Merge(),
                           iterfield=['in_files'],
                           name='varcopemerge')
    varcopemerge.inputs.dimension = 't'
    varcopemerge.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    varcopemerge.inputs.ignore_exception = False
    varcopemerge.inputs.output_type = 'NIFTI_GZ'
    varcopemerge.inputs.terminal_output = 'stream'
    scndlvl_wf.connect(fixedfx_inputspec, 'varcopes', varcopemerge, 'in_files')

    #node to define contrasts from the names of the copes
    getcontrasts = Node(Function(input_names=['data_inputs'],
                                 output_names=['contrasts'],
                                 function=get_contrasts),
                        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    scndlvl_wf.connect(datasource, ('copes', doublelist), getcontrasts,
                       'data_inputs')

    #Function node to rename output files to be more descriptive
    getsubs = Node(Function(input_names=['subject_id', 'cons'],
                            output_names=['subs'],
                            function=get_subs),
                   name='getsubs')
    getsubs.inputs.ignore_exception = False
    getsubs.inputs.subject_id = subject_id
    scndlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    #MapNode to create a l2model node for Fixed Effects analysis (aka within subj across runs)
    l2model = MapNode(L2Model(), iterfield=['num_copes'], name='l2model')
    l2model.inputs.ignore_exception = False
    scndlvl_wf.connect(datasource, ('copes', num_copes), l2model, 'num_copes')
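
    # A minimal sketch of the assumed ``num_copes`` connect-function: report
    # how many runs feed each contrast so L2Model can build one fixed-effects
    # design per contrast, e.g.
    #
    #   def num_copes(files):
    #       return [len(f) for f in files]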

    #MapNode to create a FLAMEO Node to run fixed effects analysis
    flameo_fe = MapNode(FLAMEO(),
                        iterfield=[
                            'cope_file', 'var_cope_file', 'dof_var_cope_file',
                            'design_file', 't_con_file', 'cov_split_file'
                        ],
                        name='flameo_fe')
    flameo_fe.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    flameo_fe.inputs.ignore_exception = False
    flameo_fe.inputs.log_dir = 'stats'
    flameo_fe.inputs.output_type = 'NIFTI_GZ'
    flameo_fe.inputs.run_mode = 'fe'
    flameo_fe.inputs.terminal_output = 'stream'
    scndlvl_wf.connect(varcopemerge, 'merged_file', flameo_fe, 'var_cope_file')
    scndlvl_wf.connect(l2model, 'design_mat', flameo_fe, 'design_file')
    scndlvl_wf.connect(l2model, 'design_con', flameo_fe, 't_con_file')
    scndlvl_wf.connect(l2model, 'design_grp', flameo_fe, 'cov_split_file')
    scndlvl_wf.connect(gendofvolume, 'dof_volumes', flameo_fe,
                       'dof_var_cope_file')
    scndlvl_wf.connect(datasource, 'mask_file', flameo_fe, 'mask_file')
    scndlvl_wf.connect(copemerge, 'merged_file', flameo_fe, 'cope_file')

    #outputspec node
    scndlvl_outputspec = Node(IdentityInterface(
        fields=['res4d', 'copes', 'varcopes', 'zstats', 'tstats'],
        mandatory_inputs=True),
                              name='scndlvl_outputspec')
    scndlvl_wf.connect(flameo_fe, 'res4d', scndlvl_outputspec, 'res4d')
    scndlvl_wf.connect(flameo_fe, 'copes', scndlvl_outputspec, 'copes')
    scndlvl_wf.connect(flameo_fe, 'var_copes', scndlvl_outputspec, 'varcopes')
    scndlvl_wf.connect(flameo_fe, 'zstats', scndlvl_outputspec, 'zstats')
    scndlvl_wf.connect(flameo_fe, 'tstats', scndlvl_outputspec, 'tstats')

    # Create a datasink node
    sinkd = Node(DataSink(), name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    scndlvl_wf.connect(scndlvl_outputspec, 'copes', sinkd, 'fixedfx.@copes')
    scndlvl_wf.connect(scndlvl_outputspec, 'varcopes', sinkd,
                       'fixedfx.@varcopes')
    scndlvl_wf.connect(scndlvl_outputspec, 'tstats', sinkd, 'fixedfx.@tstats')
    scndlvl_wf.connect(scndlvl_outputspec, 'zstats', sinkd, 'fixedfx.@zstats')
    scndlvl_wf.connect(scndlvl_outputspec, 'res4d', sinkd, 'fixedfx.@pvals')
    scndlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')

    return scndlvl_wf
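
# A minimal usage sketch (hypothetical subject ID and paths, not in the
# original):
#
#   wf = secondlevel_wf('WMAZE_001', '/home/data/madlab/scndlvl')
#   wf.base_dir = '/scratch/wmaze'
#   wf.run()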
Example No. 18
# Identity node- select subjects
infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = ('subject_id', subjects_list)
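# NOTE (explanatory aside, not in the original): setting ``iterables`` makes
# nipype clone everything downstream of this node once per value, so the graph
# below runs independently for each entry in subjects_list.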

# Data grabber- select fMRI and sMRI
templates = {'func': raw_data + '/{subject_id}/rest/rest_raw.nii'}
selectfiles = Node(SelectFiles(templates), name='selectfiles')

# FreeSurferSource - Data grabber specific for FreeSurfer data
fssource = Node(FreeSurferSource(subjects_dir=fs_dir),
                run_without_submitting=True,
                name='fssource')

# Datasink- where our select outputs will go
substitutions = [('_subject_id_', '')]  #output file name substitutions
datasink = Node(DataSink(substitutions=substitutions), name='datasink')
datasink.inputs.base_directory = output_dir
datasink.inputs.container = output_dir


## Nodes for preprocessing

# Reorient to standard space using FSL
reorientfunc = Node(Reorient2Std(), name='reorientfunc')
reorientstruct = Node(Reorient2Std(), name='reorientstruct')

# Reslice- using MRI_convert
reslice = Node(MRIConvert(vox_size=resampled_voxel_size, out_type='nii'),
               name='reslice')
Example No. 19
# Minimal imports assumed for this snippet; statsDir, zThresh, and outDir are
# defined earlier in the original script.
import os

import pandas as pd
from nipype import Node, Workflow
from nipype.interfaces import fsl
from nipype.interfaces.io import DataSink

# z-stat image
imgZStat = os.path.join(statsDir, 'zstat1.nii.gz')
# mask image
imgMask = os.path.join(statsDir, 'mask.nii.gz')


# FINDING CLUSTERS IN THE ANALYSIS RESULTS
# cluster node
cluster = Node(fsl.Cluster(in_file=imgZStat,
                           threshold=zThresh,
                           out_index_file=True,
                           out_threshold_file=True,
                           out_localmax_txt_file=True),
               name='cluster')
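
# NOTE (rough CLI equivalent, not in the original): this node wraps FSL's
# ``cluster`` tool, approximately:
#   cluster --in=zstat1.nii.gz --thresh=<zThresh> \
#           --oindex=... --othresh=... --olmax=zstat1_localmax.txt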

# data sink node
datasink = Node(DataSink(base_directory=statsDir),
                name='datasink')

# workflow connecting clustering to the datasink
clusterWF = Workflow(name="clusterWF", base_dir=outDir)
clusterWF.connect(cluster, 'index_file', datasink, 'index_file')
clusterWF.connect(cluster, 'threshold_file', datasink, 'threshold_file')
clusterWF.connect(cluster, 'localmax_txt_file', datasink, 'localmax_txt_file')
clusterWF.run()


# LOADING CLUSTER MAXIMA TABLE
fMaxTable = os.path.join(statsDir,'localmax_txt_file/zstat1_localmax.txt')
maxData = pd.read_csv(fMaxTable, sep='\t')   # reading the maxima file as a dataframe
maxData.dropna(how='all', axis=1, inplace=True)  # removing empty columns
print(maxData)
Example No. 20
# Minimal imports assumed for this snippet; the helpers subjectinfo,
# get_contrasts, get_subs, motion_noise, and pop_lambda are defined elsewhere
# in the original project.
import os

from nipype import Node, MapNode, Workflow
from nipype.algorithms.modelgen import SpecifyModel
from nipype.interfaces.fsl import FEATModel, FILMGLS, ImageMaths, Level1Design
from nipype.interfaces.io import DataGrabber, DataSink
from nipype.interfaces.utility import Function, IdentityInterface, Merge


def firstlevel_wf(subject_id, sink_directory, name='ds008_R2_frstlvl_wf'):

    frstlvl_wf = Workflow(name='frstlvl_wf')

    info = dict(task_mri_files=[['subject_id', 'stopsignal']],
                motion_noise_files=[['subject_id', 'filter_regressor']])

    # Create a Function node to define stimulus onsets, etc... for each subject
    subject_info = Node(Function(input_names=['subject_id'],
                                 output_names=['output'],
                                 function=subjectinfo),
                        name='subject_info')
    subject_info.inputs.ignore_exception = False
    subject_info.inputs.subject_id = subject_id

    # Create another Function node to define the contrasts for the experiment
    getcontrasts = Node(Function(input_names=['subject_id'],
                                 output_names=['contrasts'],
                                 function=get_contrasts),
                        name='getcontrasts')
    getcontrasts.inputs.ignore_exception = False
    getcontrasts.inputs.subject_id = subject_id

    # Create a Function node to substitute names of files created during pipeline
    getsubs = Node(Function(input_names=['subject_id', 'cons', 'info'],
                            output_names=['subs'],
                            function=get_subs),
                   name='getsubs')
    getsubs.inputs.ignore_exception = False
    getsubs.inputs.subject_id = subject_id
    frstlvl_wf.connect(subject_info, 'output', getsubs, 'info')
    frstlvl_wf.connect(getcontrasts, 'contrasts', getsubs, 'cons')

    # Create a datasource node to get the task_mri and motion-noise files
    datasource = Node(DataGrabber(infields=['subject_id'],
                                  outfields=info.keys()),
                      name='datasource')
    datasource.inputs.template = '*'
    datasource.inputs.subject_id = subject_id
    #datasource.inputs.base_directory = os.path.abspath('/scratch/PSB6351_2017/ds008_R2.0.0/preproc/')
    #datasource.inputs.field_template = dict(task_mri_files='%s/func/realigned/*%s*.nii.gz',
    #                                        motion_noise_files='%s/noise/%s*.txt')
    datasource.inputs.base_directory = os.path.abspath(
        '/scratch/PSB6351_2017/students/salo/data/preproc/')
    datasource.inputs.field_template = dict(
        task_mri_files=
        '%s/preproc/func/smoothed/corr_*_task-%s_*_bold_bet_smooth_mask.nii.gz',
        motion_noise_files='%s/preproc/noise/%s*.txt')
    datasource.inputs.template_args = info
    datasource.inputs.sort_filelist = True
    datasource.inputs.ignore_exception = False
    datasource.inputs.raise_on_empty = True

    # Create a Function node to modify the motion and noise files to be single regressors
    motionnoise = Node(Function(input_names=['subjinfo', 'files'],
                                output_names=['subjinfo'],
                                function=motion_noise),
                       name='motionnoise')
    motionnoise.inputs.ignore_exception = False
    frstlvl_wf.connect(subject_info, 'output', motionnoise, 'subjinfo')
    frstlvl_wf.connect(datasource, 'motion_noise_files', motionnoise, 'files')

    # Create a specify model node
    specify_model = Node(SpecifyModel(), name='specify_model')
    specify_model.inputs.high_pass_filter_cutoff = 128.
    specify_model.inputs.ignore_exception = False
    specify_model.inputs.input_units = 'secs'
    specify_model.inputs.time_repetition = 2.
    frstlvl_wf.connect(datasource, 'task_mri_files', specify_model,
                       'functional_runs')
    frstlvl_wf.connect(motionnoise, 'subjinfo', specify_model, 'subject_info')

    # Create an InputSpec node for the modelfit node
    modelfit_inputspec = Node(IdentityInterface(fields=[
        'session_info', 'interscan_interval', 'contrasts', 'film_threshold',
        'functional_data', 'bases', 'model_serial_correlations'
    ],
                                                mandatory_inputs=True),
                              name='modelfit_inputspec')
    modelfit_inputspec.inputs.bases = {'dgamma': {'derivs': False}}
    modelfit_inputspec.inputs.film_threshold = 0.0
    modelfit_inputspec.inputs.interscan_interval = 2.0
    modelfit_inputspec.inputs.model_serial_correlations = True
    frstlvl_wf.connect(datasource, 'task_mri_files', modelfit_inputspec,
                       'functional_data')
    frstlvl_wf.connect(getcontrasts, 'contrasts', modelfit_inputspec,
                       'contrasts')
    frstlvl_wf.connect(specify_model, 'session_info', modelfit_inputspec,
                       'session_info')

    # Create a level1 design node
    level1_design = Node(Level1Design(), name='level1_design')
    level1_design.inputs.ignore_exception = False
    frstlvl_wf.connect(modelfit_inputspec, 'interscan_interval', level1_design,
                       'interscan_interval')
    frstlvl_wf.connect(modelfit_inputspec, 'session_info', level1_design,
                       'session_info')
    frstlvl_wf.connect(modelfit_inputspec, 'contrasts', level1_design,
                       'contrasts')
    frstlvl_wf.connect(modelfit_inputspec, 'bases', level1_design, 'bases')
    frstlvl_wf.connect(modelfit_inputspec, 'model_serial_correlations',
                       level1_design, 'model_serial_correlations')

    # Create a MapNode to generate a model for each run
    generate_model = MapNode(FEATModel(),
                             iterfield=['fsf_file', 'ev_files'],
                             name='generate_model')
    generate_model.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    generate_model.inputs.ignore_exception = False
    generate_model.inputs.output_type = 'NIFTI_GZ'
    generate_model.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(level1_design, 'fsf_files', generate_model, 'fsf_file')
    frstlvl_wf.connect(level1_design, 'ev_files', generate_model, 'ev_files')

    # Create a MapNode to estimate the model using FILMGLS
    estimate_model = MapNode(FILMGLS(),
                             iterfield=['design_file', 'in_file', 'tcon_file'],
                             name='estimate_model')
    frstlvl_wf.connect(generate_model, 'design_file', estimate_model,
                       'design_file')
    frstlvl_wf.connect(generate_model, 'con_file', estimate_model, 'tcon_file')
    frstlvl_wf.connect(modelfit_inputspec, 'functional_data', estimate_model,
                       'in_file')

    # Create a merge node to merge the contrasts - necessary for fsl 5.0.7 and greater
    merge_contrasts = MapNode(Merge(2),
                              iterfield=['in1'],
                              name='merge_contrasts')
    frstlvl_wf.connect(estimate_model, 'zstats', merge_contrasts, 'in1')

    # Create a MapNode to transform the z2pval
    z2pval = MapNode(ImageMaths(), iterfield=['in_file'], name='z2pval')
    z2pval.inputs.environ = {'FSLOUTPUTTYPE': 'NIFTI_GZ'}
    z2pval.inputs.ignore_exception = False
    z2pval.inputs.op_string = '-ztop'
    z2pval.inputs.output_type = 'NIFTI_GZ'
    z2pval.inputs.suffix = '_pval'
    z2pval.inputs.terminal_output = 'stream'
    frstlvl_wf.connect(merge_contrasts, ('out', pop_lambda), z2pval, 'in_file')
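
    # NOTE (explanatory aside, not in the original): the ``-ztop`` op converts
    # z-statistics to one-tailed p-values, p = 1 - Phi(z); in Python this is
    # roughly scipy.stats.norm.sf(z). ``pop_lambda`` presumably picks the
    # first element of each merged list before the conversion.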

    # Create an outputspec node
    modelfit_outputspec = Node(IdentityInterface(fields=[
        'copes', 'varcopes', 'dof_file', 'pfiles', 'parameter_estimates',
        'zstats', 'design_image', 'design_file', 'design_cov', 'sigmasquareds'
    ],
                                                 mandatory_inputs=True),
                               name='modelfit_outputspec')
    frstlvl_wf.connect(estimate_model, 'copes', modelfit_outputspec, 'copes')
    frstlvl_wf.connect(estimate_model, 'varcopes', modelfit_outputspec,
                       'varcopes')
    frstlvl_wf.connect(merge_contrasts, 'out', modelfit_outputspec, 'zstats')
    frstlvl_wf.connect(z2pval, 'out_file', modelfit_outputspec, 'pfiles')
    frstlvl_wf.connect(generate_model, 'design_image', modelfit_outputspec,
                       'design_image')
    frstlvl_wf.connect(generate_model, 'design_file', modelfit_outputspec,
                       'design_file')
    frstlvl_wf.connect(generate_model, 'design_cov', modelfit_outputspec,
                       'design_cov')
    frstlvl_wf.connect(estimate_model, 'param_estimates', modelfit_outputspec,
                       'parameter_estimates')
    frstlvl_wf.connect(estimate_model, 'dof_file', modelfit_outputspec,
                       'dof_file')
    frstlvl_wf.connect(estimate_model, 'sigmasquareds', modelfit_outputspec,
                       'sigmasquareds')

    # Create a datasink node
    sinkd = Node(DataSink(), name='sinkd')
    sinkd.inputs.base_directory = sink_directory
    sinkd.inputs.container = subject_id
    frstlvl_wf.connect(getsubs, 'subs', sinkd, 'substitutions')
    frstlvl_wf.connect(modelfit_outputspec, 'parameter_estimates', sinkd,
                       'modelfit.estimates')
    frstlvl_wf.connect(modelfit_outputspec, 'sigmasquareds', sinkd,
                       'modelfit.estimates.@sigsq')
    frstlvl_wf.connect(modelfit_outputspec, 'dof_file', sinkd, 'modelfit.dofs')
    frstlvl_wf.connect(modelfit_outputspec, 'copes', sinkd,
                       'modelfit.contrasts.@copes')
    frstlvl_wf.connect(modelfit_outputspec, 'varcopes', sinkd,
                       'modelfit.contrasts.@varcopes')
    frstlvl_wf.connect(modelfit_outputspec, 'zstats', sinkd,
                       'modelfit.contrasts.@zstats')
    frstlvl_wf.connect(modelfit_outputspec, 'design_image', sinkd,
                       'modelfit.design')
    frstlvl_wf.connect(modelfit_outputspec, 'design_cov', sinkd,
                       'modelfit.design.@cov')
    frstlvl_wf.connect(modelfit_outputspec, 'design_file', sinkd,
                       'modelfit.design.@matrix')
    frstlvl_wf.connect(modelfit_outputspec, 'pfiles', sinkd,
                       'modelfit.contrasts.@pstats')

    return frstlvl_wf
###
# Input & Output Stream

# Infosource - a function free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id', 'contrasts'],
                                    contrasts=contrast_list),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# SelectFiles - to grab the data (alternativ to DataGrabber)
templates = {'func': 'data/{subject_id}/run*.nii.gz'}
selectfiles = Node(SelectFiles(templates, base_directory=experiment_dir),
                   name="selectfiles")

# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory=experiment_dir, container=output_dir),
                name="datasink")

# Use the following DataSink output substitutions
substitutions = [('_subject_id_', ''), ('_despike', ''), ('_detrended', ''),
                 ('_warped', '')]
datasink.inputs.substitutions = substitutions

# Connect Infosource, SelectFiles and DataSink to the main workflow
metaflow.connect([
    (infosource, selectfiles, [('subject_id', 'subject_id')]),
    (infosource, preproc, [('subject_id', 'bbregister.subject_id'),
                           ('subject_id', 'fssource.subject_id')]),
    (selectfiles, preproc, [('func', 'despike.in_file')]),
    (infosource, getsubjectinfo, [('subject_id', 'subject_id')]),
    (getsubjectinfo, l1analysis, [('subject_info', 'modelspec.subject_info')]),
    output_names=[
        'tensor_fa_file', 'tensor_evec_file', 'model_gfa_file',
        'model_track_file', 'affine', 'tensor_ad_file', 'tensor_rd_file',
        'tensor_md_file', 'shm_coeff_file'
    ],
    function=dmri_recon),
               name='tracker')
tracker.inputs.data_dir = data_dir
tracker.inputs.out_dir = out_dir
tracker.inputs.recon = 'csd'
tracker.inputs.dirs = dirs
tracker.inputs.num_threads = num_threads
#tracker.plugin_args = {'sbatch_args': '--time=1-00:00:00 --mem=%dG -N 1 -c %d' % (10 * num_threads, num_threads),
#                       'overwrite': True}

ds = Node(DataSink(parameterization=False), name='sinker')
ds.inputs.base_directory = out_dir
ds.plugin_args = {'overwrite': True}

wf = Workflow(name='streamlines')
wf.config['execution']['crashfile_format'] = 'txt'

wf.connect(infosource, 'subject_id', tracker, 'sid')
wf.connect(infosource, 'subject_id', ds, 'container')

# data sink
wf.connect(tracker, 'tensor_fa_file', ds, 'recon.@fa')
wf.connect(tracker, 'tensor_evec_file', ds, 'recon.@evec')
wf.connect(tracker, 'model_gfa_file', ds, 'recon.@gfa')
wf.connect(tracker, 'model_track_file', ds, 'recon.@track')
Example No. 23
def init_infant_brain_extraction_wf(
    ants_affine_init=False,
    bspline_fitting_distance=200,
    debug=False,
    in_template="MNIInfant",
    template_specs=None,
    interim_checkpoints=True,
    mem_gb=3.0,
    mri_scheme="T2w",
    name="infant_brain_extraction_wf",
    atropos_model=None,
    omp_nthreads=None,
    output_dir=None,
    use_float=True,
):
    """
    Build an atlas-based brain extraction pipeline for infant T2w MRI data.

    Parameters
    ----------
    ants_affine_init : :obj:`bool`, optional
        Set-up a pre-initialization step with ``antsAI`` to account for mis-oriented images.

    """
    inputnode = pe.Node(niu.IdentityInterface(fields=["in_files", "in_mask"]),
                        name="inputnode")
    outputnode = pe.Node(niu.IdentityInterface(
        fields=["out_corrected", "out_brain", "out_mask"]),
                         name="outputnode")

    template_specs = template_specs or {}
    # Find a suitable target template in TemplateFlow
    tpl_target_path = get_template(in_template,
                                   suffix=mri_scheme,
                                   **template_specs)
    if not tpl_target_path:
        raise RuntimeError(
            f"An instance of template <tpl-{in_template}> with MR scheme '{mri_scheme}'"
            " could not be found.")

    # tpl_brainmask_path = get_template(
    #     in_template, desc="brain", suffix="probseg", **template_specs
    # )
    # if not tpl_brainmask_path:

    # ignore probseg for the time being
    tpl_brainmask_path = get_template(in_template,
                                      desc="brain",
                                      suffix="mask",
                                      **template_specs)

    tpl_regmask_path = get_template(in_template,
                                    desc="BrainCerebellumExtraction",
                                    suffix="mask",
                                    **template_specs)

    # validate images
    val_tmpl = pe.Node(ValidateImage(), name='val_tmpl')
    val_tmpl.inputs.in_file = _pop(tpl_target_path)

    val_target = pe.Node(ValidateImage(), name='val_target')

    # Resample both target and template to a controlled, isotropic resolution
    res_tmpl = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS),
                       name="res_tmpl")  # testing
    res_target = pe.Node(RegridToZooms(zooms=HIRES_ZOOMS),
                         name="res_target")  # testing
    gauss_tmpl = pe.Node(niu.Function(function=_gauss_filter),
                         name="gauss_tmpl")

    # Spatial normalization step
    lap_tmpl = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                       name="lap_tmpl")
    lap_target = pe.Node(ImageMath(operation="Laplacian", op2="0.4 1"),
                         name="lap_target")

    # Merge image nodes
    mrg_target = pe.Node(niu.Merge(2), name="mrg_target")
    mrg_tmpl = pe.Node(niu.Merge(2), name="mrg_tmpl")

    norm_lap_tmpl = pe.Node(niu.Function(function=_trunc),
                            name="norm_lap_tmpl")
    norm_lap_tmpl.inputs.dtype = "float32"
    norm_lap_tmpl.inputs.out_max = 1.0
    norm_lap_tmpl.inputs.percentile = (0.01, 99.99)
    norm_lap_tmpl.inputs.clip_max = None

    norm_lap_target = pe.Node(niu.Function(function=_trunc),
                              name="norm_lap_target")
    norm_lap_target.inputs.dtype = "float32"
    norm_lap_target.inputs.out_max = 1.0
    norm_lap_target.inputs.percentile = (0.01, 99.99)
    norm_lap_target.inputs.clip_max = None

    # Set up initial spatial normalization
    ants_params = "testing" if debug else "precise"
    norm = pe.Node(
        Registration(from_file=pkgr_fn(
            "niworkflows.data", f"antsBrainExtraction_{ants_params}.json")),
        name="norm",
        n_procs=omp_nthreads,
        mem_gb=mem_gb,
    )
    norm.inputs.float = use_float

    # main workflow
    wf = pe.Workflow(name)
    # Create a buffer interface as a cache for the actual inputs to registration
    buffernode = pe.Node(
        niu.IdentityInterface(fields=["hires_target", "smooth_target"]),
        name="buffernode")

    # truncate target intensity for N4 correction
    clip_target = pe.Node(
        niu.Function(function=_trunc),
        name="clip_target",
    )
    clip_tmpl = pe.Node(
        niu.Function(function=_trunc),
        name="clip_tmpl",
    )
    #clip_tmpl.inputs.in_file = _pop(tpl_target_path)

    # INU correction of the target image
    init_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=False,
            copy_header=True,
            n_iterations=[50] * (4 - debug),
            convergence_threshold=1e-7,
            shrink_factor=4,
            bspline_fitting_distance=bspline_fitting_distance,
        ),
        n_procs=omp_nthreads,
        name="init_n4",
    )
    clip_inu = pe.Node(
        niu.Function(function=_trunc),
        name="clip_inu",
    )
    gauss_target = pe.Node(niu.Function(function=_gauss_filter),
                           name="gauss_target")
    wf.connect([
        # truncation, resampling, and initial N4
        (inputnode, val_target, [(("in_files", _pop), "in_file")]),
        # (inputnode, res_target, [(("in_files", _pop), "in_file")]),
        (val_target, res_target, [("out_file", "in_file")]),
        (res_target, clip_target, [("out_file", "in_file")]),
        (val_tmpl, clip_tmpl, [("out_file", "in_file")]),
        (clip_tmpl, res_tmpl, [("out", "in_file")]),
        (clip_target, init_n4, [("out", "input_image")]),
        (init_n4, clip_inu, [("output_image", "in_file")]),
        (clip_inu, gauss_target, [("out", "in_file")]),
        (clip_inu, buffernode, [("out", "hires_target")]),
        (gauss_target, buffernode, [("out", "smooth_target")]),
        (res_tmpl, gauss_tmpl, [("out_file", "in_file")]),
        # (clip_tmpl, gauss_tmpl, [("out", "in_file")]),
    ])
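
    # A minimal sketch of the assumed ``_pop`` connect-function used in the
    # tuples above: take the first element when the upstream output arrives
    # as a list.
    #
    #   def _pop(in_files):
    #       if isinstance(in_files, (list, tuple)):
    #           return in_files[0]
    #       return in_files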

    # Graft a template registration-mask if present
    if tpl_regmask_path:
        hires_mask = pe.Node(ApplyTransforms(
            input_image=_pop(tpl_regmask_path),
            transforms="identity",
            interpolation="NearestNeighbor",
            float=True),
                             name="hires_mask",
                             mem_gb=1)
        wf.connect([
            (res_tmpl, hires_mask, [("out_file", "reference_image")]),
        ])

    map_brainmask = pe.Node(ApplyTransforms(interpolation="Gaussian",
                                            float=True),
                            name="map_brainmask",
                            mem_gb=1)
    map_brainmask.inputs.input_image = str(tpl_brainmask_path)

    thr_brainmask = pe.Node(Binarize(thresh_low=0.80), name="thr_brainmask")
    bspline_grid = pe.Node(niu.Function(function=_bspline_distance),
                           name="bspline_grid")

    # Refine INU correction
    final_n4 = pe.Node(
        N4BiasFieldCorrection(
            dimension=3,
            save_bias=True,
            copy_header=True,
            n_iterations=[50] * 5,
            convergence_threshold=1e-7,
            rescale_intensities=True,
            shrink_factor=4,
        ),
        n_procs=omp_nthreads,
        name="final_n4",
    )
    final_mask = pe.Node(ApplyMask(), name="final_mask")

    if atropos_model is None:
        atropos_model = tuple(ATROPOS_MODELS[mri_scheme].values())

    atropos_wf = init_atropos_wf(
        use_random_seed=False,
        omp_nthreads=omp_nthreads,
        mem_gb=mem_gb,
        in_segmentation_model=atropos_model,
    )
    # if tpl_regmask_path:
    #     atropos_wf.get_node('inputnode').inputs.in_mask_dilated = tpl_regmask_path

    sel_wm = pe.Node(niu.Select(index=atropos_model[-1] - 1),
                     name='sel_wm',
                     run_without_submitting=True)

    wf.connect([
        (inputnode, map_brainmask, [(("in_files", _pop), "reference_image")]),
        (inputnode, final_n4, [(("in_files", _pop), "input_image")]),
        (inputnode, bspline_grid, [(("in_files", _pop), "in_file")]),
        # (bspline_grid, final_n4, [("out", "bspline_fitting_distance")]),
        (bspline_grid, final_n4, [("out", "args")]),
        # merge laplacian and original images
        (buffernode, lap_target, [("smooth_target", "op1")]),
        (buffernode, mrg_target, [("hires_target", "in1")]),
        (lap_target, norm_lap_target, [("output_image", "in_file")]),
        (norm_lap_target, mrg_target, [("out", "in2")]),
        # Template massaging
        (res_tmpl, lap_tmpl, [("out_file", "op1")]),
        (res_tmpl, mrg_tmpl, [("out_file", "in1")]),
        (lap_tmpl, norm_lap_tmpl, [("output_image", "in_file")]),
        (norm_lap_tmpl, mrg_tmpl, [("out", "in2")]),
        # spatial normalization
        (mrg_target, norm, [("out", "moving_image")]),
        (mrg_tmpl, norm, [("out", "fixed_image")]),
        (norm, map_brainmask, [("reverse_transforms", "transforms"),
                               ("reverse_invert_flags",
                                "invert_transform_flags")]),
        (map_brainmask, thr_brainmask, [("output_image", "in_file")]),
        # take a second pass of N4
        (map_brainmask, final_n4, [("output_image", "weight_image")]),
        (final_n4, final_mask, [("output_image", "in_file")]),
        (thr_brainmask, final_mask, [("out_mask", "in_mask")]),
        (final_n4, outputnode, [("output_image", "out_corrected")]),
        (thr_brainmask, outputnode, [("out_mask", "out_mask")]),
        (final_mask, outputnode, [("out_file", "out_brain")]),
    ])

    # wf.disconnect([
    #     (get_brainmask, apply_mask, [('output_image', 'mask_file')]),
    #     (copy_xform, outputnode, [('out_mask', 'out_mask')]),
    # ])

    # wf.connect([
    #     (init_n4, atropos_wf, [
    #         ('output_image', 'inputnode.in_files')]),  # intensity image
    #     (thr_brainmask, atropos_wf, [
    #         ('out_mask', 'inputnode.in_mask')]),
    #     (atropos_wf, sel_wm, [('outputnode.out_tpms', 'inlist')]),
    #     (sel_wm, final_n4, [('out', 'weight_image')]),
    # ])
    # wf.connect([
    # (atropos_wf, outputnode, [
    #     ('outputnode.out_mask', 'out_mask'),
    #     ('outputnode.out_segm', 'out_segm'),
    #     ('outputnode.out_tpms', 'out_tpms')]),
    # ])

    if tpl_regmask_path:
        wf.connect([
            (hires_mask, norm, [("output_image", "fixed_image_masks")]),
            # (hires_mask, atropos_wf, [
            #     ("output_image", "inputnode.in_mask_dilated")]),
        ])

    if interim_checkpoints:
        final_apply = pe.Node(ApplyTransforms(interpolation="BSpline",
                                              float=True),
                              name="final_apply",
                              mem_gb=1)
        final_report = pe.Node(SimpleBeforeAfter(
            before_label=f"tpl-{in_template}",
            after_label="target",
            out_report="final_report.svg"),
                               name="final_report")
        wf.connect([
            (inputnode, final_apply, [(("in_files", _pop), "reference_image")
                                      ]),
            (res_tmpl, final_apply, [("out_file", "input_image")]),
            (norm, final_apply, [("reverse_transforms", "transforms"),
                                 ("reverse_invert_flags",
                                  "invert_transform_flags")]),
            (final_apply, final_report, [("output_image", "before")]),
            (outputnode, final_report, [("out_corrected", "after"),
                                        ("out_mask", "wm_seg")]),
        ])

    if output_dir:
        from nipype.interfaces.io import DataSink
        ds_final_inu = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                               name="ds_final_inu")
        ds_final_msk = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                               name="ds_final_msk")
        ds_report = pe.Node(DataSink(base_directory=str(output_dir.parent)),
                            name="ds_report")

        wf.connect([
            (outputnode, ds_final_inu,
             [("out_corrected", f"{output_dir.name}.@inu_corrected")]),
            (outputnode, ds_final_msk, [("out_mask",
                                         f"{output_dir.name}.@brainmask")]),
            (final_report, ds_report, [("out_report",
                                        f"{output_dir.name}.@report")]),
        ])

    if not ants_affine_init:
        return wf

    # Initialize transforms with antsAI
    lowres_tmpl = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS),
                          name="lowres_tmpl")
    lowres_target = pe.Node(RegridToZooms(zooms=LOWRES_ZOOMS),
                            name="lowres_target")

    init_aff = pe.Node(
        AI(
            metric=("Mattes", 32, "Regular", 0.25),
            transform=("Affine", 0.1),
            search_factor=(15, 0.1),
            principal_axes=False,
            convergence=(10, 1e-6, 10),
            search_grid=(40, (0, 40, 40)),
            verbose=True,
        ),
        name="init_aff",
        n_procs=omp_nthreads,
    )
    wf.connect([
        (gauss_tmpl, lowres_tmpl, [("out", "in_file")]),
        (lowres_tmpl, init_aff, [("out_file", "fixed_image")]),
        (gauss_target, lowres_target, [("out", "in_file")]),
        (lowres_target, init_aff, [("out_file", "moving_image")]),
        (init_aff, norm, [("output_transform", "initial_moving_transform")]),
    ])

    if tpl_regmask_path:
        lowres_mask = pe.Node(ApplyTransforms(
            input_image=_pop(tpl_regmask_path),
            transforms="identity",
            interpolation="MultiLabel",
            float=True),
                              name="lowres_mask",
                              mem_gb=1)
        wf.connect([
            (lowres_tmpl, lowres_mask, [("out_file", "reference_image")]),
            (lowres_mask, init_aff, [("output_image", "fixed_image_mask")]),
        ])

    if interim_checkpoints:
        init_apply = pe.Node(ApplyTransforms(interpolation="BSpline",
                                             float=True),
                             name="init_apply",
                             mem_gb=1)
        init_report = pe.Node(SimpleBeforeAfter(
            before_label=f"tpl-{in_template}",
            after_label="target",
            out_report="init_report.svg"),
                              name="init_report")
        wf.connect([
            (lowres_target, init_apply, [("out_file", "input_image")]),
            (res_tmpl, init_apply, [("out_file", "reference_image")]),
            (init_aff, init_apply, [("output_transform", "transforms")]),
            (init_apply, init_report, [("output_image", "after")]),
            (res_tmpl, init_report, [("out_file", "before")]),
        ])

        if output_dir:
            ds_init_report = pe.Node(
                DataSink(base_directory=str(output_dir.parent)),
                name="ds_init_report")
            wf.connect(init_report, "out_report", ds_init_report,
                       f"{output_dir.name}.@init_report")
    return wf
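

# A minimal usage sketch (hypothetical paths and arguments, not in the
# original):
#
#   wf = init_infant_brain_extraction_wf(omp_nthreads=8, debug=True)
#   wf.inputs.inputnode.in_files = ['sub-01_T2w.nii.gz']
#   wf.base_dir = '/tmp/work'
#   wf.run()
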
def create_DWI_workflow(
    subject_list,
    bids_dir,
    work_dir,
    out_dir,
    bids_templates,
):

    # create initial workflow
    wf = Workflow(name='DWI', base_dir=work_dir)

    # use infosource to iterate workflow across subject list
    n_infosource = Node(interface=IdentityInterface(fields=['subject_id']),
                        name="subject_source"
                        # input: 'subject_id'
                        # output: 'subject_id'
                        )
    # runs the node with subject_id = each element in subject_list
    n_infosource.iterables = ('subject_id', subject_list)

    # select matching files from bids_dir
    n_selectfiles = Node(interface=SelectFiles(templates=bids_templates,
                                               base_directory=bids_dir),
                         name='get_subject_data')
    wf.connect([(n_infosource, n_selectfiles, [('subject_id', 'subject_id_p')])
                ])
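
    # NOTE (assumption, not in the original): ``bids_templates`` maps output
    # names to SelectFiles patterns containing a ``{subject_id_p}``
    # placeholder, e.g. roughly:
    #
    #   bids_templates = {
    #       'DWI_all': '{subject_id_p}/dwi/*_dwi.mif',
    #       'all_b0_PA': '{subject_id_p}/fmap/*_dir-PA_*.mif',
    #       'DTI_B0_PA': '{subject_id_p}/fmap/*_dir-PA_*.mif',
    #   }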

    ########## IMPLEMENTING MRTRIX COMMANDS FOR IMAGE ANALYSIS #######################################

    ## 1) Preprocessing of Data
    # https://nipype.readthedocs.io/en/latest/api/generated/nipype.interfaces.mrtrix3.preprocess.html
    # DWIDenoise to remove Gaussian noise
    n_denoise = Node(interface=mrt.DWIDenoise(), name='n_denoise')
    wf.connect([(n_selectfiles, n_denoise, [('DWI_all', 'in_file')])])
    # datasink
    n_datasink = Node(interface=DataSink(base_directory=out_dir),
                      name='datasink')
    # output denoised data into 'DWI_all_denoised'
    wf.connect([(n_selectfiles, n_datasink, [('all_b0_PA',
                                              'all_b0_PA_unchanged')]),
                (n_denoise, n_datasink, [('out_file', 'DWI_all_denoised')])])

    # MRDeGibbs to remove Gibbs ringing artifact
    n_degibbs = Node(
        interface=mrt.MRDeGibbs(out_file='DWI_all_denoised_degibbs.mif'),
        name='n_degibbs')
    # input denoised data into degibbs function
    wf.connect([(n_denoise, n_degibbs, [('out_file', 'in_file')])])
    # output degibbs data into 'DWI_all_denoised_degibbs.mif'
    wf.connect([(n_degibbs, n_datasink, [('out_file',
                                          'DWI_all_denoised_degibbs.mif')])])

    # DWI Extract to extract b0 volumes from multi-b image data
    n_dwiextract = Node(interface=mrt.DWIExtract(bzero=True,
                                                 out_file='b0vols.mif'),
                        name='n_dwiextract')
    # input degibbs data into dwiextract function
    wf.connect([(n_degibbs, n_dwiextract, [('out_file', 'in_file')])])
    # output extracted b0 volume from degibbs data (contains multiple b values)
    wf.connect([(n_dwiextract, n_datasink, [('out_file', 'noddi_b0_degibbs')])
                ])

    # MRcat to combine b0 volumes from input image and reverse phase encoded data
    n_mrcat = Node(
        interface=mrcatfunc.MRCat(
            #axis=3,
            out_file='b0s.mif'),
        name='n_mrcat')
    # input DTI images (all b0 volumes; reverse phase encoded) for concatenating
    wf.connect([(n_selectfiles, n_mrcat, [('DTI_B0_PA', 'in_file1')])])
    # input b0 volumes from NODDI data for concatenating
    wf.connect([(n_dwiextract, n_mrcat, [('out_file', 'in_file2')])])
    # output the mrcat file into 'noddi_and_PA_b0s.mif'
    wf.connect([(n_mrcat, n_datasink, [('out_file', 'noddi_and_PA_b0s.mif')])])

    # DWIfslpreproc for image pre-processing using FSL's eddy tool
    n_dwifslpreproc = Node(interface=preprocfunc.DWIFslPreProc(
        out_file='preprocessedDWIs.mif', use_header=True),
                           name='n_dwifslpreproc')
    # output of degibbs as input for preprocessing
    wf.connect([(n_degibbs, n_dwifslpreproc, [('out_file', 'in_file')])])
    # output of mrcat (extracted b0 volumes) as se_epi input
    wf.connect([(n_mrcat, n_dwifslpreproc, [('out_file', 'se_epi_file')])])
    # output of dwifslpreproc into 'preprocessedDWIs.mif'
    wf.connect([(n_dwifslpreproc, n_datasink, [('out_file',
                                                'preprocessedDWIs.mif')])])

    # DWI bias correct for B1 field inhomogeneity correction
    n_dwibiascorrect = Node(
        interface=preprocess.DWIBiasCorrect(use_ants=True),
        name='n_dwibiascorrect',
    )
    # input preprocessed data
    wf.connect([(n_dwifslpreproc, n_dwibiascorrect, [('out_file', 'in_file')])
                ])
    # output biascorrect data into 'ANTSpreprocessedDWIs.mif'
    wf.connect([(n_dwibiascorrect, n_datasink,
                 [('out_file', 'ANTSpreprocessedDWIs.mif')])])

    # DWI2mask to compute whole brain mask from bias corrected data
    n_dwi2mask = Node(interface=mrt.BrainMask(out_file='mask.mif'),
                      name='n_dwi2mask')
    wf.connect([(n_dwibiascorrect, n_dwi2mask, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2mask, n_datasink, [('out_file', 'mask.mif')])])

    ##################################################################################
    ## 2) Fixel-based analysis
    # DWI2response for estimation of the response function for spherical deconvolution
    n_dwi2response = Node(interface=mrt.ResponseSD(algorithm='dhollander',
                                                   wm_file='wm_res.txt',
                                                   gm_file='gm_res.txt',
                                                   csf_file='csf_res.txt'),
                          name='n_dwi2response')
    # input bias corrected data for response function estimation
    wf.connect([(n_dwibiascorrect, n_dwi2response, [('out_file', 'in_file')])])
    # output WM, GM, CSF response text files
    wf.connect([(n_dwi2response, n_datasink, [('wm_file', 'wm_res.txt')])])
    wf.connect([(n_dwi2response, n_datasink, [('gm_file', 'gm_res.txt')])])
    wf.connect([(n_dwi2response, n_datasink, [('csf_file', 'csf_res.txt')])])

    # DWI2fod for fibre orientation distribution estimation (FOD)
    n_dwi2fod = Node(interface=mrt.ConstrainedSphericalDeconvolution(
        algorithm='msmt_csd',
        wm_odf='wmfod.mif',
        gm_odf='gmfod.mif',
        csf_odf='csffod.mif'),
                     name='n_dwi2fod')
    # utilise dwi2fod response files as input
    wf.connect([(n_dwibiascorrect, n_dwi2fod, [('out_file', 'in_file')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('wm_file', 'wm_txt')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('gm_file', 'gm_txt')])])
    wf.connect([(n_dwi2response, n_dwi2fod, [('csf_file', 'csf_txt')])])
    # output WM, GM and CSF FODs for saving
    wf.connect([(n_dwi2fod, n_datasink, [('wm_odf', 'wmfod.mif')])])
    wf.connect([(n_dwi2fod, n_datasink, [('gm_odf', 'gmfod.mif')])])
    wf.connect([(n_dwi2fod, n_datasink, [('csf_odf', 'csffod.mif')])])

    # MRConvert to select the first volume of the WM FOD (the most informative of the 45 volumes in the wmfod file)
    n_mrconvert_fod = Node(interface=utils.MRConvert(out_file='Zwmfod.mif',
                                                     coord=[3, 0]),
                           name='n_mrconvert_fod')
    # utilise WM FOD as input
    wf.connect([(n_dwi2fod, n_mrconvert_fod, [('wm_odf', 'in_file')])])
    # output z component of WM FOD
    wf.connect([(n_mrconvert_fod, n_datasink, [('out_file', 'Zwmfod.mif')])])

    # MRcat to concatenate all WM, GM, CSF FOD files to see their distributions throughout Brain
    n_mrcat_fod = Node(interface=mrcatfunc.MRCat(out_file='vf.mif'),
                       name='n_mrcat_fod')
    # connect Zwmfod, gmfod and csffod as inputs
    wf.connect([(n_mrconvert_fod, n_mrcat_fod, [('out_file', 'in_file1')])])
    wf.connect([(n_dwi2fod, n_mrcat_fod, [('gm_odf', 'in_file2')])])
    wf.connect([(n_dwi2fod, n_mrcat_fod, [('csf_odf', 'in_file3')])])
    # output the mrcat file into file 'vf.mif'
    wf.connect([(n_mrcat_fod, n_datasink, [('out_file', 'vf.mif')])])

    # fod2fixel
    # Perform segmentation of continuous FODs to produce discrete fixels
    # OUTPUTS: -afd afd.mif -peak peak.mif -disp disp.mif
    n_fod2fixel = Node(
        interface=fod2fixelfunc.fod2fixel(
            out_file='wmfixels',
            #afd_file = 'afd.mif',
            peak_file='peak.mif',
            disp_file='disp.mif'),
        name='n_fod2fixel')
    # trial multiple peak-value and integral thresholds; assigning
    # ``iterables`` twice would silently overwrite the first setting, so both
    # parameters go in one list (nipype expands their Cartesian product)
    n_fod2fixel.iterables = [('fmls_peak_value', [0, 0.10, 0.50]),
                             ('fmls_integral', [0, 0.10, 0.50])]

    # obtain WM fibre image as input
    wf.connect([(n_dwi2fod, n_fod2fixel, [('wm_odf', 'in_file')])])
    # outputs of fod2fixel saved
    wf.connect([(n_fod2fixel, n_datasink, [('out_file', 'wmfixels')])])
    wf.connect([(n_fod2fixel, n_datasink, [('afd_file', 'afd.mif')])])
    wf.connect([(n_fod2fixel, n_datasink, [('peak_file', 'peak.mif')])])
    wf.connect([(n_fod2fixel, n_datasink, [('disp_file', 'disp.mif')])])

    # fixel2peaks to convert data in the fixel directory format into 4D image of 3-vectors
    n_fixel2peaks = Node(interface=fixel2peaksfunc.fixel2peaks(
        out_file='peaks_wmdirections.mif'),
                         name='n_fixel2peaks')
    # look at multiple values for maximum number of fixels in each voxel
    n_fixel2peaks.iterables = ('number', [1, 2, 3])

    # obtain directions file in output folder of fod2fixel, as input
    wf.connect([(n_fod2fixel, n_fixel2peaks, [('out_file', 'in_file')])])
    # output of fixel2peaks saved in 'peaks_wmdirections.mif'
    wf.connect([(n_fixel2peaks, n_datasink, [('out_file',
                                              'peaks_wmdirections.mif')])])

    # MRmath to find normalised value of peak WM directions
    n_mrmath = Node(interface=mrt.MRMath(
        axis=3, operation='norm', out_file='norm_peaks_wmdirections.mif'),
                    name='n_mrmath')
    # input peak fixel data
    wf.connect([(n_fixel2peaks, n_mrmath, [('out_file', 'in_file')])])
    # output saved into 'norm_peaks_wmdirections.mif'
    wf.connect([(n_mrmath, n_datasink, [('out_file',
                                         'norm_peaks_wmdirections.mif')])])

    # MRcalc to divide peak WM direction by normalised value
    n_mrcalc = Node(interface=mrcalcfunc.MRCalc(operation='divide',
                                                out_file='wm_peak_dir.mif'),
                    name='n_mrcalc')
    # fixel2peaks image as input 1
    wf.connect([(n_fixel2peaks, n_mrcalc, [('out_file', 'in_file1')])])
    # normalised fixel2peak image as input 2
    wf.connect([(n_mrmath, n_mrcalc, [('out_file', 'in_file2')])])
    # save output image as 'WM_peak_dir.mif'
    wf.connect([(n_mrcalc, n_datasink, [('out_file', 'WM_peak_dir.mif')])])

    # MRconvert to extract Z component of peak directions
    n_mrconvert2 = Node(interface=utils.MRConvert(
        out_file='Zpeak_WM_Directions.mif', coord=[3, 2]),
                        name='n_mrconvert2')
    # input normalised peak direction file
    wf.connect([(n_mrcalc, n_mrconvert2, [('out_file', 'in_file')])])
    # save output as 'Zpeak_WM_Directions.mif'
    wf.connect([(n_mrconvert2, n_datasink, [('out_file',
                                             'Zpeak_WM_Directions.mif')])])

    # MRcalc to find absolute value of peak fibre directions
    n_mrcalc2 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='absZpeak_WM_Directions.mif'),
                     name='n_mrcalc2')
    # input z peaks image
    wf.connect([(n_mrconvert2, n_mrcalc2, [('out_file', 'in_file1')])])
    # save output as 'absZpeak_WM_Directions.mif'
    wf.connect([(n_mrcalc2, n_datasink, [('out_file',
                                          'absZpeak_WM_Directions.mif')])])

    # MRcalc to get angle by doing inverse cosine
    n_mrcalc3 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acosZpeak_WM_Directions.mif'),
                     name='n_mrcalc3')
    # input normalised z component of peaks image
    wf.connect([(n_mrcalc2, n_mrcalc3, [('out_file', 'in_file1')])])
    # save ouput as 'acosZpeak_WM_Directions.mif'
    wf.connect([(n_mrcalc3, n_datasink, [('out_file',
                                          'acosZpeak_WM_Directions.mif')])])

    # MRcalc to convert the angle of the peak fibre (w.r.t. the z axis) to degrees
    n_mrcalc4 = Node(interface=mrcalcfunc.MRCalc(
        operation='multiply', operand=180, out_file='Fixel1_Z_angle.mif'),
                     name='n_mrcalc4')
    # input inverse cosine image of peak fibre
    wf.connect([(n_mrcalc3, n_mrcalc4, [('out_file', 'in_file1')])])
    # output image as 'Fixel1_Z_angle.mif'
    wf.connect([(n_mrcalc4, n_datasink, [('out_file', 'Fixel1_Z_angle.mif')])])

    # MRcalc to divide by pi to finish converting from radians to degrees
    n_mrcalc5 = Node(interface=mrcalcfunc.MRCalc(
        operation='divide',
        operand=3.14159265,
        out_file='Fixel1_Z_cos_deg.mif'),
                     name='n_mrcalc5')
    # input image multiplied by 180
    wf.connect([(n_mrcalc4, n_mrcalc5, [('out_file', 'in_file1')])])
    # save output as 'Fixel1_Z_cos_deg.mif'
    wf.connect([(n_mrcalc5, n_datasink, [('out_file', 'Fixel1_Z_cos_deg.mif')])
                ])
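
    # NOTE (explanatory aside, not in the original): n_mrcalc3-n_mrcalc5
    # together compute angle_deg = acos(|v_z|) * 180 / pi, the angle between
    # the peak fibre direction and the z axis in degrees.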

    ##################################################################################
    ## 3) Tensor-based analysis
    # dwi2tensor to compute tensor from biascorrected DWI image
    n_dwi2tensor = Node(interface=mrt.FitTensor(out_file='dti.mif'),
                        name='n_dwi2tensor')
    # input bias corrected image
    wf.connect([(n_dwibiascorrect, n_dwi2tensor, [('out_file', 'in_file')])])
    # utilise mask to only compute tensors for regions of Brain
    wf.connect([(n_dwi2mask, n_dwi2tensor, [('out_file', 'in_mask')])])
    # output data into 'dt.mif'
    wf.connect([(n_dwi2tensor, n_datasink, [('out_file', 'dt.mif')])])

    # tensor2metric to convert tensors to generate maps of tensor-derived parameters
    n_tensor2metric = Node(interface=tensor2metricfunc.tensor2metric(
        modulate='none', num=1, vector_file='eigenvector.mif'),
                           name='n_tensor2metric')
    # input tensor image
    wf.connect([(n_dwi2tensor, n_tensor2metric, [('out_file', 'input_file')])])
    # save output eigenvectors of the diffusion tensor
    wf.connect([(n_tensor2metric, n_datasink, [('vector_file',
                                                'eigenvector.mif')])])

    # MRconvert to get eigenvector w.r.t z direction (main field)
    n_mrconvert3 = Node(interface=utils.MRConvert(coord=[3, 2],
                                                  out_file='eigenvectorZ.mif'),
                        name='n_mrconvert3')
    # input eigenvector file from tensor2metric
    wf.connect([(n_tensor2metric, n_mrconvert3, [('vector_file', 'in_file')])])
    # save output as 'eigenvectorZ.mif'
    wf.connect([(n_mrconvert3, n_datasink, [('out_file', 'eigenvectorZ.mif')])
                ])

    # ALL SUBSEQUENT STEPS GET ANGLE IN DEGREES
    # MRcalc to find absolute value of z eigenvector file
    n_mrcalc6 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='abs_eigenvectorZ.mif'),
                     name='n_mrcalc6')
    # z eigenvector image as input
    wf.connect([(n_mrconvert3, n_mrcalc6, [('out_file', 'in_file1')])])
    # save output as 'abs_eigenvectorZ.mif'
    wf.connect([(n_mrcalc6, n_datasink, [('out_file', 'abs_eigenvectorZ.mif')])
                ])

    # MRcalc to get angle by doing inverse cosine
    n_mrcalc7 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acos_eigenvectorZ.mif'),
                     name='n_mrcalc7')
    # input absolute value of z eigenvector image
    wf.connect([(n_mrcalc6, n_mrcalc7, [('out_file', 'in_file1')])])
    # save output as 'acos_eigenvectorZ.mif'
    wf.connect([(n_mrcalc7, n_datasink, [('out_file', 'acos_eigenvectorZ.mif')
                                         ])])

    # MRcalc to convert angle to degrees
    n_mrcalc8 = Node(
        interface=mrcalcfunc.MRCalc(operation='multiply',
                                    operand=180,
                                    out_file='degrees_eigenvectorZ.mif'),
        name='n_mrcalc8')
    # input inverse cosine image of z eigenvector
    wf.connect([(n_mrcalc7, n_mrcalc8, [('out_file', 'in_file1')])])
    # save output as 'degrees_eigenvectorZ.mif'
    wf.connect([(n_mrcalc8, n_datasink, [('out_file',
                                          'degrees_eigenvectorZ.mif')])])

    # MRcalc to divide by pi to finish converting from radians to degrees
    n_mrcalc9 = Node(interface=mrcalcfunc.MRCalc(operation='divide',
                                                 operand=3.14159265,
                                                 out_file='dti_z_cos_deg.mif'),
                     name='n_mrcalc9')
    # input z eigenvector image multiplied by 180
    wf.connect([(n_mrcalc8, n_mrcalc9, [('out_file', 'in_file1')])])
    # save output as 'dti_z_cos_deg.mif'
    wf.connect([(n_mrcalc9, n_datasink, [('out_file', 'dti_z_cos_deg.mif')])])

    # MRcalc to give difference image between fixel based and tensor based outputs
    n_mrcalc10 = Node(interface=mrcalcfunc.MRCalc(
        operation='subtract', out_file='diff_imag_tensor_minus_fixel.mif'),
                      name='n_mrcalc10')
    # input tensor based image of whole Brain
    wf.connect([(n_mrcalc9, n_mrcalc10, [('out_file', 'in_file1')])])
    # input fixel based image of Brain
    wf.connect([(n_mrcalc5, n_mrcalc10, [('out_file', 'in_file2')])])
    # output difference image as 'diff_imag_tensor_minus_fixel.mif'
    wf.connect([(n_mrcalc10, n_datasink,
                 [('out_file', 'diff_imag_tensor_minus_fixel.mif')])])

    #####################################################################################
    ## 4) Tensor based analysis on WM fibres only (NOT WHOLE BRAIN TENSORS)

    # MRthreshold to create WM mask from WM FOD (created earlier)
    n_mrthreshold = Node(interface=mrthresholdfunc.MRThreshold(
        out_file='thresholded_wmfod.mif'),
                         name='n_mrthreshold')
    # input WM FOD
    wf.connect([(n_dwi2fod, n_mrthreshold, [('wm_odf', 'in_file')])])
    # output thresholded WM FOD
    wf.connect([(n_mrthreshold, n_datasink, [('out_file',
                                              'thresholded_wmfod.mif')])])

    # MRconvert to extract 1st volume of thresholded WM FOD
    n_mrconvert4 = Node(interface=utils.MRConvert(coord=[3, 0],
                                                  out_file='WMmask.mif'),
                        name='n_mrconvert4')
    # input thresholded wmfod
    wf.connect([(n_mrthreshold, n_mrconvert4, [('out_file', 'in_file')])])
    # save output as 'WMmask.mif'
    wf.connect([(n_mrconvert4, n_datasink, [('out_file', 'WMmask.mif')])])

    # MRcalc to multiply the WM mask with the dti image to get tensors only in WM regions
    n_mrcalc11 = Node(interface=mrcalcfunc.MRCalc(operation='multiply',
                                                  out_file='WM_dt.mif'),
                      name='n_mrcalc11')
    # WM mask as input 1
    wf.connect([(n_mrconvert4, n_mrcalc11, [('out_file', 'in_file1')])])
    # dti image as input 2
    wf.connect([(n_dwi2tensor, n_mrcalc11, [('out_file', 'in_file2')])])
    # save output as 'WM_dt.mif'
    wf.connect([(n_mrcalc11, n_datasink, [('out_file', 'WM_dt.mif')])])

    # tensor2metric to generate maps of tensor-derived parameters (here, the
    # principal eigenvector, selected by num=1)
    n_tensor2metric2 = Node(interface=tensor2metricfunc.tensor2metric(
        modulate='none', num=1, vector_file='WMeigenvector.mif'),
                            name='n_tensor2metric2')
    # input tensor image
    wf.connect([(n_mrcalc11, n_tensor2metric2, [('out_file', 'input_file')])])
    # save output eigenvectors of the diffusion tensor
    wf.connect([(n_tensor2metric2, n_datasink, [('vector_file',
                                                 'WMeigenvector.mif')])])

    # MRconvert to get eigenvector w.r.t z direction (main field)
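    # (coord=[3, 2] keeps component index 2 along axis 3 of the eigenvector
    # image, i.e. the z component of the principal eigenvector)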
    n_mrconvert5 = Node(interface=utils.MRConvert(
        coord=[3, 2], out_file='WMeigenvectorZ.mif'),
                        name='n_mrconvert5')
    # input eigenvector file from tensor2metric
    wf.connect([(n_tensor2metric2, n_mrconvert5,
                 [('vector_file', 'in_file')])])
    # save output as 'WMeigenvectorZ.mif'
    wf.connect([(n_mrconvert5, n_datasink, [('out_file',
                                             'WMeigenvectorZ.mif')])])

    # ALL SUBSEQUENT STEPS COMPUTE THE ANGLE IN DEGREES
    # MRcalc to find absolute value of z eigenvector file
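    # (the eigenvector sign is arbitrary -- v and -v describe the same fibre
    # orientation -- so taking the absolute value folds the final angle into
    # the range [0, 90] degrees)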
    n_mrcalc12 = Node(interface=mrcalcfunc.MRCalc(
        operation='abs', out_file='WM_abs_eigenvectorZ.mif'),
                      name='n_mrcalc12')
    # z eigenvector image as input
    wf.connect([(n_mrconvert5, n_mrcalc12, [('out_file', 'in_file1')])])
    # save output as 'WM_abs_eigenvectorZ.mif'
    wf.connect([(n_mrcalc12, n_datasink, [('out_file',
                                           'WM_abs_eigenvectorZ.mif')])])

    # MRcalc to get angle by doing inverse cosine
    n_mrcalc13 = Node(interface=mrcalcfunc.MRCalc(
        operation='acos', out_file='acos_WMeigenvectorZ.mif'),
                      name='n_mrcalc13')
    # input absolute value of z eigenvector image
    wf.connect([(n_mrcalc12, n_mrcalc13, [('out_file', 'in_file1')])])
    # save output as 'acos_WMeigenvectorZ.mif'
    wf.connect([(n_mrcalc13, n_datasink, [('out_file',
                                           'acos_WMeigenvectorZ.mif')])])

    # MRcalc to multiply by 180 (first step of the radians-to-degrees conversion)
    n_mrcalc14 = Node(interface=mrcalcfunc.MRCalc(
        operation='multiply',
        operand=180,
        out_file='degrees_WMeigenvectorZ.mif'),
                      name='n_mrcalc14')
    # input inverse cosine image of WM z eigenvector
    wf.connect([(n_mrcalc13, n_mrcalc14, [('out_file', 'in_file1')])])
    # save output as 'degrees_WMeigenvectorZ.mif'
    wf.connect([(n_mrcalc14, n_datasink, [('out_file',
                                           'degrees_WMeigenvectorZ.mif')])])

    # MRcalc to divide by pi to finish converting from radians to degrees
    n_mrcalc15 = Node(
        interface=mrcalcfunc.MRCalc(operation='divide',
                                    operand=3.14159265,
                                    out_file='WMdti_z_cos_deg.mif'),
        name='n_mrcalc15')
    # input WM z eigenvector image multiplied by 180
    wf.connect([(n_mrcalc14, n_mrcalc15, [('out_file', 'in_file1')])])
    # save output as 'WMdti_z_cos_deg.mif'
    wf.connect([(n_mrcalc15, n_datasink, [('out_file',
                                           'WMdti_z_cos_deg.mif')])])

    # MRcalc to compute the difference image between the WM tensor-based and
    # fixel-based outputs
    n_mrcalc16 = Node(interface=mrcalcfunc.MRCalc(
        operation='subtract', out_file='diffImage_WMtensor_minus_fixel.mif'),
                      name='n_mrcalc16')
    # input WM tensor-based angle image (in_file1)
    wf.connect([(n_mrcalc15, n_mrcalc16, [('out_file', 'in_file1')])])
    # input fixel-based angle image (in_file2)
    wf.connect([(n_mrcalc5, n_mrcalc16, [('out_file', 'in_file2')])])
    # save output difference image as 'diffImage_WMtensor_minus_fixel.mif'
    wf.connect([(n_mrcalc16, n_datasink,
                 [('out_file', 'diffImage_WMtensor_minus_fixel.mif')])])
    ######################################################################################
    return wf
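

# Illustrative usage sketch (not part of the original source). The builder
# name and its arguments below are hypothetical; wf.write_graph(), wf.run()
# and the 'MultiProc' plugin are standard Nipype API.
#
#     wf = build_dti_angle_workflow(...)        # hypothetical constructor
#     wf.write_graph(graph2use='colored')       # optional: render the DAG
#     wf.run(plugin='MultiProc')                # run independent nodes in parallel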