Code Example #1
def create_fs_logb_workflow_for_both_hemispheres(name="FSLOGB",
                                                 plugin_args=None,
                                                 ml=False,
                                                 config=None):
    """Creates a workflow that connects FreeSurfer with LOGISMOS-B
    :param name:
    :param plugin_args:
    :param ml:
    :param config:
    :return:
    """

    fslogb_wf = Workflow(name=name)

    inputspec = Node(IdentityInterface([
        'recoding_file', 'lut_file', 'aseg_presurf', 'rawavg', 't2_raw',
        'lh_white', 'rh_white', 'hncma_atlas'
    ]),
                     name="inputspec")

    inputspec.inputs.recoding_file = get_local_file_location(
        "abc_fs_equivelants.json")
    inputspec.inputs.lut_file = get_local_file_location(
        "FreeSurferColorLUT.csv")

    # create outputspec with gm and wm surfaces
    outputs = [
        'lh_gm_surf_file', 'lh_wm_surf_file', 'rh_gm_surf_file',
        'rh_wm_surf_file'
    ]
    outputspec = Node(IdentityInterface(outputs), name="outputspec")

    for hemi in ('lh', 'rh'):
        hemi_logb_wf = create_fs_compatible_logb_workflow(
            "{0}_LOGBWF".format(hemi), plugin_args=plugin_args, config=config)
        hemi_logb_wf.inputs.inputspec.hemi = hemi
        fslogb_wf.connect([(inputspec, hemi_logb_wf,
                            [('aseg_presurf', 'inputspec.aseg'),
                             ('rawavg', 'inputspec.t1_file'),
                             ('t2_raw', 'inputspec.t2_file'),
                             ('hncma_atlas', 'inputspec.hncma_atlas'),
                             ('{0}_white'.format(hemi), 'inputspec.white')]),
                           (inputspec, hemi_logb_wf,
                            [('recoding_file', 'inputspec.recoding_file'),
                             ('lut_file', 'inputspec.lut_file')])])

        # move the outputs from logb to the outputspec
        fslogb_wf.connect([(hemi_logb_wf, outputspec, [
            ('outputspec.gmsurface_file', '{0}_gm_surf_file'.format(hemi)),
            ('outputspec.wmsurface_file', '{0}_wm_surf_file'.format(hemi))
        ])])

    return fslogb_wf
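
# --- Usage sketch (added for illustration; all paths are hypothetical) ---
# A minimal example of driving the workflow above, assuming the usual nipype
# imports and that the upstream FreeSurfer outputs already exist on disk.
#
# wf = create_fs_logb_workflow_for_both_hemispheres(name="FSLOGB")
# wf.inputs.inputspec.aseg_presurf = "/data/sub-01/aseg.presurf.mgz"
# wf.inputs.inputspec.rawavg = "/data/sub-01/rawavg.mgz"
# wf.inputs.inputspec.t2_raw = "/data/sub-01/T2raw.mgz"
# wf.inputs.inputspec.lh_white = "/data/sub-01/lh.white"
# wf.inputs.inputspec.rh_white = "/data/sub-01/rh.white"
# wf.inputs.inputspec.hncma_atlas = "/data/sub-01/hncma_atlas.nii.gz"
# wf.base_dir = "/tmp/fslogb_work"
# wf.run()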
Code Example #2
def wf_transform_anat(in_file_list, in_matrix_file_list, reference):
    func2std_xform = MapNode(
        FLIRT(output_type='NIFTI', apply_xfm=True),
        name="func2std_xform",
        iterfield=['in_file', 'in_matrix_file', 'reference'])

    inputspec = Node(IdentityInterface(
        fields=['in_file_list', 'in_matrix_file_list', 'reference']),
                     name="inputspec")

    inputspec.inputs.in_file_list = in_file_list
    inputspec.inputs.in_matrix_file_list = in_matrix_file_list
    inputspec.inputs.reference = reference

    wf_transform_anat = Workflow(name="wf_transform_anat")
    wf_transform_anat.connect(inputspec, 'in_file_list', func2std_xform,
                              'in_file')
    wf_transform_anat.connect(inputspec, 'in_matrix_file_list', func2std_xform,
                              'in_matrix_file')
    wf_transform_anat.connect(inputspec, 'reference', func2std_xform,
                              'reference')

    return wf_transform_anat
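
# --- Usage sketch (added for illustration; all paths are hypothetical) ---
# Note that 'reference' is declared as an iterfield of the MapNode, so it
# should be a list with one entry per input file.
#
# wf = wf_transform_anat(
#     in_file_list=["/data/sub-01_func.nii", "/data/sub-02_func.nii"],
#     in_matrix_file_list=["/data/sub-01_func2std.mat",
#                          "/data/sub-02_func2std.mat"],
#     reference=["/data/MNI152_T1_2mm_brain.nii.gz"] * 2)
# wf.base_dir = "/tmp/xform_work"
# wf.run()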
Code Example #3
    def _skullstrip_register_anat(self, input_img, output_path):
        output_path = os.path.abspath(output_path)
        workflow = Workflow('register_anat', base_dir=output_path)

        # Skull strip
        anatstrip = Node(skullstrip.Robex(in_file=input_img),
                         name='skullstrip')

        # Register to MNI template
        register = Node(ants.RegistrationSynQuick(
            fixed_image=strT1TemplatePath,
            num_threads=self.n_ants_jobs,
            output_prefix=os.path.join(output_path, 'registered_anat_')),
                        name='register',
                        mem_gb=16,
                        n_procs=self.n_ants_jobs)
        workflow.connect(anatstrip, 'out_file', register, 'moving_image')
        workflow.run()
        return os.path.join(output_path, 'registered_anat_Warped.nii.gz')
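
# --- Usage note (added for illustration) ---
# This is a method, so it relies on its enclosing object for self.n_ants_jobs
# and on the module-level strT1TemplatePath (an MNI T1 template). A
# hypothetical call, with made-up names and paths:
#
# registered = pipeline._skullstrip_register_anat(
#     input_img="/data/sub-01_T1w.nii.gz",
#     output_path="/tmp/register_work")
# # -> "/tmp/register_work/registered_anat_Warped.nii.gz"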
Code Example #4
def main(paths, options_binary_string, ANAT, num_proc=7):

    json_path = paths[0]
    base_directory = paths[1]
    motion_correction_bet_directory = paths[2]
    parent_wf_directory = paths[3]
    # functional_connectivity_directory=paths[4]
    coreg_reg_directory = paths[5]
    atlas_resize_reg_directory = paths[6]
    subject_list = paths[7]
    datasink_name = paths[8]
    # fc_datasink_name=paths[9]
    atlasPath = paths[10]
    # brain_path=paths[11]
    # mask_path=paths[12]
    # atlas_path=paths[13]
    # tr_path=paths[14]
    # motion_params_path=paths[15]
    # func2std_mat_path=paths[16]
    # MNI3mm_path=paths[17]
    # demographics_file_path = paths[18]
    # phenotype_file_path = paths[19]
    data_directory = paths[20]

    number_of_subjects = len(subject_list)
    print("Working with ", number_of_subjects, " subjects.")

    # Create our own custom function - BIDSDataGrabber using a Function Interface.

    # In[858]:

    def get_nifti_filenames(subject_id, data_dir):
        #     Remember that all the necessary imports need to be INSIDE the function for the Function Interface to work!
        from bids.grabbids import BIDSLayout

        layout = BIDSLayout(data_dir)
        run = 1

        anat_file_path = [
            f.filename for f in layout.get(
                subject=subject_id, type='T1w', extensions=['nii', 'nii.gz'])
        ]
        func_file_path = [
            f.filename for f in layout.get(subject=subject_id,
                                           type='bold',
                                           run=run,
                                           extensions=['nii', 'nii.gz'])
        ]

        if len(anat_file_path) == 0:
            return None, func_file_path[0]  # No Anatomical files present
        return anat_file_path[0], func_file_path[0]

    BIDSDataGrabber = Node(Function(
        function=get_nifti_filenames,
        input_names=['subject_id', 'data_dir'],
        output_names=['anat_file_path', 'func_file_path']),
                           name='BIDSDataGrabber')
    # BIDSDataGrabber.iterables = [('subject_id',subject_list)]
    BIDSDataGrabber.inputs.data_dir = data_directory

    # ## Return TR

    def get_TR(in_file):
        from bids.grabbids import BIDSLayout

        data_directory = '/home1/varunk/data/ABIDE1/RawDataBIDs'
        layout = BIDSLayout(data_directory)
        metadata = layout.get_metadata(path=in_file)
        TR = metadata['RepetitionTime']
        return TR

    # ---------------- Added new Node to return TR and other slice timing correction params-------------------------------
    def _getMetadata(in_file):
        from bids.grabbids import BIDSLayout
        import logging

        logger = logging.getLogger(__name__)
        logger.setLevel(logging.DEBUG)

        # create a file handler
        handler = logging.FileHandler('progress.log')

        # add the handlers to the logger
        logger.addHandler(handler)

        interleaved = True
        index_dir = False
        data_directory = '/home1/varunk/data/ABIDE1/RawDataBIDs'
        layout = BIDSLayout(data_directory)
        metadata = layout.get_metadata(path=in_file)
        print(metadata)

        logger.info('Extracting Meta Data of file: %s', in_file)
        try:
            tr = metadata['RepetitionTime']
        except KeyError:
            print(
                'Key RepetitionTime not found in task-rest_bold.json so using a default of 2.0 '
            )
            tr = 2
            logger.error(
                'Key RepetitionTime not found in task-rest_bold.json for file %s so using a default of 2.0 ',
                in_file)

        try:
            slice_order = metadata['SliceAcquisitionOrder']
        except KeyError:
            print(
                'Key SliceAcquisitionOrder not found in task-rest_bold.json so using a default of interleaved ascending '
            )
            logger.error(
                'Key SliceAcquisitionOrder not found in task-rest_bold.json for file %s so using a default of interleaved ascending',
                in_file)
            return tr, index_dir, interleaved

        if slice_order.split(' ')[0] == 'Sequential':
            interleaved = False
        if slice_order.split(' ')[1] == 'Descending':
            index_dir = True

        return tr, index_dir, interleaved
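
    # Illustrative sketch (values are made up, not from a real dataset): for a
    # sidecar containing {"RepetitionTime": 2.0,
    # "SliceAcquisitionOrder": "Sequential Descending"} the node returns
    # tr=2.0, index_dir=True, interleaved=False; for "Interleaved Ascending"
    # it returns tr=2.0, index_dir=False, interleaved=True.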

    getMetadata = Node(Function(
        function=_getMetadata,
        input_names=['in_file'],
        output_names=['tr', 'index_dir', 'interleaved']),
                       name='getMetadata')

    # ### Skipping 4 starting scans
    # Extract ROI for skipping first 4 scans of the functional data
    # > **Arguments:**
    # t_min: (corresponds to time dimension) Denotes the starting time of the inclusion
    # t_size: Denotes the number of scans to include
    #
    # The logic behind skipping the 4 initial scans is to use only scans taken after the subject has stabilized in the scanner.

    # In[863]:

    # ExtractROI - skip dummy scans
    extract = Node(ExtractROI(t_min=4, t_size=-1, output_type='NIFTI'),
                   name="extract")

    # ### Slice time correction
    # Created a Node that does slice time correction
    # > **Arguments**:
    # index_dir=False -> slices were acquired bottom to top, i.e. in ascending order
    # interleaved=True means odd slices were acquired first and then even slices (or vice versa)

    slicetimer = Node(SliceTimer(output_type='NIFTI'), name="slicetimer")

    # ### Motion Correction
    # Motion correction is done using FSL's MCFLIRT. It aligns all the volumes of a functional scan to each other

    # MCFLIRT - motion correction
    mcflirt = Node(MCFLIRT(mean_vol=True, save_plots=True,
                           output_type='NIFTI'),
                   name="mcflirt")

    # A pass-through node that forwards MCFLIRT's output to the next workflow; it keeps the graph intact when MCFLIRT is skipped
    from_mcflirt = Node(IdentityInterface(fields=['in_file']),
                        name="from_mcflirt")

    # ### Skull stripping
    # I used FSL's BET

    # In[868]:

    skullStrip = Node(BET(mask=False, frac=0.3, robust=True),
                      name='skullStrip')

    # *Note*: Do not include special characters in the ``name`` field above, because wf.write_graph will fail otherwise

    # ## Resample
    # I needed to resample the anatomical file from 1 mm to 3 mm, because registering a 1 mm file took a very long time.
    #

    # In[872]:

    # Resample - resample anatomy to 3x3x3 voxel resolution
    resample_mni = Node(
        Resample(
            voxel_size=(3, 3, 3),
            resample_mode='Cu',  # cubic interpolation
            outputtype='NIFTI'),
        name="resample_mni")

    resample_anat = Node(
        Resample(
            voxel_size=(3, 3, 3),
            resample_mode='Cu',  # cubic interpolation
            outputtype='NIFTI'),
        name="resample_anat")

    # In[873]:

    resample_atlas = Node(
        Resample(
            voxel_size=(3, 3, 3),
            resample_mode='NN',  # nearest-neighbour interpolation
            outputtype='NIFTI'),
        name="resample_atlas")

    resample_atlas.inputs.in_file = atlasPath

    # # Matrix operations
    # ### For concatenating the transformation matrices

    concat_xform = Node(ConvertXFM(concat_xfm=True), name='concat_xform')

    # Node to calculate the inverse of func2std matrix
    inv_mat = Node(ConvertXFM(invert_xfm=True), name='inv_mat')

    # ## Extracting the mean brain

    meanfunc = Node(interface=ImageMaths(op_string='-Tmean', suffix='_mean'),
                    name='meanfunc')

    meanfuncmask = Node(interface=BET(mask=True, no_output=True, frac=0.3),
                        name='meanfuncmask')

    # ## Apply Mask

    # Does BET (masking) on the whole func scan [not used: it breaks the join node]
    maskfunc = Node(interface=ImageMaths(suffix='_bet', op_string='-mas'),
                    name='maskfunc')

    # Does BET (masking) on the mean func scan
    maskfunc4mean = Node(interface=ImageMaths(suffix='_bet', op_string='-mas'),
                         name='maskfunc4mean')

    # ## Datasink
    # I needed to define the structure of what files are saved and where.

    # Create DataSink object
    dataSink = Node(DataSink(), name='datasink')

    # Name of the output folder
    dataSink.inputs.base_directory = opj(base_directory, datasink_name)

    # Define substitution strings so that the data is similar to BIDS
    substitutions = [
        ('_subject_id_', 'sub-'), ('_resample_brain_flirt.nii_brain', ''),
        ('_roi_st_mcf_flirt.nii_brain_flirt', ''),
        ('task-rest_run-1_bold_roi_st_mcf.nii', 'motion_params'),
        ('T1w_resample_brain_flirt_sub-0050002_task-rest_run-1_bold_roi_st_mcf_mean_bet_flirt',
         'fun2std')
    ]

    # Feed the substitution strings to the DataSink node
    dataSink.inputs.substitutions = substitutions

    # ### Apply Mask to functional data
    # The mean file of the motion-corrected functional scan is sent to
    # skullStrip to get just the brain and the mask_image.
    # mask_image is a binary file (1 where brain is present and 0 where it isn't).
    # After getting the mask_image from skullStrip, apply that mask to the
    # aligned functional image to extract its brain and remove the skull

    # In[889]:

    # Function
    # in_file: the file on which to apply the mask
    # in_file2 = mask_file: the mask to use; it must have the same dimensions as in_file
    # out_file: result of applying the mask to in_file -> returns the path of the output file

    def applyMask_func(in_file, in_file2):
        import numpy as np
        import nibabel as nib
        import os
        from os.path import join as opj

        # in_file2 is the mask file path
        mask_file = in_file2

        brain_data = nib.load(in_file)
        mask_data = nib.load(mask_file)

        brain = brain_data.get_data().astype('float32')
        mask = mask_data.get_data()

        # applying mask by multiplying elementwise to the binary mask

        if len(brain.shape) == 3:  # Anat file
            brain = np.multiply(brain, mask)
        elif len(brain.shape) > 3:  # Functional File
            for t in range(brain.shape[-1]):
                brain[:, :, :, t] = np.multiply(brain[:, :, :, t], mask)
        else:
            pass

        # Saving the brain file

        path = os.getcwd()

        in_file_split_list = in_file.split('/')
        in_file_name = in_file_split_list[-1]

        out_file = in_file_name + '_brain.nii.gz'  # changing name
        brain_with_header = nib.Nifti1Image(brain,
                                            affine=brain_data.affine,
                                            header=brain_data.header)
        nib.save(brain_with_header, out_file)

        out_file = opj(path, out_file)
        out_file2 = in_file2

        return out_file, out_file2
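
    # Illustrative sketch (hypothetical paths): applied to a 4D functional
    # image and its binary mask, the function writes the masked brain into the
    # node's working directory and echoes the mask path back:
    #   out_file, out_file2 = applyMask_func(
    #       '/tmp/sub-01_bold_mcf.nii', '/tmp/sub-01_mean_brain_mask.nii')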

    # #### Things learnt:
    # 1. Whenever a node executes, its working directory becomes the current
    #    directory, and any file you create is stored there.
    # 2. from IPython.core.debugger import Tracer; Tracer()()  # the debugger doesn't work in nipype

    # Wrap the above function inside a Node

    # In[890]:

    applyMask = Node(Function(function=applyMask_func,
                              input_names=['in_file', 'in_file2'],
                              output_names=['out_file', 'out_file2']),
                     name='applyMask')

    # ### Some nodes needed for Co-registration and Normalization

    # Node for getting the xformation matrix
    func2anat_reg = Node(FLIRT(output_type='NIFTI'), name="func2anat_reg")

    # Node for applying xformation matrix to functional data
    func2std_xform = Node(FLIRT(output_type='NIFTI', apply_xfm=True),
                          name="func2std_xform")

    # Node for applying xformation matrix to functional data
    std2func_xform = Node(FLIRT(output_type='NIFTI',
                                apply_xfm=True,
                                interp='nearestneighbour'),
                          name="std2func_xform")

    # Node for Normalizing/Standardizing the anatomical and getting the xformation matrix
    anat2std_reg = Node(FLIRT(output_type='NIFTI'), name="anat2std_reg")

    # I wanted to use the MNI file as input to the workflow, so I created an
    # Identity node that reads the MNI file path and outputs the same MNI file
    # path. I then connected this node wherever it was needed.

    MNI152_2mm = Node(IdentityInterface(fields=['standard_file', 'mask_file']),
                      name="MNI152_2mm")
    # Set the mask_file and standard_file input in the Node. This setting sets the input mask_file permanently.
    MNI152_2mm.inputs.mask_file = os.path.expandvars(
        '$FSLDIR/data/standard/MNI152_T1_2mm_brain_mask.nii.gz')

    MNI152_2mm.inputs.standard_file = os.path.expandvars(
        '$FSLDIR/data/standard/MNI152_T1_2mm_brain.nii.gz')
    # MNI152_2mm.inputs.mask_file = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain_mask.nii.gz'
    # MNI152_2mm.inputs.standard_file = '/usr/share/fsl/5.0/data/standard/MNI152_T1_2mm_brain.nii.gz'

    # ## Band Pass Filtering
    # Let's do a band pass filtering on the data using the code from https://neurostars.org/t/bandpass-filtering-different-outputs-from-fsl-and-nipype-custom-function/824/2

    ### AFNI

    bandpass = Node(afni.Bandpass(highpass=0.008,
                                  lowpass=0.08,
                                  despike=False,
                                  no_detrend=True,
                                  notrans=True,
                                  outputtype='NIFTI_GZ'),
                    name='bandpass')

    # ### Following is a Join Node that collects the preprocessed file paths and saves them in a file

    # In[902]:

    def save_file_list_function_in_brain(in_brain):
        import numpy as np
        import os
        from os.path import join as opj

        file_list = np.asarray(in_brain)
        print('######################## File List ######################: \n',
              file_list)

        np.save('brain_file_list', file_list)
        file_name = 'brain_file_list.npy'
        out_brain = opj(os.getcwd(), file_name)  # path
        return out_brain

    def save_file_list_function_in_mask(in_mask):
        import numpy as np
        import os
        from os.path import join as opj

        file_list2 = np.asarray(in_mask)
        print('######################## File List ######################: \n',
              file_list2)

        np.save('mask_file_list', file_list2)
        file_name2 = 'mask_file_list.npy'
        out_mask = opj(os.getcwd(), file_name2)  # path
        return out_mask

    def save_file_list_function_in_motion_params(in_motion_params):
        import numpy as np
        import os
        from os.path import join as opj

        file_list3 = np.asarray(in_motion_params)
        print('######################## File List ######################: \n',
              file_list3)

        np.save('motion_params_file_list', file_list3)
        file_name3 = 'motion_params_file_list.npy'
        out_motion_params = opj(os.getcwd(), file_name3)  # path
        return out_motion_params

    def save_file_list_function_in_motion_outliers(in_motion_outliers):
        import numpy as np
        import os
        from os.path import join as opj

        file_list4 = np.asarray(in_motion_outliers)
        print('######################## File List ######################: \n',
              file_list4)

        np.save('motion_outliers_file_list', file_list4)
        file_name4 = 'motion_outliers_file_list.npy'
        out_motion_outliers = opj(os.getcwd(), file_name4)  # path
        return out_motion_outliers

    def save_file_list_function_in_joint_xformation_matrix(
            in_joint_xformation_matrix):
        import numpy as np
        import os
        from os.path import join as opj

        file_list5 = np.asarray(in_joint_xformation_matrix)
        print('######################## File List ######################: \n',
              file_list5)

        np.save('joint_xformation_matrix_file_list', file_list5)
        file_name5 = 'joint_xformation_matrix_file_list.npy'
        out_joint_xformation_matrix = opj(os.getcwd(), file_name5)  # path
        return out_joint_xformation_matrix

    def save_file_list_function_in_tr(in_tr):
        import numpy as np
        import os
        from os.path import join as opj

        tr_list = np.asarray(in_tr)
        print('######################## TR List ######################: \n',
              tr_list)

        np.save('tr_list', tr_list)
        file_name6 = 'tr_list.npy'
        out_tr = opj(os.getcwd(), file_name6)  # path
        return out_tr

    def save_file_list_function_in_atlas(in_atlas):
        import numpy as np
        import os
        from os.path import join as opj

        file_list7 = np.asarray(in_atlas)
        print('######################## File List ######################: \n',
              file_list7)

        np.save('atlas_file_list', file_list7)
        file_name7 = 'atlas_file_list.npy'
        out_atlas = opj(os.getcwd(), file_name7)  # path
        return out_atlas

    save_file_list_in_brain = JoinNode(Function(
        function=save_file_list_function_in_brain,
        input_names=['in_brain'],
        output_names=['out_brain']),
                                       joinsource="infosource",
                                       joinfield=['in_brain'],
                                       name="save_file_list_in_brain")

    save_file_list_in_mask = JoinNode(Function(
        function=save_file_list_function_in_mask,
        input_names=['in_mask'],
        output_names=['out_mask']),
                                      joinsource="infosource",
                                      joinfield=['in_mask'],
                                      name="save_file_list_in_mask")

    save_file_list_in_motion_outliers = JoinNode(
        Function(function=save_file_list_function_in_motion_outliers,
                 input_names=['in_motion_outliers'],
                 output_names=['out_motion_outliers']),
        joinsource="infosource",
        joinfield=['in_motion_outliers'],
        name="save_file_list_in_motion_outliers")

    save_file_list_in_motion_params = JoinNode(
        Function(function=save_file_list_function_in_motion_params,
                 input_names=['in_motion_params'],
                 output_names=['out_motion_params']),
        joinsource="infosource",
        joinfield=['in_motion_params'],
        name="save_file_list_in_motion_params")

    save_file_list_in_joint_xformation_matrix = JoinNode(
        Function(function=save_file_list_function_in_joint_xformation_matrix,
                 input_names=['in_joint_xformation_matrix'],
                 output_names=['out_joint_xformation_matrix']),
        joinsource="infosource",
        joinfield=['in_joint_xformation_matrix'],
        name="save_file_list_in_joint_xformation_matrix")

    save_file_list_in_tr = JoinNode(Function(
        function=save_file_list_function_in_tr,
        input_names=['in_tr'],
        output_names=['out_tr']),
                                    joinsource="infosource",
                                    joinfield=['in_tr'],
                                    name="save_file_list_in_tr")

    save_file_list_in_atlas = JoinNode(Function(
        function=save_file_list_function_in_atlas,
        input_names=['in_atlas'],
        output_names=['out_atlas']),
                                       joinsource="infosource",
                                       joinfield=['in_atlas'],
                                       name="save_file_list_in_atlas")

    # save_file_list = JoinNode(Function(function=save_file_list_function, input_names=['in_brain', 'in_mask', 'in_motion_params','in_motion_outliers','in_joint_xformation_matrix', 'in_tr', 'in_atlas'],
    #                output_names=['out_brain','out_mask','out_motion_params','out_motion_outliers','out_joint_xformation_matrix','out_tr', 'out_atlas']),
    #                joinsource="infosource",
    #                joinfield=['in_brain', 'in_mask', 'in_motion_params','in_motion_outliers','in_joint_xformation_matrix','in_tr', 'in_atlas'],
    #                name="save_file_list")

    # def save_file_list_function(in_brain, in_mask, in_motion_params, in_motion_outliers, in_joint_xformation_matrix, in_tr, in_atlas):
    #     # Imports
    #     import numpy as np
    #     import os
    #     from os.path import join as opj
    #
    #
    #     file_list = np.asarray(in_brain)
    #     print('######################## File List ######################: \n',file_list)
    #
    #     np.save('brain_file_list',file_list)
    #     file_name = 'brain_file_list.npy'
    #     out_brain = opj(os.getcwd(),file_name) # path
    #
    #
    #     file_list2 = np.asarray(in_mask)
    #     print('######################## File List ######################: \n',file_list2)
    #
    #     np.save('mask_file_list',file_list2)
    #     file_name2 = 'mask_file_list.npy'
    #     out_mask = opj(os.getcwd(),file_name2) # path
    #
    #
    #     file_list3 = np.asarray(in_motion_params)
    #     print('######################## File List ######################: \n',file_list3)
    #
    #     np.save('motion_params_file_list',file_list3)
    #     file_name3 = 'motion_params_file_list.npy'
    #     out_motion_params = opj(os.getcwd(),file_name3) # path
    #
    #
    #     file_list4 = np.asarray(in_motion_outliers)
    #     print('######################## File List ######################: \n',file_list4)
    #
    #     np.save('motion_outliers_file_list',file_list4)
    #     file_name4 = 'motion_outliers_file_list.npy'
    #     out_motion_outliers = opj(os.getcwd(),file_name4) # path
    #
    #
    #     file_list5 = np.asarray(in_joint_xformation_matrix)
    #     print('######################## File List ######################: \n',file_list5)
    #
    #     np.save('joint_xformation_matrix_file_list',file_list5)
    #     file_name5 = 'joint_xformation_matrix_file_list.npy'
    #     out_joint_xformation_matrix = opj(os.getcwd(),file_name5) # path
    #
    #     tr_list = np.asarray(in_tr)
    #     print('######################## TR List ######################: \n',tr_list)
    #
    #     np.save('tr_list',tr_list)
    #     file_name6 = 'tr_list.npy'
    #     out_tr = opj(os.getcwd(),file_name6) # path
    #
    #
    #     file_list7 = np.asarray(in_atlas)
    #     print('######################## File List ######################: \n',file_list7)
    #
    #     np.save('atlas_file_list',file_list7)
    #     file_name7 = 'atlas_file_list.npy'
    #     out_atlas = opj(os.getcwd(),file_name7) # path
    #
    #
    #
    #
    #     return out_brain, out_mask, out_motion_params, out_motion_outliers, out_joint_xformation_matrix, out_tr , out_atlas

    # ### Motion outliers

    motionOutliers = Node(MotionOutliers(no_motion_correction=False,
                                         metric='fd',
                                         out_metric_plot='fd_plot.png',
                                         out_metric_values='fd_raw.txt'),
                          name='motionOutliers')

    # ## Workflow for atlas registration  from std to functional

    wf_atlas_resize_reg = Workflow(name=atlas_resize_reg_directory)

    wf_atlas_resize_reg.connect([

        # Apply the inverse matrix to the 3mm Atlas to transform it to func space
        (maskfunc4mean, std2func_xform, [('out_file', 'reference')]),
        (resample_atlas, std2func_xform, [('out_file', 'in_file')]),

        # Now, applying the inverse matrix
        (inv_mat, std2func_xform, [('out_file', 'in_matrix_file')]
         ),  # output: Atlas in func space
        (std2func_xform, save_file_list_in_atlas, [('out_file', 'in_atlas')]),

        # ---------------------------Save the required files --------------------------------------------
        (save_file_list_in_motion_params, dataSink,
         [('out_motion_params', 'motion_params_paths.@out_motion_params')]),
        (save_file_list_in_motion_outliers, dataSink,
         [('out_motion_outliers', 'motion_outliers_paths.@out_motion_outliers')
          ]),
        (save_file_list_in_brain, dataSink,
         [('out_brain', 'preprocessed_brain_paths.@out_brain')]),
        (save_file_list_in_mask, dataSink,
         [('out_mask', 'preprocessed_mask_paths.@out_mask')]),
        (save_file_list_in_joint_xformation_matrix, dataSink,
         [('out_joint_xformation_matrix',
           'joint_xformation_matrix_paths.@out_joint_xformation_matrix')]),
        (save_file_list_in_tr, dataSink, [('out_tr', 'tr_paths.@out_tr')]),
        (save_file_list_in_atlas, dataSink, [('out_atlas',
                                              'atlas_paths.@out_atlas')])
    ])

    # In[909]:

    wf_coreg_reg = Workflow(name=coreg_reg_directory)
    # wf_coreg_reg.base_dir = base_directory
    # Dir where all the outputs will be stored(inside coregistrationPipeline folder).

    if ANAT == 1:
        wf_coreg_reg.connect(BIDSDataGrabber, 'anat_file_path', skullStrip,
                             'in_file')  # Skull-strip the anatomical file

        wf_coreg_reg.connect(skullStrip, 'out_file', resample_anat, 'in_file')

        wf_coreg_reg.connect(
            resample_anat, 'out_file', func2anat_reg, 'reference'
        )  # Make the resampled file as reference in func2anat_reg

        # Sec 1. The above three steps register the mean functional image to the
        # resampled anatomical image and compute the transformation matrix.

        wf_coreg_reg.connect(MNI152_2mm, 'standard_file', resample_mni,
                             'in_file')

        wf_coreg_reg.connect(resample_mni, 'out_file', anat2std_reg,
                             'reference')

        wf_coreg_reg.connect(resample_anat, 'out_file', anat2std_reg,
                             'in_file')

        # Calculates the transformation matrix from anat 3mm to MNI 3mm

        # We can get those matrices by referring to func2anat_reg.outputs.out_matrix_file and similarly for anat2std_reg

        wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file', concat_xform,
                             'in_file')

        wf_coreg_reg.connect(anat2std_reg, 'out_matrix_file', concat_xform,
                             'in_file2')

        wf_coreg_reg.connect(concat_xform, 'out_file', dataSink,
                             'tranformation_matrix_fun2std.@out_file')

        wf_coreg_reg.connect(concat_xform, 'out_file',
                             save_file_list_in_joint_xformation_matrix,
                             'in_joint_xformation_matrix')

        # Now invert the func2std matrix to get std2func
        wf_coreg_reg.connect(concat_xform, 'out_file', wf_atlas_resize_reg,
                             'inv_mat.in_file')
# ------------------------------------------------------------------------------------------------------------------------------

# Registration of Functional to MNI 3mm space w/o using anatomical
    if ANAT == 0:
        print('Not using anatomical high-resolution files')
        wf_coreg_reg.connect(MNI152_2mm, 'standard_file', resample_mni,
                             'in_file')
        wf_coreg_reg.connect(
            resample_mni, 'out_file', func2anat_reg, 'reference'
        )  # Make the resampled file as reference in func2anat_reg

        wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file', dataSink,
                             'tranformation_matrix_fun2std.@out_file')

        wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file',
                             save_file_list_in_joint_xformation_matrix,
                             'in_joint_xformation_matrix')

        # Now invert the func2std matrix to get std2func
        wf_coreg_reg.connect(func2anat_reg, 'out_matrix_file',
                             wf_atlas_resize_reg, 'inv_mat.in_file')

    # ## Co-Registration, Normalization and Bandpass Workflow
    # 1. Co-registration means aligning the func to the anat
    # 2. Normalization means aligning func/anat to the standard space
    # 3. Band-pass filtering applied in the range highpass=0.008, lowpass=0.08

    # In[910]:

    wf_motion_correction_bet = Workflow(name=motion_correction_bet_directory)
    # wf_motion_correction_bet.base_dir = base_directory

    wf_motion_correction_bet.connect([
        (from_mcflirt, meanfunc, [('in_file', 'in_file')]),
        (meanfunc, meanfuncmask, [('out_file', 'in_file')]),
        (from_mcflirt, applyMask, [('in_file', 'in_file')]),  # 1
        (meanfuncmask, applyMask, [
            ('mask_file', 'in_file2')
        ]),  # 2 output: 1&2,  BET on coregistered fmri scan
        (meanfunc, maskfunc4mean, [('out_file', 'in_file')]),  # 3
        (meanfuncmask, maskfunc4mean,
         [('mask_file', 'in_file2')]),  # 4 output: 3&4, BET on mean func scan
        (applyMask, save_file_list_in_brain, [('out_file', 'in_brain')]),
        (applyMask, save_file_list_in_mask, [('out_file2', 'in_mask')]),
        (maskfunc4mean, wf_coreg_reg, [('out_file', 'func2anat_reg.in_file')])
    ])

    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")

    infosource.iterables = [('subject_id', subject_list)]

    # Create the workflow

    wf = Workflow(name=parent_wf_directory)
    # base_dir = opj(s,'result')
    wf.base_dir = base_directory  # Dir where all the outputs will be stored(inside BETFlow folder).

    # wf.connect([      (infosource, BIDSDataGrabber, [('subject_id','subject_id')]),
    #                   (BIDSDataGrabber, extract, [('func_file_path','in_file')]),
    #
    #                   (BIDSDataGrabber,getMetadata, [('func_file_path','in_file')]),
    #
    #                   (getMetadata,slicetimer, [('tr','time_repetition')]),
    #
    #
    #                   (getMetadata,slicetimer, [('index_dir','index_dir')]),
    #
    #                   (getMetadata,slicetimer, [('interleaved','interleaved')]),
    #
    #                   (getMetadata,save_file_list_in_tr, [('tr','in_tr')]),
    #
    #                   (extract,slicetimer,[('roi_file','in_file')]),
    #
    #                   (slicetimer, mcflirt,[('slice_time_corrected_file','in_file')])
    #                   (mcflirt,dataSink,[('par_file','motion_params.@par_file')]), # saves the motion parameters calculated before
    #
    #                   (mcflirt,save_file_list_in_motion_params,[('par_file','in_motion_params')]),
    #
    #                   (mcflirt,wf_motion_correction_bet,[('out_file','from_mcflirt.in_file')])
    #            ])
    # # Run it in parallel
    # wf.run('MultiProc', plugin_args={'n_procs': num_proc})
    #
    #
    #
    # # Visualize the detailed graph
    # # from IPython.display import Image
    # wf.write_graph(graph2use='flat', format='png', simple_form=True)

    # Options:
    # discard 4 Volumes (extract), slicetimer, mcflirt
    print('Preprocessing Options:')
    print('Skipping 4 dummy volumes - ', options_binary_string[0])
    print('Slicetiming correction - ', options_binary_string[1])
    print('Finding Motion Outliers - ', options_binary_string[2])
    print('Doing Motion Correction - ', options_binary_string[3])

    # ANAT = 0
    nodes = [extract, slicetimer, motionOutliers, mcflirt]
    wf.connect(infosource, 'subject_id', BIDSDataGrabber, 'subject_id')
    wf.connect(BIDSDataGrabber, 'func_file_path', getMetadata, 'in_file')
    wf.connect(getMetadata, 'tr', save_file_list_in_tr, 'in_tr')

    old_node = BIDSDataGrabber
    old_node_output = 'func_file_path'

    for idx, include in enumerate(options_binary_string):

        if old_node == extract:
            old_node_output = 'roi_file'
        elif old_node == slicetimer:
            old_node_output = 'slice_time_corrected_file'
        # elif old_node == mcflirt:

        # old_node_output = 'out_file'

        if int(include):
            new_node = nodes[idx]

            if new_node == slicetimer:
                wf.connect(getMetadata, 'tr', slicetimer, 'time_repetition')
                wf.connect(getMetadata, 'index_dir', slicetimer, 'index_dir')
                wf.connect(getMetadata, 'interleaved', slicetimer,
                           'interleaved')
                new_node_input = 'in_file'
            elif new_node == extract:
                new_node_input = 'in_file'
            elif new_node == mcflirt:
                new_node_input = 'in_file'
                wf.connect(mcflirt, 'par_file', dataSink,
                           'motion_params.@par_file'
                           )  # saves the motion parameters calculated before

                wf.connect(mcflirt, 'par_file',
                           save_file_list_in_motion_params, 'in_motion_params')

                wf.connect(mcflirt, 'out_file', wf_motion_correction_bet,
                           'from_mcflirt.in_file')

            elif new_node == motionOutliers:

                wf.connect(meanfuncmask, 'mask_file', motionOutliers, 'mask')

                wf.connect(motionOutliers, 'out_file', dataSink,
                           'motionOutliers.@out_file')

                wf.connect(motionOutliers, 'out_metric_plot', dataSink,
                           'motionOutliers.@out_metric_plot')

                wf.connect(motionOutliers, 'out_metric_values', dataSink,
                           'motionOutliers.@out_metric_values')

                wf.connect(motionOutliers, 'out_file',
                           save_file_list_in_motion_outliers,
                           'in_motion_outliers')

                new_node_input = 'in_file'

                wf.connect(old_node, old_node_output, new_node, new_node_input)

                continue

            wf.connect(old_node, old_node_output, new_node, new_node_input)

            old_node = new_node

        else:
            if idx == 3:
                # new_node = from_mcflirt
                # new_node_input = 'from_mcflirt.in_file'

                wf.connect(old_node, old_node_output, wf_motion_correction_bet,
                           'from_mcflirt.in_file')

                # old_node = new_node

    TEMP_DIR_FOR_STORAGE = opj(base_directory, 'crash_files')
    wf.config = {"execution": {"crashdump_dir": TEMP_DIR_FOR_STORAGE}}

    # Visualize the detailed graph
    # from IPython.display import Image

    wf.write_graph(graph2use='flat', format='png', simple_form=True)

    # Run it in parallel
    wf.run('MultiProc', plugin_args={'n_procs': num_proc})
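
# --- Usage sketch (added for illustration; all paths are hypothetical) ---
# main() unpacks a 21-element `paths` list by fixed index (several slots are
# unused here). The 4-character options string toggles, in order: extract,
# slicetimer, motion outliers, mcflirt.
#
# paths = [''] * 21
# paths[1] = '/out/base'                  # base_directory
# paths[2] = 'motion_correction_bet'      # motion_correction_bet_directory
# paths[3] = 'preprocess'                 # parent_wf_directory
# paths[5] = 'coreg_reg'                  # coreg_reg_directory
# paths[6] = 'atlas_resize_reg'           # atlas_resize_reg_directory
# paths[7] = ['0050002', '0050003']       # subject_list
# paths[8] = 'datasink'                   # datasink_name
# paths[10] = '/atlas/atlas_3mm.nii.gz'   # atlasPath
# paths[20] = '/data/ABIDE1/RawDataBIDs'  # data_directory
# main(paths, options_binary_string='1111', ANAT=1, num_proc=7)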
Code Example #5
def intensity_norm(wf_name='intensity_norm'):
    """ Workflow that uses a mask against a source from where the mean value will be taken.
    This mean value will be used to demean the whole source and leave it in out_file.

    Parameters
    ----------
    wf_name: str
        The name of the workflow.

    Nipype Inputs
    -------------
    intnorm_input.source: existing file
        The image from where to extract the signal values and normalize.

    intnorm_input.mask: existing file
        The mask to specify which voxels to use to calculate the statistics
        for normalization.

    Nipype Outputs
    --------------
    intnorm_output.out_file: existing file

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields = ["source", "mask"]

    out_fields = ["out_file"]

    # input
    intnorm_input = setup_node(IdentityInterface(fields=in_fields,
                                                 mandatory_inputs=True),
                               name="intnorm_input")

    # fix the affine matrix (it's necessary for some cases)
    resample = setup_node(Function(
        function=resample_to_img,
        input_names=["in_file", "target", "interpolation"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                          name="resample_mask")

    resample.inputs.interpolation = "nearest"

    # calculate masked mean value
    mean_val = setup_node(Function(
        function=math_img,
        input_names=["formula", "img", "mask"],
        output_names=["out_value"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                          name='mean_value')
    mean_val.inputs.formula = "np.mean(img[(mask > 0) & (img != 0)])"  # mean of the nonzero in-mask voxel values

    # normalize
    norm_img = setup_node(Function(
        function=math_img,
        input_names=["formula", "out_file", "img", "val"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                          name='norm_img')
    norm_img.inputs.formula = "img / val"

    # output
    intnorm_output = setup_node(IdentityInterface(fields=out_fields),
                                name="intnorm_output")

    # Create the workflow object
    wf = Workflow(name=wf_name)

    wf.connect([
        # resample
        (intnorm_input, resample, [("source", "target"), ("mask", "in_file")]),
        # normalize
        (intnorm_input, mean_val, [("source", "img")]),
        (resample, mean_val, [("out_file", "mask")]),
        (intnorm_input, norm_img, [
            ("source", "img"),
            (("source", rename, "_intnormed"), "out_file"),
        ]),
        (mean_val, norm_img, [("out_value", "val")]),
        (norm_img, intnorm_output, [("out_file", "out_file")]),
    ])

    return wf
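
# --- Usage sketch (added for illustration; all paths are hypothetical) ---
# A minimal sketch, assuming neuro_pypes' setup_node, math_img, resample_to_img
# and rename helpers are importable and the input images exist.
#
# wf = intensity_norm(wf_name='intensity_norm')
# wf.inputs.intnorm_input.source = '/data/sub-01_pet.nii.gz'
# wf.inputs.intnorm_input.mask = '/data/sub-01_brainmask.nii.gz'
# wf.base_dir = '/tmp/intnorm_work'
# wf.run()
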
def _main(subject_list, vols, subid_vol_dict, number_of_skipped_volumes,
          brain_path, mask_path, atlas_path, tr_path, motion_params_path,
          func2std_mat_path, MNI3mm_path, base_directory, fc_datasink_name,
          motion_param_regression, band_pass_filtering,
          global_signal_regression, smoothing, volcorrect, num_proc,
          functional_connectivity_directory):

    # ## Volume correction
    # * I have already extracted 4 volumes.
    # * Now extract 120 - 4 = 116 volumes from each subject
    # * So define vols = 114
    #

    if number_of_skipped_volumes is None:
        number_of_skipped_volumes = 4
    vols = vols - number_of_skipped_volumes

    def vol_correct(sub_id, subid_vol_dict, vols, number_of_skipped_volumes):
        sub_vols = subid_vol_dict[sub_id] - number_of_skipped_volumes
        if sub_vols > vols:
            t_min = sub_vols - vols
        elif sub_vols == vols:
            t_min = 0
        else:
            raise Exception('Volumes of subject {} are less than desired!'.format(sub_id))
        return int(t_min)
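
    # Worked example matching the numbers above: a subject scanned with 120
    # volumes has sub_vols = 120 - 4 = 116 after skipping; with vols = 114 the
    # node returns t_min = 116 - 114 = 2, i.e. two more leading volumes are dropped.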


    # In[491]:



    volCorrect = Node(Function(function=vol_correct,
                               input_names=['sub_id', 'subid_vol_dict', 'vols',
                                            'number_of_skipped_volumes'],
                               output_names=['t_min']),
                      name='volCorrect')

    volCorrect.inputs.subid_vol_dict = subid_vol_dict
    volCorrect.inputs.vols = vols
    volCorrect.inputs.number_of_skipped_volumes = number_of_skipped_volumes


    # ## Define a function to fetch the filenames of a particular subject ID



    def get_subject_filenames(subject_id, brain_path, mask_path, atlas_path,
                              tr_path, motion_params_path, func2std_mat_path,
                              MNI3mm_path):
        import re
        from itertools import zip_longest

        # zip_longest zips lists of unequal length without dropping elements.
        # Source: https://stackoverflow.com/questions/11318977/zipping-unequal-lists-in-python-in-to-a-list-which-does-not-drop-any-element-fro
        sub_id_extracted = None
        for brain, mask, atlas, tr, motion_param, func2std_mat in zip_longest(
                brain_path, mask_path, atlas_path, tr_path,
                motion_params_path, func2std_mat_path):
            print('*******************', brain, mask, atlas, tr, motion_param,
                  func2std_mat)
            sub_id_extracted = re.search(r'.+_subject_id_(\d+)', brain).group(1)
            if str(subject_id) in brain:
                # print("Files for subject ", subject_id, brain, mask, atlas, tr, motion_param)
                return brain, mask, atlas, tr, motion_param, func2std_mat, MNI3mm_path

        print('Unable to locate Subject: ', subject_id, 'extracted: ',
              sub_id_extracted)
        raise Exception('Unable to locate Subject: ', subject_id,
                        'extracted: ', sub_id_extracted)




    # Make a node
    getSubjectFilenames = Node(Function(
        function=get_subject_filenames,
        input_names=['subject_id', 'brain_path', 'mask_path', 'atlas_path',
                     'tr_path', 'motion_params_path', 'func2std_mat_path',
                     'MNI3mm_path'],
        output_names=['brain', 'mask', 'atlas', 'tr', 'motion_param',
                      'func2std_mat', 'MNI3mm_path']),
                               name='getSubjectFilenames')


    getSubjectFilenames.inputs.brain_path = brain_path
    getSubjectFilenames.inputs.mask_path = mask_path
    getSubjectFilenames.inputs.atlas_path = atlas_path
    getSubjectFilenames.inputs.tr_path = tr_path
    getSubjectFilenames.inputs.motion_params_path = motion_params_path
    getSubjectFilenames.inputs.func2std_mat_path = func2std_mat_path
    getSubjectFilenames.inputs.MNI3mm_path = MNI3mm_path




    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")

    infosource.iterables = [('subject_id',subject_list)]



    # ## Band Pass Filtering
    # Let's do a band pass filtering on the data using the
    # code from https://neurostars.org/t/bandpass-filtering-different-outputs-from-fsl-and-nipype-custom-function/824/2

    ### AFNI

    bandpass = Node(afni.Bandpass(highpass=0.01,
                                  lowpass=0.1,
                                  despike=False,
                                  no_detrend=True,
                                  notrans=True,
                                  outputtype='NIFTI_GZ'),
                    name='bandpass')

    # bandpass = Node(afni.Bandpass(highpass=0.001, lowpass=0.01,
    #                          despike=False, no_detrend=True, notrans=True,
    #                          tr=2.0,outputtype='NIFTI_GZ'),name='bandpass')


    # ## Highpass filtering

    # In[506]:

    """
    Perform temporal highpass filtering on the data
    """

    # https://afni.nimh.nih.gov/pub/dist/doc/program_help/3dBandpass.html
    # os.chdir('/home1/varunk/Autism-Connectome-Analysis-bids-related/')

    highpass = Node(afni.Bandpass(highpass=0.009,
                                  lowpass=99999,
                                  despike=False,
                                  no_detrend=True,
                                  notrans=True,
                                  outputtype='NIFTI_GZ'),
                    name='highpass')

    #  FSL bandpass/Highpass
    # highpass = Node(interface=ImageMaths(suffix='_tempfilt'),
    #                   iterfield=['in_file'],
    #                   name='highpass')
    #
    # highpass.inputs.op_string = '-bptf 27.77775001525879  -1' # 23.64 # 31.25


    # ## Smoothing
    # ### Using 6mm fwhm
    # sigma = 6/2.3548 = 2.547987090198743

    spatialSmooth = Node(interface=ImageMaths(op_string='-s 2.5479',
                                                suffix='_smoothed'),
                       name='spatialSmooth')


    # ## Performs Gram Schmidt Process
    # https://en.wikipedia.org/wiki/Gram%E2%80%93Schmidt_process

    # In[509]:


    def orthogonalize(in_file, mask_file):
        import numpy as np
        import nibabel as nib
        import os
        from os.path import join as opj

        def gram_schmidt(voxel_time_series, mean_vector):
            numerator = np.dot(voxel_time_series, mean_vector)
            denominator = np.dot(mean_vector, mean_vector)
            voxel_time_series_orthogonalized = \
                voxel_time_series - (numerator / denominator) * mean_vector

            # To confirm that the vectors are orthogonal:
            # sum_dot_prod = np.sum(np.dot(voxel_time_series_orthogonalized, mean_vector))
            # print('Sum of entries of orthogonalized vector = ', sum_dot_prod)
            return voxel_time_series_orthogonalized
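
        # gram_schmidt above performs a single Gram-Schmidt step,
        # v - (v.m / m.m) * m: each voxel's time series has its projection onto
        # the mean (global) series removed, leaving it orthogonal to the global signal.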


        mask_data = nib.load(mask_file)
        mask = mask_data.get_data()

        brain_data = nib.load(in_file)
        brain = brain_data.get_data()

        x_dim, y_dim, z_dim, t_dim = brain_data.shape

        # Find the mean brain time series
        mean_vector = np.zeros(t_dim)

        num_brain_voxels = 0

        # Count the number of brain voxels
        for i in range(x_dim):
            for j in range(y_dim):
                for k in range(z_dim):
                    if mask[i,j,k] == 1:
                        mean_vector = mean_vector + brain[i,j,k,:]
                        num_brain_voxels = num_brain_voxels + 1


        mean_vector = mean_vector / num_brain_voxels

        # Orthogonalize
        for i in range(x_dim):
            for j in range(y_dim):
                for k in range(z_dim):
                    if mask[i,j,k] == 1:
                        brain[i,j,k,:] = gram_schmidt(brain[i,j,k,:], mean_vector)



        sub_id = in_file.split('/')[-1].split('.')[0].split('_')[0].split('-')[1]

        gsr_file_name = 'sub-' + sub_id + '_task-rest_run-1_bold.nii.gz'

    #     gsr_file_name_nii = gsr_file_name + '.nii.gz'

        out_file = opj(os.getcwd(),gsr_file_name) # path

        brain_with_header = nib.Nifti1Image(brain, affine=brain_data.affine,header = brain_data.header)
        nib.save(brain_with_header,gsr_file_name)

        return out_file

    # In[510]:


    globalSignalRemoval = Node(Function(function=orthogonalize, input_names=['in_file','mask_file'],
                                      output_names=['out_file']), name='globalSignalRemoval' )
    # globalSignalRemoval.inputs.mask_file = mask_file
    # globalSignalRemoval.iterables = [('in_file',file_paths)]


    # ## GLM for regression of motion parameters

    # In[511]:


    def calc_residuals(in_file,
                       motion_file):
        """
        Calculates, for every voxel, the residuals after regressing out the
        nuisance regressors (motion parameters) using a GLM.

        Parameters
        ----------
        in_file : string
            Path of a subject's motion-corrected nifti file.
        motion_file : string
            Path of a subject's motion parameters file.


        Returns
        -------
        out_file : string
            Path of residual file in nifti format

        """
        import nibabel as nb
        import numpy as np
        import os
        from os.path import join as opj
        nii = nb.load(in_file)
        data = nii.get_data().astype(np.float32)
        global_mask = (data != 0).sum(-1) != 0


        # Check and define regressors which are provided from files
        if motion_file is not None:
            motion = np.genfromtxt(motion_file)
            if motion.shape[0] != data.shape[3]:
                raise ValueError('Motion parameters {0} do not match data '
                                 'timepoints {1}'.format(motion.shape[0],
                                                         data.shape[3]))
            if motion.size == 0:
                raise ValueError('Motion signal file {0} is '
                                 'empty'.format(motion_file))

        # Calculate regressors
        regressor_map = {'constant' : np.ones((data.shape[3],1))}

        regressor_map['motion'] = motion


        X = np.zeros((data.shape[3], 1))

        for rname, rval in regressor_map.items():
            X = np.hstack((X, rval.reshape(rval.shape[0],-1)))

        X = X[:,1:]

        if np.isnan(X).any():
            raise ValueError('Regressor file contains NaN')

        Y = data[global_mask].T

        try:
            B = np.linalg.inv(X.T.dot(X)).dot(X.T).dot(Y)
        except np.linalg.LinAlgError as e:
            if "Singular matrix" in e:
                raise Exception("Error details: {0}\n\nSingular matrix error: "
                                "The nuisance regression configuration you "
                                "selected may have been too stringent, and the "
                                "regression could not be completed. Ensure your "
                                "parameters are not too "
                                "extreme.\n\n".format(e))
            else:
                raise Exception("Error details: {0}\n\nSomething went wrong with "
                                "nuisance regression.\n\n".format(e))

        Y_res = Y - X.dot(B)

        data[global_mask] = Y_res.T

        img = nb.Nifti1Image(data, header=nii.header,
                             affine=nii.affine)

        subject_name = in_file.split('/')[-1].split('.')[0]
        filename = subject_name + '_residual.nii.gz'
        out_file = os.path.join(os.getcwd(),filename )
        img.to_filename(out_file) # alt to nib.save

        return out_file
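
    # The regression above solves ordinary least squares per voxel:
    # B = (X'X)^-1 X'Y, with X holding the constant and motion regressors and
    # Y the voxel time series; the residual Y - XB is kept as the denoised series.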


    # In[512]:


    # Create a Node for the function above
    calc_residuals = Node(Function(function=calc_residuals,
                                   input_names=['in_file', 'motion_file'],
                                   output_names=['out_file']),
                          name='calc_residuals')


    # ## Datasink
    # I needed to define the structure of what files are saved and where.

    # In[513]:


    # Create DataSink object
    dataSink = Node(DataSink(), name='datasink')

    # Name of the output folder
    dataSink.inputs.base_directory = opj(base_directory,fc_datasink_name)




    # To create the substitutions I looked the `datasink` folder where I was redirecting the output. I manually selected the part of file/folder name that I wanted to change and copied below to be substituted.
    #

    # In[514]:


    # Define substitution strings so that the data is similar to BIDS
    substitutions = [('_subject_id_', 'sub-')]

    # Feed the substitution strings to the DataSink node
    dataSink.inputs.substitutions = substitutions



    # ### Following is a Join Node that collects the preprocessed file paths and saves them in a file

    # In[516]:


    def save_file_list_function(in_fc_map_brain_file):
        # Imports
        import numpy as np
        import os
        from os.path import join as opj

        file_list = np.asarray(in_fc_map_brain_file)
        print('######################## File List ######################: \n',
              file_list)

        np.save('fc_map_brain_file_list', file_list)
        file_name = 'fc_map_brain_file_list.npy'
        out_fc_map_brain_file = opj(os.getcwd(), file_name)  # path

        return out_fc_map_brain_file



    # In[517]:


    save_file_list = JoinNode(Function(function=save_file_list_function,
                                       input_names=['in_fc_map_brain_file'],
                                       output_names=['out_fc_map_brain_file']),
                              joinsource="infosource",
                              joinfield=['in_fc_map_brain_file'],
                              name="save_file_list")


    # ## Create an FC node
    #
    # This node:
    # 1. Extracts the average time series of the brain ROIs using the atlas and
    #    stores it as a matrix of size [ROIs x Volumes].
    # 2. Extracts the voxel time series and stores it in a matrix of size
    #    [Voxels x Volumes].
    #


    # ...and saves the FC matrix files in the shape of brains
    def pear_coff(in_file, atlas_file, mask_file):
        # imports
        import numpy as np
        import nibabel as nib
        import os
        from os.path import join as opj

        # Use the mask to count how many voxels lie inside the brain region

        mask_data = nib.load(mask_file)
        mask = mask_data.get_data()

        x_dim, y_dim, z_dim = mask_data.shape


        atlasPath = atlas_file
        # Read the atlas
        atlasObject = nib.load(atlasPath)
        atlas = atlasObject.get_data()

        num_ROIs = int(np.max(atlas) - np.min(atlas))


        # Read the brain in_file

        brain_data = nib.load(in_file)
        brain = brain_data.get_data()

        x_dim, y_dim, z_dim, num_volumes = brain.shape


        num_brain_voxels = 0

        x_dim, y_dim, z_dim = mask_data.shape

        for i in range(x_dim):
            for j in range(y_dim):
                for k in range(z_dim):
                    if mask[i,j,k] == 1:
                        num_brain_voxels = num_brain_voxels + 1

        # Initialize a matrix of ROI time series and voxel time series

        ROI_matrix = np.zeros((num_ROIs, num_volumes))
        voxel_matrix = np.zeros((num_brain_voxels, num_volumes))

        # Fill up the voxel_matrix

        voxel_counter = 0
        for i in range(x_dim):
            for j in range(y_dim):
                for k in range(z_dim):
                    if mask[i,j,k] == 1:
                        voxel_matrix[voxel_counter,:] = brain[i,j,k,:]
                        voxel_counter = voxel_counter + 1


        # Fill up the ROI_matrix
        # Keep track of number of voxels per ROI as well by using an array - num_voxels_in_ROI[]

        num_voxels_in_ROI = np.zeros((num_ROIs,1)) # A column array containing the number of voxels in each ROI

        for i in range(x_dim):
            for j in range(y_dim):
                for k in range(z_dim):
                    label = int(atlas[i,j,k]) - 1
                    if label != -1:
                        ROI_matrix[label,:] = np.add(ROI_matrix[label,:], brain[i,j,k,:])
                        num_voxels_in_ROI[label,0] = num_voxels_in_ROI[label,0] + 1

        ROI_matrix = np.divide(ROI_matrix, num_voxels_in_ROI)  # NOTE: ROIs with zero voxels produce 0/0 = NaN rows here

        X, Y = ROI_matrix, voxel_matrix


        # Subtract mean from X and Y

        X = np.subtract(X, np.mean(X, axis=1, keepdims=True))
        Y = np.subtract(Y, np.mean(Y, axis=1, keepdims=True))

        temp1 = np.dot(X,Y.T)
        temp2 = np.sqrt(np.sum(np.multiply(X,X), axis=1, keepdims=True))
        temp3 = np.sqrt(np.sum(np.multiply(Y,Y), axis=1, keepdims=True))
        temp4 = np.dot(temp2,temp3.T)
        coff_matrix = np.divide(temp1, (temp4 + 1e-7))


        # Check if any ROI is missing and replace the NaN values in coff_matrix by 0
        if np.argwhere(np.isnan(coff_matrix)).shape[0] != 0:
            print("Some ROIs are not present. Replacing NaN in coff matrix by 0")
            np.nan_to_num(coff_matrix, copy=False)

        # NOTE: the 1e-7 in the denominator cannot prevent these NaNs; they
        # originate earlier, when ROI_matrix is divided by a zero voxel count
        # for ROIs that are absent from the atlas.
        sub_id = in_file.split('/')[-1].split('.')[0].split('_')[0].split('-')[1]

        fc_file_name = sub_id + '_fc_map'

        print("Pearson matrix calculated for subject:", sub_id)

        roi_brain_matrix = coff_matrix
        brain_file = in_file


        x_dim, y_dim, z_dim, t_dim = brain.shape

        brain_data.header.set_data_shape([x_dim, y_dim, z_dim, num_ROIs])

        brain_roi_tensor = np.zeros(brain_data.header.get_data_shape())

        print("Creating brain for Subject-",sub_id)
        for roi in range(num_ROIs):
            brain_voxel_counter = 0
            for i in range(x_dim):
                for j in range(y_dim):
                    for k in range(z_dim):
                        if mask[i,j,k] == 1:
                            brain_roi_tensor[i,j,k,roi] = roi_brain_matrix[roi,brain_voxel_counter]
                            brain_voxel_counter = brain_voxel_counter + 1


            assert (brain_voxel_counter == len(roi_brain_matrix[roi,:]))
        print("Created brain for Subject-",sub_id)


        path = os.getcwd()
        fc_file_name = fc_file_name + '.nii.gz'
        out_file = opj(path,fc_file_name)

        brain_with_header = nib.Nifti1Image(brain_roi_tensor, affine=brain_data.affine, header=brain_data.header)
        nib.save(brain_with_header,out_file)


        fc_map_brain_file = out_file
        return fc_map_brain_file
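
    # A vectorized sketch of the same Pearson computation (an alternative,
    # assuming memory allows stacking; not part of the original pipeline).
    # np.corrcoef treats rows as variables, so the ROI-by-voxel block sits in
    # the upper-right corner of the stacked correlation matrix:
    #   c = np.corrcoef(np.vstack([ROI_matrix, voxel_matrix]))
    #   coff_matrix = c[:num_ROIs, num_ROIs:]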




    # Again, create the Node for the function above

    pearcoff = Node(Function(function=pear_coff, input_names=['in_file','atlas_file','mask_file'],
                                    output_names=['fc_map_brain_file']), name='pearcoff')



    # # IMPORTANT:
    # * ROI 255 was removed during resampling, so the FC maps have NaN in that
    #   row. Don't use that ROI.
    # * I found this out because I kept getting: RuntimeWarning: invalid value
    #   encountered in true_divide.
    # * To debug it, I read the coff matrix and checked its diagonal to find
    #   the NaN value.

    # ## Extract volumes




    # ExtractROI - for volCorrect
    # Bugfix: output_type is an ExtractROI input, not a Node argument
    extract = Node(ExtractROI(t_size=-1, output_type='NIFTI'),
                   name="extract")



    # ###  Node for applying xformation matrix to functional data
    #


    func2std_xform = Node(FLIRT(output_type='NIFTI_GZ',
                             apply_xfm=True), name="func2std_xform")





    # motion_param_regression = 1
    # band_pass_filtering = 0
    # global_signal_regression = 0
    # smoothing = 1
    # volcorrect = 1
    if num_proc is None:
        num_proc = 7

    combination = 'motionRegress' + str(int(motion_param_regression)) + \
     'global' + str(int(global_signal_regression)) + 'smoothing' + str(int(smoothing)) +\
     'filt' + str(int(band_pass_filtering))

    print("Combination: ",combination)

    binary_string = str(int(motion_param_regression)) + str(int(global_signal_regression)) + \
    str(int(smoothing)) + str(int(band_pass_filtering)) + str(int(volcorrect))
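
    # Worked example (illustrative values): motion_param_regression=1,
    # global_signal_regression=0, smoothing=1, band_pass_filtering=0 and
    # volcorrect=1 give combination='motionRegress1global0smoothing1filt0'
    # and binary_string='10101'.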

    base_dir = opj(base_directory,functional_connectivity_directory)
    # wf = Workflow(name=functional_connectivity_directory)
    wf = Workflow(name=combination)

    wf.base_dir = base_dir # Dir where all the outputs will be stored.

    wf.connect(infosource ,'subject_id', getSubjectFilenames, 'subject_id')


    # ------- Dynamic Pipeline ------------------------


    # Order matches the digits of binary_string
    nodes = [
        calc_residuals,
        globalSignalRemoval,
        spatialSmooth,
        bandpass,
        volCorrect]


    # from nipype.interfaces import fsl

    old_node = getSubjectFilenames
    old_node_output = 'brain'

    binary_string = binary_string + '0'  # trailing '0' runs the loop once more, so old_node_output is updated for the last connected node before the final connects below
    for idx, include in enumerate(binary_string):
        # 11111
        # motion_param_regression
        # global_signal_regression
        # smoothing
        # band_pass_filtering
        # volcorrect

        if old_node == calc_residuals:
            old_node_output = 'out_file'
        elif old_node == extract :
            old_node_output = 'roi_file'
        elif old_node == globalSignalRemoval:
            old_node_output = 'out_file'
        elif old_node == bandpass:
            old_node_output = 'out_file'
        elif old_node == highpass:
            old_node_output = 'out_file'
        elif old_node == spatialSmooth:
            old_node_output = 'out_file'
        elif old_node == volCorrect:
            old_node_output = 'out_file'


        if int(include):
            # if old_node is None:
            #
            #     wf.add_nodes([nodes[idx]])
            #
            # else:



            new_node = nodes[idx]


            if new_node == calc_residuals:
                wf.connect([(getSubjectFilenames, calc_residuals, [('motion_param', 'motion_file')])])
                new_node_input = 'in_file'

            elif new_node == extract :
                wf.connect([( volCorrect, extract, [('t_min','t_min')])])
                new_node_input = 'in_file'

            elif new_node == globalSignalRemoval:
                wf.connect([(getSubjectFilenames, globalSignalRemoval, [('mask','mask_file')])])
                new_node_input = 'in_file'

            elif new_node == bandpass:
                wf.connect([(getSubjectFilenames, bandpass, [('tr','tr')])])
                new_node_input = 'in_file'

            elif new_node == highpass:
                wf.connect([(getSubjectFilenames, highpass, [('tr','tr')])]) #Commenting for FSL
                new_node_input = 'in_file'

            elif new_node == spatialSmooth:
                new_node_input = 'in_file'

            elif new_node == volCorrect:
                wf.connect([(infosource, volCorrect, [('subject_id','sub_id')])])
                wf.connect([( volCorrect, extract, [('t_min','t_min')])])
                new_node = extract
                new_node_input = 'in_file'


            wf.connect(old_node, old_node_output, new_node, new_node_input)

            old_node = new_node


        else:
            if idx == 3:  # band_pass_filtering == 0 => use highpass instead
                new_node = highpass
                wf.connect([(getSubjectFilenames, highpass, [('tr','tr')])]) #Commenting for FSL
                new_node_input = 'in_file'

                wf.connect(old_node, old_node_output, new_node, new_node_input)

                old_node = new_node

    wf.connect(old_node, old_node_output, pearcoff, 'in_file')
    wf.connect(getSubjectFilenames,'atlas', pearcoff, 'atlas_file')
    wf.connect(getSubjectFilenames, 'mask', pearcoff, 'mask_file')

    wf.connect(pearcoff, 'fc_map_brain_file', func2std_xform ,'in_file')
    wf.connect(getSubjectFilenames,'func2std_mat', func2std_xform, 'in_matrix_file')
    wf.connect(getSubjectFilenames, 'MNI3mm_path', func2std_xform,'reference')

    folder_name = combination + '.@fc_map_brain_file'
    wf.connect(func2std_xform, 'out_file',  save_file_list, 'in_fc_map_brain_file')
    wf.connect(save_file_list, 'out_fc_map_brain_file',  dataSink,folder_name)


    TEMP_DIR_FOR_STORAGE = opj(base_directory,'crash_files')
    wf.config = {"execution": {"crashdump_dir": TEMP_DIR_FOR_STORAGE}}

    wf.write_graph(graph2use='flat', format='png')
    wf.run('MultiProc', plugin_args={'n_procs': num_proc})
Code Example #7
0
File: mvpa_preproc.py Project: mikbuch/pymri
def create_realign_reference(run, whichvol_glob, name="realignref"):
    """
    run (int): run's number

    whichvol_glob: 'first' or 'middle' or 'last' or
        'mean', if 'mean' was chosed for run then whichvol_glob does't matter
    """
    realignref = Workflow(name=name)

    inputnode = Node(interface=util.IdentityInterface(fields=["in_sub", "in_hand"]), name="inputspec")

    ds = Node(DataGrabber(infields=["subject_id", "hand"], outfields=["func"]), name="datasource")
    ds.inputs.base_directory = opap(base_directory)
    ds.inputs.template = "%s/%s_Hand/*.nii*"
    ds.inputs.sort_filelist = True
    # ds.inputs.subject_id = 'GK011RZJA'
    # ds.inputs.hand = 'Left'

    realignref.connect(inputnode, "in_hand", ds, "hand")
    realignref.connect(inputnode, "in_sub", ds, "subject_id")

    img2float = MapNode(
        interface=fsl.ImageMaths(out_data_type="float", op_string="", suffix="_dtype"),
        iterfield=["in_file"],
        name="img2float",
    )

    realignref.connect(ds, ("func", pickrun), img2float, "in_file")
    # realignref.connect(inputnode, 'in_files', img2float, 'in_file')

    if whichvol_glob != "mean":
        extract_ref = Node(interface=fsl.ExtractROI(t_size=1), name="extractref")  # iterfield removed: it is a MapNode option and was silently ignored by Node

        realignref.connect(img2float, ("out_file", pickfirst), extract_ref, "in_file")
        realignref.connect(img2float, ("out_file", pickvol, 0, whichvol_glob), extract_ref, "t_min")

    outputnode = Node(interface=util.IdentityInterface(fields=["ref_vol"]), name="outputnode")

    if whichvol_glob != "mean":
        realignref.connect(extract_ref, "roi_file", outputnode, "ref_vol")
    else:
        meanfunc = Node(interface=fsl.ImageMaths(op_string="-Tmean", suffix="_mean"), name="meanfunc")
        realignref.connect(img2float, ("out_file", pickfirst), meanfunc, "in_file")
        realignref.connect(meanfunc, "out_file", outputnode, "ref_vol")

    return realignref
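
# Usage sketch (subject/hand values taken from the commented defaults above):
# rref = create_realign_reference(run=1, whichvol_glob='middle')
# rref.inputs.inputspec.in_sub = 'GK011RZJA'
# rref.inputs.inputspec.in_hand = 'Left'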
Code Example #8
0
File: example_analysis.py Project: NILAB-UvA/nitools
from nipype.interfaces.utility import IdentityInterface
from nipype.pipeline import Workflow, Node
from nipype.interfaces.fsl.model import FEAT
import os.path as op
from glob import glob
from .nodes import ConcatenateIterables, Combine_events_and_confounds, Custom_Level1design_Feat

tasks = ['other', 'self_run-1', 'self_run-2']
base_dir = '/home/lsnoek1/SharedStates'
out_dir = op.join(base_dir, 'firstlevel')
sub_ids = sorted([
    op.basename(f)
    for f in glob(op.join(base_dir, 'preproc', 'fmriprep', 'sub-???'))
])

meta_wf = Workflow('firstlevel_spynoza')

concat_iterables_node = Node(
    interface=ConcatenateIterables(fields=['sub_id', 'task']),
    name='concat_iterables')

input_node = Node(IdentityInterface(fields=['sub_id', 'task']),
                  name='inputspec')
input_node.iterables = [('sub_id', sub_ids), ('task', tasks)]

meta_wf.connect(input_node, 'sub_id', concat_iterables_node, 'sub_id')
meta_wf.connect(input_node, 'task', concat_iterables_node, 'task')

templates = {
    'func': '{sub_id}/func/{sub_id}_task-{task}*_preproc.nii.gz',
    'func_mask': '{sub_id}/func/{sub_id}_task-{task}*_brainmask.nii.gz',
Code Example #9
0
    def create_workflow(self):
        """Create the Niype workflow of the super-resolution pipeline.

        It is composed of a succession of Nodes and their corresponding parameters,
        where the output of node i goes to the input of node i+1.

        """
        sub_ses = self.subject
        if self.session is not None:
            sub_ses = ''.join([sub_ses, '_', self.session])
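            # e.g. subject='sub-01', session='ses-01' gives sub_ses='sub-01_ses-01'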

        if self.session is None:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk",
                                                   __version__]), self.subject)
        else:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, self.session, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk", __version__]),
                                         self.subject, self.session)

        if not os.path.exists(wf_base_dir):
            os.makedirs(wf_base_dir)
        print("Process directory: {}".format(wf_base_dir))

        # Initialization (Not sure we can control the name of nipype log)
        if os.path.isfile(os.path.join(wf_base_dir, "pypeline.log")):
            os.unlink(os.path.join(wf_base_dir, "pypeline.log"))

        self.wf = Workflow(name=self.pipeline_name, base_dir=wf_base_dir)

        config.update_config({
            'logging': {
                'log_directory': os.path.join(wf_base_dir),
                'log_to_file': True
            },
            'execution': {
                'remove_unnecessary_outputs': False,
                'stop_on_first_crash': True,
                'stop_on_first_rerun': False,
                'crashfile_format': "txt",
                'use_relative_paths': True,
                'write_provenance': False
            }
        })

        # Update nipype logging with config
        nipype_logging.update_logging(config)
        # config.enable_provenance()

        if self.use_manual_masks:
            dg = Node(interface=DataGrabber(outfields=['T2ws', 'masks']),
                      name='data_grabber')
            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            if self.session is not None:
                t2ws_template = os.path.join(
                    self.subject, self.session, 'anat',
                    '_'.join([sub_ses, '*run-*', '*T2w.nii.gz']))
                if self.m_masks_desc is not None:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat',
                        '_'.join([sub_ses, '*run-*', '*mask.nii.gz']))
            else:
                t2ws_template = os.path.join(self.subject, 'anat',
                                             sub_ses + '*_run-*_T2w.nii.gz')

                if self.m_masks_desc is not None:
                    # Bugfix: self.session is None in this branch and must not
                    # be joined into the path
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, 'anat', sub_ses + '*_run-*_*mask.nii.gz')

            dg.inputs.field_template = dict(T2ws=t2ws_template,
                                            masks=masks_template)

            brainMask = MapNode(
                interface=IdentityInterface(fields=['out_file']),
                name='brain_masks_bypass',
                iterfield=['out_file'])

            if self.m_stacks is not None:
                custom_masks_filter = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='custom_masks_filter')
                custom_masks_filter.inputs.stacks_id = self.m_stacks

        else:
            dg = Node(interface=DataGrabber(outfields=['T2ws']),
                      name='data_grabber')

            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            dg.inputs.field_template = dict(
                T2ws=os.path.join(self.subject, 'anat', sub_ses +
                                  '*_run-*_T2w.nii.gz'))
            if self.session is not None:
                dg.inputs.field_template = dict(T2ws=os.path.join(
                    self.subject, self.session, 'anat', '_'.join(
                        [sub_ses, '*run-*', '*T2w.nii.gz'])))

            if self.m_stacks is not None:
                t2ws_filter_prior_masks = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='t2ws_filter_prior_masks')
                t2ws_filter_prior_masks.inputs.stacks_id = self.m_stacks

            brainMask = MapNode(interface=preprocess.BrainExtraction(),
                                name='brainExtraction',
                                iterfield=['in_file'])

            brainMask.inputs.bids_dir = self.bids_dir
            brainMask.inputs.in_ckpt_loc = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_localization",
                             "Unet.ckpt-88000.index")).split('.index')[0]
            brainMask.inputs.threshold_loc = 0.49
            brainMask.inputs.in_ckpt_seg = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_segmentation",
                             "Unet.ckpt-20000.index")).split('.index')[0]
            brainMask.inputs.threshold_seg = 0.5

        t2ws_filtered = Node(interface=preprocess.FilteringByRunid(),
                             name='t2ws_filtered')
        masks_filtered = Node(interface=preprocess.FilteringByRunid(),
                              name='masks_filtered')

        if not self.m_skip_stacks_ordering:
            stacksOrdering = Node(interface=preprocess.StacksOrdering(),
                                  name='stackOrdering')
        else:
            stacksOrdering = Node(
                interface=IdentityInterface(fields=['stacks_order']),
                name='stackOrdering')
            stacksOrdering.inputs.stacks_order = self.m_stacks

        if not self.m_skip_nlm_denoising:
            nlmDenoise = MapNode(interface=preprocess.BtkNLMDenoising(),
                                 name='nlmDenoise',
                                 iterfield=['in_file', 'in_mask'])
            nlmDenoise.inputs.bids_dir = self.bids_dir

            # Without the mask, the first slice-intensity correction...
            srtkCorrectSliceIntensity01_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity01_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity01_nlm.inputs.bids_dir = self.bids_dir
            srtkCorrectSliceIntensity01_nlm.inputs.out_postfix = '_uni'

        srtkCorrectSliceIntensity01 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity01',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity01.inputs.bids_dir = self.bids_dir
        srtkCorrectSliceIntensity01.inputs.out_postfix = '_uni'

        srtkSliceBySliceN4BiasFieldCorrection = MapNode(
            interface=preprocess.MialsrtkSliceBySliceN4BiasFieldCorrection(),
            name='srtkSliceBySliceN4BiasFieldCorrection',
            iterfield=['in_file', 'in_mask'])
        srtkSliceBySliceN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        srtkSliceBySliceCorrectBiasField = MapNode(
            interface=preprocess.MialsrtkSliceBySliceCorrectBiasField(),
            name='srtkSliceBySliceCorrectBiasField',
            iterfield=['in_file', 'in_mask', 'in_field'])
        srtkSliceBySliceCorrectBiasField.inputs.bids_dir = self.bids_dir

        # Four-module sequence to be defined as a stage.
        if not self.m_skip_nlm_denoising:
            srtkCorrectSliceIntensity02_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity02_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity02_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization01_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization01_nlm')
            srtkIntensityStandardization01_nlm.inputs.bids_dir = self.bids_dir

            srtkHistogramNormalization_nlm = Node(
                interface=preprocess.MialsrtkHistogramNormalization(),
                name='srtkHistogramNormalization_nlm')
            srtkHistogramNormalization_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization02_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization02_nlm')
            srtkIntensityStandardization02_nlm.inputs.bids_dir = self.bids_dir

        # Four-module sequence to be defined as a stage.
        srtkCorrectSliceIntensity02 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity02',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity02.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization01 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization01')
        srtkIntensityStandardization01.inputs.bids_dir = self.bids_dir

        srtkHistogramNormalization = Node(
            interface=preprocess.MialsrtkHistogramNormalization(),
            name='srtkHistogramNormalization')
        srtkHistogramNormalization.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization02 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization02')
        srtkIntensityStandardization02.inputs.bids_dir = self.bids_dir

        srtkMaskImage01 = MapNode(interface=preprocess.MialsrtkMaskImage(),
                                  name='srtkMaskImage01',
                                  iterfield=['in_file', 'in_mask'])
        srtkMaskImage01.inputs.bids_dir = self.bids_dir

        srtkImageReconstruction = Node(
            interface=reconstruction.MialsrtkImageReconstruction(),
            name='srtkImageReconstruction')
        srtkImageReconstruction.inputs.bids_dir = self.bids_dir
        srtkImageReconstruction.inputs.sub_ses = sub_ses
        srtkImageReconstruction.inputs.no_reg = self.m_skip_svr

        srtkTVSuperResolution = Node(
            interface=reconstruction.MialsrtkTVSuperResolution(),
            name='srtkTVSuperResolution')
        srtkTVSuperResolution.inputs.bids_dir = self.bids_dir
        srtkTVSuperResolution.inputs.sub_ses = sub_ses
        srtkTVSuperResolution.inputs.in_loop = self.primal_dual_loops
        srtkTVSuperResolution.inputs.in_deltat = self.deltatTV
        srtkTVSuperResolution.inputs.in_lambda = self.lambdaTV
        srtkTVSuperResolution.inputs.use_manual_masks = self.use_manual_masks

        srtkN4BiasFieldCorrection = Node(
            interface=postprocess.MialsrtkN4BiasFieldCorrection(),
            name='srtkN4BiasFieldCorrection')
        srtkN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        if self.m_do_refine_hr_mask:
            srtkHRMask = Node(
                interface=postprocess.MialsrtkRefineHRMaskByIntersection(),
                name='srtkHRMask')
            srtkHRMask.inputs.bids_dir = self.bids_dir
        else:
            srtkHRMask = Node(interface=postprocess.BinarizeImage(),
                              name='srtkHRMask')

        srtkMaskImage02 = Node(interface=preprocess.MialsrtkMaskImage(),
                               name='srtkMaskImage02')
        srtkMaskImage02.inputs.bids_dir = self.bids_dir

        # Build the workflow: all nodes are ready, now connect them
        if self.use_manual_masks:
            if self.m_stacks is not None:
                self.wf.connect(dg, "masks", custom_masks_filter,
                                "input_files")
                self.wf.connect(custom_masks_filter, "output_files", brainMask,
                                "out_file")
            else:
                self.wf.connect(dg, "masks", brainMask, "out_file")
        else:
            if self.m_stacks is not None:
                self.wf.connect(dg, "T2ws", t2ws_filter_prior_masks,
                                "input_files")
                self.wf.connect(t2ws_filter_prior_masks, "output_files",
                                brainMask, "in_file")
            else:
                self.wf.connect(dg, "T2ws", brainMask, "in_file")

        if not self.m_skip_stacks_ordering:
            self.wf.connect(brainMask, "out_file", stacksOrdering,
                            "input_masks")

        self.wf.connect(stacksOrdering, "stacks_order", t2ws_filtered,
                        "stacks_id")
        self.wf.connect(dg, "T2ws", t2ws_filtered, "input_files")

        self.wf.connect(stacksOrdering, "stacks_order", masks_filtered,
                        "stacks_id")
        self.wf.connect(brainMask, "out_file", masks_filtered, "input_files")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(t2ws_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_mask")  ## Comment to match docker process

            self.wf.connect(nlmDenoise, ("out_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_mask")

        self.wf.connect(t2ws_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkCorrectSliceIntensity01_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        else:
            self.wf.connect(srtkCorrectSliceIntensity01,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceN4BiasFieldCorrection, "in_mask")

        self.wf.connect(srtkCorrectSliceIntensity01,
                        ("out_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_file")
        self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                        ("out_fld_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_field")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                            ("out_im_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_mask")
            self.wf.connect(srtkCorrectSliceIntensity02_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkIntensityStandardization01_nlm, "input_images")
            self.wf.connect(srtkIntensityStandardization01_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_masks")
            self.wf.connect(srtkHistogramNormalization_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkIntensityStandardization02_nlm, "input_images")

        self.wf.connect(srtkSliceBySliceCorrectBiasField,
                        ("out_im_file", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_mask")
        self.wf.connect(srtkCorrectSliceIntensity02,
                        ("out_file", utils.sort_ascending),
                        srtkIntensityStandardization01, "input_images")

        self.wf.connect(srtkIntensityStandardization01,
                        ("output_images", utils.sort_ascending),
                        srtkHistogramNormalization, "input_images")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkHistogramNormalization, "input_masks")
        self.wf.connect(srtkHistogramNormalization,
                        ("output_images", utils.sort_ascending),
                        srtkIntensityStandardization02, "input_images")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkIntensityStandardization02_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")
        else:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")

        self.wf.connect(srtkMaskImage01, "out_im_file",
                        srtkImageReconstruction, "input_images")
        self.wf.connect(masks_filtered, "output_files",
                        srtkImageReconstruction, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order",
                        srtkImageReconstruction, "stacks_order")

        self.wf.connect(srtkIntensityStandardization02, "output_images",
                        srtkTVSuperResolution, "input_images")
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending),
                        srtkTVSuperResolution, "input_transforms")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkTVSuperResolution, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order", srtkTVSuperResolution,
                        "stacks_order")

        self.wf.connect(srtkImageReconstruction, "output_sdi",
                        srtkTVSuperResolution, "input_sdi")

        if self.m_do_refine_hr_mask:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkHRMask, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), srtkHRMask,
                            "input_masks")
            self.wf.connect(srtkImageReconstruction,
                            ("output_transforms", utils.sort_ascending),
                            srtkHRMask, "input_transforms")
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_sr")
        else:
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_image")

        self.wf.connect(srtkTVSuperResolution, "output_sr", srtkMaskImage02,
                        "in_file")
        self.wf.connect(srtkHRMask, "output_srmask", srtkMaskImage02,
                        "in_mask")

        self.wf.connect(srtkTVSuperResolution, "output_sr",
                        srtkN4BiasFieldCorrection, "input_image")
        self.wf.connect(srtkHRMask, "output_srmask", srtkN4BiasFieldCorrection,
                        "input_mask")

        # Datasinker
        finalFilenamesGeneration = Node(
            interface=postprocess.FilenamesGeneration(), name='filenames_gen')
        finalFilenamesGeneration.inputs.sub_ses = sub_ses
        finalFilenamesGeneration.inputs.sr_id = self.sr_id
        finalFilenamesGeneration.inputs.use_manual_masks = self.use_manual_masks

        self.wf.connect(stacksOrdering, "stacks_order",
                        finalFilenamesGeneration, "stacks_order")

        datasink = Node(interface=DataSink(), name='data_sinker')
        datasink.inputs.base_directory = final_res_dir

        if not self.m_skip_stacks_ordering:
            self.wf.connect(stacksOrdering, "report_image", datasink,
                            'figures.@stackOrderingQC')
            self.wf.connect(stacksOrdering, "motion_tsv", datasink,
                            'anat.@motionTSV')
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        datasink, 'anat.@LRmasks')
        self.wf.connect(srtkIntensityStandardization02,
                        ("output_images", utils.sort_ascending), datasink,
                        'anat.@LRsPreproc')
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending), datasink,
                        'xfm.@transforms')
        self.wf.connect(finalFilenamesGeneration, "substitutions", datasink,
                        "substitutions")
        self.wf.connect(srtkMaskImage01, ("out_im_file", utils.sort_ascending),
                        datasink, 'anat.@LRsDenoised')
        self.wf.connect(srtkImageReconstruction, "output_sdi", datasink,
                        'anat.@SDI')
        self.wf.connect(srtkN4BiasFieldCorrection, "output_image", datasink,
                        'anat.@SR')
        self.wf.connect(srtkTVSuperResolution, "output_json_path", datasink,
                        'anat.@SRjson')
        self.wf.connect(srtkTVSuperResolution, "output_sr_png", datasink,
                        'figures.@SRpng')
        self.wf.connect(srtkHRMask, "output_srmask", datasink, 'anat.@SRmask')
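
        # Usage sketch (assumption; execution happens outside this method):
        # after create_workflow(), the pipeline would typically be started with
        #   self.wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})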
Code Example #10
0
def create_logb_workflow(name="LOGISMOSB_WF",
                         master_config=None,
                         plugin_args=None):
    logb_wf = Workflow(name=name)

    config = read_json_config("config.json")
    config['atlas_info'] = get_local_file_location(config['atlas_info'])

    inputs_node = Node(IdentityInterface(fields=[
        't1_file', 't2_file', 'posterior_files', 'joint_fusion_file',
        'brainlabels_file', 'hncma_atlas'
    ]),
                       name="inputspec")
    inputs_node.run_without_submitting = True

    # ensure that t1 and t2 are in the same voxel lattice
    input_t2 = Node(BRAINSResample(), "ResampleInputT2Volume")
    input_t2.inputs.outputVolume = "t2_resampled.nii.gz"
    input_t2.inputs.pixelType = 'ushort'
    input_t2.inputs.interpolationMode = "Linear"

    logb_wf.connect([(inputs_node, input_t2, [('t1_file', 'referenceVolume'),
                                              ('t2_file', 'inputVolume')])])

    white_matter_masking_node = Node(interface=WMMasking(), name="WMMasking")
    white_matter_masking_node.inputs.dilation = config['WMMasking']['dilation']
    white_matter_masking_node.inputs.csf_threshold = config['WMMasking'][
        'csf_threshold']
    if master_config and master_config['labelmap_colorlookup_table']:
        white_matter_masking_node.inputs.atlas_info = master_config[
            'labelmap_colorlookup_table']
    else:
        white_matter_masking_node.inputs.atlas_info = config['atlas_info']

    logb_wf.connect([(inputs_node, white_matter_masking_node,
                      [("posterior_files", "posterior_files"),
                       ("joint_fusion_file", "atlas_file"),
                       ("brainlabels_file", "brainlabels_file"),
                       ("hncma_atlas", "hncma_file")])])

    gm_labels = Node(interface=CreateGMLabelMap(), name="GM_Labelmap")
    gm_labels.inputs.atlas_info = config['atlas_info']
    logb_wf.connect([(inputs_node, gm_labels, [('joint_fusion_file',
                                                'atlas_file')])])

    logismosb_output_node = create_output_spec(
        ["wmsurface_file", "gmsurface_file"],
        config["hemisphere_names"],
        name="outputspec")

    for hemisphere in config["hemisphere_names"]:
        genus_zero_filter = Node(
            interface=GenusZeroImageFilter(),
            name="{0}_GenusZeroImageFilter".format(hemisphere))
        genus_zero_filter.inputs.connectivity = config['GenusZeroImageFilter'][
            'connectivity']
        genus_zero_filter.inputs.biggestComponent = config[
            'GenusZeroImageFilter']['biggestComponent']
        genus_zero_filter.inputs.connectedComponent = config[
            'GenusZeroImageFilter']['connectedComponent']
        genus_zero_filter.inputs.out_mask = "{0}_genus_zero_white_matter.nii.gz".format(
            hemisphere)

        logb_wf.connect([(white_matter_masking_node, genus_zero_filter,
                          [('{0}_wm'.format(hemisphere), 'in_file')])])

        surface_generation = Node(
            interface=BRAINSSurfaceGeneration(),
            name="{0}_BRAINSSurfaceGeneration".format(hemisphere))
        surface_generation.inputs.smoothSurface = config[
            'BRAINSSurfaceGeneration']['smoothSurface']
        surface_generation.inputs.numIterations = config[
            'BRAINSSurfaceGeneration']['numIterations']
        surface_generation.inputs.out_file = "{0}_white_matter_surface.vtk".format(
            hemisphere)

        logb_wf.connect([(genus_zero_filter, surface_generation,
                          [('out_file', 'in_file')])])

        logismosb = Node(interface=LOGISMOSB(),
                         name="{0}_LOGISMOSB".format(hemisphere))
        logismosb.inputs.smoothnessConstraint = config['LOGISMOSB'][
            'smoothnessConstraint']
        logismosb.inputs.nColumns = config['LOGISMOSB']['nColumns']
        logismosb.inputs.columnChoice = config['LOGISMOSB']['columnChoice']
        logismosb.inputs.columnHeight = config['LOGISMOSB']['columnHeight']
        logismosb.inputs.nodeSpacing = config['LOGISMOSB']['nodeSpacing']
        logismosb.inputs.w = config['LOGISMOSB']['w']
        logismosb.inputs.a = config['LOGISMOSB']['a']
        logismosb.inputs.nPropagate = config['LOGISMOSB']['nPropagate']
        logismosb.inputs.basename = hemisphere
        if config['LOGISMOSB']['thickRegions']:
            logismosb.inputs.thick_regions = config['LOGISMOSB'][
                'thickRegions']
        else:
            logismosb.inputs.useHNCMALabels = True

        if plugin_args:
            logismosb.plugin_args = plugin_args

        logb_wf.connect([
            (inputs_node, logismosb, [("t1_file", "t1_file"),
                                      ('hncma_atlas', 'atlas_file')]),
            (input_t2, logismosb, [("outputVolume", "t2_file")]),
            (genus_zero_filter, logismosb, [("out_file", "wm_file")]),
            (surface_generation, logismosb, [("out_file", "mesh_file")]),
            (white_matter_masking_node, logismosb,
             [('{0}_boundary'.format(hemisphere), 'brainlabels_file')]),
            (logismosb, logismosb_output_node,
             [("gmsurface_file", "{0}_gmsurface_file".format(hemisphere)),
              ("wmsurface_file", "{0}_wmsurface_file".format(hemisphere))])
        ])

    return logb_wf
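
# Usage sketch (paths are placeholders, not from the source):
# wf = create_logb_workflow(name="LOGISMOSB_WF")
# wf.inputs.inputspec.t1_file = '/data/t1.nii.gz'
# wf.inputs.inputspec.t2_file = '/data/t2.nii.gz'
# wf.run()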
Code Example #11
0
File: pals_workflow.py Project: npnl/PALS
def pals(config: dict):
    # Get config file defining workflow
    # configs = json.load(open(config_file, 'r'))
    print('Starting: initializing workflow.')
    # Build pipeline
    wf = Workflow(name='PALS')

    # bidsLayout = bids.BIDSLayout(config['BIDSRoot'])
    # Get data
    loader = BIDSDataGrabber(index_derivatives=False)
    loader.inputs.base_dir = config['BIDSRoot']
    loader.inputs.subject = config['Subject']
    if (config['Session'] is not None):
        loader.inputs.session = config['Session']
    loader.inputs.output_query = {
        't1w': dict(**config['T1Entities'], invalid_filters='allow')
    }
    loader.inputs.extra_derivatives = [config['BIDSRoot']]
    loader = Node(loader, name='BIDSgrabber')

    entities = {
        'subject': config['Subject'],
        'session': config['Session'],
        'suffix': 'T1w',
        'extension': '.nii.gz'
    }

    # Reorient to radiological
    if (config['Analysis']['Reorient']):
        radio = MapNode(
            Reorient(orientation=config['Analysis']['Orientation']),
            name="reorientation",
            iterfield='in_file')
        if ('Reorient' in config['Outputs'].keys()):
            reorient_sink = MapNode(Function(function=copyfile,
                                             input_names=['src', 'dst']),
                                    name='reorient_copy',
                                    iterfield='src')
            path_pattern = 'sub-{subject}/ses-{session}/anat/sub-{subject}_ses-{session}_desc-' + config[
                'Analysis']['Orientation'] + '_{suffix}{extension}'
            reorient_filename = join(config['Outputs']['Reorient'],
                                     path_pattern.format(**entities))
            pathlib.Path(os.path.dirname(reorient_filename)).mkdir(
                parents=True, exist_ok=True)
            reorient_sink.inputs.dst = reorient_filename
            wf.connect([(radio, reorient_sink, [('out_file', 'src')])])
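
            # Illustrative expansion (values assumed): with subject='01',
            # session='1', suffix='T1w', extension='.nii.gz' and
            # Orientation='RAS', path_pattern.format(**entities) yields
            # 'sub-01/ses-1/anat/sub-01_ses-1_desc-RAS_T1w.nii.gz'.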

    else:
        radio = MapNode(Function(function=infile_to_outfile,
                                 input_names='in_file',
                                 output_names='out_file'),
                        name='identity',
                        iterfield='in_file')

    # Brain extraction
    bet = node_fetch.extraction_node(config, **config['BrainExtraction'])
    if ('BrainExtraction' in config['Outputs'].keys()):
        path_pattern = 'sub-{subject}/ses-{session}/anat/sub-{subject}_ses-{session}_space-' + \
                       config['Outputs']['StartRegistrationSpace'] + '_desc-brain_mask{extension}'
        brain_mask_sink = MapNode(Function(function=copyfile,
                                           input_names=['src', 'dst']),
                                  name='brain_mask_sink',
                                  iterfield='src')
        brain_mask_out = join(config['Outputs']['BrainExtraction'],
                              path_pattern.format(**entities))
        pathlib.Path(os.path.dirname(brain_mask_out)).mkdir(parents=True,
                                                            exist_ok=True)
        brain_mask_sink.inputs.dst = brain_mask_out

    ## Lesion load calculation
    # Registration
    reg = node_fetch.registration_node(config, **config['Registration'])
    if ('RegistrationTransform' in config['Outputs'].keys()):

        path_pattern = 'sub-{subject}/ses-{session}/anat/sub-{subject}_ses-{session}_space-' + \
                       config['Outputs']['StartRegistrationSpace'] + '_desc-transform.mat'

        registration_transform_filename = join(
            config['Outputs']['RegistrationTransform'],
            path_pattern.format(**entities))
        registration_transform_sink = MapNode(Function(
            function=copyfile, input_names=['src', 'dst']),
                                              name='registration_transf_sink',
                                              iterfield='src')
        pathlib.Path(os.path.dirname(registration_transform_filename)).mkdir(
            parents=True, exist_ok=True)
        registration_transform_sink.inputs.dst = registration_transform_filename
        wf.connect([(reg, registration_transform_sink, [('out_matrix_file',
                                                         'src')])])

    # Get mask
    mask_path_fetcher = Node(BIDSDataGrabber(
        base_dir=config['LesionRoot'],
        subject=config['Subject'],
        index_derivatives=False,
        output_query={
            'mask': dict(**config['LesionEntities'], invalid_filters='allow')
        },
        extra_derivatives=[config['LesionRoot']]),
                             name='mask_grabber')
    if (config['Session'] is not None):
        mask_path_fetcher.inputs.session = config['Session']

    # Apply reg file to lesion mask
    apply_xfm = node_fetch.apply_xfm_node(config)

    # Lesion load calculation
    if (config['Analysis']['LesionLoadCalculation']):
        lesion_load = MapNode(Function(function=overlap,
                                       input_names=['ref_mask', 'roi_list'],
                                       output_names='out_list'),
                              name='overlap_calc',
                              iterfield=['ref_mask'])
        roi_list = []
        if (os.path.exists(config['ROIDir'])):
            buf = os.listdir(config['ROIDir'])
            roi_list = [
                os.path.abspath(os.path.join(config['ROIDir'], b)) for b in buf
            ]
        else:
            warnings.warn(f"ROIDir ({config['ROIDir']}) doesn't exist.")
        buf = config['ROIList']
        roi_list += [os.path.abspath(b) for b in buf]
        lesion_load.inputs.roi_list = roi_list

        # CSV output
        csv_output = MapNode(Function(
            function=csv_writer,
            input_names=['filename', 'data_dict', 'subject', 'session']),
                             name='csv_output',
                             iterfield=['data_dict'])
        csv_output.inputs.subject = config['Subject']
        csv_output.inputs.session = config['Session']
        path_pattern = 'sub-{subject}/ses-{session}/anat/sub-{subject}_ses-{session}_desc-LesionLoad.csv'
        csv_out_filename = join(config['Outputs']['RegistrationTransform'],
                                path_pattern.format(**entities))
        csv_output.inputs.filename = csv_out_filename

        wf.connect([(apply_xfm, lesion_load, [('out_file', 'ref_mask')]),
                    (lesion_load, csv_output, [('out_list', 'data_dict')])])

    ## Lesion correction
    if (config['Analysis']['LesionCorrection']):
        ## White matter removal node. Does the white matter correction; has multiple inputs that need to be supplied.
        wm_removal = MapNode(Function(
            function=white_matter_correction,
            input_names=[
                'image', 'wm_mask', 'lesion_mask', 'max_difference_fraction'
            ],
            output_names=['out_data', 'corrected_volume']),
                             name='wm_removal',
                             iterfield=['image', 'wm_mask', 'lesion_mask'])
        wm_removal.inputs.max_difference_fraction = config['LesionCorrection'][
            'WhiteMatterSpread']

        ## File loaders
        # Loads the subject image, passes it to wm_removal node
        subject_image_loader = MapNode(Function(function=image_load,
                                                input_names=['in_filename'],
                                                output_names='out_image'),
                                       name='file_load0',
                                       iterfield='in_filename')
        wf.connect([
            (radio, subject_image_loader, [('out_file', 'in_filename')]),
            (subject_image_loader, wm_removal, [('out_image', 'image')])
        ])

        # Loads the mask image, passes it to wm_removal node
        mask_image_loader = MapNode(Function(function=image_load,
                                             input_names=['in_filename'],
                                             output_names='out_image'),
                                    name='file_load2',
                                    iterfield='in_filename')
        wf.connect([
            (mask_path_fetcher, mask_image_loader, [('mask', 'in_filename')]),
            (mask_image_loader, wm_removal, [('out_image', 'lesion_mask')])
        ])

        # Save lesion mask with white matter voxels removed
        output_image = MapNode(Function(
            function=image_write,
            input_names=['image', 'reference', 'file_name']),
                               name='image_writer0',
                               iterfield=['image', 'reference'])
        path_pattern = 'sub-{subject}/ses-{session}/anat/sub-{subject}_ses-{session}_space-' + \
                       config['Outputs']['StartRegistrationSpace'] + '_desc-CorrectedLesion_mask{extension}'
        lesion_corrected_filename = join(config['Outputs']['LesionCorrected'],
                                         path_pattern.format(**entities))
        output_image.inputs.file_name = lesion_corrected_filename
        wf.connect([(wm_removal, output_image, [('out_data', 'image')]),
                    (mask_path_fetcher, output_image, [('mask', 'reference')])
                    ])

        ## CSV output
        csv_output_corr = MapNode(Function(function=csv_writer,
                                           input_names=[
                                               'filename', 'subject',
                                               'session', 'data', 'data_name'
                                           ]),
                                  name='csv_output_corr',
                                  iterfield=['data'])
        csv_output_corr.inputs.subject = config['Subject']
        csv_output_corr.inputs.session = config['Session']
        csv_output_corr.inputs.data_name = 'CorrectedVolume'

        path_pattern = 'sub-{subject}/ses-{session}/anat/sub-{subject}_ses-{session}_desc-LesionLoad.csv'
        csv_out_filename = join(config['Outputs']['RegistrationTransform'],
                                path_pattern.format(**entities))
        csv_output_corr.inputs.filename = csv_out_filename

        wf.connect([(wm_removal, csv_output_corr, [('corrected_volume', 'data')
                                                   ])])

        ## White matter segmentation; either do segmentation or load the file
        if (config['Analysis']['WhiteMatterSegmentation']):
            # Config is set to do white matter segmentation
            # T1 intensity normalization
            t1_norm = MapNode(Function(
                function=rescale_image,
                input_names=['image', 'range_min', 'range_max', 'save_image'],
                output_names='out_file'),
                              name='normalization',
                              iterfield=['image'])
            t1_norm.inputs.range_min = config['LesionCorrection'][
                'ImageNormMin']
            t1_norm.inputs.range_max = config['LesionCorrection'][
                'ImageNormMax']
            t1_norm.inputs.save_image = True
            wf.connect([(bet, t1_norm, [('out_file', 'image')])])

            # White matter segmentation
            wm_seg = MapNode(FAST(), name="wm_seg", iterfield='in_files')
            wm_seg.inputs.out_basename = "segmentation"
            wm_seg.inputs.img_type = 1
            wm_seg.inputs.number_classes = 3
            wm_seg.inputs.hyper = 0.1
            wm_seg.inputs.iters_afterbias = 4
            wm_seg.inputs.bias_lowpass = 20
            wm_seg.inputs.segments = True
            wm_seg.inputs.no_pve = True
            ex_last = MapNode(Function(function=extract_last,
                                       input_names=['in_list'],
                                       output_names='out_entry'),
                              name='ex_last',
                              iterfield='in_list')

            file_load1 = MapNode(Function(function=image_load,
                                          input_names=['in_filename'],
                                          output_names='out_image'),
                                 name='file_load1',
                                 iterfield='in_filename')
            # White matter output; only necessary if white matter is segmented
            wm_map = MapNode(Function(
                function=image_write,
                input_names=['image', 'reference', 'file_name']),
                             name='image_writer1',
                             iterfield=['image', 'reference'])
            path_pattern = 'sub-{subject}/ses-{session}/anat/sub-{subject}_ses-{session}_space-' + \
                           config['Outputs']['StartRegistrationSpace'] + '_desc-WhiteMatter_mask{extension}'
            wm_map_filename = join(config['Outputs']['LesionCorrected'],
                                   path_pattern.format(**entities))
            wm_map.inputs.file_name = wm_map_filename
            wf.connect([(file_load1, wm_map, [('out_image', 'image')]),
                        (mask_path_fetcher, wm_map, [('mask', 'reference')])])
            # Connect nodes in workflow
            wf.connect([
                (wm_seg, ex_last, [('tissue_class_files', 'in_list')]),
                (t1_norm, wm_seg, [('out_file', 'in_files')]),
                # (ex_last, wm_map, [('out_entry', 'image')]),
                (ex_last, file_load1, [('out_entry', 'in_filename')]),
                (file_load1, wm_removal, [('out_image', 'wm_mask')])
            ])

        elif (config['Analysis']['LesionCorrection']):
            # No white matter segmentation should be done, but lesion correction is expected.
            # White matter segmentation must be supplied
            wm_seg_path = config['WhiteMatterSegmentationFile']
            if len(wm_seg_path) == 0 or not os.path.exists(wm_seg_path):
                # Fall back to a previously written mask at the output location
                path_pattern = 'sub-{subject}/ses-{session}/anat/sub-{subject}_ses-{session}_space-' + \
                               config['Outputs']['StartRegistrationSpace'] + '_desc-WhiteMatter_mask{extension}'
                wm_map_filename = join(config['Outputs']['LesionCorrected'],
                                       path_pattern.format(**entities))
                if os.path.exists(wm_map_filename):
                    wm_seg_path = wm_map_filename
                else:
                    # Bugfix: the original raised when the supplied path was
                    # valid; the error belongs here, when no mask can be found
                    raise ValueError(
                        'Config file is inconsistent; if WhiteMatterSegmentation is false but LesionCorrection'
                        ' is true, then WhiteMatterSegmentationFile must be defined and must exist.'
                    )
            file_load1 = MapNode(Function(function=image_load,
                                          input_names=['in_filename'],
                                          output_names='out_image'),
                                 name='file_load1',
                                 iterfield='in_filename')
            file_load1.inputs.in_filename = wm_seg_path

            # Connect nodes in workflow
            wf.connect([(file_load1, wm_removal, [('out_image', 'wm_mask')])])

    # Connecting workflow.
    wf.connect([
        # Starter
        (loader, radio, [('t1w', 'in_file')]),
        (radio, bet, [('out_file', 'in_file')]),
        (bet, reg, [('out_file', 'in_file')]),
        (reg, apply_xfm, [('out_matrix_file', 'in_matrix_file')]),
        (mask_path_fetcher, apply_xfm, [('mask', 'in_file')]),
    ])

    try:
        graph_out = config['Outputs'][
            'LesionCorrected'] + '/sub-{subject}/ses-{session}/anat/'.format(
                **entities)
        wf.write_graph(graph2use='orig',
                       dotfilename=join(graph_out, 'graph.dot'),
                       format='png')
        os.remove(graph_out + 'graph.dot')
        os.remove(graph_out + 'graph_detailed.dot')
    except OSError:
        warnings.warn(
            "graphviz not installed; can't produce graph. See http://www.graphviz.org/download/ for "
            "installation instructions.")
    wf.run()
    return wf
Code example #12
File: utils.py Project: Neurita/pypes
def petpvc_mask(wf_name="petpvc_mask"):
    """ A Workflow that returns a 4D merge of 4 volumes for PETPVC: GM, WM, CSF and background.

    Parameters
    ----------
    wf_name: str
        The name of the workflow.

    Nipype.Inputs
    -------------
    pvcmask_input.tissues: list of existing files
        List of tissue files in anatomical space, the 3 file
        paths must be in this order: GM, WM, CSF

    Nipype.Outputs
    --------------
    pvcmask_output.petpvc_mask: existing file
        A 4D volume file with these maps in order: GM, WM, CSF, background

    pvcmask_output.brain_mask: existing file
        A mask that is a binarised sum of the tissues file with fslmaths.
        Can be used as brain mask in anatomical space for the PET image.

    Returns
    -------
    wf: nipype Workflow
    """
    # define nodes
    # specify input and output fields
    in_fields  = ["tissues"]

    out_fields = ["petpvc_mask",
                  "brain_mask",]

    # input
    pvcmask_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                               name="pvcmask_input")

    tissues = setup_node(IdentityInterface(fields=["gm", "wm", "csf"], mandatory_inputs=True),
                         name="tissues")

    merge_list = setup_node(Merge(4), name="merge_list")

    ## maths for background
    img_bkg = setup_node(Function(function=math_img,
                                  input_names=["formula", "out_file", "gm", "wm", "csf"],
                                  output_names=["out_file"],
                                  imports=['from pypes.interfaces.nilearn import ni2file']),
                          name='background')
    img_bkg.inputs.out_file = "tissue_bkg.nii.gz"
    img_bkg.inputs.formula  = "np.maximum((-((gm + wm + csf) - 1)), 0)"

    ## maths for brain mask
    brain_mask = setup_node(Function(function=math_img,
                                     input_names=["formula", "out_file", "gm", "wm", "csf"],
                                     output_names=["out_file"],
                                     imports=['from pypes.interfaces.nilearn import ni2file']),
                            name='brain_mask')
    brain_mask.inputs.out_file = "tissues_brain_mask.nii.gz"
    brain_mask.inputs.formula  = "np.abs(gm + wm + csf) > 0"

    ## concat the tissues images and the background for PETPVC
    merge_tissues = setup_node(Function(function=concat_imgs,
                                        input_names=["in_files"],
                                        output_names=["out_file"],
                                        imports=['from pypes.interfaces.nilearn import ni2file']),
                               name='merge_tissues')
    merge_tissues.inputs.out_file = "petpvc_mask.nii.gz"

    # output
    pvcmask_output = setup_node(IdentityInterface(fields=out_fields), name="pvcmask_output")

    # Create the workflow object
    wf = Workflow(name=wf_name)

    # Connect the nodes
    wf.connect([
                # split the tissues list into separate gm, wm and csf fields
                (pvcmask_input, tissues,    [(("tissues", selectindex, [0]), "gm"),
                                             (("tissues", selectindex, [1]), "wm"),
                                             (("tissues", selectindex, [2]), "csf"),
                                            ]),

                (tissues,       img_bkg,    [("gm", "gm" ), ("wm", "wm" ), ("csf", "csf"),]),
                (tissues,       brain_mask, [("gm", "gm" ), ("wm", "wm" ), ("csf", "csf"),]),
                (tissues,       merge_list, [("gm", "in1"), ("wm", "in2"), ("csf", "in3"),]),

                # create a list of [GM, WM, CSF, BKG]
                (img_bkg,       merge_list, [("out_file", "in4")]),

                # merge into 4D: [GM, WM, CSF, BKG]
                (merge_list,    merge_tissues,  [("out", "in_files")]),

                # output
                (merge_tissues, pvcmask_output, [("out_file", "petpvc_mask")]),
                (brain_mask,    pvcmask_output, [("out_file", "brain_mask")]),
              ])

    return wf
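
A minimal usage sketch for the workflow above; the tissue file paths are hypothetical placeholders, and the tissues list must be ordered [GM, WM, CSF] as the docstring requires:

wf = petpvc_mask(wf_name="petpvc_mask")
wf.inputs.pvcmask_input.tissues = [
    "/data/sub-01/gm.nii.gz",   # GM  (hypothetical path)
    "/data/sub-01/wm.nii.gz",   # WM  (hypothetical path)
    "/data/sub-01/csf.nii.gz",  # CSF (hypothetical path)
]
wf.run()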
Code example #13
File: utils.py Project: Neurita/pypes
def intensity_norm(wf_name='intensity_norm'):
    """ Workflow that uses a mask against a source from where the mean value will be taken.
    This mean value will be used to demean the whole source and leave it in out_file.

    Parameters
    ----------
    wf_name: str
        The name of the workflow.

    Nipype Inputs
    -------------
    intnorm_input.source: existing file
        The image from where to extract the signal values and normalize.

    intnorm_input.mask: existing file
        The mask to specify which voxels to use to calculate the statistics
        for normalization.

    Nipype Outputs
    --------------
    intnorm_output.out_file: existing file

    Returns
    -------
    wf: nipype Workflow
    """
    # specify input and output fields
    in_fields  = ["source",
                  "mask"]

    out_fields = ["out_file"]

    # input
    intnorm_input = setup_node(IdentityInterface(fields=in_fields, mandatory_inputs=True),
                               name="intnorm_input")

    # fix the affine matrix (it's necessary for some cases)
    resample = setup_node(Function(function=resample_to_img,
                                   input_names=["source", "target", "interpolation"],
                                   output_names=["out_file"],
                                   imports=['from pypes.interfaces.nilearn import ni2file']),
                          name="resample_mask")
    resample.inputs.interpolation = "nearest"

    # calculate masked mean value
    mean_val = setup_node(Function(function=math_img,
                                   input_names=["formula", "img", "mask"],
                                   output_names=["out_value"],
                                   imports=['from pypes.interfaces.nilearn import ni2file']),
                          name='mean_value')
    mean_val.inputs.formula = "np.mean(np.nonzero(img[mask > 0]))"

    # normalize
    norm_img = setup_node(Function(function=math_img,
                                   input_names=["formula", "out_file", "img", "val"],
                                   output_names=["out_file"],
                                   imports=['from pypes.interfaces.nilearn import ni2file']),
                          name='norm_img')
    norm_img.inputs.formula = "img / val"

    # output
    intnorm_output = setup_node(IdentityInterface(fields=out_fields),
                                name="intnorm_output")

    # Create the workflow object
    wf = Workflow(name=wf_name)

    wf.connect([
                # resample
                (intnorm_input, resample,  [("source",    "target"),
                                            ("mask",      "source")]),

                # normalize
                (intnorm_input, mean_val,  [("source",    "img" )]),
                (resample,      mean_val,  [("out_file",  "mask")]),

                (intnorm_input, norm_img,  [("source",    "img"),
                                            (("source", rename, "_intnormed"), "out_file"),
                                           ]),

                (mean_val,      norm_img,  [("out_value", "val")]),
                (norm_img, intnorm_output, [("out_file",  "out_file")]),
               ])

    return wf
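
As with petpvc_mask above, the workflow can be run standalone once its identity-node inputs are set; a minimal sketch with hypothetical paths:

wf = intensity_norm(wf_name="intensity_norm")
wf.inputs.intnorm_input.source = "/data/sub-01/pet.nii.gz"    # hypothetical path
wf.inputs.intnorm_input.mask = "/data/sub-01/gm_mask.nii.gz"  # hypothetical path
wf.run()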
Code example #14
# The following previously raised:
#   ValueError: BIDS root does not exist: /tmp/tmp9g7ddldw/bids-grabber/examples/examples/BIDS
# The problem was with base_dir; it must point at an existing BIDS root.
bg.inputs.base_dir = "/mnt/Filbey/Evan/MJXProcessing/examples/examples/BIDS"
bg.inputs.subject = 'M7500516'
# bg.inputs.output_query = {'T1w': dict(type='anat')}
res = bg.run()
res.outputs
print("done")
"""


def printMe(paths):
    print("\n\nanalyzing " + str(paths) + "\n\n")
    
analyzeANAT = Node(Function(function=printMe, input_names=["paths"],
                            output_names=[]), name="analyzeANAT")

bg_all = Node(BIDSDataGrabber(), name='bids-grabber')
bg_all.inputs.base_dir = '/mnt/Filbey/Evan/MJXProcessing/examples/examples/BIDS'
bg_all.inputs.output_query = {'ses': dict(type='session')}
bg_all.iterables = ('subject', [layout.get_subjects()[0]])  # iterables expects a list of values
wf = Workflow(name="bids_demo")
wf.connect(bg_all, "ses", analyzeANAT, "paths")  # field name matches the 'ses' output_query key
wf.run()


Code example #15
def create_workflow(bids_dir, output_dir, subject):

    wf_base_dir = os.path.join("{}".format(output_dir), "superres-mri",
                               "sub-{}".format(subject), "nipype")

    print("Ouput directory: {}".format(wf_base_dir))

    wf = Workflow(name="sinapp_nlmdenoise", base_dir=wf_base_dir)

    # Initialization
    if os.path.isfile(os.path.join(output_dir, "pypeline.log")):
        os.unlink(os.path.join(output_dir, "pypeline.log"))

    config.update_config({
        'logging': {
            'log_directory': os.path.join(output_dir),
            'log_to_file': True
        },
        'execution': {
            'remove_unnecessary_outputs': False,
            'stop_on_first_crash': True,
            'stop_on_first_rerun': False,
            'crashfile_format': "txt",
            'write_provenance': False,
        },
        'monitoring': {
            'enabled': True
        }
    })
    logging.update_logging(config)
    iflogger = logging.getLogger('nipype.interface')

    iflogger.info("**** Processing ****")

    bg = Node(BIDSDataGrabber(infields=['subject']), name='bids_grabber')
    bg.inputs.base_dir = bids_dir
    bg.inputs.subject = subject
    bg.inputs.index_derivatives = True
    bg.inputs.output_query = {
        'T2ws':
        dict(suffix='T2w', datatype='anat', extensions=[".nii", ".nii.gz"]),
        'masks':
        dict(suffix='mask', datatype='anat', extensions=[".nii", ".nii.gz"])
    }

    preparePaths = Node(interface=prepareDockerPaths(), name="preparePaths")
    preparePaths.inputs.local_dir = bids_dir
    preparePaths.inputs.docker_dir = '/fetaldata'

    nlmDenoise = Node(interface=MultipleBtkNLMDenoising(),
                      base_dir=os.path.join(output_dir, 'bids_demo'),
                      name='nlmDenoise')
    nlmDenoise.inputs.bids_dir = bids_dir
    nlmDenoise.inputs.weight = 0.1

    wf.connect(bg, "T2ws", preparePaths, "local_T2ws_paths")
    wf.connect(bg, "masks", preparePaths, "local_masks_paths")
    wf.connect(preparePaths, "docker_T2ws_paths", nlmDenoise, "input_images")
    wf.connect(preparePaths, "docker_masks_paths", nlmDenoise, "input_masks")

    return wf
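
A minimal invocation sketch for this function, assuming a BIDS dataset and an output directory exist at the hypothetical paths below:

if __name__ == '__main__':
    # Hypothetical paths and subject label.
    wf = create_workflow('/data/bids', '/data/output', '01')
    wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})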
Code example #16
Perform Feat preprocessing on given data files and then merge outputs.
Inputs are taken using DataGrabber interface.

'''


###############################################################################
#
#      CREATE MAIN WORKFLOW
#
###############################################################################

from nipype.pipeline import Workflow, Node

featreg_merge = Workflow(name='featreg_merge')


###############################################################################
#
#     CREATE FEAT REGISTRATION WORKFLOW NODE
#
###############################################################################

from nipype.workflows.fmri.fsl import create_featreg_preproc
import nipype.interfaces.fsl as fsl

preproc = create_featreg_preproc(highpass=True, whichvol='mean')
preproc.inputs.inputspec.fwhm = 0
preproc.inputs.inputspec.highpass = 128. / (2 * 2.5)
Code example #17
def get_wf_main(name='wf_main'):

    wf_main = Workflow(name=name)

    inputspec = Node(IdentityInterface(fields=[
        'resampled_anat_file_path', 'func2anat_mat_path',
        'reference_func_file_path', 'csf_tissue_prior_path',
        'wm_tissue_prior_path', 'threshold', 'std2func_mat_path',
        'brain_mask_eroded'
    ]),
                     name="inputspec")
    outputspec = Node(IdentityInterface(fields=[
        'csf_tissue_prior_path', 'wm_tissue_prior_path', 'qc_stats_dict'
    ]),
                      name="outputspec")

    tissue_priors = tp.get_wf_tissue_priors(name='wf_tissue_priors')
    tissue_masks = gm.get_wf_tissue_masks(name='wf_tissue_masks')

    def compute_qc_stats(anat_file_path, csf_mask, csf_prior, wm_mask,
                         wm_prior):
        import numpy as np
        import nibabel as nib
        from collections import OrderedDict as od

        # nibabel's get_data() is deprecated; use get_fdata() instead
        csf_prior_data = nib.load(csf_prior).get_fdata()
        wm_prior_data = nib.load(wm_prior).get_fdata()
        csf_mask_data = nib.load(csf_mask).get_fdata()
        wm_mask_data = nib.load(wm_mask).get_fdata()

        # A
        voxels_count_csf_prior = len((np.where(csf_prior_data == 1))[0])
        voxels_count_wm_prior = len((np.where(wm_prior_data == 1))[0])
        # B
        voxels_count_csf_mask = len((np.where(csf_mask_data == 1))[0])
        voxels_count_wm_mask = len((np.where(wm_mask_data == 1))[0])
        # A - B
        A_minus_B_csf = len(np.where((csf_prior_data - csf_mask_data) == 1)[0])
        A_minus_B_wm = len(np.where((wm_prior_data - wm_mask_data) == 1)[0])
        # B - A
        B_minus_A_csf = len(
            np.where((csf_prior_data - csf_mask_data) == -1)[0])
        B_minus_A_wm = len(np.where((wm_prior_data - wm_mask_data) == -1)[0])
        # A U B
        A_union_B_csf = len(np.where((csf_prior_data + csf_mask_data) != 0)[0])
        A_union_B_wm = len(np.where((wm_prior_data + wm_mask_data) != 0)[0])
        # A I B
        A_intersection_B_csf = len(
            np.where((csf_prior_data * csf_mask_data) == 1)[0])
        A_intersection_B_wm = len(
            np.where((wm_prior_data * wm_mask_data) == 1)[0])

        print('voxels_count_csf_prior ', voxels_count_csf_prior)
        print('voxels_count_wm_prior ', voxels_count_wm_prior)
        print('voxels_count_csf_mask ', voxels_count_csf_mask)
        print('voxels_count_wm_mask ', voxels_count_wm_mask)
        print('prior_minus_mask_csf ', A_minus_B_csf)
        print('prior_minus_mask_wm ', A_minus_B_wm)
        print('mask_minus_prior_csf ', B_minus_A_csf)
        print('mask_minus_prior_wm ', B_minus_A_wm)
        print('prior_union_mask_csf ', A_union_B_csf)
        print('prior_union_mask_wm ', A_union_B_wm)
        print('prior_intersection_mask_csf ', A_intersection_B_csf)
        print('prior_intersection_mask_wm ', A_intersection_B_wm)
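        # The quality score below is the Jaccard index (A I B) / (A U B):
        # 0 means no overlap between prior and mask, 1 means identical regions.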
        quality_csf = A_intersection_B_csf / A_union_B_csf
        quality_wm = A_intersection_B_wm / A_union_B_wm
        print('quality_csf ', quality_csf)
        print('quality_wm ', quality_wm)
        # Recall: A = prior, B = mask
        print('Anat File path ', anat_file_path)
        sub_id = anat_file_path.split('/')[-1].split('_')[0].split('-')[1]
        print('Sub ID ', sub_id)
        stats = od()
        stats['sub_id'] = [sub_id]
        stats['voxels_count_csf_prior'] = [voxels_count_csf_prior]
        stats['voxels_count_wm_prior'] = [voxels_count_wm_prior]
        stats['voxels_count_csf_mask'] = [voxels_count_csf_mask]
        stats['voxels_count_wm_mask'] = [voxels_count_wm_mask]
        stats['prior_minus_mask_csf'] = [A_minus_B_csf]
        stats['prior_minus_mask_wm'] = [A_minus_B_wm]
        stats['mask_minus_prior_csf'] = [B_minus_A_csf]
        stats['mask_minus_prior_wm'] = [B_minus_A_wm]
        stats['prior_union_mask_csf'] = [A_union_B_csf]
        stats['prior_union_mask_wm'] = [A_union_B_wm]
        stats['prior_intersection_mask_csf'] = [A_intersection_B_csf]
        stats['prior_intersection_mask_wm'] = [A_intersection_B_wm]
        stats['quality_csf'] = [quality_csf]
        stats['quality_wm'] = [quality_wm]

        return stats

    qc_stats = Node(Function(function=compute_qc_stats,
                             input_names=[
                                 'anat_file_path', 'csf_mask', 'csf_prior',
                                 'wm_mask', 'wm_prior'
                             ],
                             output_names=['dict']),
                    name='qc_stats')

    wf_main.connect(inputspec, 'csf_tissue_prior_path', tissue_priors,
                    'inputspec.csf_tissue_prior_path')
    wf_main.connect(inputspec, 'wm_tissue_prior_path', tissue_priors,
                    'inputspec.wm_tissue_prior_path')
    wf_main.connect(inputspec, 'threshold', tissue_priors,
                    'inputspec.threshold')
    wf_main.connect(inputspec, 'reference_func_file_path', tissue_priors,
                    'inputspec.reference_func_file_path')
    wf_main.connect(inputspec, 'std2func_mat_path', tissue_priors,
                    'inputspec.std2func_mat_path')

    wf_main.connect(tissue_priors, 'outputspec.csf_tissue_prior_path',
                    outputspec, 'csf_tissue_prior_path')
    wf_main.connect(tissue_priors, 'outputspec.wm_tissue_prior_path',
                    outputspec, 'wm_tissue_prior_path')

    wf_main.connect(inputspec, 'resampled_anat_file_path', tissue_masks,
                    'inputspec.resampled_anat_file_path')
    wf_main.connect(inputspec, 'reference_func_file_path', tissue_masks,
                    'inputspec.reference_func_file_path')
    wf_main.connect(inputspec, 'func2anat_mat_path', tissue_masks,
                    'inputspec.func2anat_mat_path')
    wf_main.connect(inputspec, 'std2func_mat_path', tissue_masks,
                    'inputspec.std2func_mat_path')
    wf_main.connect(inputspec, 'brain_mask_eroded', tissue_masks,
                    'inputspec.brain_mask_eroded')
    wf_main.connect(inputspec, 'threshold', tissue_masks,
                    'inputspec.threshold')

    # wf_main.connect(tissue_masks, 'outputspec.csf_mask', outputspec,'csf_mask')
    # wf_main.connect(tissue_masks, 'outputspec.wm_mask', outputspec,'wm_mask')

    wf_main.connect(tissue_priors, 'outputspec.csf_tissue_prior_path',
                    qc_stats, 'csf_prior')
    wf_main.connect(tissue_priors, 'outputspec.wm_tissue_prior_path', qc_stats,
                    'wm_prior')
    wf_main.connect(tissue_masks, 'outputspec.csf_mask', qc_stats, 'csf_mask')
    wf_main.connect(tissue_masks, 'outputspec.wm_mask', qc_stats, 'wm_mask')
    wf_main.connect(inputspec, 'resampled_anat_file_path', qc_stats,
                    'anat_file_path')

    wf_main.connect(qc_stats, 'dict', outputspec, 'qc_stats_dict')

    return wf_main
Code example #18
def get_wf_tissue_masks(name='wf_tissue_masks'):
    '''
    This function returns a workflow that resamples the T1 brains, extracts
    the tissue types, thresholds them at 0.5, and registers them to T2* space.
    It then registers the tissue priors to the T2* space and performs a
    bitwise AND between the two maps.
    '''
    # csf_tissue_prior_path, gm_tissue_prior_path, wm_tissue_prior_path,
    # threshold = 0.5

    wf_tissue_masks = Workflow(name=name)

    inputspec = Node(IdentityInterface(fields=[
        'resampled_anat_file_path', 'func2anat_mat_path', 'std2func_mat_path',
        'reference_func_file_path', 'brain_mask_eroded', 'threshold'
    ]),
                     name="inputspec")

    # FSL FAST node to segment the T1 brain
    fast = Node(FAST(out_basename='fast_'), name='fast')
    # probability_maps=True,segments=True,
    wf_tissue_masks.connect(inputspec, 'resampled_anat_file_path', fast,
                            'in_files')

    #  Invert the func2anat matrix to get anat2func
    inv_mat = Node(ConvertXFM(invert_xfm=True), name='inv_mat')
    wf_tissue_masks.connect(inputspec, 'func2anat_mat_path', inv_mat,
                            'in_file')

    # Transform the above segmented tissue masks to the functional space using the inverse matrix
    anat2func_xform_csf = Node(FLIRT(output_type='NIFTI',
                                     apply_xfm=True,
                                     interp='sinc'),
                               name='anat2func_xform_csf')

    wf_tissue_masks.connect(inputspec, 'reference_func_file_path',
                            anat2func_xform_csf, 'reference')
    wf_tissue_masks.connect(inv_mat, 'out_file', anat2func_xform_csf,
                            'in_matrix_file')

    anat2func_xform_wm = Node(FLIRT(output_type='NIFTI',
                                    apply_xfm=True,
                                    interp='sinc'),
                              name='anat2func_xform_wm')
    wf_tissue_masks.connect(inputspec, 'reference_func_file_path',
                            anat2func_xform_wm, 'reference')
    wf_tissue_masks.connect(inv_mat, 'out_file', anat2func_xform_wm,
                            'in_matrix_file')

    std2func_xform_eroded_brain = Node(FLIRT(output_type='NIFTI',
                                             apply_xfm=True,
                                             interp='nearestneighbour'),
                                       name='std2func_xform_eroded_brain')
    wf_tissue_masks.connect(inputspec, 'reference_func_file_path',
                            std2func_xform_eroded_brain, 'reference')
    wf_tissue_masks.connect(inputspec, 'std2func_mat_path',
                            std2func_xform_eroded_brain, 'in_matrix_file')

    def select_item_from_array(arr, index=0):
        import numpy as np
        arr = np.array(arr)
        return arr[index]
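
    # nipype's (source_field, function, *args) tuple syntax below applies
    # select_item_from_array to 'partial_volume_files' before passing it on;
    # FAST orders the maps [CSF, GM, WM], so index 0 is CSF and index 2 is WM.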

    wf_tissue_masks.connect(
        fast, ('partial_volume_files', select_item_from_array, 0),
        anat2func_xform_csf, 'in_file')
    wf_tissue_masks.connect(
        fast, ('partial_volume_files', select_item_from_array, 2),
        anat2func_xform_wm, 'in_file')
    wf_tissue_masks.connect(inputspec, 'brain_mask_eroded',
                            std2func_xform_eroded_brain, 'in_file')

    # Threshold

    def get_opstring(threshold):
        op = '-thr ' + str(threshold) + ' -bin'
        return op
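
    # e.g. get_opstring(0.5) returns '-thr 0.5 -bin': threshold at 0.5, then binarise.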

    # print(inputspec.outputs)
    # ----- CSF ------

    threshold_csf = Node(interface=ImageMaths(suffix='_thresh'),
                         name='threshold_csf')
    # threshold_csf.inputs.op_string = '-thresh '+str(inputspec.outputs.threshold)+' -bin'
    wf_tissue_masks.connect(inputspec, ('threshold', get_opstring),
                            threshold_csf, 'op_string')
    wf_tissue_masks.connect(anat2func_xform_csf, 'out_file', threshold_csf,
                            'in_file')

    # ------- GM --------

    # threshold_gm = Node(interface=ImageMaths(op_string='-thresh',
    #                                             suffix='_thresh'),
    #                    name='threshold_gm')
    #
    #
    # wf_tissue_priors.connect(inputspec, ('threshold', get_opstring), threshold_gm, 'op_string' )
    # wf_tissue_priors.connect(fast, partial_volume_map[1], threshold_gm, 'in_file')
    #
    # -------- WM --------

    threshold_wm = Node(interface=ImageMaths(suffix='_thresh'),
                        name='threshold_wm')
    wf_tissue_masks.connect(inputspec, ('threshold', get_opstring),
                            threshold_wm, 'op_string')
    wf_tissue_masks.connect(anat2func_xform_wm, 'out_file', threshold_wm,
                            'in_file')

    #  -------------------

    #
    # wf_tissue_masks.connect(threshold_csf, 'out_file', std2func_xform_csf, 'in_file')
    # wf_tissue_masks.connect(threshold_wm, 'out_file', std2func_xform_wm, 'in_file')

    # Masking the outer brain CSF

    csf_mask = Node(interface=ApplyMask(), name='csf_mask')
    wf_tissue_masks.connect(threshold_csf, 'out_file', csf_mask, 'in_file')
    wf_tissue_masks.connect(std2func_xform_eroded_brain, 'out_file', csf_mask,
                            'mask_file')

    # Masking the outer brain WM that might be present due to poor BET

    wm_mask = Node(interface=ApplyMask(), name='wm_mask')
    wf_tissue_masks.connect(threshold_wm, 'out_file', wm_mask, 'in_file')
    wf_tissue_masks.connect(std2func_xform_eroded_brain, 'out_file', wm_mask,
                            'mask_file')

    # wm_mask = Node(interface=ApplyMask(),
    #                    name='wm_mask')
    # wf_tissue_masks.connect(std2func_xform_wm, 'out_file', wm_mask, 'in_file')
    # wf_tissue_masks.connect(std2func_xform_wm_prior, 'out_file', wm_mask, 'mask_file')

    outputspec = Node(IdentityInterface(fields=['csf_mask', 'wm_mask']),
                      name="outputspec")

    wf_tissue_masks.connect(csf_mask, 'out_file', outputspec, 'csf_mask')
    # wf_tissue_priors.connect(threshold_gm, 'out_file', outputspec, 'gm_tissue_prior_path')
    wf_tissue_masks.connect(wm_mask, 'out_file', outputspec, 'wm_mask')

    return wf_tissue_masks
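
A minimal standalone sketch for the sub-workflow above; all file paths are hypothetical placeholders:

wf = get_wf_tissue_masks(name='wf_tissue_masks')
wf.inputs.inputspec.resampled_anat_file_path = '/data/sub-01/anat_resampled.nii'  # hypothetical
wf.inputs.inputspec.func2anat_mat_path = '/data/sub-01/func2anat.mat'             # hypothetical
wf.inputs.inputspec.std2func_mat_path = '/data/sub-01/std2func.mat'               # hypothetical
wf.inputs.inputspec.reference_func_file_path = '/data/sub-01/mean_func.nii'       # hypothetical
wf.inputs.inputspec.brain_mask_eroded = '/data/std_brain_mask_eroded.nii'         # hypothetical
wf.inputs.inputspec.threshold = 0.5
wf.run()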
Code example #19
'''
featreg_merge.py
script

Perform Feat preprocessing on given data files and then merge outputs.

'''

from nipype.workflows.fmri.fsl import create_featreg_preproc
import nipype.interfaces.fsl as fsl
from nipype.pipeline import Workflow, Node

# get filelist from file
nifti_filelist = open('nifti_filelist.txt').read().splitlines()

featreg_merge = Workflow(name='featreg_merge')

preproc = create_featreg_preproc(highpass=True, whichvol='mean')
preproc.inputs.inputspec.func = nifti_filelist
preproc.inputs.inputspec.fwhm = 0
preproc.inputs.inputspec.highpass = 128. / (2 * 2.5)
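# FSL expects the highpass cutoff as a sigma in volumes: cutoff_seconds / (2 * TR),
# here presumably a 128 s cutoff with TR = 2.5 s, i.e. 25.6 volumes.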
# preproc.base_dir = '/tmp/pre/working_dir'
# preproc.run()

merge = Node(interface=fsl.utils.Merge(dimension='t',
                                       output_type='NIFTI_GZ',
                                       merged_file='merged.nii.gz'),
             name='merge')
featreg_merge.connect(preproc, 'outputspec.highpassed_files', merge,
                      'in_files')
Code example #20
script

Perform Feat preprocessing on given data files and then merge outputs.
Inputs are taken using DataGrabber interface.

'''

###############################################################################
#
#      CREATE MAIN WORKFLOW
#
###############################################################################

from nipype.pipeline import Workflow, Node

featreg_merge = Workflow(name='featreg_merge')

###############################################################################
#
#     CREATE FEAT REGISTRATION WORKFLOW NODE
#
###############################################################################

from nipype.workflows.fmri.fsl import create_featreg_preproc
import nipype.interfaces.fsl as fsl

preproc = create_featreg_preproc(highpass=True, whichvol='mean')
preproc.inputs.inputspec.fwhm = 0
preproc.inputs.inputspec.highpass = 128. / (2 * 2.5)

###############################################################################
Code example #21
class AnatomicalPipeline:
    """Class used to represent the workflow of the Super-Resolution reconstruction pipeline.

    Attributes
    -----------
    bids_dir : string
        BIDS root directory (required)

    output_dir : string
        Output derivatives directory (required)

    subject : string
        Subject ID (in the form ``sub-XX``)

    wf : nipype.pipeline.Workflow
        Nipype workflow of the reconstruction pipeline

    deltatTV : string
        Super-resolution optimization time-step

    lambdaTV : float
        Regularization weight (default is 0.75)

    primal_dual_loops : string
        Number of primal/dual loops used in the optimization of the total-variation
        super-resolution algorithm.

    sr_id : string
        ID of the reconstruction, useful to distinguish between multiple
        reconstructions run on the same subject with different stack orders

    session : string
        Session ID if applicable (in the form ``ses-YY``)

    m_stacks : list(int)
        List of stacks to be used in the reconstruction.
        The specified order is kept if `skip_stacks_ordering` is True.

    m_masks_derivatives_dir : string
        Directory basename in the BIDS derivatives directory in which to search for masks (optional)

    m_skip_svr : bool
        Whether the Slice-to-Volume Registration should be skipped in the image
        reconstruction (default is False)

    m_do_refine_hr_mask : bool
        Whether a refinement of the HR mask should be performed (default is False)

    m_skip_nlm_denoising : bool
        Whether the NLM denoising preprocessing should be skipped (default is False)

    m_skip_stacks_ordering : bool (optional)
        Whether the automatic stacks ordering should be skipped (default is False)

    Examples
    --------
    >>> from pymialsrtk.pipelines.anatomical.srr import AnatomicalPipeline
    >>> # Create a new instance
    >>> pipeline = AnatomicalPipeline(bids_dir='/path/to/bids_dir',
                                      output_dir='/path/to/output_dir',
                                      subject='sub-01',
                                      p_stacks=[1,3,2,0],
                                      sr_id=1,
                                      session=None,
                                      paramTV={"deltatTV": "0.001",
                                               "lambdaTV": "0.75",
                                               "primal_dual_loops": "20"},
                                      p_masks_derivatives_dir="/custom/mask_dir",
                                      p_masks_desc=None,
                                      p_dict_custom_interfaces=None)
    >>> # Create the super resolution Nipype workflow
    >>> pipeline.create_workflow()
    >>> # Execute the workflow
    >>> res = pipeline.run()  # doctest: +SKIP

    """

    pipeline_name = "srr_pipeline"
    run_start_time = None
    run_end_time = None
    run_elapsed_time = None

    bids_dir = None
    output_dir = None
    subject = None
    wf = None
    deltatTV = "0.75"
    lambdaTV = "0.001"
    primal_dual_loops = "20"
    sr_id = None
    session = None

    m_stacks = None

    # Custom interfaces options
    m_skip_svr = None
    m_skip_nlm_denoising = None
    m_skip_stacks_ordering = None
    m_do_refine_hr_mask = None

    m_masks_derivatives_dir = None
    use_manual_masks = False
    m_masks_desc = None

    openmp_number_of_cores = None
    nipype_number_of_cores = None

    def __init__(self,
                 bids_dir,
                 output_dir,
                 subject,
                 p_stacks=None,
                 sr_id=1,
                 session=None,
                 paramTV=None,
                 p_masks_derivatives_dir=None,
                 p_masks_desc=None,
                 p_dict_custom_interfaces=None,
                 openmp_number_of_cores=None,
                 nipype_number_of_cores=None):
        """Constructor of AnatomicalPipeline class instance."""

        # BIDS processing parameters
        self.bids_dir = bids_dir
        self.output_dir = output_dir
        self.subject = subject
        self.sr_id = sr_id
        self.session = session
        self.m_stacks = p_stacks

        self.openmp_number_of_cores = openmp_number_of_cores
        self.nipype_number_of_cores = nipype_number_of_cores

        # (default) sr tv parameters
        if paramTV is None:
            paramTV = dict()
        self.deltatTV = paramTV["deltatTV"] if "deltatTV" in paramTV.keys(
        ) else 0.01
        self.lambdaTV = paramTV["lambdaTV"] if "lambdaTV" in paramTV.keys(
        ) else 0.75
        self.primal_dual_loops = paramTV[
            "primal_dual_loops"] if "primal_dual_loops" in paramTV.keys(
            ) else 10

        # Use manual/custom brain masks
        # If masks directory is not specified use the automated brain extraction method.
        self.m_masks_derivatives_dir = p_masks_derivatives_dir
        self.use_manual_masks = True if self.m_masks_derivatives_dir is not None else False
        self.m_masks_desc = p_masks_desc if self.use_manual_masks else None

        # Custom interfaces and default values.
        if p_dict_custom_interfaces is not None:
            self.m_skip_svr = p_dict_custom_interfaces[
                'skip_svr'] if 'skip_svr' in p_dict_custom_interfaces.keys(
                ) else False
            self.m_do_refine_hr_mask = p_dict_custom_interfaces[
                'do_refine_hr_mask'] if 'do_refine_hr_mask' in p_dict_custom_interfaces.keys(
                ) else False
            self.m_skip_nlm_denoising = p_dict_custom_interfaces[
                'skip_nlm_denoising'] if 'skip_nlm_denoising' in p_dict_custom_interfaces.keys(
                ) else False

            self.m_skip_stacks_ordering = p_dict_custom_interfaces['skip_stacks_ordering'] if \
                ((self.m_stacks is not None) and ('skip_stacks_ordering' in p_dict_custom_interfaces.keys())) else False
        else:
            self.m_skip_svr = False
            self.m_do_refine_hr_mask = False
            self.m_skip_nlm_denoising = False
            self.m_skip_stacks_ordering = False

    def create_workflow(self):
        """Create the Niype workflow of the super-resolution pipeline.

        It is composed of a succession of Nodes and their corresponding parameters,
        where the output of node i goes to the input of node i+1.

        """
        sub_ses = self.subject
        if self.session is not None:
            sub_ses = ''.join([sub_ses, '_', self.session])

        if self.session is None:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk",
                                                   __version__]), self.subject)
        else:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, self.session, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk", __version__]),
                                         self.subject, self.session)

        if not os.path.exists(wf_base_dir):
            os.makedirs(wf_base_dir)
        print("Process directory: {}".format(wf_base_dir))

        # Initialization (Not sure we can control the name of nipype log)
        if os.path.isfile(os.path.join(wf_base_dir, "pypeline.log")):
            os.unlink(os.path.join(wf_base_dir, "pypeline.log"))

        self.wf = Workflow(name=self.pipeline_name, base_dir=wf_base_dir)

        config.update_config({
            'logging': {
                'log_directory': os.path.join(wf_base_dir),
                'log_to_file': True
            },
            'execution': {
                'remove_unnecessary_outputs': False,
                'stop_on_first_crash': True,
                'stop_on_first_rerun': False,
                'crashfile_format': "txt",
                'use_relative_paths': True,
                'write_provenance': False
            }
        })

        # Update nipype logging with config
        nipype_logging.update_logging(config)
        # config.enable_provenance()

        if self.use_manual_masks:
            dg = Node(interface=DataGrabber(outfields=['T2ws', 'masks']),
                      name='data_grabber')
            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            if self.session is not None:
                t2ws_template = os.path.join(
                    self.subject, self.session, 'anat',
                    '_'.join([sub_ses, '*run-*', '*T2w.nii.gz']))
                if self.m_masks_desc is not None:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat',
                        '_'.join([sub_ses, '*run-*', '*mask.nii.gz']))
            else:
                t2ws_template = os.path.join(self.subject, 'anat',
                                             sub_ses + '*_run-*_T2w.nii.gz')

                if self.m_masks_desc is not None:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, 'anat', sub_ses + '*_run-*_*mask.nii.gz')

            dg.inputs.field_template = dict(T2ws=t2ws_template,
                                            masks=masks_template)

            brainMask = MapNode(
                interface=IdentityInterface(fields=['out_file']),
                name='brain_masks_bypass',
                iterfield=['out_file'])

            if self.m_stacks is not None:
                custom_masks_filter = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='custom_masks_filter')
                custom_masks_filter.inputs.stacks_id = self.m_stacks

        else:
            dg = Node(interface=DataGrabber(outfields=['T2ws']),
                      name='data_grabber')

            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            dg.inputs.field_template = dict(
                T2ws=os.path.join(self.subject, 'anat', sub_ses +
                                  '*_run-*_T2w.nii.gz'))
            if self.session is not None:
                dg.inputs.field_template = dict(T2ws=os.path.join(
                    self.subject, self.session, 'anat', '_'.join(
                        [sub_ses, '*run-*', '*T2w.nii.gz'])))

            if self.m_stacks is not None:
                t2ws_filter_prior_masks = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='t2ws_filter_prior_masks')
                t2ws_filter_prior_masks.inputs.stacks_id = self.m_stacks

            brainMask = MapNode(interface=preprocess.BrainExtraction(),
                                name='brainExtraction',
                                iterfield=['in_file'])

            brainMask.inputs.bids_dir = self.bids_dir
            brainMask.inputs.in_ckpt_loc = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_localization",
                             "Unet.ckpt-88000.index")).split('.index')[0]
            brainMask.inputs.threshold_loc = 0.49
            brainMask.inputs.in_ckpt_seg = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_segmentation",
                             "Unet.ckpt-20000.index")).split('.index')[0]
            brainMask.inputs.threshold_seg = 0.5

        t2ws_filtered = Node(interface=preprocess.FilteringByRunid(),
                             name='t2ws_filtered')
        masks_filtered = Node(interface=preprocess.FilteringByRunid(),
                              name='masks_filtered')

        if not self.m_skip_stacks_ordering:
            stacksOrdering = Node(interface=preprocess.StacksOrdering(),
                                  name='stackOrdering')
        else:
            stacksOrdering = Node(
                interface=IdentityInterface(fields=['stacks_order']),
                name='stackOrdering')
            stacksOrdering.inputs.stacks_order = self.m_stacks

        if not self.m_skip_nlm_denoising:
            nlmDenoise = MapNode(interface=preprocess.BtkNLMDenoising(),
                                 name='nlmDenoise',
                                 iterfield=['in_file', 'in_mask'])
            nlmDenoise.inputs.bids_dir = self.bids_dir

            # Without the mask, the first slice intensity correction...
            srtkCorrectSliceIntensity01_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity01_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity01_nlm.inputs.bids_dir = self.bids_dir
            srtkCorrectSliceIntensity01_nlm.inputs.out_postfix = '_uni'

        srtkCorrectSliceIntensity01 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity01',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity01.inputs.bids_dir = self.bids_dir
        srtkCorrectSliceIntensity01.inputs.out_postfix = '_uni'

        srtkSliceBySliceN4BiasFieldCorrection = MapNode(
            interface=preprocess.MialsrtkSliceBySliceN4BiasFieldCorrection(),
            name='srtkSliceBySliceN4BiasFieldCorrection',
            iterfield=['in_file', 'in_mask'])
        srtkSliceBySliceN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        srtkSliceBySliceCorrectBiasField = MapNode(
            interface=preprocess.MialsrtkSliceBySliceCorrectBiasField(),
            name='srtkSliceBySliceCorrectBiasField',
            iterfield=['in_file', 'in_mask', 'in_field'])
        srtkSliceBySliceCorrectBiasField.inputs.bids_dir = self.bids_dir

        # 4-modules sequence to be defined as a stage.
        if not self.m_skip_nlm_denoising:
            srtkCorrectSliceIntensity02_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity02_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity02_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization01_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization01_nlm')
            srtkIntensityStandardization01_nlm.inputs.bids_dir = self.bids_dir

            srtkHistogramNormalization_nlm = Node(
                interface=preprocess.MialsrtkHistogramNormalization(),
                name='srtkHistogramNormalization_nlm')
            srtkHistogramNormalization_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization02_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization02_nlm')
            srtkIntensityStandardization02_nlm.inputs.bids_dir = self.bids_dir

        # 4-modules sequence to be defined as a stage.
        srtkCorrectSliceIntensity02 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity02',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity02.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization01 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization01')
        srtkIntensityStandardization01.inputs.bids_dir = self.bids_dir

        srtkHistogramNormalization = Node(
            interface=preprocess.MialsrtkHistogramNormalization(),
            name='srtkHistogramNormalization')
        srtkHistogramNormalization.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization02 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization02')
        srtkIntensityStandardization02.inputs.bids_dir = self.bids_dir

        srtkMaskImage01 = MapNode(interface=preprocess.MialsrtkMaskImage(),
                                  name='srtkMaskImage01',
                                  iterfield=['in_file', 'in_mask'])
        srtkMaskImage01.inputs.bids_dir = self.bids_dir

        srtkImageReconstruction = Node(
            interface=reconstruction.MialsrtkImageReconstruction(),
            name='srtkImageReconstruction')
        srtkImageReconstruction.inputs.bids_dir = self.bids_dir
        srtkImageReconstruction.inputs.sub_ses = sub_ses
        srtkImageReconstruction.inputs.no_reg = self.m_skip_svr

        srtkTVSuperResolution = Node(
            interface=reconstruction.MialsrtkTVSuperResolution(),
            name='srtkTVSuperResolution')
        srtkTVSuperResolution.inputs.bids_dir = self.bids_dir
        srtkTVSuperResolution.inputs.sub_ses = sub_ses
        srtkTVSuperResolution.inputs.in_loop = self.primal_dual_loops
        srtkTVSuperResolution.inputs.in_deltat = self.deltatTV
        srtkTVSuperResolution.inputs.in_lambda = self.lambdaTV
        srtkTVSuperResolution.inputs.use_manual_masks = self.use_manual_masks

        srtkN4BiasFieldCorrection = Node(
            interface=postprocess.MialsrtkN4BiasFieldCorrection(),
            name='srtkN4BiasFieldCorrection')
        srtkN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        if self.m_do_refine_hr_mask:
            srtkHRMask = Node(
                interface=postprocess.MialsrtkRefineHRMaskByIntersection(),
                name='srtkHRMask')
            srtkHRMask.inputs.bids_dir = self.bids_dir
        else:
            srtkHRMask = Node(interface=postprocess.BinarizeImage(),
                              name='srtkHRMask')

        srtkMaskImage02 = Node(interface=preprocess.MialsrtkMaskImage(),
                               name='srtkMaskImage02')
        srtkMaskImage02.inputs.bids_dir = self.bids_dir

        # Build workflow: nodes are ready, link their connections
        if self.use_manual_masks:
            if self.m_stacks is not None:
                self.wf.connect(dg, "masks", custom_masks_filter,
                                "input_files")
                self.wf.connect(custom_masks_filter, "output_files", brainMask,
                                "out_file")
            else:
                self.wf.connect(dg, "masks", brainMask, "out_file")
        else:
            if self.m_stacks is not None:
                self.wf.connect(dg, "T2ws", t2ws_filter_prior_masks,
                                "input_files")
                self.wf.connect(t2ws_filter_prior_masks, "output_files",
                                brainMask, "in_file")
            else:
                self.wf.connect(dg, "T2ws", brainMask, "in_file")

        if not self.m_skip_stacks_ordering:
            self.wf.connect(brainMask, "out_file", stacksOrdering,
                            "input_masks")

        self.wf.connect(stacksOrdering, "stacks_order", t2ws_filtered,
                        "stacks_id")
        self.wf.connect(dg, "T2ws", t2ws_filtered, "input_files")

        self.wf.connect(stacksOrdering, "stacks_order", masks_filtered,
                        "stacks_id")
        self.wf.connect(brainMask, "out_file", masks_filtered, "input_files")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(t2ws_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_mask")  ## Comment to match docker process

            self.wf.connect(nlmDenoise, ("out_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_mask")

        self.wf.connect(t2ws_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkCorrectSliceIntensity01_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        else:
            self.wf.connect(srtkCorrectSliceIntensity01,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceN4BiasFieldCorrection, "in_mask")

        self.wf.connect(srtkCorrectSliceIntensity01,
                        ("out_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_file")
        self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                        ("out_fld_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_field")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                            ("out_im_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_mask")
            self.wf.connect(srtkCorrectSliceIntensity02_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkIntensityStandardization01_nlm, "input_images")
            self.wf.connect(srtkIntensityStandardization01_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_masks")
            self.wf.connect(srtkHistogramNormalization_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkIntensityStandardization02_nlm, "input_images")

        self.wf.connect(srtkSliceBySliceCorrectBiasField,
                        ("out_im_file", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_mask")
        self.wf.connect(srtkCorrectSliceIntensity02,
                        ("out_file", utils.sort_ascending),
                        srtkIntensityStandardization01, "input_images")

        self.wf.connect(srtkIntensityStandardization01,
                        ("output_images", utils.sort_ascending),
                        srtkHistogramNormalization, "input_images")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkHistogramNormalization, "input_masks")
        self.wf.connect(srtkHistogramNormalization,
                        ("output_images", utils.sort_ascending),
                        srtkIntensityStandardization02, "input_images")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkIntensityStandardization02_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")
        else:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")

        self.wf.connect(srtkMaskImage01, "out_im_file",
                        srtkImageReconstruction, "input_images")
        self.wf.connect(masks_filtered, "output_files",
                        srtkImageReconstruction, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order",
                        srtkImageReconstruction, "stacks_order")

        self.wf.connect(srtkIntensityStandardization02, "output_images",
                        srtkTVSuperResolution, "input_images")
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending),
                        srtkTVSuperResolution, "input_transforms")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkTVSuperResolution, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order", srtkTVSuperResolution,
                        "stacks_order")

        self.wf.connect(srtkImageReconstruction, "output_sdi",
                        srtkTVSuperResolution, "input_sdi")

        if self.m_do_refine_hr_mask:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkHRMask, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), srtkHRMask,
                            "input_masks")
            self.wf.connect(srtkImageReconstruction,
                            ("output_transforms", utils.sort_ascending),
                            srtkHRMask, "input_transforms")
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_sr")
        else:
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_image")

        self.wf.connect(srtkTVSuperResolution, "output_sr", srtkMaskImage02,
                        "in_file")
        self.wf.connect(srtkHRMask, "output_srmask", srtkMaskImage02,
                        "in_mask")

        self.wf.connect(srtkTVSuperResolution, "output_sr",
                        srtkN4BiasFieldCorrection, "input_image")
        self.wf.connect(srtkHRMask, "output_srmask", srtkN4BiasFieldCorrection,
                        "input_mask")

        # Datasinker
        finalFilenamesGeneration = Node(
            interface=postprocess.FilenamesGeneration(), name='filenames_gen')
        finalFilenamesGeneration.inputs.sub_ses = sub_ses
        finalFilenamesGeneration.inputs.sr_id = self.sr_id
        finalFilenamesGeneration.inputs.use_manual_masks = self.use_manual_masks

        self.wf.connect(stacksOrdering, "stacks_order",
                        finalFilenamesGeneration, "stacks_order")

        datasink = Node(interface=DataSink(), name='data_sinker')
        datasink.inputs.base_directory = final_res_dir

        if not self.m_skip_stacks_ordering:
            self.wf.connect(stacksOrdering, "report_image", datasink,
                            'figures.@stackOrderingQC')
            self.wf.connect(stacksOrdering, "motion_tsv", datasink,
                            'anat.@motionTSV')
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        datasink, 'anat.@LRmasks')
        self.wf.connect(srtkIntensityStandardization02,
                        ("output_images", utils.sort_ascending), datasink,
                        'anat.@LRsPreproc')
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending), datasink,
                        'xfm.@transforms')
        self.wf.connect(finalFilenamesGeneration, "substitutions", datasink,
                        "substitutions")
        self.wf.connect(srtkMaskImage01, ("out_im_file", utils.sort_ascending),
                        datasink, 'anat.@LRsDenoised')
        self.wf.connect(srtkImageReconstruction, "output_sdi", datasink,
                        'anat.@SDI')
        self.wf.connect(srtkN4BiasFieldCorrection, "output_image", datasink,
                        'anat.@SR')
        self.wf.connect(srtkTVSuperResolution, "output_json_path", datasink,
                        'anat.@SRjson')
        self.wf.connect(srtkTVSuperResolution, "output_sr_png", datasink,
                        'figures.@SRpng')
        self.wf.connect(srtkHRMask, "output_srmask", datasink, 'anat.@SRmask')

    def run(self, memory=None):
        """Execute the workflow of the super-resolution reconstruction pipeline.

        The Nipype execution engine manages and runs all processing steps
        involved in the super-resolution reconstruction pipeline. Note that
        the complete execution graph is saved as a PNG image to make the
        whole processing transparent.

        Parameters
        ----------
        memory : int
            Maximal memory (in GB) used by the workflow
        """

        # Use nipype.interface logger to print some information messages
        iflogger = nipype_logging.getLogger('nipype.interface')
        iflogger.info("**** Workflow graph creation ****")
        self.wf.write_graph(dotfilename='graph.dot',
                            graph2use='colored',
                            format='png',
                            simple_form=True)

        # Copy and rename the generated "graph.png" image
        src = os.path.join(self.wf.base_dir, self.wf.name, 'graph.png')
        if self.session is not None:
            dst = os.path.join(
                self.output_dir, '-'.join(["pymialsrtk", __version__]),
                self.subject, self.session, 'figures',
                f'{self.subject}_{self.session}_rec-SR_id-{self.sr_id}_desc-processing_graph.png'
            )
        else:
            dst = os.path.join(
                self.output_dir, '-'.join(["pymialsrtk", __version__]),
                self.subject, 'figures',
                f'{self.subject}_rec-SR_id-{self.sr_id}_desc-processing_graph.png'
            )
        # Create the figures/ and parent directories if they do not exist
        figures_dir = os.path.dirname(dst)
        os.makedirs(figures_dir, exist_ok=True)
        # Make the copy
        iflogger.info(f'\t > Copy {src} to {dst}...')
        shutil.copy(src=src, dst=dst)

        # Create dictionary of arguments passed to plugin_args
        args_dict = {
            'raise_insufficient': False,
            'n_procs': self.nipype_number_of_cores
        }

        if (memory is not None) and (memory > 0):
            args_dict['memory_gb'] = memory

        iflogger.info("**** Processing ****")
        # datetime object containing current start date and time
        start = datetime.now()
        self.run_start_time = start.strftime("%B %d, %Y / %H:%M:%S")
        print(f" Start date / time : {self.run_start_time}")

        # Execute the workflow
        if self.nipype_number_of_cores > 1:
            res = self.wf.run(plugin='MultiProc', plugin_args=args_dict)
        else:
            res = self.wf.run()

        # Copy and rename the workflow execution log
        src = os.path.join(self.wf.base_dir, "pypeline.log")
        if self.session is not None:
            dst = os.path.join(
                self.output_dir, '-'.join(["pymialsrtk", __version__]),
                self.subject, self.session, 'logs',
                f'{self.subject}_{self.session}_rec-SR_id-{self.sr_id}_log.txt'
            )
        else:
            dst = os.path.join(
                self.output_dir, '-'.join(["pymialsrtk",
                                           __version__]), self.subject, 'logs',
                f'{self.subject}_rec-SR_id-{self.sr_id}_log.txt')
        # Create the logs/ and parent directories if they do not exist
        logs_dir = os.path.dirname(dst)
        os.makedirs(logs_dir, exist_ok=True)
        # Make the copy
        iflogger.info(f'\t > Copy {src} to {dst}...')
        shutil.copy(src=src, dst=dst)

        # datetime object containing current end date and time
        end = datetime.now()
        self.run_end_time = end.strftime("%B %d, %Y / %H:%M:%S")
        print(f" End date / time : {self.run_end_time}")

        # Compute elapsed running time in minutes and seconds
        duration = end - start
        (minutes, seconds) = divmod(duration.total_seconds(), 60)
        self.run_elapsed_time = f'{int(minutes)} minutes and {int(seconds)} seconds'
        print(f" Elapsed time: {self.run_end_time}")

        iflogger.info("**** Write dataset derivatives description ****")
        for toolbox in ["pymialsrtk", "nipype"]:
            write_bids_derivative_description(bids_dir=self.bids_dir,
                                              deriv_dir=self.output_dir,
                                              pipeline_name=toolbox)

        iflogger.info("**** Super-resolution HTML report creation ****")
        self.create_subject_report()

        return res

    def create_subject_report(self):
        """Create the HTML report"""
        # Set main subject derivatives directory
        if self.session is None:
            sub_ses = self.subject
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk",
                                                   __version__]), self.subject)
        else:
            sub_ses = f'{self.subject}_{self.session}'
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk", __version__]),
                                         self.subject, self.session)
        # Get the HTML report template
        path = pkg_resources.resource_filename(
            'pymialsrtk', "data/report/templates/template.html")
        jinja_template_dir = os.path.dirname(path)

        file_loader = FileSystemLoader(jinja_template_dir)
        env = Environment(loader=file_loader)

        template = env.get_template('template.html')

        # Load main data derivatives necessary for the report
        sr_nii_image = os.path.join(
            final_res_dir, 'anat',
            f'{sub_ses}_rec-SR_id-{self.sr_id}_T2w.nii.gz')
        img = nib.load(sr_nii_image)
        sx, sy, sz = img.header.get_zooms()

        sr_json_metadata = os.path.join(
            final_res_dir, 'anat',
            f'{sub_ses}_rec-SR_id-{self.sr_id}_T2w.json')
        with open(sr_json_metadata) as f:
            sr_json_metadata = json.load(f)

        workflow_image = os.path.join(
            '..', 'figures',
            f'{sub_ses}_rec-SR_id-{self.sr_id}_desc-processing_graph.png')

        sr_png_image = os.path.join(
            '..', 'figures', f'{sub_ses}_rec-SR_id-{self.sr_id}_T2w.png')

        motion_report_image = os.path.join(
            '..', 'figures',
            f'{sub_ses}_rec-SR_id-{self.sr_id}_desc-motion_stats.png')

        log_file = os.path.join('..', 'logs',
                                f'{sub_ses}_rec-SR_id-{self.sr_id}_log.txt')

        # Create the text for {{subject}} and {{session}} fields in template
        report_subject_text = f'{self.subject.split("-")[-1]}'
        if self.session is not None:
            report_session_text = f'{self.session.split("-")[-1]}'
        else:
            report_session_text = None

        # Generate the report
        report_html_content = template.render(
            subject=report_subject_text,
            session=report_session_text,
            processing_datetime=self.run_start_time,
            run_time=self.run_elapsed_time,
            log=log_file,
            sr_id=self.sr_id,
            stacks=self.m_stacks,
            svr="on" if not self.m_skip_svr else "off",
            nlm_denoising="on" if not self.m_skip_nlm_denoising else "off",
            stacks_ordering="on" if not self.m_skip_stacks_ordering else "off",
            do_refine_hr_mask="on" if self.m_do_refine_hr_mask else "off",
            use_auto_masks="on"
            if self.m_masks_derivatives_dir is None else "off",
            custom_masks_dir=self.m_masks_derivatives_dir
            if self.m_masks_derivatives_dir is not None else None,
            sr_resolution=f"{sx} x {sy} x {sz} mm<sup>3</sup>",
            sr_json_metadata=sr_json_metadata,
            workflow_graph=workflow_image,
            sr_png_image=sr_png_image,
            motion_report_image=motion_report_image,
            version=__version__,
            os=f'{platform.system()} {platform.release()}',
            python=f'{sys.version}',
            openmp_threads=self.openmp_number_of_cores,
            nipype_threads=self.nipype_number_of_cores,
            jinja_version=__jinja2_version__)
        # Create the report directory if it does not exist
        report_dir = os.path.join(final_res_dir, 'report')
        os.makedirs(report_dir, exist_ok=True)

        # Save the HTML report file
        out_report_filename = os.path.join(report_dir, f'{sub_ses}.html')
        print(f'\t* Save HTML report as {out_report_filename}...')
        with open(out_report_filename, "w+") as file:
            file.write(report_html_content)
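
To show how the run() and create_subject_report() methods above are meant to be driven, here is a minimal, hypothetical sketch; the class name, import path, and constructor arguments are assumptions, not taken from the snippet:

# Hypothetical driver (class name, module path and arguments are assumptions)
from pymialsrtk.pipelines.anatomical.srr import AnatomicalPipeline

pipeline = AnatomicalPipeline(
    bids_dir='/data/bids',                # assumed BIDS dataset root
    output_dir='/data/bids/derivatives',  # assumed derivatives directory
    subject='sub-01',
    sr_id=1,
)
pipeline.create_workflow()    # builds self.wf (assumed builder method)
res = pipeline.run(memory=8)  # executes the workflow, copies the graph and log
# run() ends by calling create_subject_report(), which writes
# <output_dir>/pymialsrtk-<version>/sub-01/report/sub-01.html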
Code example #22
File: featreg_merge.py  Project: mikbuch/pymri
'''
featreg_merge.py
script

Perform Feat preprocessing on given data files and then merge outputs.

'''

from nipype.workflows.fmri.fsl import create_featreg_preproc
import nipype.interfaces.fsl as fsl
from nipype.pipeline import Workflow, Node

# get filelist from file
nifti_filelist = open('nifti_filelist.txt').read().splitlines()

featreg_merge = Workflow(name='featreg_merge')

preproc = create_featreg_preproc(highpass=True, whichvol='mean')
preproc.inputs.inputspec.func = nifti_filelist
preproc.inputs.inputspec.fwhm = 0
preproc.inputs.inputspec.highpass = 128./(2*2.5)
# preproc.base_dir = '/tmp/pre/working_dir'
# preproc.run() 


merge = Node(
    interface=fsl.utils.Merge(
        dimension='t',
        output_type='NIFTI_GZ',
        merged_file='merged.nii.gz'
        ),
    name='merge'  # assumed node name; the snippet is truncated here in the source
    )
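
The snippet above ends mid-script in the source; a plausible continuation, assumed by analogy with the merge wiring in the next example, would connect the Feat outputs to the merge node and run the workflow:

# plausible continuation (assumption), mirroring the next example
featreg_merge.connect(preproc, 'outputspec.highpassed_files', merge, 'in_files')
featreg_merge.base_dir = '/tmp/featreg_merge_work'  # assumed working directory
featreg_merge.run()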
Code example #23
File: mvpa_preproc.py  Project: mikbuch/pymri
def create_featreg_merge(run, whichvol_glob, name="featregmerge"):
    ###########################################################################
    #
    #     FEATREG_MERGE WORKFLOW
    #
    ###########################################################################

    featregmerge = Workflow(name=name)

    inputnode = Node(
        interface=util.IdentityInterface(fields=["in_sub", "in_hand", "run", "whichvol_glob"]), name="inputspec"
    )
    # inputnode.inputs.in_sub = 'GK011RZJA'

    ###########################################################################
    #
    #     DATA GRABBER NODE
    #
    ###########################################################################

    ds = Node(DataGrabber(infields=["subject_id", "hand"], outfields=["func"]), name="datasource")
    ds.inputs.base_directory = opap(base_directory)
    ds.inputs.template = "%s/%s_Hand/*.nii*"
    ds.inputs.sort_filelist = True
    # ds.inputs.subject_id = 'GK011RZJA'
    # ds.inputs.hand = 'Left'

    featregmerge.connect(inputnode, "in_hand", ds, "hand")
    featregmerge.connect(inputnode, "in_sub", ds, "subject_id")

    """
        To print the list of files being taken uncomment the following lines.
    """
    #  functional_input = ds.run().outputs
    #  input_files = functional_input.get()['func']
    #  print input_files

    ###########################################################################
    #
    #     CREATE FEAT REGISTRATION WORKFLOW NODE
    #
    ###########################################################################

    preproc = create_featreg_preproc(highpass=True, whichvol="first")
    preproc.inputs.inputspec.fwhm = 0
    preproc.inputs.inputspec.highpass = 128.0 / (2 * 2.5)

    # remove_nodes takes list as an argument
    preproc.remove_nodes([preproc.get_node("extractref")])
    """
    preproc.disconnect(
        preproc.get_node('extractref'), 'roi_file',
        preproc.get_node('realign'), 'ref_file'
        )
    """
    featregmerge.connect(ds, "func", preproc, "inputspec.func")

    ###########################################################################
    #
    #     MERGE NODE
    #
    ###########################################################################

    merge = Node(
        interface=fsl.utils.Merge(dimension="t", output_type="NIFTI_GZ", merged_file="bold.nii.gz"), name="merge"
    )
    featregmerge.connect(preproc, "outputspec.highpassed_files", merge, "in_files")

    masksnode = Node(
        interface=fsl.utils.Merge(dimension="t", output_type="NIFTI_GZ", merged_file="masks_merged.nii.gz"),
        name="masksnode",
    )
    featregmerge.connect(preproc, "outputspec.mask", masksnode, "in_files")

    # ### SPLIT MERGED MASKS ##################################################

    splitnode = Node(interface=fsl.utils.Split(dimension="t", output_type="NIFTI_GZ"), name="splitnode")
    featregmerge.connect(masksnode, "merged_file", splitnode, "in_file")

    return featregmerge
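
A minimal usage sketch for create_featreg_merge; the working directory is an assumption, and the module-level base_directory global referenced by the DataGrabber must already be defined:

wf = create_featreg_merge(run=1, whichvol_glob='first')
wf.base_dir = '/tmp/featregmerge_work'    # assumed working directory
wf.inputs.inputspec.in_sub = 'GK011RZJA'  # subject id taken from the comments above
wf.inputs.inputspec.in_hand = 'Left'      # hand label taken from the comments above
wf.run()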
Code example #24
def create_fs_compatible_logb_workflow(name="LOGISMOSB", plugin_args=None, config=None):
    """
    Create a workflow to run LOGISMOS-B from FreeSurfer Inputs

    :param name:
    :param plugin_args:
    :param config:
    :return:
    """

    if not config:
        config = read_json_config("fs_logb_config.json")

    wf = Workflow(name)

    inputspec = Node(
        IdentityInterface(
            [
                "t1_file",
                "t2_file",
                "white",
                "aseg",
                "hemi",
                "recoding_file",
                "gm_proba",
                "wm_proba",
                "lut_file",
                "hncma_atlas",
            ]
        ),
        name="inputspec",
    )

    # convert the white mesh to a vtk file with scanner coordinates
    to_vtk = Node(MRIsConvert(), name="WhiteVTK")
    to_vtk.inputs.out_file = "white.vtk"
    to_vtk.inputs.to_scanner = True

    wf.connect(inputspec, "white", to_vtk, "in_file")

    # convert brainslabels to nifti
    aseg_to_nifti = Node(MRIConvert(), "ABCtoNIFTI")
    aseg_to_nifti.inputs.out_file = "aseg.nii.gz"
    aseg_to_nifti.inputs.out_orientation = "LPS"
    wf.connect(inputspec, "aseg", aseg_to_nifti, "in_file")

    # create brainslabels from aseg
    aseg2brains = Node(
        Function(["in_file", "recode_file", "out_file"], ["out_file"], recode_labelmap),
        name="ConvertAseg2BRAINSLabels",
    )
    aseg2brains.inputs.out_file = "brainslabels.nii.gz"

    wf.connect(
        [
            (inputspec, aseg2brains, [("recoding_file", "recode_file")]),
            (aseg_to_nifti, aseg2brains, [("out_file", "in_file")]),
        ]
    )

    t1_to_nifti = Node(MRIConvert(), "T1toNIFTI")
    t1_to_nifti.inputs.out_file = "t1.nii.gz"
    t1_to_nifti.inputs.out_orientation = "LPS"
    wf.connect(inputspec, "t1_file", t1_to_nifti, "in_file")

    def t2_convert(in_file=None, reference_file=None, out_file=None):
        """
        Convert a T2 volume to NIfTI in LPS orientation, optionally resliced like a reference volume.

        :param in_file:
        :param reference_file:
        :param out_file:
        :return:
        """
        import os
        from nipype.interfaces.freesurfer import MRIConvert
        from nipype.interfaces.traits_extension import Undefined
        from nipype import Node

        if in_file:
            t2_to_nifti = Node(MRIConvert(), "T2toNIFTI")
            t2_to_nifti.inputs.in_file = in_file
            t2_to_nifti.inputs.out_file = os.path.abspath(out_file)
            t2_to_nifti.inputs.out_orientation = "LPS"
            if reference_file:
                t2_to_nifti.inputs.reslice_like = reference_file
            result = t2_to_nifti.run()
            out_file = os.path.abspath(result.outputs.out_file)
        else:
            out_file = Undefined
        return out_file

    t2_node = Node(
        Function(["in_file", "reference_file", "out_file"], ["out_file"], t2_convert),
        name="T2Convert",
    )
    t2_node.inputs.out_file = "t2.nii.gz"
    wf.connect(inputspec, "t2_file", t2_node, "in_file")
    wf.connect(t1_to_nifti, "out_file", t2_node, "reference_file")

    # convert raw t1 to lia
    t1_to_ras = Node(MRIConvert(), "T1toRAS")
    t1_to_ras.inputs.out_orientation = "LIA"
    t1_to_ras.inputs.out_file = "t1_lia.mgz"
    wf.connect(inputspec, "t1_file", t1_to_ras, "in_file")

    # Create ones image for use when masking the white matter
    ones = Node(
        Function(["in_volume", "out_file"], ["out_file"], create_ones_image),
        name="Ones_Image",
    )
    ones.inputs.out_file = "ones.mgz"

    wf.connect(t1_to_ras, "out_file", ones, "in_volume")

    # use the ones image to obtain a white matter mask
    surfmask = Node(SurfaceMask(), name="WhiteMask")
    surfmask.inputs.out_file = "white_ras.mgz"

    wf.connect(ones, "out_file", surfmask, "in_volume")
    wf.connect(inputspec, "white", surfmask, "in_surface")

    surfmask_to_nifti = Node(MRIConvert(), "MasktoNIFTI")
    surfmask_to_nifti.inputs.out_file = "white.nii.gz"
    surfmask_to_nifti.inputs.out_orientation = "LPS"

    wf.connect(surfmask, "out_file", surfmask_to_nifti, "in_file")

    # create hemi masks

    split = Node(SplitLabels(), name="SplitLabelMask")
    split.inputs.out_file = "HemiBrainLabels.nii.gz"
    wf.connect(
        [
            (aseg2brains, split, [("out_file", "in_file")]),
            (inputspec, split, [("lut_file", "lookup_table")]),
            (aseg_to_nifti, split, [("out_file", "labels_file")]),
            (inputspec, split, [("hemi", "hemi")]),
        ]
    )

    dilate = Node(MultiLabelDilation(), "DilateLabels")
    dilate.inputs.out_file = "DilatedBrainLabels.nii.gz"
    dilate.inputs.radius = 1
    wf.connect(split, "out_file", dilate, "in_file")

    convert_label_map = Node(MRIConvert(), "ConvertLabelMapToMatchT1")
    convert_label_map.inputs.resample_type = "nearest"
    convert_label_map.inputs.out_file = "BrainLabelsFromAsegInT1Space.nii.gz"
    wf.connect(t1_to_nifti, "out_file", convert_label_map, "reslice_like")
    wf.connect(dilate, "out_file", convert_label_map, "in_file")

    logb = Node(LOGISMOSB(), name="LOGISMOS-B")
    logb.inputs.smoothnessConstraint = config["LOGISMOSB"]["smoothnessConstraint"]
    logb.inputs.nColumns = config["LOGISMOSB"]["nColumns"]
    logb.inputs.columnChoice = config["LOGISMOSB"]["columnChoice"]
    logb.inputs.columnHeight = config["LOGISMOSB"]["columnHeight"]
    logb.inputs.nodeSpacing = config["LOGISMOSB"]["nodeSpacing"]
    logb.inputs.w = config["LOGISMOSB"]["w"]
    logb.inputs.a = config["LOGISMOSB"]["a"]
    logb.inputs.nPropagate = config["LOGISMOSB"]["nPropagate"]

    if plugin_args:
        logb.plugin_args = plugin_args

    wf.connect(
        [
            (t1_to_nifti, logb, [("out_file", "t1_file")]),
            (t2_node, logb, [("out_file", "t2_file")]),
            (
                inputspec,
                logb,
                [
                    ("hemi", "basename"),
                    ("hncma_atlas", "atlas_file"),
                    ("wm_proba", "wm_proba_file"),
                    ("gm_proba", "gm_proba_file"),
                ],
            ),
            (to_vtk, logb, [("converted", "mesh_file")]),
            (surfmask_to_nifti, logb, [("out_file", "wm_file")]),
            (convert_label_map, logb, [("out_file", "brainlabels_file")]),
        ]
    )

    outputspec = Node(
        IdentityInterface(["gmsurface_file", "wmsurface_file"]), name="outputspec"
    )

    wf.connect(
        [
            (
                logb,
                outputspec,
                [
                    ("gmsurface_file", "gmsurface_file"),
                    ("wmsurface_file", "wmsurface_file"),
                ],
            )
        ]
    )

    return wf
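
A minimal usage sketch for the factory above; it assumes the default fs_logb_config.json is resolvable by read_json_config, and every path below is a placeholder assumption standing in for real FreeSurfer outputs:

wf = create_fs_compatible_logb_workflow(name="lh_LOGISMOSB")
wf.base_dir = '/tmp/logb_work'                                    # assumed
wf.inputs.inputspec.hemi = 'lh'
wf.inputs.inputspec.t1_file = '/subjects/s01/mri/rawavg.mgz'      # assumed path
wf.inputs.inputspec.t2_file = '/subjects/s01/mri/T2raw.mgz'       # assumed path
wf.inputs.inputspec.aseg = '/subjects/s01/mri/aseg.presurf.mgz'   # assumed path
wf.inputs.inputspec.white = '/subjects/s01/surf/lh.white'         # assumed path
wf.inputs.inputspec.hncma_atlas = '/subjects/s01/hncma.nii.gz'    # assumed path
wf.inputs.inputspec.recoding_file = '/config/recode_labels.json'  # assumed path
wf.inputs.inputspec.lut_file = '/config/lookup_table.csv'         # assumed path
wf.run()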
Code example #25
File: workflow_flirt.py  Project: mikbuch/pymri
subject_template = 'GK'


###############################################################################
#
#      CREATE MAIN WORKFLOW
#
###############################################################################

from nipype.pipeline import Workflow, Node
import nipype.interfaces.utility as util

from pymri.utils.paths_dirs_info import get_subject_names

flirt_apply_all_subs = Workflow(name='flirt_apply_all_subs')

inputsub = Node(
    interface=util.IdentityInterface(
        fields=['sub']
        ),
    name='inputsub'
    )
# inputsub.inputs.sub = ['GK011RZJA', 'GK012OHPA']
# inputsub.iterables = ('sub', ['GK011RZJA', 'GK012OHPA'])
inputsub.iterables = (
    'sub', get_subject_names(base_directory, subject_template)
    )

inputhand = Node(
    interface=util.IdentityInterface(
        fields=['hand']  # assumed field; the snippet is truncated here in the source
        ),
    name='inputhand'
    )
Code example #27
# Get mask from FSLDIR location
mask_file = \
    os.environ['FSLDIR'] + '/data/standard/MNI152_T1_2mm_brain_mask.nii.gz'


###############################################################################
#
#      MASK MULTIPLE WORKFLOW
#
###############################################################################

from nipype.pipeline import Workflow, Node
import nipype.interfaces.io as nio
from os.path import abspath as opap

mask_multiple = Workflow(name='mask_multiple')

grabber = nio.DataGrabber()
grabber.inputs.base_directory = opap(base_directory)
grabber.inputs.template = '*.nii*'
grabber.inputs.sort_filelist = True

grabbed = grabber.run()
rois_filelist = grabbed.outputs.outfiles

###############################################################################
#
#      MASK SINGLE WORKFLOW
#
###############################################################################
apply_mask_multiple = Workflow(name='apply_mask_multiple')
Code example #28
def create_logb_workflow(name="LOGISMOSB_WF", master_config=None, plugin_args=None):
    """
    Create a LOGISMOS-B surface-extraction workflow from tissue posteriors, joint-fusion labels, and the HNCMA atlas.

    :param name:
    :param master_config:
    :param plugin_args:
    :return:
    """
    logb_wf = Workflow(name=name)

    config = read_json_config("config.json")
    config["atlas_info"] = get_local_file_location(config["atlas_info"])

    inputs_node = Node(
        IdentityInterface(
            fields=[
                "t1_file",
                "t2_file",
                "posterior_files",
                "joint_fusion_file",
                "brainlabels_file",
                "hncma_atlas",
            ]
        ),
        name="inputspec",
    )
    inputs_node.run_without_submitting = True

    # ensure that t1 and t2 are in the same voxel lattice
    input_t2 = Node(BRAINSResample(), "ResampleInputT2Volume")
    input_t2.inputs.outputVolume = "t2_resampled.nii.gz"
    input_t2.inputs.pixelType = "ushort"
    input_t2.inputs.interpolationMode = "Linear"

    logb_wf.connect(
        [
            (
                inputs_node,
                input_t2,
                [("t1_file", "referenceVolume"), ("t2_file", "inputVolume")],
            )
        ]
    )

    white_matter_masking_node = Node(interface=WMMasking(), name="WMMasking")
    white_matter_masking_node.inputs.dilation = config["WMMasking"]["dilation"]
    white_matter_masking_node.inputs.csf_threshold = config["WMMasking"][
        "csf_threshold"
    ]
    if master_config and master_config["labelmap_colorlookup_table"]:
        white_matter_masking_node.inputs.atlas_info = master_config[
            "labelmap_colorlookup_table"
        ]
    else:
        white_matter_masking_node.inputs.atlas_info = config["atlas_info"]

    logb_wf.connect(
        [
            (
                inputs_node,
                white_matter_masking_node,
                [
                    ("posterior_files", "posterior_files"),
                    ("joint_fusion_file", "atlas_file"),
                    ("brainlabels_file", "brainlabels_file"),
                    ("hncma_atlas", "hncma_file"),
                ],
            )
        ]
    )

    gm_labels = Node(interface=CreateGMLabelMap(), name="GM_Labelmap")
    gm_labels.inputs.atlas_info = config["atlas_info"]
    logb_wf.connect([(inputs_node, gm_labels, [("joint_fusion_file", "atlas_file")])])

    logismosb_output_node = create_output_spec(
        ["wmsurface_file", "gmsurface_file"],
        config["hemisphere_names"],
        name="outputspec",
    )

    for hemisphere in config["hemisphere_names"]:
        genus_zero_filter = Node(
            interface=GenusZeroImageFilter(),
            name="{0}_GenusZeroImageFilter".format(hemisphere),
        )
        genus_zero_filter.inputs.connectivity = config["GenusZeroImageFilter"][
            "connectivity"
        ]
        genus_zero_filter.inputs.biggestComponent = config["GenusZeroImageFilter"][
            "biggestComponent"
        ]
        genus_zero_filter.inputs.connectedComponent = config["GenusZeroImageFilter"][
            "connectedComponent"
        ]
        genus_zero_filter.inputs.out_mask = "{0}_genus_zero_white_matter.nii.gz".format(
            hemisphere
        )

        logb_wf.connect(
            [
                (
                    white_matter_masking_node,
                    genus_zero_filter,
                    [("{0}_wm".format(hemisphere), "in_file")],
                )
            ]
        )

        surface_generation = Node(
            interface=BRAINSSurfaceGeneration(),
            name="{0}_BRAINSSurfaceGeneration".format(hemisphere),
        )
        surface_generation.inputs.smoothSurface = config["BRAINSSurfaceGeneration"][
            "smoothSurface"
        ]
        surface_generation.inputs.numIterations = config["BRAINSSurfaceGeneration"][
            "numIterations"
        ]
        surface_generation.inputs.out_file = "{0}_white_matter_surface.vtk".format(
            hemisphere
        )

        logb_wf.connect(
            [(genus_zero_filter, surface_generation, [("out_file", "in_file")])]
        )

        logismosb = Node(interface=LOGISMOSB(), name="{0}_LOGISMOSB".format(hemisphere))
        logismosb.inputs.smoothnessConstraint = config["LOGISMOSB"][
            "smoothnessConstraint"
        ]
        logismosb.inputs.nColumns = config["LOGISMOSB"]["nColumns"]
        logismosb.inputs.columnChoice = config["LOGISMOSB"]["columnChoice"]
        logismosb.inputs.columnHeight = config["LOGISMOSB"]["columnHeight"]
        logismosb.inputs.nodeSpacing = config["LOGISMOSB"]["nodeSpacing"]
        logismosb.inputs.w = config["LOGISMOSB"]["w"]
        logismosb.inputs.a = config["LOGISMOSB"]["a"]
        logismosb.inputs.nPropagate = config["LOGISMOSB"]["nPropagate"]
        logismosb.inputs.basename = hemisphere
        if config["LOGISMOSB"]["thickRegions"]:
            logismosb.inputs.thick_regions = config["LOGISMOSB"]["thickRegions"]
        else:
            logismosb.inputs.useHNCMALabels = True

        if plugin_args:
            logismosb.plugin_args = plugin_args

        logb_wf.connect(
            [
                (
                    inputs_node,
                    logismosb,
                    [("t1_file", "t1_file"), ("hncma_atlas", "atlas_file")],
                ),
                (input_t2, logismosb, [("outputVolume", "t2_file")]),
                (genus_zero_filter, logismosb, [("out_file", "wm_file")]),
                (surface_generation, logismosb, [("out_file", "mesh_file")]),
                (
                    white_matter_masking_node,
                    logismosb,
                    [("{0}_boundary".format(hemisphere), "brainlabels_file")],
                ),
                (
                    logismosb,
                    logismosb_output_node,
                    [
                        ("gmsurface_file", "{0}_gmsurface_file".format(hemisphere)),
                        ("wmsurface_file", "{0}_wmsurface_file".format(hemisphere)),
                    ],
                ),
            ]
        )

    return logb_wf
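
A minimal usage sketch for create_logb_workflow; it assumes config.json is resolvable by read_json_config, and every path below is a placeholder assumption:

wf = create_logb_workflow(name="LOGISMOSB_WF")
wf.base_dir = '/tmp/logb_work'                                        # assumed
wf.inputs.inputspec.t1_file = '/data/t1_average.nii.gz'               # assumed path
wf.inputs.inputspec.t2_file = '/data/t2_average.nii.gz'               # assumed path
wf.inputs.inputspec.posterior_files = ['/data/posteriors/WM.nii.gz']  # assumed list
wf.inputs.inputspec.joint_fusion_file = '/data/joint_fusion.nii.gz'   # assumed path
wf.inputs.inputspec.brainlabels_file = '/data/brainlabels.nii.gz'     # assumed path
wf.inputs.inputspec.hncma_atlas = '/data/hncma_atlas.nii.gz'          # assumed path
wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})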
Code example #29
def petpvc_mask(wf_name="petpvc_mask"):
    """ A Workflow that returns a 4D merge of 4 volumes for PETPVC: GM, WM, CSF and background.

    Parameters
    ----------
    wf_name: str
        The name of the workflow.

    Nipype.Inputs
    -------------
    pvcmask_input.tissues: list of existing files
        List of tissue files in anatomical space, the 3 file
        paths must be in this order: GM, WM, CSF

    Nipype.Outputs
    --------------
    pvcmask_output.petpvc_mask: existing file
        A 4D volume file with these maps in order: GM, WM, CSF, background

    pvcmask_output.brain_mask: existing file
        A mask that is a binarised sum of the tissues file with fslmaths.
        Can be used as brain mask in anatomical space for the PET image.

    Returns
    -------
    wf: nipype Workflow
    """
    # define nodes
    # specify input and output fields
    in_fields = ["tissues"]

    out_fields = [
        "petpvc_mask",
        "brain_mask",
    ]

    # input
    pvcmask_input = setup_node(IdentityInterface(fields=in_fields,
                                                 mandatory_inputs=True),
                               name="pvcmask_input")

    tissues = setup_node(IdentityInterface(fields=["gm", "wm", "csf"],
                                           mandatory_inputs=True),
                         name="tissues")

    merge_list = setup_node(Merge(4), name="merge_list")

    # maths for background
    img_bkg = setup_node(Function(
        function=math_img,
        input_names=["formula", "out_file", "gm", "wm", "csf"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                         name='background')
    img_bkg.inputs.out_file = "tissue_bkg.nii.gz"
    img_bkg.inputs.formula = "np.maximum((-((gm + wm + csf) - 1)), 0)"

    # maths for brain mask
    brain_mask = setup_node(Function(
        function=math_img,
        input_names=["formula", "out_file", "gm", "wm", "csf"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                            name='brain_mask')
    brain_mask.inputs.out_file = "tissues_brain_mask.nii.gz"
    brain_mask.inputs.formula = "np.abs(gm + wm + csf) > 0"

    # concat the tissues images and the background for PETPVC
    merge_tissues = setup_node(Function(
        function=concat_imgs,
        input_names=["in_files"],
        output_names=["out_file"],
        imports=['from neuro_pypes.interfaces.nilearn import ni2file']),
                               name='merge_tissues')
    merge_tissues.inputs.out_file = "petpvc_mask.nii.gz"

    # output
    pvcmask_output = setup_node(IdentityInterface(fields=out_fields),
                                name="pvcmask_output")

    # Create the workflow object
    wf = Workflow(name=wf_name)

    # Connect the nodes
    wf.connect([
        # unpack the tissues list [GM, WM, CSF] into gm, wm and csf
        (pvcmask_input, tissues, [(("tissues", selectindex, 0), "gm"),
                                  (("tissues", selectindex, 1), "wm"),
                                  (("tissues", selectindex, 2), "csf")]),
        (tissues, img_bkg, [("gm", "gm"), ("wm", "wm"), ("csf", "csf")]),
        (tissues, brain_mask, [("gm", "gm"), ("wm", "wm"), ("csf", "csf")]),
        (tissues, merge_list, [("gm", "in1"), ("wm", "in2"), ("csf", "in3")]),

        # create a list of [GM, WM, CSF, BKG]
        (img_bkg, merge_list, [("out_file", "in4")]),

        # merge into 4D: [GM, WM, CSF, BKG]
        (merge_list, merge_tissues, [("out", "in_files")]),

        # output
        (merge_tissues, pvcmask_output, [("out_file", "petpvc_mask")]),
        (brain_mask, pvcmask_output, [("out_file", "brain_mask")]),
    ])

    return wf
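
A minimal usage sketch following the Nipype.Inputs/Outputs contract documented above; the tissue paths are assumptions:

wf = petpvc_mask(wf_name="petpvc_mask")
wf.base_dir = '/tmp/petpvc_work'  # assumed working directory
wf.inputs.pvcmask_input.tissues = [
    '/data/tissues/gm.nii.gz',    # GM  (assumed path)
    '/data/tissues/wm.nii.gz',    # WM  (assumed path)
    '/data/tissues/csf.nii.gz',   # CSF (assumed path)
]
wf.run()
# pvcmask_output.petpvc_mask -> 4D volume ordered [GM, WM, CSF, background]
# pvcmask_output.brain_mask  -> binarised union of the three tissues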
Code example #30
def get_wf_tissue_priors(name='wf_tissue_priors3'):
    '''
    Return a workflow that transforms the CSF and WM tissue priors to
    functional space and then thresholds and binarizes them.
    '''

    wf_tissue_priors = Workflow(name=name)

    inputspec = Node(IdentityInterface(fields=['csf_tissue_prior_path', 'wm_tissue_prior_path',
                                               'threshold', 'std2func_mat_path',
                                               'reference_func_file_path']),
                     name="inputspec")
    '''
    # 'gm_tissue_prior_path',

    resample_tissue_prior_csf = Node(Resample(voxel_size=(3, 3, 3), resample_mode='Cu', # cubic interpolation
                             outputtype='NIFTI'),
                    name="resample_tissue_prior_csf")



    # resample_tissue_prior_gm = Node(Resample(voxel_size=(3, 3, 3), resample_mode='Cu', # cubic interpolation
    #                          outputtype='NIFTI'),
    #                 name="resample_tissue_prior_gm")



    resample_tissue_prior_wm = Node(Resample(voxel_size=(3, 3, 3), resample_mode='Cu', # cubic interpolation
                             outputtype='NIFTI'),
                    name="resample_tissue_prior_wm")


    wf_tissue_priors.connect(inputspec, 'csf_tissue_prior_path', resample_tissue_prior_csf, 'in_file' )
    # wf_tissue_priors.connect(inputspec, 'gm_tissue_prior_path', resample_tissue_prior_gm, 'in_file' )
    wf_tissue_priors.connect(inputspec, 'wm_tissue_prior_path', resample_tissue_prior_wm, 'in_file' )
    '''

    # #  Invert the func2anat matrix to get anat2func
    # inv_mat = Node(ConvertXFM(invert_xfm=True), name='inv_mat')
    # wf_tissue_priors.connect(inputspec, 'func2anat_mat_path', inv_mat, 'in_file')

    # Transform the  tissue priors to the functional space using the inverse matrix
    std2func_xform_csf_prior = Node(FLIRT(output_type='NIFTI',
                             apply_xfm=True, interp='sinc'), name='std2func_xform_csf_prior')

    wf_tissue_priors.connect(inputspec, 'reference_func_file_path', std2func_xform_csf_prior, 'reference')
    wf_tissue_priors.connect(inputspec, 'std2func_mat_path', std2func_xform_csf_prior, 'in_matrix_file')

    std2func_xform_wm_prior = Node(FLIRT(output_type='NIFTI',
                                apply_xfm=True, interp='sinc'), name='std2func_xform_wm_prior')
    wf_tissue_priors.connect(inputspec, 'reference_func_file_path', std2func_xform_wm_prior, 'reference')
    wf_tissue_priors.connect(inputspec, 'std2func_mat_path', std2func_xform_wm_prior, 'in_matrix_file')

    # Feed the tissue priors into the transform nodes
    wf_tissue_priors.connect(inputspec, 'csf_tissue_prior_path', std2func_xform_csf_prior, 'in_file')
    wf_tissue_priors.connect(inputspec, 'wm_tissue_prior_path', std2func_xform_wm_prior, 'in_file')

    # Build the fslmaths op string that thresholds and binarizes a transformed prior
    def get_opstring(threshold, tissue_type):
        if tissue_type == 'csf':
            max_intensity = 216  # highest intensity of the resampled afni output for CSF
        elif tissue_type == 'wm':
            max_intensity = 253  # highest intensity of the resampled afni output for WM

        threshold = int(threshold * max_intensity)
        return '-thr ' + str(threshold) + ' -bin'

    # ----- CSF ------

    threshold_csf = Node(interface=ImageMaths(suffix='_thresh'),
                         name='threshold_csf')

    wf_tissue_priors.connect(inputspec, ('threshold', get_opstring, 'csf'), threshold_csf, 'op_string')
    wf_tissue_priors.connect(std2func_xform_csf_prior, 'out_file', threshold_csf, 'in_file')

    # ------- GM --------

    # threshold_gm = Node(interface=ImageMaths(suffix='_thresh'),
    #                    name='threshold_gm')


    # wf_tissue_priors.connect(inputspec, ('threshold', get_opstring), threshold_gm, 'op_string' )
    # wf_tissue_priors.connect(resample_tissue_prior_gm, 'out_file', threshold_gm, 'in_file')

    # -------- WM --------

    threshold_wm = Node(interface=ImageMaths(suffix='_thresh'),
                        name='threshold_wm')

    wf_tissue_priors.connect(inputspec, ('threshold', get_opstring, 'wm'), threshold_wm, 'op_string')
    wf_tissue_priors.connect(std2func_xform_wm_prior, 'out_file', threshold_wm, 'in_file')

    outputspec = Node(IdentityInterface(fields=['csf_tissue_prior_path', 'wm_tissue_prior_path', 'threshold']),
                      name="outputspec")

    # , 'gm_tissue_prior_path'
    wf_tissue_priors.connect(threshold_csf, 'out_file', outputspec, 'csf_tissue_prior_path')
    # wf_tissue_priors.connect(threshold_gm, 'out_file', outputspec, 'gm_tissue_prior_path')
    wf_tissue_priors.connect(threshold_wm, 'out_file', outputspec, 'wm_tissue_prior_path')

    return wf_tissue_priors
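
A minimal usage sketch for get_wf_tissue_priors; all paths are assumptions:

wf = get_wf_tissue_priors(name='wf_tissue_priors3')
wf.base_dir = '/tmp/tissue_priors_work'                                  # assumed
wf.inputs.inputspec.csf_tissue_prior_path = '/priors/csf.nii.gz'         # assumed path
wf.inputs.inputspec.wm_tissue_prior_path = '/priors/wm.nii.gz'           # assumed path
wf.inputs.inputspec.std2func_mat_path = '/xfm/std2func.mat'              # assumed path
wf.inputs.inputspec.reference_func_file_path = '/func/mean_func.nii.gz'  # assumed path
wf.inputs.inputspec.threshold = 0.5
wf.run()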
Code example #31
    subjects_list = [sub for sub in subjects_list if subject_template in sub]

    return subjects_list


###############################################################################
#
#      CREATE MAIN WORKFLOW
#
###############################################################################

from nipype.pipeline import Workflow, Node
import nipype.interfaces.utility as util

mvpa_preproc = Workflow(name=workflow_name)

inputsub = Node(
    interface=util.IdentityInterface(
        fields=['sub']
        ),
    name='inputsub'
    )
# inputsub.inputs.sub = ['GK011RZJA', 'GK012OHPA']
# inputsub.iterables = ('sub', ['GK011RZJA', 'GK012OHPA'])
inputsub.iterables = (
    'sub', get_subject_names(base_directory, subject_template)
    )

inputhand = Node(
    interface=util.IdentityInterface(
        fields=['hand']  # assumed field; the snippet is truncated here in the source
        ),
    name='inputhand'
    )
Code example #33
import glob
import os

from nipype.interfaces import ants
from nipype.pipeline import Workflow, MapNode

# Build a composite (nonlinear + affine) transform per warp directory
dictWarps = {'transforms': [], 'outpath': []}
for strWarpDir in lsWarpDirs:
    strAffinePath = glob.glob(os.path.join(strWarpDir, 'out_matrix',
                                           '*.mat'))[0]
    # Remove rigid body components (translation and rotation) which don't
    # contribute meaningful variation
    strAffinePath = remove_rigidbody(strAffinePath)
    # We use the inverse warp field, which contains the nonlinear transformation from MNI->subject
    strNonlinearPath = glob.glob(
        os.path.join(strWarpDir, 'inverse_warp_field', '*.nii.gz'))[0]
    dictWarps['transforms'].append([strNonlinearPath, strAffinePath])
    dictWarps['outpath'].append(
        os.path.join(strWarpDir, 'composite_to_mni.nii.gz'))

# Use ANTs ApplyTransforms to compose the transforms
antstool = MapNode(ants.ApplyTransforms(input_image=TEMPLATE,
                                        reference_image=TEMPLATE,
                                        interpolation='BSpline',
                                        invert_transform_flags=[False, True],
                                        print_out_composite_warp_file=True),
                   name='applytransforms',
                   iterfield=['output_image', 'transforms'])
antstool.inputs.output_image = dictWarps['outpath']
antstool.inputs.transforms = dictWarps['transforms']

# Create and run nipype workflow
wf = Workflow('composite_transforms')
wf.add_nodes([antstool])
wf.run(plugin='MultiProc', plugin_args={'n_procs': PIPELINE_JOBS})
Code example #34
    def _skullstrip_register_func(self, input_img, output_path):
        output_path = os.path.abspath(output_path)
        workflow = Workflow('register_func', base_dir=output_path)

        # Compute mean image
        meanimage = Node(fsl.MeanImage(dimension='T', output_type='NIFTI_GZ'),
                         name='mean')
        meanimage.inputs.in_file = input_img

        # Skull strip
        funcstrip = skullstrip.make_func_mask_workflow(base_dir=output_path)
        workflow.connect(meanimage, 'out_file', funcstrip,
                         'inputnode.mean_file')

        # Register with EPI template.
        register = Node(ants.Registration(
            fixed_image=strEPITemplatePath,
            transforms=['Translation', 'Rigid', 'Affine', 'SyN'],
            transform_parameters=[(0.1, ), (0.1, ), (0.1, ), (0.2, 3.0, 0.0)],
            number_of_iterations=([[1000, 1000, 1000]] * 3 + [[100, 50, 30]]),
            dimension=3,
            write_composite_transform=True,
            collapse_output_transforms=False,
            metric=['Mattes'] * 3 + [['Mattes', 'CC']],
            metric_weight=[1, 1, 1, [0.5, 0.5]],
            radius_or_number_of_bins=[32] * 3 + [[32, 4]],
            sampling_strategy=['Regular'] * 3 + [[None, None]],
            sampling_percentage=[0.3] * 3 + [[None, None]],
            convergence_threshold=[1e-8] * 3 + [-0.01],
            convergence_window_size=[20, 20, 20, 5],
            smoothing_sigmas=[[4, 2, 1]] * 3 + [[1, 0.5, 0]],
            sigma_units=['vox'] * 4,
            shrink_factors=[[6, 4, 2], [3, 2, 1], [3, 2, 1], [4, 2, 1]],
            use_estimate_learning_rate_once=[True] * 4,
            use_histogram_matching=[False] * 3 + [True],
            initial_moving_transform_com=1,
            terminal_output='file',
            num_threads=self.n_ants_jobs),
                        name='register',
                        mem_gb=16,
                        n_procs=self.n_ants_jobs)
        workflow.connect(funcstrip, 'outputnode.masked_file', register,
                         'moving_image')

        # Apply transformation to the entire image timeseries
        transform = Node(ants.ApplyTransforms(
            reference_image=strEPITemplatePath,
            input_image=input_img,
            float=True,
            output_image=os.path.join(output_path,
                                      'registered_func_Warped.nii.gz'),
            interpolation='BSpline',
            interpolation_parameters=(5, ),
            input_image_type=3,
            num_threads=self.n_ants_jobs),
                         name='apply_transforms')
        transform.interface.num_threads = self.n_ants_jobs
        workflow.connect(register, 'composite_transform', transform,
                         'transforms')

        workflow.run()
        return os.path.join(output_path, 'registered_func_Warped.nii.gz')
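
A hypothetical call site for the method above; the owning object and both paths are assumptions, shown only to illustrate the return contract:

# assumption: `preproc` is an instance of the (unshown) owning class
warped = preproc._skullstrip_register_func(
    input_img='/data/func.nii.gz',        # assumed path
    output_path='/data/derivatives/reg')  # assumed path
print(warped)  # .../registered_func_Warped.nii.gz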