Code Example #1
def mgz2nii(file):
    from nipype.interfaces.freesurfer import MRIConvert
    mc = MRIConvert()
    mc.inputs.in_file = file
    mc.inputs.out_file = file[:-3] + 'nii'
    mc.inputs.out_type = 'nii'
    mc.run()
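
A hedged usage note: out_file above just swaps the .mgz suffix for .nii, and the assembled mri_convert command can be inspected through the interface's cmdline property before running (the paths below are placeholders):

from nipype.interfaces.freesurfer import MRIConvert

mc = MRIConvert()
mc.inputs.in_file = '/data/sub-01/mri/brain.mgz'
mc.inputs.out_file = '/data/sub-01/mri/brain.nii'
mc.inputs.out_type = 'nii'
print(mc.cmdline)  # shows the mri_convert command that will be executed
mc.run()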
Code Example #2
def change_orientation(image_file, out_file, orientation="LPS"):
    import os
    from nipype.interfaces.freesurfer import MRIConvert
    convert = MRIConvert()
    convert.inputs.in_file = image_file
    convert.inputs.out_file = os.path.abspath(out_file)
    convert.inputs.out_orientation = orientation
    result = convert.run()
    return result.outputs.out_file
Code Example #3
File: autorecon1.py Project: Conxz/nipype
 def convert_modalities(in_file=None, out_file=None):
     """Returns an undefined output if the in_file is not defined"""
     from nipype.interfaces.freesurfer import MRIConvert
     import os
     if in_file:
         convert = MRIConvert()
         convert.inputs.in_file = in_file
         convert.inputs.out_file = out_file
         convert.inputs.no_scale = True
         out = convert.run()
         out_file = os.path.abspath(out.outputs.out_file)
     return out_file
Code Example #4
def create_pial_mask(subject_id, subjects_dir, verbose=False):
    import os
    from nipype.interfaces.freesurfer import MRIConvert

    ribbon = os.path.abspath(os.path.join(subjects_dir, subject_id, 'mri', 'ribbon'))
    aseg = os.path.abspath(os.path.join(subjects_dir, subject_id, 'mri', 'aseg'))

    mc = MRIConvert()
    mc.inputs.in_file = ribbon + '.mgz'
    mc.inputs.out_file = ribbon + '.nii.gz'
    mc.inputs.out_type = 'niigz'  # match the .nii.gz extension
    if verbose:
        print(mc.cmdline)
    mc.run()

    mc = MRIConvert()
    mc.inputs.in_file = aseg + '.mgz'
    mc.inputs.out_file = aseg + '.nii.gz'
    mc.inputs.out_type = 'niigz'  # match the .nii.gz extension
    if verbose:
        print(mc.cmdline)
    mc.run()

    # remaining steps, originally run as shell commands (see the sketch below):
    # tic_labels_remove aseg.nii.gz --out_nii 1.mask.nii.gz --remove 3 42
    # fslmaths 1.mask.nii.gz -bin 1.mask.nii.gz
    # fslmaths 1.mask.nii.gz -add ribbon.nii.gz -bin 2.mask.nii.gz
    # fslmaths 2.mask.nii.gz -kernel sphere 10 -dilM -ero -fillh 3.mask.nii.gz

    return
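
The commented-out shell steps can be reproduced with nipype's FSL wrappers. A minimal sketch, assuming FSL is installed and that 1.mask.nii.gz was already produced by the project-specific tic_labels_remove tool (which has no nipype equivalent):

import nipype.interfaces.fsl as fsl

# fslmaths 1.mask.nii.gz -bin 1.mask.nii.gz
fsl.ImageMaths(in_file='1.mask.nii.gz', op_string='-bin',
               out_file='1.mask.nii.gz').run()

# fslmaths 1.mask.nii.gz -add ribbon.nii.gz -bin 2.mask.nii.gz
fsl.ImageMaths(in_file='1.mask.nii.gz',
               op_string='-add ribbon.nii.gz -bin',
               out_file='2.mask.nii.gz').run()

# fslmaths 2.mask.nii.gz -kernel sphere 10 -dilM -ero -fillh 3.mask.nii.gz
fsl.ImageMaths(in_file='2.mask.nii.gz',
               op_string='-kernel sphere 10 -dilM -ero -fillh',
               out_file='3.mask.nii.gz').run()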
Code Example #5
    def t2_convert(in_file=None, reference_file=None, out_file=None):
        """
        This function...

        :param in_file:
        :param reference_file:
        :param out_file:
        :return:
        """
        import os
        from nipype.interfaces.freesurfer import MRIConvert
        from nipype.interfaces.traits_extension import Undefined
        from nipype import Node

        if in_file:
            t2_to_nifti = Node(MRIConvert(), "T2toNIFTI")
            t2_to_nifti.inputs.in_file = in_file
            t2_to_nifti.inputs.out_file = os.path.abspath(out_file)
            t2_to_nifti.inputs.out_orientation = "LPS"
            if reference_file:
                t2_to_nifti.inputs.reslice_like = reference_file
            result = t2_to_nifti.run()
            out_file = os.path.abspath(result.outputs.out_file)
        else:
            out_file = Undefined
        return out_file
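
This helper is meant to be wrapped in a nipype Function node; Example #18 below uses it verbatim, with wiring along these lines:

from nipype.pipeline.engine import Node
from nipype.interfaces.utility import Function

t2_node = Node(Function(['in_file', 'reference_file', 'out_file'],
                        ['out_file'], t2_convert),
               name='T2Convert')
t2_node.inputs.out_file = 't2.nii.gz'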
Code Example #6
 def img2imgcoord(self, ref_img, ref_name=None, method='linear', input_reorient2std=True, ref_reorient2std=False, wf_base_dir=None, wf_name='register', linear_reg_file=None, warp_field_file=None, return_as_array=False):
     ## wf_base_dir
     if wf_base_dir is None and self.working_dir is not None:
         wf_base_dir = self.working_dir
     
     elif wf_base_dir is None and self.working_dir is None:
         print('Working dir has not been specified, results will be stored in:  ', os.path.abspath('.'))             
         wf_base_dir = os.path.abspath('.')
              
     ## converting rawavg to nifti
     mc = MRIConvert()
     mc.inputs.in_file = self.img_file
     mc.inputs.out_file = 'rawavg.nii.gz'
     mc.inputs.out_type = 'niigz'
     mc_node = pe.Node(mc,name='rawavg_to_nifti')
     wf = pe.Workflow(name=wf_name,base_dir=wf_base_dir)
     wf.add_nodes([mc_node])
     wf.run()
     
     ras_coords = self.coordinates['ras_coord']
     
     new_coords = img2img_coord_register(ras_coords, os.path.join(wf_base_dir,wf_name,'rawavg_to_nifti','rawavg.nii.gz'), ref_img, wf_base_dir, method=method,
                                         input_reorient2std=input_reorient2std, ref_reorient2std=ref_reorient2std,
                                         wf_name=wf_name, linear_reg_file=linear_reg_file, warp_field_file = warp_field_file)
     
     if return_as_array is False:
         ## traits
         traits={}
         for trait in self.traits_list:
             traits[trait] = self.__getattribute__(trait)
     
         new_coords = Coords(coords=new_coords, img_file=ref_img, subject=ref_name,**traits)
     
     return new_coords
Code Example #7
def generate_ROI_file(FreeSurfer_ROI_file):
	"""
	This script generates a dictionary of ROIs found in the FreeSurfer parcellation filename
	"""
	from nipype.interfaces.freesurfer import MRIConvert
	mc = MRIConvert()
	mc.inputs.in_file = FreeSurfer_ROI_file
	mc.inputs.out_type = 'niigz'
	mc.run()

	import nipype.interfaces.cmtk as cmtk
	rg = cmtk.ROIGen()
	rg.inputs.aparc_aseg_file = FreeSurfer_ROI_file.split('.')[0] + '_out.nii.gz'
	rg.inputs.use_freesurfer_LUT = True
	out_file = rg.run()

	return out_file
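
Because no out_file is set, this snippet relies on MRIConvert's default output naming (the snippet assumes the converted file is named <input base>_out.nii.gz), and the split('.')[0] reconstruction breaks on paths containing extra dots. A hedged variant that names the output explicitly (convert_roi_file is a hypothetical illustration, not part of the original project):

import os
from nipype.interfaces.freesurfer import MRIConvert

def convert_roi_file(FreeSurfer_ROI_file):
    # strip only the final extension, so dotted directories stay intact
    out_file = os.path.splitext(FreeSurfer_ROI_file)[0] + '_out.nii.gz'
    mc = MRIConvert()
    mc.inputs.in_file = FreeSurfer_ROI_file
    mc.inputs.out_file = out_file
    mc.inputs.out_type = 'niigz'
    mc.run()
    return out_file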
Code Example #8
File: autorecon1.py Project: beausievers/nipype
def checkT1s(T1_files, cw256=False):
    """Verifying size of inputs and setting workflow parameters"""
    import SimpleITK as sitk
    import os
    import sys
    from nipype.interfaces.freesurfer import MRIConvert
    # check that the files are in a list
    if not isinstance(T1_files, list):
        T1_files = [T1_files]
    if len(T1_files) == 0:
        print("ERROR: No T1's Given")
        sys.exit(-1)
    for i, t1 in enumerate(T1_files):
        if t1.endswith(".mgz"):
            # convert input fs files to NIFTI
            convert = MRIConvert()
            convert.inputs.in_file = t1
            convert.inputs.out_file = os.path.abspath(os.path.basename(t1).replace('.mgz', '.nii.gz'))
            convert.run()
            T1_files[i] = convert.inputs.out_file
    size = None
    origvol_names = list()
    for i, t1 in enumerate(T1_files):
        # assign an input number
        file_num = str(i + 1)
        while len(file_num) < 3:
            file_num = '0' + file_num
        origvol_names.append("{0}.mgz".format(file_num))
        # check the size of the image
        img = sitk.ReadImage(t1)
        if not size:
            size = img.GetSize()
        elif size != img.GetSize():
            print("ERROR: T1s not the same size. Cannot process {0} {1} together".format(T1_files[0],
                                                                                         otherFilename))
            sys.exit(-1)
    # check if cw256 is set to crop the images if size is larger than 256
    if not cw256:
        for dim in size:
            if dim > 256:
                print("Setting MRI Convert to crop images to 256 FOV")
                cw256 = True
    if len(T1_files) > 1:
        resample_type = 'cubic'
    else:
        resample_type = 'interpolate'
    return T1_files, cw256, resample_type, origvol_names
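
Example #12 below wires this function into its AutoRecon1 workflow as a Function node; the relevant construction, reproduced from that example:

import nipype.pipeline.engine as pe
from nipype.interfaces.utility import Function

verify_inputs = pe.Node(Function(
    ["T1_files", "cw256"],
    ["T1_files", "cw256", "resample_type", "origvol_names"], checkT1s),
    name="Check_T1s")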
Code Example #9
def mri_convert_with_reslice(launcher, in_file, out_file, slice_file):
    from nipype.interfaces.freesurfer import MRIConvert
    mc = MRIConvert()
    mc.inputs.in_file = in_file
    mc.inputs.out_file = out_file
    mc.inputs.out_type = 'mgz'
    mc.inputs.reslice_like = slice_file
    mc.inputs.resample_type = 'nearest'

    launcher.run(mc.cmdline.replace("mri_convert", "mri_convert.bin"))
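
Here the interface is never run directly: mc.cmdline only renders the shell command, which the caller rewrites to invoke the mri_convert.bin binary. The launcher object is project-specific; a minimal stand-in, purely for illustration:

import subprocess

class ShellLauncher(object):
    """Hypothetical launcher that simply executes the rendered command."""
    def run(self, cmdline):
        subprocess.check_call(cmdline, shell=True)

mri_convert_with_reslice(ShellLauncher(), 'in.mgz', 'out.mgz', 'template.mgz')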
Code Example #10
File: preprocess.py Project: BRAINSia/BRAINSTools
def convert_fs_scan(in_file, out_file, resample_type=None):
    """
    This function...

    :param in_file:
    :param out_file:
    :param resample_type:
    :return:
    """
    if not os.path.isfile(out_file):
        from nipype.interfaces.freesurfer import MRIConvert

        convert = MRIConvert()
        convert.inputs.in_file = in_file
        convert.inputs.out_file = out_file
        convert.inputs.out_orientation = "LPS"
        convert.inputs.conform = True
        convert.inputs.no_change = True
        if resample_type:
            convert.inputs.resample_type = resample_type
        convert.run()
    return os.path.abspath(out_file)
Code Example #11
File: masking.py Project: gpiantoni/bairanalysis
def make_w_masking():
    from nipype.pipeline.engine import Node, Workflow
    from nipype.interfaces.utility import IdentityInterface
    from nipype.interfaces.fsl import FLIRT
    from nipype.interfaces.fsl.maths import BinaryMaths
    from nipype.interfaces.freesurfer import MRIConvert, ReconAll
    # select_ribbon is a project-level helper in gpiantoni/bairanalysis

    w_mask = Workflow('masking')

    n_in = Node(
        IdentityInterface(fields=[
            'T1w',
            'subject',  # without sub-
            'freesurfer2func',
            'func',
        ]),
        name='input')

    n_out = Node(IdentityInterface(fields=[
        'func',
    ]), name='output')

    n_fl = Node(FLIRT(), name='flirt')
    n_fl.inputs.output_type = 'NIFTI_GZ'
    n_fl.inputs.apply_xfm = True
    n_fl.inputs.interp = 'nearestneighbour'

    n_conv = Node(MRIConvert(), name='convert')
    n_conv.inputs.out_type = 'niigz'

    reconall = Node(ReconAll(), name='reconall')
    reconall.inputs.directive = 'all'
    reconall.inputs.subjects_dir = '/Fridge/R01_BAIR/freesurfer'

    w_mask.connect(n_in, 'T1w', reconall, 'T1_files')
    w_mask.connect(n_in, 'subject', reconall, 'subject_id')

    n_mul = Node(interface=BinaryMaths(), name='mul')
    n_mul.inputs.operation = 'mul'

    w_mask.connect(reconall, ('ribbon', select_ribbon), n_conv, 'in_file')
    w_mask.connect(n_conv, 'out_file', n_fl, 'in_file')
    w_mask.connect(n_in, 'func', n_fl, 'reference')
    w_mask.connect(n_in, 'freesurfer2func', n_fl, 'in_matrix_file')

    w_mask.connect(n_in, 'func', n_mul, 'in_file')
    w_mask.connect(n_fl, 'out_file', n_mul, 'operand_file')

    w_mask.connect(n_mul, 'out_file', n_out, 'func')

    return w_mask
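
A hedged usage sketch (paths are placeholders); inputs are fed through the identity node named 'input' defined above:

w = make_w_masking()
w.base_dir = '/tmp/masking'
w.inputs.input.T1w = 'sub-01_T1w.nii.gz'
w.inputs.input.subject = '01'
w.inputs.input.freesurfer2func = 'fs2func.mat'
w.inputs.input.func = 'sub-01_bold.nii.gz'
w.run()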
Code Example #12
def create_AutoRecon1(name="AutoRecon1",
                      longitudinal=False,
                      distance=None,
                      custom_atlas=None,
                      plugin_args=None,
                      shrink=None,
                      stop=None,
                      fsvernum=5.3):
    """Creates the AutoRecon1 workflow in nipype.

    Inputs::
           inputspec.T1_files : T1 files (mandatory)
           inputspec.T2_file : T2 file (optional)
           inputspec.FLAIR_file : FLAIR file (optional)
           inputspec.cw256 : Conform inputs to 256 FOV (optional)
           inputspec.num_threads: Number of threads to use with EM Register (default=1)
    Outputs::

    """
    ar1_wf = pe.Workflow(name=name)
    inputspec = pe.Node(interface=IdentityInterface(fields=[
        'T1_files', 'T2_file', 'FLAIR_file', 'cw256', 'num_threads',
        'reg_template_withskull', 'awk_file'
    ]),
                        run_without_submitting=True,
                        name='inputspec')

    if not longitudinal:
        # single session processing
        verify_inputs = pe.Node(Function(
            ["T1_files", "cw256"],
            ["T1_files", "cw256", "resample_type", "origvol_names"], checkT1s),
                                name="Check_T1s")
        ar1_wf.connect([(inputspec, verify_inputs, [('T1_files', 'T1_files'),
                                                    ('cw256', 'cw256')])])

        # T1 image preparation
        # For all T1's mri_convert ${InputVol} ${out_file}
        T1_image_preparation = pe.MapNode(MRIConvert(),
                                          iterfield=['in_file', 'out_file'],
                                          name="T1_prep")

        ar1_wf.connect([
            (verify_inputs, T1_image_preparation, [('T1_files', 'in_file'),
                                                   ('origvol_names',
                                                    'out_file')]),
        ])

        def convert_modalities(in_file=None, out_file=None):
            """Returns an undefined output if the in_file is not defined"""
            from nipype.interfaces.freesurfer import MRIConvert
            import os
            if in_file:
                convert = MRIConvert()
                convert.inputs.in_file = in_file
                convert.inputs.out_file = out_file
                convert.inputs.no_scale = True
                out = convert.run()
                out_file = os.path.abspath(out.outputs.out_file)
            return out_file

        T2_convert = pe.Node(Function(['in_file', 'out_file'], ['out_file'],
                                      convert_modalities),
                             name="T2_Convert")
        T2_convert.inputs.out_file = 'T2raw.mgz'
        ar1_wf.connect([(inputspec, T2_convert, [('T2_file', 'in_file')])])

        FLAIR_convert = pe.Node(Function(['in_file', 'out_file'], ['out_file'],
                                         convert_modalities),
                                name="FLAIR_Convert")
        FLAIR_convert.inputs.out_file = 'FLAIRraw.mgz'
        ar1_wf.connect([(inputspec, FLAIR_convert, [('FLAIR_file', 'in_file')])
                        ])
    else:
        # longitudinal inputs
        inputspec = pe.Node(interface=IdentityInterface(fields=[
            'T1_files', 'iscales', 'ltas', 'subj_to_template_lta',
            'template_talairach_xfm', 'template_brainmask'
        ]),
                            run_without_submitting=True,
                            name='inputspec')

        def output_names(T1_files):
            """Create file names that are dependent on the number of T1 inputs"""
            iscale_names = list()
            lta_names = list()
            for i, t1 in enumerate(T1_files):
                # assign an input number
                file_num = str(i + 1)
                while len(file_num) < 3:
                    file_num = '0' + file_num
                iscale_names.append("{0}-iscale.txt".format(file_num))
                lta_names.append("{0}.lta".format(file_num))
            return iscale_names, lta_names

        filenames = pe.Node(Function(['T1_files'],
                                     ['iscale_names', 'lta_names'],
                                     output_names),
                            name="Longitudinal_Filenames")
        ar1_wf.connect([(inputspec, filenames, [('T1_files', 'T1_files')])])

        copy_ltas = pe.MapNode(Function(['in_file', 'out_file'], ['out_file'],
                                        copy_file),
                               iterfield=['in_file', 'out_file'],
                               name='Copy_ltas')
        ar1_wf.connect([(inputspec, copy_ltas, [('ltas', 'in_file')]),
                        (filenames, copy_ltas, [('lta_names', 'out_file')])])

        copy_iscales = pe.MapNode(Function(['in_file', 'out_file'],
                                           ['out_file'], copy_file),
                                  iterfield=['in_file', 'out_file'],
                                  name='Copy_iscales')
        ar1_wf.connect([(inputspec, copy_iscales, [('iscales', 'in_file')]),
                        (filenames, copy_iscales, [('iscale_names', 'out_file')
                                                   ])])

        concatenate_lta = pe.MapNode(ConcatenateLTA(),
                                     iterfield=['in_file'],
                                     name="Concatenate_ltas")
        ar1_wf.connect([(copy_ltas, concatenate_lta, [('out_file', 'in_file')
                                                      ]),
                        (inputspec, concatenate_lta, [('subj_to_template_lta',
                                                       'subj_to_base')])])

    # Motion Correction
    """
    When there are multiple source volumes, this step will correct for small
    motions between them and then average them together.  The output of the
    motion corrected average is mri/rawavg.mgz which is then conformed to
    256 cubed char images (1 mm isotropic voxels) in mri/orig.mgz.
    """
    def createTemplate(in_files, out_file):
        import os
        import shutil
        if len(in_files) == 1:
            # if only 1 T1 scan given, no need to run RobustTemplate
            print(
                "WARNING: only one run found. This is OK, but motion correction "
                +
                "cannot be performed on one run, so I'll copy the run to rawavg "
                + "and continue.")
            shutil.copyfile(in_files[0], out_file)
            intensity_scales = None
            transforms = None
        else:
            from nipype.interfaces.freesurfer import RobustTemplate
            # if multiple T1 scans are given run RobustTemplate
            intensity_scales = [
                os.path.basename(f.replace('.mgz', '-iscale.txt'))
                for f in in_files
            ]
            transforms = [
                os.path.basename(f.replace('.mgz', '.lta')) for f in in_files
            ]
            robtemp = RobustTemplate()
            robtemp.inputs.in_files = in_files
            robtemp.inputs.average_metric = 'median'
            robtemp.inputs.out_file = out_file
            robtemp.inputs.no_iteration = True
            robtemp.inputs.fixed_timepoint = True
            robtemp.inputs.auto_detect_sensitivity = True
            robtemp.inputs.initial_timepoint = 1
            robtemp.inputs.scaled_intensity_outputs = intensity_scales
            robtemp.inputs.transform_outputs = transforms
            robtemp.inputs.subsample_threshold = 200
            robtemp.inputs.intensity_scaling = True
            robtemp_result = robtemp.run()
            # collect the outputs from RobustTemplate
            out_file = robtemp_result.outputs.out_file
            intensity_scales = [
                os.path.abspath(f)
                for f in robtemp_result.outputs.scaled_intensity_outputs
            ]
            transforms = [
                os.path.abspath(f)
                for f in robtemp_result.outputs.transform_outputs
            ]
        out_file = os.path.abspath(out_file)
        return out_file, intensity_scales, transforms

    if not longitudinal:
        create_template = pe.Node(Function(
            ['in_files', 'out_file'],
            ['out_file', 'intensity_scales', 'transforms'], createTemplate),
                                  name="Robust_Template")
        create_template.inputs.out_file = 'rawavg.mgz'
        ar1_wf.connect([(T1_image_preparation, create_template,
                         [('out_file', 'in_files')])])
    else:
        create_template = pe.Node(RobustTemplate(), name="Robust_Template")
        create_template.inputs.average_metric = 'median'
        create_template.inputs.out_file = 'rawavg.mgz'
        create_template.inputs.no_iteration = True
        ar1_wf.connect([(concatenate_lta, create_template,
                         [('out_file', 'initial_transforms')]),
                        (inputspec, create_template, [('T1_files', 'in_files')]),
                        (copy_iscales, create_template,
                         [('out_file', 'in_intensity_scales')])])

    # mri_convert
    conform_template = pe.Node(MRIConvert(), name='Conform_Template')
    conform_template.inputs.out_file = 'orig.mgz'
    if not longitudinal:
        conform_template.inputs.conform = True
        ar1_wf.connect([(verify_inputs, conform_template, [
            ('cw256', 'cw256'), ('resample_type', 'resample_type')
        ])])
    else:
        conform_template.inputs.out_datatype = 'uchar'

    ar1_wf.connect([(create_template, conform_template, [('out_file',
                                                          'in_file')])])

    # Talairach
    """
    This computes the affine transform from the orig volume to the MNI305 atlas using Avi Snyder's 4dfp
    suite of image registration tools, through a FreeSurfer script called talairach_avi.
    Several of the downstream programs use talairach coordinates as seed points.
    """

    bias_correction = pe.Node(MNIBiasCorrection(), name="Bias_correction")
    bias_correction.inputs.iterations = 1
    bias_correction.inputs.protocol_iterations = 1000
    bias_correction.inputs.distance = distance
    if stop:
        bias_correction.inputs.stop = stop
    if shrink:
        bias_correction.inputs.shrink = shrink
    bias_correction.inputs.no_rescale = True
    bias_correction.inputs.out_file = 'orig_nu.mgz'

    ar1_wf.connect([
        (conform_template, bias_correction, [('out_file', 'in_file')]),
    ])

    if not longitudinal:
        # single session processing
        talairach_avi = pe.Node(TalairachAVI(), name="Compute_Transform")
        if custom_atlas is not None:
            # allows specifying a custom atlas
            talairach_avi.inputs.atlas = custom_atlas
        talairach_avi.inputs.out_file = 'talairach.auto.xfm'
        ar1_wf.connect([(bias_correction, talairach_avi, [('out_file',
                                                           'in_file')])])
    else:
        # longitudinal processing
        # Just copy the template xfm
        talairach_avi = pe.Node(Function(['in_file', 'out_file'], ['out_file'],
                                         copy_file),
                                name='Copy_Template_Transform')
        talairach_avi.inputs.out_file = 'talairach.auto.xfm'

        ar1_wf.connect([(inputspec, talairach_avi, [('template_talairach_xfm',
                                                     'in_file')])])

    copy_transform = pe.Node(Function(['in_file', 'out_file'], ['out_file'],
                                      copy_file),
                             name='Copy_Transform')
    copy_transform.inputs.out_file = 'talairach.xfm'

    ar1_wf.connect([(talairach_avi, copy_transform, [('out_file', 'in_file')])
                    ])

    # In recon-all the talairach.xfm is added to orig.mgz, even though
    # it does not exist yet. This is a compromise to keep from
    # having to change the time stamp of the orig volume after talairaching.
    # Here we are going to add xfm to the header after the xfm has been created.
    # This may mess up the timestamp.

    add_xform_to_orig = pe.Node(AddXFormToHeader(),
                                name="Add_Transform_to_Orig")
    add_xform_to_orig.inputs.copy_name = True
    add_xform_to_orig.inputs.out_file = conform_template.inputs.out_file

    ar1_wf.connect([
        (conform_template, add_xform_to_orig, [('out_file', 'in_file')]),
        (copy_transform, add_xform_to_orig, [('out_file', 'transform')])
    ])

    # This node adds the transform to the orig_nu.mgz file. This step does not
    # exist in the recon-all workflow, because that workflow adds the talairach
    # to the orig.mgz file header before the talairach actually exists.
    add_xform_to_orig_nu = pe.Node(AddXFormToHeader(),
                                   name="Add_Transform_to_Orig_Nu")
    add_xform_to_orig_nu.inputs.copy_name = True
    add_xform_to_orig_nu.inputs.out_file = bias_correction.inputs.out_file

    ar1_wf.connect([
        (bias_correction, add_xform_to_orig_nu, [('out_file', 'in_file')]),
        (copy_transform, add_xform_to_orig_nu, [('out_file', 'transform')])
    ])

    # check the alignment of the talairach
    # TODO: Figure out how to read output from this node.
    check_alignment = pe.Node(CheckTalairachAlignment(),
                              name="Check_Talairach_Alignment")
    check_alignment.inputs.threshold = 0.005
    ar1_wf.connect([
        (copy_transform, check_alignment, [('out_file', 'in_file')]),
    ])

    if not longitudinal:

        def awkfile(in_file, log_file):
            """
            This method uses 'awk' which must be installed prior to running the workflow and is not a
            part of nipype or freesurfer.
            """
            import subprocess
            import os
            command = ['awk', '-f', in_file, log_file]
            print(' '.join(command))
            subprocess.call(command)
            log_file = os.path.abspath(log_file)
            return log_file

        awk_logfile = pe.Node(Function(['in_file', 'log_file'], ['log_file'],
                                       awkfile),
                              name='Awk')

        ar1_wf.connect([(talairach_avi, awk_logfile, [('out_log', 'log_file')
                                                      ]),
                        (inputspec, awk_logfile, [('awk_file', 'in_file')])])

        # TODO: datasink the output from TalairachQC...not sure how to do this
        tal_qc = pe.Node(TalairachQC(), name="Detect_Aligment_Failures")
        ar1_wf.connect([(awk_logfile, tal_qc, [('log_file', 'log_file')])])

    if fsvernum < 6:
        # intensity correction is performed before normalization
        intensity_correction = pe.Node(MNIBiasCorrection(),
                                       name="Intensity_Correction")
        intensity_correction.inputs.out_file = 'nu.mgz'
        intensity_correction.inputs.iterations = 2
        ar1_wf.connect([(add_xform_to_orig, intensity_correction,
                         [('out_file', 'in_file')]),
                        (copy_transform, intensity_correction,
                         [('out_file', 'transform')])])

        add_to_header_nu = pe.Node(AddXFormToHeader(), name="Add_XForm_to_NU")
        add_to_header_nu.inputs.copy_name = True
        add_to_header_nu.inputs.out_file = 'nu.mgz'
        ar1_wf.connect([(intensity_correction, add_to_header_nu, [
            ('out_file', 'in_file'),
        ]), (copy_transform, add_to_header_nu, [('out_file', 'transform')])])

    # Intensity Normalization
    # Performs intensity normalization of the orig volume and places the result in mri/T1.mgz.
    # Attempts to correct for fluctuations in intensity that would otherwise make intensity-based
    # segmentation much more difficult. Intensities for all voxels are scaled so that the mean
    # intensity of the white matter is 110.

    mri_normalize = pe.Node(Normalize(), name="Normalize_T1")
    mri_normalize.inputs.gradient = 1
    mri_normalize.inputs.out_file = 'T1.mgz'

    if fsvernum < 6:
        ar1_wf.connect([(add_to_header_nu, mri_normalize, [('out_file',
                                                            'in_file')])])
    else:
        ar1_wf.connect([(add_xform_to_orig_nu, mri_normalize, [('out_file',
                                                                'in_file')])])

    ar1_wf.connect([(copy_transform, mri_normalize, [('out_file', 'transform')
                                                     ])])

    # Skull Strip
    """
    Removes the skull from mri/T1.mgz and stores the result in
    mri/brainmask.auto.mgz and mri/brainmask.mgz. Runs the mri_watershed program.
    """
    if not longitudinal:
        mri_em_register = pe.Node(EMRegister(), name="EM_Register")
        mri_em_register.inputs.out_file = 'talairach_with_skull.lta'
        mri_em_register.inputs.skull = True
        if plugin_args:
            mri_em_register.plugin_args = plugin_args

        if fsvernum < 6:
            ar1_wf.connect(add_to_header_nu, 'out_file', mri_em_register,
                           'in_file')
        else:
            ar1_wf.connect(add_xform_to_orig_nu, 'out_file', mri_em_register,
                           'in_file')

        ar1_wf.connect([(inputspec, mri_em_register,
                         [('num_threads', 'num_threads'),
                          ('reg_template_withskull', 'template')])])

        brainmask = pe.Node(WatershedSkullStrip(),
                            name='Watershed_Skull_Strip')
        brainmask.inputs.t1 = True
        brainmask.inputs.out_file = 'brainmask.auto.mgz'
        ar1_wf.connect([
            (mri_normalize, brainmask, [('out_file', 'in_file')]),
            (mri_em_register, brainmask, [('out_file', 'transform')]),
            (inputspec, brainmask, [('reg_template_withskull', 'brain_atlas')])
        ])
    else:
        copy_template_brainmask = pe.Node(Function(['in_file', 'out_file'],
                                                   ['out_file'], copy_file),
                                          name='Copy_Template_Brainmask')
        copy_template_brainmask.inputs.out_file = 'brainmask_{0}.mgz'.format(
            config['long_template'])

        ar1_wf.connect([(inputspec, copy_template_brainmask,
                         [('template_brainmask', 'in_file')])])

        mask1 = pe.Node(ApplyMask(), name="ApplyMask1")
        mask1.inputs.keep_mask_deletion_edits = True
        mask1.inputs.out_file = 'brainmask.auto.mgz'

        ar1_wf.connect([(mri_normalize, mask1, [('out_file', 'in_file')]),
                        (copy_template_brainmask, mask1, [('out_file',
                                                           'mask_file')])])

        brainmask = pe.Node(ApplyMask(), name="ApplyMask2")
        brainmask.inputs.keep_mask_deletion_edits = True
        brainmask.inputs.transfer = 255
        brainmask.inputs.out_file = mask1.inputs.out_file

        ar1_wf.connect([(mask1, brainmask, [('out_file', 'in_file')]),
                        (copy_template_brainmask, brainmask, [('out_file',
                                                               'mask_file')])])

    copy_brainmask = pe.Node(Function(['in_file', 'out_file'], ['out_file'],
                                      copy_file),
                             name='Copy_Brainmask')
    copy_brainmask.inputs.out_file = 'brainmask.mgz'

    ar1_wf.connect([(brainmask, copy_brainmask, [('out_file', 'in_file')])])

    outputs = [
        'origvols', 't2_raw', 'flair', 'rawavg', 'orig_nu', 'orig',
        'talairach_auto', 'talairach', 't1', 'talskull', 'brainmask_auto',
        'brainmask', 'braintemplate'
    ]

    if fsvernum < 6:
        outputspec = pe.Node(IdentityInterface(fields=outputs + ['nu']),
                             name="outputspec")
        ar1_wf.connect([(add_to_header_nu, outputspec, [('out_file', 'nu')])])
    else:
        outputspec = pe.Node(IdentityInterface(fields=outputs),
                             name="outputspec")

    ar1_wf.connect([
        (T1_image_preparation, outputspec, [('out_file', 'origvols')]),
        (T2_convert, outputspec, [('out_file', 't2_raw')]),
        (FLAIR_convert, outputspec, [('out_file', 'flair')]),
        (create_template, outputspec, [('out_file', 'rawavg')]),
        (add_xform_to_orig, outputspec, [('out_file', 'orig')]),
        (add_xform_to_orig_nu, outputspec, [('out_file', 'orig_nu')]),
        (talairach_avi, outputspec, [('out_file', 'talairach_auto')]),
        (copy_transform, outputspec, [('out_file', 'talairach')]),
        (mri_normalize, outputspec, [('out_file', 't1')]),
        (brainmask, outputspec, [('out_file', 'brainmask_auto')]),
        (copy_brainmask, outputspec, [('out_file', 'brainmask')]),
    ])

    if not longitudinal:
        ar1_wf.connect([
            (mri_em_register, outputspec, [('out_file', 'talskull')]),
        ])
    else:
        ar1_wf.connect([
            (copy_template_brainmask, outputspec, [('out_file',
                                                    'braintemplate')]),
        ])

    return ar1_wf, outputs
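
A hedged sketch of driving this workflow; the paths are placeholders, the distance value is illustrative, and the template/awk files stand in for the corresponding files shipped with FreeSurfer:

ar1_wf, outputs = create_AutoRecon1(name='AutoRecon1', distance=50, fsvernum=6)
ar1_wf.base_dir = '/tmp/ar1'
ar1_wf.inputs.inputspec.T1_files = ['/data/sub-01_T1w.nii.gz']
ar1_wf.inputs.inputspec.cw256 = False
ar1_wf.inputs.inputspec.num_threads = 1
ar1_wf.inputs.inputspec.reg_template_withskull = '/opt/freesurfer/average/RB_all_withskull.gca'  # placeholder path
ar1_wf.inputs.inputspec.awk_file = '/opt/freesurfer/bin/extract_talairach_avi_QA.awk'  # placeholder path
ar1_wf.run()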
Code Example #13
from nipype.interfaces.freesurfer import MRIConvert
import os
import shutil
import csv
import sys
from os import listdir
from os.path import isfile, join

############# MRI CONVERT ##############
# path of subjects in .mgz format
sub_path = '/media/Ali/8A9E6F039E6EE6E3/freesurfer/subjects'
# path of output converted files to .nii format
output_dir = '/media/Ali/8A9E6F039E6EE6E3/MRI/MRI-Converted'

dirs = [f for f in listdir(sub_path) if f.startswith('sub')]
mc = MRIConvert()
for sub in dirs:
    mgz_path = os.path.join(sub_path, sub, 'mri/brain.mgz')
    nii_path = os.path.join(output_dir, '{}-brain.nii'.format(sub))
    mc.inputs.in_file = mgz_path
    mc.inputs.out_file = nii_path
    mc.inputs.out_type = 'nii'
    mc.run()
Code Example #14
def postprocessing_t1w_extract_hippo(caps_directory,
                                     tsv,
                                     working_directory=None,
                                     hemi='right'):
    """
    This is a postprocessing pipeline to prepare the slice-level and patch-level data from the whole MRI and save them
    on disk, so that to facilitate the training process:
        - For slice-level CNN, all slices were extracted from the whole MRI from three different axis. The first and last
          15 slice were discarded due to the lack of information.
        - For patch-level CNN, the 3D patch (with specific patch size) were extracted by a 3D window.

    :param caps_directory: CAPS directory where the output of preprocessing is stored
    :param tsv: subject list file containing the participant_id and session_id
    :param hemi: chooses which hippocampus is extracted (left or right)
    :param working_directory: working directory to store the intermediate files
    :return:
    """

    from nipype.interfaces.freesurfer import MRIConvert
    import nipype.interfaces.utility as nutil
    import nipype.pipeline.engine as npe
    import nipype.interfaces.io as nio
    import tempfile
    from .T1_postprocessing_extract_hippo_utils import get_caps_t1, save_as_pt, compress_nii, get_subid_sesid_datasink

    if working_directory is None:
        working_directory = tempfile.mkdtemp()

    inputnode = npe.Node(
        nutil.IdentityInterface(fields=['caps_directory', 'tsv']),
        name='inputnode')
    inputnode.inputs.caps_directory = caps_directory
    inputnode.inputs.tsv = tsv

    get_subject_session_list = npe.Node(
        name='get_subject_session_list',
        interface=nutil.Function(function=get_caps_t1,
                                 input_names=['caps_directory', 'tsv'],
                                 output_names=[
                                     'preprocessed_T1',
                                     'cropped_hipp_file_name',
                                     'participant_id', 'session_id',
                                     'preprocessed_T1_folder'
                                 ]))

    # extract the hippocampus.
    hippocampus_patches = npe.MapNode(name='hippocampus_patches',
                                      iterfield=['in_file', 'out_file'],
                                      interface=MRIConvert())

    hippocampus_patches.inputs.out_type = 'nii'

    # TODO: decide the position of the hippocampus for each hemisphere
    if hemi == 'left':
        hippocampus_patches.inputs.crop_center = (
            61, 96, 68)  # the center of the left hippocampus
        hippocampus_patches.inputs.crop_size = (
            50, 50, 50)  # the output cropped hippocampus size
    else:
        hippocampus_patches.inputs.crop_center = (
            109, 96, 68)  # the center of the right hippocampus
        hippocampus_patches.inputs.crop_size = (
            50, 50, 50)  # the output cropped hippocampus size

    # zip the resulting images
    # in newer versions of nipype, MRIConvert can apparently save directly as nii.gz
    zip_hippocampus = npe.MapNode(name='zip_hippocampus',
                                  interface=nutil.Function(
                                      input_names=['in_file'],
                                      output_names=['out_file'],
                                      function=compress_nii),
                                  iterfield=['in_file'])

    # save nii.gz into PyTorch .pt format.
    save_as_pt = npe.MapNode(name='save_as_pt',
                             iterfield=['input_img'],
                             interface=nutil.Function(
                                 function=save_as_pt,
                                 input_names=['input_img'],
                                 output_names=['output_file']))

    # get the information for datasinker.
    get_identifiers = npe.MapNode(nutil.Function(
        input_names=['participant_id', 'session_id', 'caps_directory', 'hemi'],
        output_names=[
            'base_directory', 'subst_tuple_list', 'regexp_substitutions'
        ],
        function=get_subid_sesid_datasink),
                                  iterfield=['participant_id', 'session_id'],
                                  name='get_subid_sesid_datasink')
    get_identifiers.inputs.caps_directory = caps_directory
    get_identifiers.inputs.hemi = hemi

    # datasink
    datasink = npe.MapNode(nio.DataSink(
        infields=['output_hippocampus_nii', 'output_hippocampus_pt']),
                           name='datasinker',
                           iterfield=[
                               'output_hippocampus_nii',
                               'output_hippocampus_pt', 'base_directory',
                               'substitutions', 'regexp_substitutions'
                           ])

    outputnode = npe.Node(nutil.IdentityInterface(
        fields=['output_hippocampus_nii', 'output_hippocampus_pt']),
                          name='outputnode')

    wf = npe.Workflow(name='t1w_postprocessing_dl_extract_hippo')
    wf.base_dir = working_directory

    wf.connect([
        (inputnode, get_subject_session_list, [('tsv', 'tsv')]),
        (inputnode, get_subject_session_list, [('caps_directory',
                                                'caps_directory')]),
        (get_subject_session_list, hippocampus_patches, [('preprocessed_T1',
                                                          'in_file')]),
        (get_subject_session_list, hippocampus_patches,
         [('cropped_hipp_file_name', 'out_file')]),
        (hippocampus_patches, zip_hippocampus, [('out_file', 'in_file')]),
        (zip_hippocampus, save_as_pt, [('out_file', 'input_img')]),

        # Saving files with datasink:
        (get_subject_session_list, get_identifiers, [('participant_id',
                                                      'participant_id')]),
        (get_subject_session_list, get_identifiers, [('session_id',
                                                      'session_id')]),
        (get_identifiers, datasink, [('base_directory', 'base_directory')]),
        (get_identifiers, datasink, [('subst_tuple_list', 'substitutions')]),
        (get_identifiers, datasink, [('regexp_substitutions',
                                      'regexp_substitutions')]),
        (save_as_pt, datasink, [('output_file', 'output_hippocampus_pt')]),
        (zip_hippocampus, datasink, [('out_file', 'output_hippocampus_nii')]),
    ])

    return wf
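
A hedged usage sketch; the CAPS directory and TSV paths are placeholders:

wf = postprocessing_t1w_extract_hippo('/data/CAPS', '/data/subjects.tsv', hemi='left')
wf.run()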
Code Example #15
    def _run_interface(self, runtime):
        from additional_interfaces import DipyDenoiseT1
        from additional_interfaces import FSRename
        from nipype.interfaces.ants import N4BiasFieldCorrection
        from nipype.interfaces.ants.segmentation import BrainExtraction
        from nipype.interfaces.freesurfer import MRIConvert
        from nipype.interfaces.freesurfer import ReconAll
        import nipype.interfaces.fsl as fsl
        import nipype.pipeline.engine as pe
        import os

        subject_id = self.inputs.subject_id
        T1 = self.inputs.T1
        template_directory = self.inputs.template_directory
        out_directory = self.inputs.out_directory
        subjects_dir = out_directory + '/FreeSurfer/'

        if not os.path.isdir(subjects_dir):
            os.mkdir(subjects_dir)

        os.environ['SUBJECTS_DIR'] = subjects_dir

        # Getting a better field of view
        robustfov = pe.Node(interface=fsl.RobustFOV(), name='robustfov')
        robustfov.inputs.in_file = T1

        # Denoising
        T1_denoise = pe.Node(interface=DipyDenoiseT1(), name='T1_denoise')

        # Brain extraction
        brainextraction = pe.Node(interface=fsl.BET(), name='brainextraction')

        # Renaming files for FreeSurfer
        rename = pe.Node(FSRename(), name='rename')

        # Running FreeSurfer
        autorecon1 = pe.Node(interface=ReconAll(), name='autorecon1')
        autorecon1.inputs.subject_id = subject_id
        autorecon1.inputs.directive = 'autorecon1'
        autorecon1.inputs.args = '-noskullstrip'
        autorecon1.inputs.subjects_dir = subjects_dir

        autorecon2 = pe.Node(interface=ReconAll(), name='autorecon2')
        autorecon2.inputs.directive = 'autorecon2'

        autorecon3 = pe.Node(interface=ReconAll(), name='autorecon3')
        autorecon3.inputs.directive = 'autorecon3'

        wm_convert = pe.Node(interface=MRIConvert(), name='wm_convert')
        wm_convert.inputs.out_file = subjects_dir + '/' + subject_id + '/mri/' + 'wm.nii'
        wm_convert.inputs.out_type = 'nii'

        T1_convert = pe.Node(interface=MRIConvert(), name='T1_convert')
        T1_convert.inputs.out_file = subjects_dir + '/' + subject_id + '/mri/' + 'T1.nii.gz'
        T1_convert.inputs.out_type = 'niigz'

        mask_convert = pe.Node(interface=MRIConvert(), name='mask_convert')
        mask_convert.inputs.out_file = subjects_dir + '/' + subject_id + '/mri/' + 'brainmask.nii.gz'
        mask_convert.inputs.out_type = 'niigz'

        # Connecting the pipeline
        T1_preproc = pe.Workflow(name='t1_preproc')

        T1_preproc.connect(robustfov, 'out_roi', T1_denoise, 'in_file')
        T1_preproc.connect(T1_denoise, 'out_file', brainextraction, 'in_file')
        T1_preproc.connect(
            brainextraction, 'out_file', autorecon1, 'T1_files')
        T1_preproc.connect(
            autorecon1, 'subject_id', autorecon2, 'subject_id')
        T1_preproc.connect(
            autorecon1, 'subjects_dir', autorecon2, 'subjects_dir')
        T1_preproc.connect(
            autorecon1, 'subject_id', rename, 'subject_id')
        T1_preproc.connect(
            autorecon1, 'subjects_dir', rename, 'subjects_dir')
        T1_preproc.connect(
            autorecon2, 'subject_id', autorecon3, 'subject_id')
        T1_preproc.connect(
            autorecon2, 'subjects_dir', autorecon3, 'subjects_dir')
        T1_preproc.connect(autorecon3, 'wm', wm_convert, 'in_file')
        T1_preproc.connect(autorecon3, 'T1', T1_convert, 'in_file')
        T1_preproc.connect(
            autorecon3, 'brainmask', mask_convert, 'in_file')

        # ==============================================================
        # Running the workflow
        T1_preproc.base_dir = os.path.abspath(self.inputs.out_directory + '/_subject_id_' + self.inputs.subject_id)
        T1_preproc.write_graph()
        T1_preproc.run()

        return runtime
Code Example #16
File: run.py Project: clowdcontrol/pipelines
def run_workflow(bids_dir):
    subjects_dir = os.path.join(bids_dir, "derivatives", "freesurfer")
    mindcontrol_base_dir = os.path.join(bids_dir, "derivatives", "mindcontrol_freesurfer")
    mindcontrol_outdir = mindcontrol_base_dir
    workflow_working_dir = os.path.join(mindcontrol_base_dir, "scratch"+"_"+str(uuid.uuid4()))

    subject_paths = glob(op.join(subjects_dir, "*"))

    subjects = []
    for path in subject_paths:
        subject = path.split('/')[-1]
        # check if mri dir exists, and don't add fsaverage
        if op.exists(op.join(path, 'mri')) and subject != 'fsaverage':
            subjects.append(subject)


    input_node = Node(IdentityInterface(fields=['subject_id',"subjects_dir",
                                            "mindcontrol_base_dir", "output_dir"]), name='inputnode')
    input_node.iterables=("subject_id", subjects)
    input_node.inputs.subjects_dir = subjects_dir
    input_node.inputs.mindcontrol_base_dir = mindcontrol_base_dir #this is where start_static_server is running
    input_node.inputs.output_dir = mindcontrol_outdir #this is in the freesurfer/ directory under the base_dir

    dg_node=Node(Function(input_names=["subjects_dir", "subject", "volumes"],
                          output_names=["volume_paths"], 
                          function=data_grabber), 
                 name="datagrab")
    #dg_node.inputs.subjects_dir = subjects_dir
    dg_node.inputs.volumes = volumes


    mriconvert_node = MapNode(MRIConvert(out_type="niigz"), 
                              iterfield=["in_file"], 
                              name='convert')

    get_stats_node = Node(Function(input_names=["subjects_dir", "subject"],
                                   output_names = ["output_dict"],
                                   function=parse_stats), name="get_freesurfer_stats")

    write_mindcontrol_entries = Node(Function(input_names = ["mindcontrol_base_dir",
                                                             "output_dir",
                                                             "subject",
                                                             "stats"],
                                              output_names=["output_json"],
                                              function=create_mindcontrol_entries), 
                                     name="get_mindcontrol_entries")

    datasink_node = Node(DataSink(),
                         name='datasink')
    subst = [('out_file',''),('_subject_id_',''),('_out','')]  + [("_convert%d" % index, "") for index in range(len(volumes))] 
    datasink_node.inputs.substitutions = subst

    wf = Workflow(name="MindPrepFS")
    wf.base_dir = workflow_working_dir
    wf.connect(input_node,"subject_id", dg_node,"subject")
    wf.connect(input_node,"subjects_dir", dg_node, "subjects_dir")
    wf.connect(input_node, "subject_id", get_stats_node, "subject")
    wf.connect(input_node, "subjects_dir", get_stats_node, "subjects_dir")
    wf.connect(input_node, "subject_id", write_mindcontrol_entries, "subject")
    wf.connect(input_node, "mindcontrol_base_dir", write_mindcontrol_entries, "mindcontrol_base_dir")
    wf.connect(input_node, "output_dir", write_mindcontrol_entries, "output_dir")
    wf.connect(get_stats_node, "output_dict", write_mindcontrol_entries, "stats")
    wf.connect(input_node, "output_dir", datasink_node, "base_directory")
    wf.connect(dg_node,"volume_paths", mriconvert_node, "in_file")
    wf.connect(mriconvert_node,'out_file',datasink_node,'out_file')
    wf.connect(write_mindcontrol_entries, "output_json", datasink_node, "out_file.@json")
    wf.run()

    shutil.rmtree(workflow_working_dir)
    from nipype.utils.filemanip import load_json, save_json

    files = glob(os.path.join(mindcontrol_base_dir, "*", "mindcontrol_entries.json"))
    output = []
    for f in files:
        output += load_json(f)
    save_json(os.path.join(mindcontrol_base_dir, "all_entries.json"), output)
Code Example #17
# Datasink- where our select outputs will go
substitutions = [('_subject_id_', '')]  #output file name substitutions
datasink = Node(DataSink(substitutions=substitutions), name='datasink')
datasink.inputs.base_directory = output_dir
datasink.inputs.container = output_dir


## Nodes for preprocessing

# Reorient to standard space using FSL
reorientfunc = Node(Reorient2Std(), name='reorientfunc')
reorientstruct = Node(Reorient2Std(), name='reorientstruct')

# Reslice- using MRI_convert
reslice = Node(MRIConvert(vox_size=resampled_voxel_size, out_type='nii'),
               name='reslice')

# Segment structural scan
#segment = Node(Segment(affine_regularization='none'), name='segment')
segment = Node(FAST(no_bias=True, segments=True, number_classes=3),
               name='segment')

#Slice timing correction based on interleaved acquisition using FSL
slicetime_correct = Node(SliceTimer(interleaved=interleave,
                                    slice_direction=slice_dir,
                                    time_repetition=TR),
                         name='slicetime_correct')

# Motion correction
motion_correct = Node(MCFLIRT(save_plots=True, mean_vol=True),
                      name='motion_correct')
Code Example #18
def create_fs_compatible_logb_workflow(name="LOGISMOSB", plugin_args=None, config=None):
    """
    Create a workflow to run LOGISMOS-B from FreeSurfer Inputs

    :param name: name for the workflow
    :param plugin_args: plugin arguments passed on to the LOGISMOS-B node
    :param config: configuration dictionary (defaults to fs_logb_config.json)
    :return: the configured nipype Workflow
    """

    if not config:
        config = read_json_config("fs_logb_config.json")

    wf = Workflow(name)

    inputspec = Node(
        IdentityInterface(
            [
                "t1_file",
                "t2_file",
                "white",
                "aseg",
                "hemi",
                "recoding_file",
                "gm_proba",
                "wm_proba",
                "lut_file",
                "hncma_atlas",
            ]
        ),
        name="inputspec",
    )

    # convert the white mesh to a vtk file with scanner coordinates
    to_vtk = Node(MRIsConvert(), name="WhiteVTK")
    to_vtk.inputs.out_file = "white.vtk"
    to_vtk.inputs.to_scanner = True

    wf.connect(inputspec, "white", to_vtk, "in_file")

    # convert brainslabels to nifti
    aseg_to_nifti = Node(MRIConvert(), "ABCtoNIFTI")
    aseg_to_nifti.inputs.out_file = "aseg.nii.gz"
    aseg_to_nifti.inputs.out_orientation = "LPS"
    wf.connect(inputspec, "aseg", aseg_to_nifti, "in_file")

    # create brainslabels from aseg
    aseg2brains = Node(
        Function(["in_file", "recode_file", "out_file"], ["out_file"], recode_labelmap),
        name="ConvertAseg2BRAINSLabels",
    )
    aseg2brains.inputs.out_file = "brainslabels.nii.gz"

    wf.connect(
        [
            (inputspec, aseg2brains, [("recoding_file", "recode_file")]),
            (aseg_to_nifti, aseg2brains, [("out_file", "in_file")]),
        ]
    )

    t1_to_nifti = Node(MRIConvert(), "T1toNIFTI")
    t1_to_nifti.inputs.out_file = "t1.nii.gz"
    t1_to_nifti.inputs.out_orientation = "LPS"
    wf.connect(inputspec, "t1_file", t1_to_nifti, "in_file")

    def t2_convert(in_file=None, reference_file=None, out_file=None):
        """
        This function...

        :param in_file:
        :param reference_file:
        :param out_file:
        :return:
        """
        import os
        from nipype.interfaces.freesurfer import MRIConvert
        from nipype.interfaces.traits_extension import Undefined
        from nipype import Node

        if in_file:
            t2_to_nifti = Node(MRIConvert(), "T2toNIFTI")
            t2_to_nifti.inputs.in_file = in_file
            t2_to_nifti.inputs.out_file = os.path.abspath(out_file)
            t2_to_nifti.inputs.out_orientation = "LPS"
            if reference_file:
                t2_to_nifti.inputs.reslice_like = reference_file
            result = t2_to_nifti.run()
            out_file = os.path.abspath(result.outputs.out_file)
        else:
            out_file = Undefined
        return out_file

    t2_node = Node(
        Function(["in_file", "reference_file", "out_file"], ["out_file"], t2_convert),
        name="T2Convert",
    )
    t2_node.inputs.out_file = "t2.nii.gz"
    wf.connect(inputspec, "t2_file", t2_node, "in_file")
    wf.connect(t1_to_nifti, "out_file", t2_node, "reference_file")

    # convert the raw T1 to LIA orientation
    t1_to_ras = Node(MRIConvert(), "T1toRAS")
    t1_to_ras.inputs.out_orientation = "LIA"
    t1_to_ras.inputs.out_file = "t1_lia.mgz"
    wf.connect(inputspec, "t1_file", t1_to_ras, "in_file")

    # Create ones image for use when masking the white matter
    ones = Node(
        Function(["in_volume", "out_file"], ["out_file"], create_ones_image),
        name="Ones_Image",
    )
    ones.inputs.out_file = "ones.mgz"

    wf.connect(t1_to_ras, "out_file", ones, "in_volume")

    # use the ones image to obtain a white matter mask
    surfmask = Node(SurfaceMask(), name="WhiteMask")
    surfmask.inputs.out_file = "white_ras.mgz"

    wf.connect(ones, "out_file", surfmask, "in_volume")
    wf.connect(inputspec, "white", surfmask, "in_surface")

    surfmask_to_nifti = Node(MRIConvert(), "MasktoNIFTI")
    surfmask_to_nifti.inputs.out_file = "white.nii.gz"
    surfmask_to_nifti.inputs.out_orientation = "LPS"

    wf.connect(surfmask, "out_file", surfmask_to_nifti, "in_file")

    # create hemi masks

    split = Node(SplitLabels(), name="SplitLabelMask")
    split.inputs.out_file = "HemiBrainLabels.nii.gz"
    wf.connect(
        [
            (aseg2brains, split, [("out_file", "in_file")]),
            (inputspec, split, [("lut_file", "lookup_table")]),
            (aseg_to_nifti, split, [("out_file", "labels_file")]),
            (inputspec, split, [("hemi", "hemi")]),
        ]
    )

    dilate = Node(MultiLabelDilation(), "DilateLabels")
    dilate.inputs.out_file = "DilatedBrainLabels.nii.gz"
    dilate.inputs.radius = 1
    wf.connect(split, "out_file", dilate, "in_file")

    convert_label_map = Node(MRIConvert(), "ConvertLabelMapToMatchT1")
    convert_label_map.inputs.resample_type = "nearest"
    convert_label_map.inputs.out_file = "BrainLabelsFromAsegInT1Space.nii.gz"
    wf.connect(t1_to_nifti, "out_file", convert_label_map, "reslice_like")
    wf.connect(dilate, "out_file", convert_label_map, "in_file")

    logb = Node(LOGISMOSB(), name="LOGISMOS-B")
    logb.inputs.smoothnessConstraint = config["LOGISMOSB"]["smoothnessConstraint"]
    logb.inputs.nColumns = config["LOGISMOSB"]["nColumns"]
    logb.inputs.columnChoice = config["LOGISMOSB"]["columnChoice"]
    logb.inputs.columnHeight = config["LOGISMOSB"]["columnHeight"]
    logb.inputs.nodeSpacing = config["LOGISMOSB"]["nodeSpacing"]
    logb.inputs.w = config["LOGISMOSB"]["w"]
    logb.inputs.a = config["LOGISMOSB"]["a"]
    logb.inputs.nPropagate = config["LOGISMOSB"]["nPropagate"]

    if plugin_args:
        logb.plugin_args = plugin_args

    wf.connect(
        [
            (t1_to_nifti, logb, [("out_file", "t1_file")]),
            (t2_node, logb, [("out_file", "t2_file")]),
            (
                inputspec,
                logb,
                [
                    ("hemi", "basename"),
                    ("hncma_atlas", "atlas_file"),
                    ("wm_proba", "wm_proba_file"),
                    ("gm_proba", "gm_proba_file"),
                ],
            ),
            (to_vtk, logb, [("converted", "mesh_file")]),
            (surfmask_to_nifti, logb, [("out_file", "wm_file")]),
            (convert_label_map, logb, [("out_file", "brainlabels_file")]),
        ]
    )

    outputspec = Node(
        IdentityInterface(["gmsurface_file", "wmsurface_file"]), name="outputspec"
    )

    wf.connect(
        [
            (
                logb,
                outputspec,
                [
                    ("gmsurface_file", "gmsurface_file"),
                    ("wmsurface_file", "wmsurface_file"),
                ],
            )
        ]
    )

    return wf
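
# Hypothetical usage sketch, not part of the original source. It assumes this
# factory matches the create_fs_compatible_logb_workflow variant shown in a
# later example, and that fs_logb_config.json (with the 'LOGISMOSB' keys read
# above) is available on the current path. Input paths are illustrative.
def _run_logb_example():
    logb_wf = create_fs_compatible_logb_workflow(name="LOGISMOSB")
    logb_wf.inputs.inputspec.t1_file = "T1.mgz"
    logb_wf.inputs.inputspec.t2_file = "T2.mgz"
    logb_wf.inputs.inputspec.white = "lh.white"
    logb_wf.inputs.inputspec.aseg = "aseg.mgz"
    logb_wf.inputs.inputspec.hemi = "lh"
    logb_wf.base_dir = "/tmp/logb_example"
    logb_wf.run()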
コード例 #25
0
    def __init__(self,coords, img_file, subject=None, coord_type='ras', working_dir=None, **traits):
        """ General class to work with coordinates
        
        Parameters
        -----------
        
        coords: numpy array 
                x,y,z coordinates matrix (npoints x 3)
        
        img_file: str
                path to image file
        
        
        subject: str
                subject name
                
        coord_type: str, {'ras', 'voxel'}
                coordinate system
        
        working_dir: 
                the path to working directory

        
        **traits: other traits of the coordinates;
                 the traits size should be equal to the number of npoints
                 for instance, one can add "name" or "color" as extra traits for each coordinate
        
        
        Returns
        --------
        
        An object of Coords class
    
        """
        
        coords = np.atleast_2d(coords)
        self.img_file = img_file
        self.subject = subject
        self.coord_type = coord_type
        self.img = nib.load(img_file)
        self.working_dir = working_dir
        self.vox2ras = self.img.affine
        self.ras2vox = np.linalg.inv(self.vox2ras)
        self.npoints = coords.shape[0]
        self.coordinates = {}
        affineM = self._to_affine_matrix(coords)
        self._count = 0
        
        if coord_type == 'ras':
            self.coordinates['ras_coord'] = coords
            self.coordinates['voxel_coord'] = np.round(np.dot(self.ras2vox, affineM).T[:, :3])

        elif coord_type == 'voxel':
            self.coordinates['voxel_coord'] = coords
            self.coordinates['ras_coord'] = np.dot(self.vox2ras, affineM).T[:, :3]

        else:
            raise ValueError('coord_type should be either "ras" or "voxel"')
            
            
        ### to freesurfer coords

        # create a uniquely named temporary working directory
        rnum1 = np.random.randint(10**15, 10**16)
        rnum2 = np.random.randint(10**10, 10**11)
        rnum = '{rnum1}_{rnum2}'.format(rnum1=rnum1, rnum2=rnum2)
        wf_dir = os.path.join(os.path.abspath('.'), 'temp_{rnum}'.format(rnum=rnum))
        os.makedirs(wf_dir)
        
        ## creating rawavg.mgz
        mc = MRIConvert()
        mc.inputs.in_file = img_file
        mc.inputs.out_file = os.path.join(wf_dir,'rawavg.mgz')
        mc.inputs.out_type = 'mgz'
        mc.run()

        ## creating orig.mgz

        mc = MRIConvert()
        mc.inputs.in_file = os.path.join(wf_dir,'rawavg.mgz')
        mc.inputs.out_file = os.path.join(wf_dir,'orig.mgz')
        mc.inputs.out_type = 'mgz'
        mc.inputs.conform = True
        mc.run()

    
        rawavg_file = os.path.join(wf_dir,'rawavg.mgz')            
        orig_file = os.path.join(wf_dir,'orig.mgz')
        
        
        
        ### loading 
        orig_img = nib.freesurfer.load(orig_file)
        self.ras2fsvox = orig_img.header.get_ras2vox()
        self.fsvox2ras_tkr = orig_img.header.get_vox2ras_tkr()
        self.ras2ras_tkr = np.dot(self.fsvox2ras_tkr,self.ras2fsvox)
            
        ras_affineM = self._to_affine_matrix(self.coordinates['ras_coord'])
        self.coordinates['ras_tkr_coord'] = np.dot(self.fsvox2ras_tkr,np.dot(self.ras2fsvox, ras_affineM)).T[:,:3]
        self.coordinates['fsvoxel_coord'] = np.round(np.dot(self.ras2fsvox,ras_affineM).T[:,:3])
        
        shutil.rmtree(wf_dir)  
          
        ### adding traits
        self.traits_list = []
        for trait in traits:
            self.add_trait(trait, traits[trait])
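
# Hypothetical usage sketch (assumes the enclosing class is named Coords, as
# its docstring suggests, and that FreeSurfer's mri_convert is on the PATH,
# since __init__ shells out to it via nipype):
#
#     import numpy as np
#     pts = np.array([[10.0, -20.0, 30.0],
#                     [-5.0, 15.0, 0.0]])
#     c = Coords(pts, 'T1.nii.gz', subject='sub-01', coord_type='ras',
#                name=['pt1', 'pt2'])        # 'name' becomes an extra trait
#     print(c.coordinates['voxel_coord'])    # rounded voxel indices
#     print(c.coordinates['ras_tkr_coord'])  # FreeSurfer tkrRAS coordinates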
コード例 #26
0
def mri_convert(launcher, in_file, out_file):
    """Convert a volume to MGZ via the launcher, calling the binary directly."""
    from nipype.interfaces.freesurfer import MRIConvert

    mc = MRIConvert()
    mc.inputs.in_file = in_file
    mc.inputs.out_file = out_file
    mc.inputs.out_type = 'mgz'
    # run the underlying binary instead of FreeSurfer's mri_convert wrapper script
    launcher.run(mc.cmdline.replace("mri_convert", "mri_convert.bin"))
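
# Minimal sketch of a launcher object compatible with the function above (an
# assumption; the project's real launcher likely adds scheduling or container
# handling). mc.cmdline renders the full command as a single string.
import subprocess

class LocalLauncher:
    def run(self, cmdline):
        # execute the rendered mri_convert.bin command line
        subprocess.check_call(cmdline, shell=True)

# e.g. mri_convert(LocalLauncher(), 'orig.mgz', 'out.mgz')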
コード例 #27
0
    def build_core_nodes(self):
        """Build and connect the core nodes of the pipeline."""

        import fmri_preprocessing_workflows as utils
        import nipype.interfaces.utility as nutil
        import nipype.interfaces.spm as spm
        import nipype.pipeline.engine as npe
        from clinica.utils.filemanip import zip_nii, unzip_nii

        # Zipping
        # =======
        unzip_node = npe.MapNode(name='Unzipping',
                                 iterfield=['in_file'],
                                 interface=nutil.Function(
                                     input_names=['in_file'],
                                     output_names=['out_file'],
                                     function=unzip_nii))

        unzip_T1w = unzip_node.clone('UnzippingT1w')
        unzip_phasediff = unzip_node.clone('UnzippingPhasediff')
        unzip_bold = unzip_node.clone('UnzippingBold')
        unzip_magnitude1 = unzip_node.clone('UnzippingMagnitude1')

        # FieldMap calculation
        # ====================
        if self.parameters['unwarping']:
            fm_node = npe.MapNode(name="FieldMapCalculation",
                                  iterfield=[
                                      'phase', 'magnitude', 'epi', 'et',
                                      'blipdir', 'tert'
                                  ],
                                  interface=spm.FieldMap())

        # Slice timing correction
        # =======================
        st_node = npe.MapNode(name="SliceTimingCorrection",
                              iterfield=[
                                  'in_files', 'time_repetition', 'slice_order',
                                  'num_slices', 'ref_slice', 'time_acquisition'
                              ],
                              interface=spm.SliceTiming())

        # Motion correction and unwarping
        # ===============================

        if self.parameters['unwarping']:
            mc_node = npe.MapNode(name="MotionCorrectionUnwarping",
                                  iterfield=["scans", "pmscan"],
                                  interface=spm.RealignUnwarp())
            mc_node.inputs.register_to_mean = True
            mc_node.inputs.reslice_mask = False
        else:
            mc_node = npe.MapNode(name="MotionCorrection",
                                  iterfield=["in_files"],
                                  interface=spm.Realign())
            mc_node.inputs.register_to_mean = True

        # Brain extraction
        # ================
        import os.path as path
        from nipype.interfaces.freesurfer import MRIConvert
        if self.parameters['freesurfer_brain_mask']:
            brain_masks = [
                path.join(self.caps_directory, 'subjects', self.subjects[i],
                          self.sessions[i], 't1/freesurfer_cross_sectional',
                          self.subjects[i] + '_' + self.sessions[i],
                          'mri/brain.mgz') for i in range(len(self.subjects))
            ]
            conv_brain_masks = [
                str(self.subjects[i] + '_' + self.sessions[i] + '.nii')
                for i in range(len(self.subjects))
            ]
            bet_node = npe.MapNode(interface=MRIConvert(),
                                   iterfield=["in_file", "out_file"],
                                   name="BrainConversion")
            bet_node.inputs.in_file = brain_masks
            bet_node.inputs.out_file = conv_brain_masks
            bet_node.inputs.out_type = 'nii'
        else:
            bet_node = utils.BrainExtractionWorkflow(name="BrainExtraction")

        # Registration
        # ============
        reg_node = npe.MapNode(
            interface=spm.Coregister(),
            iterfield=["apply_to_files", "source", "target"],
            name="Registration")

        # Normalization
        # =============
        norm_node = npe.MapNode(interface=spm.Normalize12(),
                                iterfield=['image_to_align', 'apply_to_files'],
                                name='Normalization')

        # Smoothing
        # =========
        smooth_node = npe.MapNode(interface=spm.Smooth(),
                                  iterfield=['in_files'],
                                  name='Smoothing')
        smooth_node.inputs.fwhm = self.parameters['full_width_at_half_maximum']

        # Zipping
        # =======
        zip_node = npe.MapNode(name='Zipping',
                               iterfield=['in_file'],
                               interface=nutil.Function(
                                   input_names=['in_file'],
                                   output_names=['out_file'],
                                   function=zip_nii))

        zip_bet_node = zip_node.clone('ZippingBET')
        zip_mc_node = zip_node.clone('ZippingMC')
        zip_reg_node = zip_node.clone('ZippingRegistration')
        zip_norm_node = zip_node.clone('ZippingNormalization')
        zip_smooth_node = zip_node.clone('ZippingSmoothing')

        # Connections
        # ===========

        if self.parameters['freesurfer_brain_mask']:
            self.connect([
                # Brain extraction
                (bet_node, reg_node, [('out_file', 'target')]),
                (bet_node, zip_bet_node, [('out_file', 'in_file')]),
            ])
        else:
            self.connect([
                # Brain extraction
                (unzip_T1w, bet_node, [('out_file', 'Segmentation.data')]),
                (unzip_T1w, bet_node, [('out_file', 'ApplyMask.in_file')]),
                (bet_node, reg_node, [('ApplyMask.out_file', 'target')]),
                (bet_node, zip_bet_node, [('Fill.out_file', 'in_file')]),
            ])

        if self.parameters['unwarping']:
            self.connect([
                # FieldMap calculation
                (self.input_node, fm_node, [('et', 'et')]),
                (self.input_node, fm_node, [('blipdir', 'blipdir')]),
                (self.input_node, fm_node, [('tert', 'tert')]),
                (self.input_node, unzip_phasediff, [('phasediff', 'in_file')]),
                (self.input_node, unzip_magnitude1, [('magnitude1', 'in_file')
                                                     ]),
                (unzip_magnitude1, fm_node, [('out_file', 'magnitude')]),
                (unzip_phasediff, fm_node, [('out_file', 'phase')]),
                (unzip_bold, fm_node, [('out_file', 'epi')]),
                # Motion correction and unwarping
                (st_node, mc_node, [('timecorrected_files', 'scans')]),
                (fm_node, mc_node, [('vdm', 'pmscan')]),
                (mc_node, reg_node, [('realigned_unwarped_files',
                                      'apply_to_files')]),
                (mc_node, zip_mc_node, [('realigned_unwarped_files', 'in_file')
                                        ]),
            ])
        else:
            self.connect([
                # Motion correction and unwarping
                (st_node, mc_node, [('timecorrected_files', 'in_files')]),
                (mc_node, reg_node, [('realigned_files', 'apply_to_files')]),
                (mc_node, zip_mc_node, [('realigned_files', 'in_file')]),
            ])
        self.connect([
            # Unzipping
            (self.input_node, unzip_T1w, [('T1w', 'in_file')]),
            (self.input_node, unzip_bold, [('bold', 'in_file')]),
            # Slice timing correction
            (unzip_bold, st_node, [('out_file', 'in_files')]),
            (self.input_node, st_node, [('time_repetition', 'time_repetition')
                                        ]),
            (self.input_node, st_node, [('num_slices', 'num_slices')]),
            (self.input_node, st_node, [('slice_order', 'slice_order')]),
            (self.input_node, st_node, [('ref_slice', 'ref_slice')]),
            (self.input_node, st_node, [('time_acquisition',
                                         'time_acquisition')]),
            # Registration
            (mc_node, reg_node, [('mean_image', 'source')]),
            # Normalization
            (unzip_T1w, norm_node, [('out_file', 'image_to_align')]),
            (reg_node, norm_node, [('coregistered_files', 'apply_to_files')]),
            # Smoothing
            (norm_node, smooth_node, [('normalized_files', 'in_files')]),
            # Zipping
            (reg_node, zip_reg_node, [('coregistered_files', 'in_file')]),
            (norm_node, zip_norm_node, [('normalized_files', 'in_file')]),
            (smooth_node, zip_smooth_node, [('smoothed_files', 'in_file')]),
            # Returning output
            (zip_bet_node, self.output_node, [('out_file', 't1_brain_mask')]),
            (mc_node, self.output_node, [('realignment_parameters',
                                          'mc_params')]),
            (zip_mc_node, self.output_node, [('out_file', 'native_fmri')]),
            (zip_reg_node, self.output_node, [('out_file', 't1_fmri')]),
            (zip_norm_node, self.output_node, [('out_file', 'mni_fmri')]),
            (zip_smooth_node, self.output_node, [('out_file',
                                                  'mni_smoothed_fmri')]),
        ])
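
# The method above reads several entries from self.parameters. A plausible
# parameters dict, inferred from the keys used in build_core_nodes (the values
# are illustrative assumptions, not Clinica defaults):
#
#     self.parameters = {
#         'unwarping': True,                       # FieldMap + RealignUnwarp branch
#         'freesurfer_brain_mask': False,          # brain.mgz conversion vs. BET workflow
#         'full_width_at_half_maximum': [8, 8, 8]  # smoothing kernel in mm
#     }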
コード例 #28
0
def create_fs_compatible_logb_workflow(name="LOGISMOSB",
                                       plugin_args=None,
                                       config=None):
    """Create a workflow to run LOGISMOS-B from FreeSurfer Inputs"""

    if not config:
        config = read_json_config("fs_logb_config.json")

    wf = Workflow(name)

    inputspec = Node(IdentityInterface([
        't1_file', 't2_file', 'white', 'aseg', 'hemi', 'recoding_file',
        'gm_proba', 'wm_proba', 'lut_file', 'hncma_atlas'
    ]),
                     name="inputspec")

    # convert the white mesh to a vtk file with scanner coordinates
    to_vtk = Node(MRIsConvert(), name="WhiteVTK")
    to_vtk.inputs.out_file = "white.vtk"
    to_vtk.inputs.to_scanner = True

    wf.connect(inputspec, 'white', to_vtk, 'in_file')

    # convert brainslabels to nifti
    aseg_to_nifti = Node(MRIConvert(), "ABCtoNIFTI")
    aseg_to_nifti.inputs.out_file = "aseg.nii.gz"
    aseg_to_nifti.inputs.out_orientation = "LPS"
    wf.connect(inputspec, 'aseg', aseg_to_nifti, 'in_file')

    # create brainslabels from aseg
    aseg2brains = Node(Function(['in_file', 'recode_file', 'out_file'],
                                ['out_file'], recode_labelmap),
                       name="ConvertAseg2BRAINSLabels")
    aseg2brains.inputs.out_file = "brainslabels.nii.gz"

    wf.connect([(inputspec, aseg2brains, [('recoding_file', 'recode_file')]),
                (aseg_to_nifti, aseg2brains, [('out_file', 'in_file')])])

    t1_to_nifti = Node(MRIConvert(), "T1toNIFTI")
    t1_to_nifti.inputs.out_file = "t1.nii.gz"
    t1_to_nifti.inputs.out_orientation = "LPS"
    wf.connect(inputspec, 't1_file', t1_to_nifti, 'in_file')

    def t2_convert(in_file=None, reference_file=None, out_file=None):
        import os
        from nipype.interfaces.freesurfer import MRIConvert
        from nipype.interfaces.traits_extension import Undefined
        from nipype import Node
        if in_file:
            t2_to_nifti = Node(MRIConvert(), "T2toNIFTI")
            t2_to_nifti.inputs.in_file = in_file
            t2_to_nifti.inputs.out_file = os.path.abspath(out_file)
            t2_to_nifti.inputs.out_orientation = "LPS"
            if reference_file:
                t2_to_nifti.inputs.reslice_like = reference_file
            result = t2_to_nifti.run()
            out_file = os.path.abspath(result.outputs.out_file)
        else:
            out_file = Undefined
        return out_file

    t2_node = Node(Function(['in_file', 'reference_file', 'out_file'],
                            ['out_file'], t2_convert),
                   name="T2Convert")
    t2_node.inputs.out_file = "t2.nii.gz"
    wf.connect(inputspec, 't2_file', t2_node, 'in_file')
    wf.connect(t1_to_nifti, 'out_file', t2_node, 'reference_file')

    # convert raw t1 to lia
    t1_to_ras = Node(MRIConvert(), "T1toRAS")
    t1_to_ras.inputs.out_orientation = "LIA"
    t1_to_ras.inputs.out_file = "t1_lia.mgz"
    wf.connect(inputspec, 't1_file', t1_to_ras, 'in_file')

    # Create ones image for use when masking the white matter
    ones = Node(Function(['in_volume', 'out_file'], ['out_file'],
                         create_ones_image),
                name="Ones_Image")
    ones.inputs.out_file = "ones.mgz"

    wf.connect(t1_to_ras, 'out_file', ones, 'in_volume')

    # use the ones image to obtain a white matter mask
    surfmask = Node(SurfaceMask(), name="WhiteMask")
    surfmask.inputs.out_file = "white_ras.mgz"

    wf.connect(ones, 'out_file', surfmask, 'in_volume')
    wf.connect(inputspec, 'white', surfmask, 'in_surface')

    surfmask_to_nifti = Node(MRIConvert(), "MasktoNIFTI")
    surfmask_to_nifti.inputs.out_file = "white.nii.gz"
    surfmask_to_nifti.inputs.out_orientation = "LPS"

    wf.connect(surfmask, 'out_file', surfmask_to_nifti, 'in_file')

    # create hemi masks

    split = Node(SplitLabels(), name="SplitLabelMask")
    split.inputs.out_file = "HemiBrainLabels.nii.gz"
    wf.connect([(aseg2brains, split, [('out_file', 'in_file')]),
                (inputspec, split, [('lut_file', 'lookup_table')]),
                (aseg_to_nifti, split, [('out_file', 'labels_file')]),
                (inputspec, split, [('hemi', 'hemi')])])

    dilate = Node(MultiLabelDilation(), "DilateLabels")
    dilate.inputs.out_file = "DilatedBrainLabels.nii.gz"
    dilate.inputs.radius = 1
    wf.connect(split, 'out_file', dilate, 'in_file')

    convert_label_map = Node(MRIConvert(), "ConvertLabelMapToMatchT1")
    convert_label_map.inputs.resample_type = "nearest"
    convert_label_map.inputs.out_file = "BrainLabelsFromAsegInT1Space.nii.gz"
    wf.connect(t1_to_nifti, 'out_file', convert_label_map, 'reslice_like')
    wf.connect(dilate, 'out_file', convert_label_map, 'in_file')

    logb = Node(LOGISMOSB(), name="LOGISMOS-B")
    logb.inputs.smoothnessConstraint = config['LOGISMOSB'][
        'smoothnessConstraint']
    logb.inputs.nColumns = config['LOGISMOSB']['nColumns']
    logb.inputs.columnChoice = config['LOGISMOSB']['columnChoice']
    logb.inputs.columnHeight = config['LOGISMOSB']['columnHeight']
    logb.inputs.nodeSpacing = config['LOGISMOSB']['nodeSpacing']
    logb.inputs.w = config['LOGISMOSB']['w']
    logb.inputs.a = config['LOGISMOSB']['a']
    logb.inputs.nPropagate = config['LOGISMOSB']['nPropagate']

    if plugin_args:
        logb.plugin_args = plugin_args

    wf.connect([(t1_to_nifti, logb, [('out_file', 't1_file')]),
                (t2_node, logb, [('out_file', 't2_file')]),
                (inputspec, logb, [('hemi', 'basename'),
                                   ('hncma_atlas', 'atlas_file'),
                                   ('wm_proba', 'wm_proba_file'),
                                   ('gm_proba', 'gm_proba_file')]),
                (to_vtk, logb, [('converted', 'mesh_file')]),
                (surfmask_to_nifti, logb, [('out_file', 'wm_file')]),
                (convert_label_map, logb, [('out_file', 'brainlabels_file')])])

    outputspec = Node(IdentityInterface(['gmsurface_file', 'wmsurface_file']),
                      name="outputspec")

    wf.connect([(logb, outputspec, [('gmsurface_file', 'gmsurface_file'),
                                    ('wmsurface_file', 'wmsurface_file')])])

    return wf
コード例 #29
0
    # -c flag is control for local computing (2 = use localhost; required for -j flag)
    # -j flag is for number of processors allowed
    # subprocess.call does not expand shell wildcards, so glob the inputs explicitly
    from glob import glob
    call(['antsMultivariateTemplateConstruction2.sh', '-d', '3', '-o', output_prefix,
          '-r', '1', '-c', '2', '-j', str(num_proc)] + sorted(glob('*.nii.gz')))

    sample_template = abspath(output_prefix + 'template0.nii.gz')

    return sample_template

# In[4]:


######### Template creation nodes #########

#convert freesurfer brainmask files to .nii
convertT1 = MapNode(MRIConvert(out_file='T1.nii.gz',
                               out_type='niigz', 
                    out_orientation='RAS'), 
                    name='convertT1', 
                    iterfield = ['in_file'])

#reorient files to standard space
reorientT1 = MapNode(Reorient2Std(out_file = 'brain_reorient.nii.gz'),
                     name = 'reorientT1',
                     iterfield = ['in_file'])

#pass files into template function (normalized, pre-skull-stripping)
makeTemplate = Node(Function(input_names=['subject_T1s','num_proc','output_prefix'],
                             output_names=['sample_template'],
                             function=make3DTemplate),
                    name='makeTemplate')
makeTemplate.inputs.num_proc = 16  # feel free to change to suit what's free on SNI-VCS
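
# Hedged wiring sketch, not in the original fragment: the three nodes above
# suggest a convert -> reorient -> template chain. The workflow name, base_dir
# and the final in_file assignment are assumptions.
from nipype import Workflow

template_wf = Workflow(name='template_wf', base_dir='/tmp/template_work')
template_wf.connect([
    (convertT1, reorientT1, [('out_file', 'in_file')]),
    (reorientT1, makeTemplate, [('out_file', 'subject_T1s')]),
])
# convertT1.inputs.in_file would be set to the list of FreeSurfer brainmask
# files, and makeTemplate.inputs.output_prefix to the desired template prefix.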
コード例 #30
0
ファイル: anatomical.py プロジェクト: suyashdb/mriqc
def anat_qc_workflow(name='MRIQC_Anat', settings=None):
    """
    One-subject-one-session-one-run pipeline to extract the NR-IQMs from
    anatomical images
    """
    if settings is None:
        settings = {}

    workflow = pe.Workflow(name=name)
    deriv_dir = op.abspath(op.join(settings['output_dir'], 'derivatives'))

    if not op.exists(deriv_dir):
        os.makedirs(deriv_dir)
    # Define workflow, inputs and outputs
    inputnode = pe.Node(niu.IdentityInterface(
        fields=['bids_dir', 'subject_id', 'session_id', 'run_id']),
                        name='inputnode')
    outputnode = pe.Node(niu.IdentityInterface(fields=['out_json']),
                         name='outputnode')

    # 0. Get data
    datasource = pe.Node(niu.Function(input_names=[
        'bids_dir', 'data_type', 'subject_id', 'session_id', 'run_id'
    ],
                                      output_names=['anatomical_scan'],
                                      function=bids_getfile),
                         name='datasource')
    datasource.inputs.data_type = 'anat'

    meta = pe.Node(ReadSidecarJSON(), name='metadata')

    # 1a. Reorient anatomical image
    arw = pe.Node(MRIConvert(out_type='niigz', out_orientation='LAS'),
                  name='Reorient')
    # 1b. Estimate bias
    n4itk = pe.Node(ants.N4BiasFieldCorrection(dimension=3, save_bias=True),
                    name='Bias')
    # 2. Skull-stripping (afni)
    asw = skullstrip_wf()
    mask = pe.Node(fsl.ApplyMask(), name='MaskAnatomical')
    # 3. Head mask (including nasion-cerebellum mask)
    hmsk = headmsk_wf()
    # 4. Air mask (with and without artifacts)
    amw = airmsk_wf(settings=settings)

    # Brain tissue segmentation
    segment = pe.Node(fsl.FAST(img_type=1,
                               segments=True,
                               out_basename='segment'),
                      name='segmentation')

    # AFNI check smoothing
    fwhm = pe.Node(afp.FWHMx(combine=True, detrend=True), name='smoothness')
    # fwhm.inputs.acf = True  # add when AFNI >= 16

    # Compute python-coded measures
    measures = pe.Node(StructuralQC(testing=settings.get('testing', False)),
                       'measures')

    # Link images that should be reported
    dsreport = pe.Node(nio.DataSink(base_directory=settings['report_dir'],
                                    parameterization=True),
                       name='dsreport')
    dsreport.inputs.container = 'anat'
    dsreport.inputs.substitutions = [('_data', ''),
                                     ('background_fit', 'plot_bgfit')]
    dsreport.inputs.regexp_substitutions = [
        ('_u?(sub-[\\w\\d]*)\\.([\\w\\d_]*)(?:\\.([\\w\\d_-]*))+',
         '\\1_ses-\\2_\\3'),
        ('anatomical_bgplotsub-[^/.]*_dvars_std', 'plot_dvars'),
        ('sub-[^/.]*_T1w_out_calc_thresh', 'mask'),
        ('sub-[^/.]*_T1w_out\\.', 'mosaic_t1w.')
    ]

    # Connect all nodes
    workflow.connect([
        (inputnode, datasource, [('bids_dir', 'bids_dir'),
                                 ('subject_id', 'subject_id'),
                                 ('session_id', 'session_id'),
                                 ('run_id', 'run_id')]),
        (datasource, arw, [('anatomical_scan', 'in_file')]),
        (datasource, meta, [('anatomical_scan', 'in_file')]),
        (arw, asw, [('out_file', 'inputnode.in_file')]),
        (arw, n4itk, [('out_file', 'input_image')]),
        # (asw, n4itk, [('outputnode.out_mask', 'mask_image')]),
        (n4itk, mask, [('output_image', 'in_file')]),
        (asw, mask, [('outputnode.out_mask', 'mask_file')]),
        (mask, segment, [('out_file', 'in_files')]),
        (n4itk, hmsk, [('output_image', 'inputnode.in_file')]),
        (segment, hmsk, [('tissue_class_map', 'inputnode.in_segm')]),
        (n4itk, measures, [('output_image', 'in_noinu')]),
        (arw, measures, [('out_file', 'in_file')]),
        (arw, fwhm, [('out_file', 'in_file')]),
        (asw, fwhm, [('outputnode.out_mask', 'mask')]),
        (arw, amw, [('out_file', 'inputnode.in_file')]),
        (n4itk, amw, [('output_image', 'inputnode.in_noinu')]),
        (asw, amw, [('outputnode.out_mask', 'inputnode.in_mask')]),
        (hmsk, amw, [('outputnode.out_file', 'inputnode.head_mask')]),
        (amw, measures, [('outputnode.out_file', 'air_msk')]),
        (amw, measures, [('outputnode.artifact_msk', 'artifact_msk')]),
        (segment, measures, [('tissue_class_map', 'in_segm'),
                             ('partial_volume_files', 'in_pvms')]),
        (n4itk, measures, [('bias_image', 'in_bias')]),
        (measures, dsreport, [('out_noisefit', '@anat_noiseplot')]),
        (arw, dsreport, [('out_file', '@anat_t1w')]),
        (asw, dsreport, [('outputnode.out_mask', '@anat_t1_mask')])
    ])

    # Format name
    out_name = pe.Node(niu.Function(
        input_names=['subid', 'sesid', 'runid', 'prefix', 'out_path'],
        output_names=['out_file'],
        function=bids_path),
                       name='FormatName')
    out_name.inputs.out_path = deriv_dir
    out_name.inputs.prefix = 'anat'

    # Save to JSON file
    jfs_if = nio.JSONFileSink()
    setattr(jfs_if, '_always_run', settings.get('force_run', False))
    datasink = pe.Node(jfs_if, name='datasink')
    datasink.inputs.qc_type = 'anat'

    workflow.connect([(inputnode, out_name, [('subject_id', 'subid'),
                                             ('session_id', 'sesid'),
                                             ('run_id', 'runid')]),
                      (inputnode, datasink, [('subject_id', 'subject_id'),
                                             ('session_id', 'session_id'),
                                             ('run_id', 'run_id')]),
                      (fwhm, datasink, [(('fwhm', fwhm_dict), 'fwhm')]),
                      (measures, datasink, [('summary', 'summary'),
                                            ('spacing', 'spacing'),
                                            ('size', 'size'), ('icvs', 'icvs'),
                                            ('rpve', 'rpve'), ('inu', 'inu'),
                                            ('snr', 'snr'), ('cnr', 'cnr'),
                                            ('fber', 'fber'), ('efc', 'efc'),
                                            ('qi1', 'qi1'), ('qi2', 'qi2'),
                                            ('cjv', 'cjv'),
                                            ('wm2max', 'wm2max')]),
                      (out_name, datasink, [('out_file', 'out_file')]),
                      (meta, datasink, [('out_dict', 'metadata')]),
                      (datasink, outputnode, [('out_file', 'out_file')])])
    return workflow
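
# Hypothetical invocation sketch (paths are assumptions): settings must carry
# 'output_dir' and 'report_dir', and the BIDS identifiers are set on inputnode.
def _run_anat_qc_example():
    wf = anat_qc_workflow(settings={'output_dir': '/tmp/mriqc_out',
                                    'report_dir': '/tmp/mriqc_reports'})
    wf.inputs.inputnode.bids_dir = '/data/bids'
    wf.inputs.inputnode.subject_id = 'sub-01'
    wf.inputs.inputnode.session_id = 'ses-01'
    wf.inputs.inputnode.run_id = 'run-01'
    wf.run()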
コード例 #31
0
                            sampling_strategy=['Regular', 'Regular', 'None'],
                            shrink_factors=[[8, 4, 2, 1]] * 3,
                            smoothing_sigmas=[[3, 2, 1, 0]] * 3,
                            transform_parameters=[(0.1, ), (0.1, ),
                                                  (0.1, 3.0, 0.0)],
                            use_histogram_matching=True,
                            write_composite_transform=True),
               name='antsreg')

# FreeSurferSource - Data grabber specific for FreeSurfer data
fssource = Node(FreeSurferSource(subjects_dir=fs_dir),
                run_without_submitting=True,
                name='fssource')

# Convert FreeSurfer's MGZ format into NIfTI format
convert2nii = Node(MRIConvert(out_type='nii'), name='convert2nii')

# Coregister the median to the surface
bbregister = Node(BBRegister(init='fsl', contrast_type='t2',
                             out_fsl_file=True),
                  name='bbregister')

# Convert the BBRegister transformation to ANTS ITK format
convert2itk = Node(C3dAffineTool(fsl2ras=True, itk_transform=True),
                   name='convert2itk')

# Concatenate BBRegister's and ANTS' transforms into a list
merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')

# Transform the contrast images. First to anatomical and then to the target
warpall = MapNode(ApplyTransforms(args='--float',
コード例 #32
0
subject_list = ['233','234','235','236','237','238','239','240','242','243','244','245','246','248','249','250','251','253','254','256','257','259']
#subject_list = ['259']
###############################
#specify nodes
###############################
smooth = Node(SUSAN(fwhm=8.0,
                    output_type='NIFTI_GZ',
                    brightness_threshold=20),
              name="smooth")

#freesurfer recon-all segmentation
recon_all = Node(ReconAll(subject_id='subject_id',  # placeholder, presumably overridden per subject
                          directive='all'),
                 name="recon_all")

mr_convertT1 = Node(MRIConvert(out_type='niigz'),
                    name="mr_convertT1")
mr_convertaseg = Node(MRIConvert(out_type='niigz'),
                      name="mr_convertaseg")
mr_convertaparc_aseg = MapNode(MRIConvert(out_type='niigz'),
                               name="mr_convertaparc_aseg", iterfield=['in_file'])
mr_convertbrainmask = Node(MRIConvert(out_type='niigz'),
                           name="mr_convertbrainmask")
mr_convertbrain = Node(MRIConvert(out_type='niigz'),
                       name="mr_convertbrain")
mr_convertwmparc = Node(MRIConvert(out_type='niigz'),
                        name="mr_convertwmparc")
mr_convertwm = Node(MRIConvert(out_type='niigz'),
                    name="mr_convertwm")
###############################
#specify input output
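
# Hedged sketch (an assumption; the input/output wiring is not shown above) of
# how the converters are typically fed from recon-all outputs:
#
#     wf = Workflow(name='fs_convert')
#     wf.connect([
#         (recon_all, mr_convertT1, [('T1', 'in_file')]),
#         (recon_all, mr_convertaseg, [('aseg', 'in_file')]),
#         (recon_all, mr_convertaparc_aseg, [('aparc_aseg', 'in_file')]),
#         (recon_all, mr_convertbrainmask, [('brainmask', 'in_file')]),
#         (recon_all, mr_convertbrain, [('brain', 'in_file')]),
#         (recon_all, mr_convertwmparc, [('wmparc', 'in_file')]),
#         (recon_all, mr_convertwm, [('wm', 'in_file')]),
#     ])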
コード例 #33
0
def mk_w_angio(freesurfer_dir, angiogram, out_dir):

    n_input = Node(IdentityInterface(fields=[
        'fs_dir',
        'fs_subj',
        'angiogram',
        'out_dir',
    ]),
                   name='input')

    n_input.inputs.fs_dir = str(freesurfer_dir.parent)
    n_input.inputs.fs_subj = freesurfer_dir.name
    n_input.inputs.angiogram = str(angiogram)
    n_input.inputs.out_dir = str(out_dir)

    n_coreg = Node(Registration(), name='antsReg')
    n_coreg.inputs.num_threads = 40
    n_coreg.inputs.use_histogram_matching = False
    n_coreg.inputs.dimension = 3
    n_coreg.inputs.winsorize_lower_quantile = 0.001
    n_coreg.inputs.winsorize_upper_quantile = 0.999
    n_coreg.inputs.float = True
    n_coreg.inputs.interpolation = 'Linear'
    n_coreg.inputs.transforms = ['Rigid']
    n_coreg.inputs.transform_parameters = [[0.1]]
    n_coreg.inputs.metric = ['MI']
    n_coreg.inputs.metric_weight = [1]
    n_coreg.inputs.radius_or_number_of_bins = [32]
    n_coreg.inputs.sampling_strategy = ['Regular']
    n_coreg.inputs.sampling_percentage = [0.5]
    n_coreg.inputs.sigma_units = ['mm']
    n_coreg.inputs.convergence_threshold = [1e-6]
    n_coreg.inputs.smoothing_sigmas = [[1, 0]]
    n_coreg.inputs.shrink_factors = [[1, 1]]
    n_coreg.inputs.convergence_window_size = [10]
    n_coreg.inputs.number_of_iterations = [[250, 100]]
    n_coreg.inputs.output_warped_image = True
    n_coreg.inputs.output_inverse_warped_image = True
    n_coreg.inputs.output_transform_prefix = 'angio_to_struct'

    n_apply = Node(ApplyTransforms(), name='ants_apply')
    n_apply.inputs.dimension = 3
    n_apply.inputs.interpolation = 'Linear'
    n_apply.inputs.default_value = 0

    n_convert = Node(MRIConvert(), 'convert')
    n_convert.inputs.out_type = 'niigz'

    n_binarize = Node(Threshold(), 'make_mask')
    n_binarize.inputs.thresh = .1
    n_binarize.inputs.args = '-bin'

    n_mask = Node(BinaryMaths(), 'mask')
    n_mask.inputs.operation = 'mul'

    n_veins = Node(Rename(), 'rename_veins')
    n_veins.inputs.format_string = 'angiogram.nii.gz'

    n_sink = Node(DataSink(), 'sink')
    # this default is overridden by the 'out_dir' connection from n_input below
    n_sink.inputs.base_directory = '/Fridge/users/giovanni/projects/intraop/loenen/angiogram'
    n_sink.inputs.remove_dest_dir = True

    fs = Node(FreeSurferSource(), 'freesurfer')

    n_split = Node(Split(), 'split_pca')
    n_split.inputs.dimension = 't'

    w = Workflow('tmp_angiogram')
    w.base_dir = str(out_dir)

    w.connect(n_input, 'fs_dir', fs, 'subjects_dir')
    w.connect(n_input, 'fs_subj', fs, 'subject_id')
    w.connect(n_input, 'angiogram', n_split, 'in_file')
    w.connect(n_split, ('out_files', select_file, 0), n_coreg, 'moving_image')
    w.connect(fs, 'T1', n_coreg, 'fixed_image')

    w.connect(n_coreg, 'forward_transforms', n_apply, 'transforms')
    w.connect(n_split, ('out_files', select_file, 1), n_apply, 'input_image')
    w.connect(fs, 'T1', n_apply, 'reference_image')
    w.connect(fs, 'brain', n_convert, 'in_file')
    w.connect(n_convert, 'out_file', n_binarize, 'in_file')
    w.connect(n_apply, 'output_image', n_mask, 'in_file')
    w.connect(n_binarize, 'out_file', n_mask, 'operand_file')
    w.connect(n_mask, 'out_file', n_veins, 'in_file')
    w.connect(n_input, 'out_dir', n_sink, 'base_directory')
    w.connect(n_veins, 'out_file', n_sink, '@angiogram')
    w.connect(n_convert, 'out_file', n_sink, '@brain')

    return w
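
# The workflow above relies on a select_file helper that is not shown in this
# fragment. A minimal stand-in consistent with its use in the connect tuples
# (an assumption), plus a hypothetical invocation:
def select_file(out_files, index):
    """Pick one volume from the list produced by fsl.Split."""
    return out_files[index]

# from pathlib import Path
# wf = mk_w_angio(Path('/data/freesurfer/sub-01'), Path('/data/angio.nii.gz'),
#                 Path('/tmp/angio_out'))
# wf.run()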
コード例 #34
0
                    name="level1design")

# EstimateModel - estimate the parameters of the model
level1estimate = Node(EstimateModel(estimation_method={'Classical': 1}),
                      name="level1estimate")

# EstimateContrast - estimates contrasts
conestimate = Node(EstimateContrast(), name="conestimate")

# Volume Transformation - transform contrasts into anatomical space
applyVolReg = MapNode(ApplyVolTransform(fs_target=True),
                      name='applyVolReg',
                      iterfield=['source_file'])

# MRIConvert - to gzip output files
mriconvert = MapNode(MRIConvert(out_type='niigz'),
                     name='mriconvert',
                     iterfield=['in_file'])

# Initiation of the 1st-level analysis workflow
l1analysis = Workflow(name='l1analysis')

# Connect up the 1st-level analysis components
l1analysis.connect([
    (modelspec, level1design, [('session_info', 'session_info')]),
    (level1design, level1estimate, [('spm_mat_file', 'spm_mat_file')]),
    (level1estimate, conestimate, [('spm_mat_file', 'spm_mat_file'),
                                   ('beta_images', 'beta_images'),
                                   ('residual_image', 'residual_image')]),
    (conestimate, applyVolReg, [('con_images', 'source_file')]),
    (applyVolReg, mriconvert, [('transformed_file', 'in_file')]),
コード例 #35
0
def create_main_workflow_FS_segmentation():

    # check environment variables
    if not os.environ.get('FREESURFER_HOME'):
        raise RuntimeError('FREESURFER_HOME environment variable not set')

    if not os.environ.get('MNE_ROOT'):
        raise RuntimeError('MNE_ROOT environment variable not set')

    if not os.environ.get('SUBJECTS_DIR'):
        os.environ["SUBJECTS_DIR"] = sbj_dir

        if not op.exists(sbj_dir):
            os.mkdir(sbj_dir)

    print('SUBJECTS_DIR %s ' % os.environ["SUBJECTS_DIR"])

    # (1) iterate over subjects to define paths with templates -> Infosource
    #     and DataGrabber
    #     Node: SubjectData - we use IdentityInterface to create our own node,
    #     to specify the list of subjects the pipeline should be executed on
    infosource = pe.Node(interface=IdentityInterface(fields=['subject_id']),
                         name="infosource")
    infosource.iterables = ('subject_id', subjects_list)

    # Grab data
    #   the template can be filled by other inputs
    #   Here we define an input field for datagrabber called subject_id.
    #   This is then used to set the template (see %s in the template).

    # we can look for DICOM files or .nii ones
    if is_nii:
        datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                       outfields=['struct']),
                             name='datasource')
        datasource.inputs.template = '%s/*/anat/%s*.nii.gz'  
        datasource.inputs.template_args = dict(struct=[['subject_id',
                                                        'subject_id']])
    else:
        datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                       outfields=['dcm_file']),
                             name='datasource')
        datasource.inputs.template = '%s*/*.dcm'
        datasource.inputs.template_args = dict(dcm_file=[['subject_id']])

    datasource.inputs.base_directory = MRI_path  # dir where the MRI files are
    datasource.inputs.sort_filelist = True

    # get the path of the first dicom file
    def get_first_file(dcm_files):
        return dcm_files[0]

    # return the path of the struct filename in the MRI sbj dir that will be
    # the  input of MRI convert routine
    def get_MRI_sbj_dir(dcm_file):
        from nipype.utils.filemanip import split_filename as split_f
        import os.path as op

        MRI_sbj_dir, basename, ext = split_f(dcm_file)
        struct_filename = op.join(MRI_sbj_dir, 'struct.nii.gz')
        return struct_filename

    get_firstfile = pe.Node(interface=Function(input_names=['dcm_files'],
                                               output_names=['dcm_file'],
                            function=get_first_file), name='get_firstfile')

    get_MRI_sbjdir = pe.Node(interface=Function(input_names=['dcm_file'],
                                                output_names=['struct_filename'],
                             function=get_MRI_sbj_dir), name='get_MRI_sbjdir')

    # MRI_convert Node
    # We use it if we don't have a .nii.gz file
    # The output of mriconvert is the input of recon-all
    mri_convert = pe.Node(interface=MRIConvert(), name='mri_convert')

    # (2) ReconAll Node to generate surfaces and parcellations of structural
    #     data from anatomical images of a subject.
    recon_all = pe.Node(interface=ReconAll(), name='recon_all')
    recon_all.inputs.subjects_dir = sbj_dir
    recon_all.inputs.directive = 'all'

    # reconall_workflow will be a node of the main workflow
    reconall_workflow = pe.Workflow(name=FS_WF_name)

    reconall_workflow.base_dir = MRI_path

    reconall_workflow.connect(infosource, 'subject_id',
                              recon_all, 'subject_id')

    reconall_workflow.connect(infosource, 'subject_id',
                              datasource,  'subject_id')

    if is_nii:
        reconall_workflow.connect(datasource, 'struct', recon_all, 'T1_files')
    else:
        reconall_workflow.connect(datasource,   'dcm_file',
                                  get_firstfile,  'dcm_files')
        reconall_workflow.connect(get_firstfile, 'dcm_file',
                                  get_MRI_sbjdir, 'dcm_file')

        reconall_workflow.connect(get_firstfile, 'dcm_file',
                                  mri_convert, 'in_file')
        reconall_workflow.connect(get_MRI_sbjdir, 'struct_filename',
                                  mri_convert, 'out_file')

        reconall_workflow.connect(mri_convert, 'out_file',
                                  recon_all, 'T1_files')

    # (3) BEM generation by the watershed algo of MNE C
    main_workflow = pe.Workflow(name=MAIN_WF_name)
    main_workflow.base_dir = sbj_dir

    # I mode: WatershedBEM Interface of nipype
    bem_generation = pe.Node(interface=WatershedBEM(), name='bem_generation')
    bem_generation.inputs.subjects_dir = sbj_dir
    bem_generation.inputs.atlas_mode = True

    main_workflow.connect(reconall_workflow, 'recon_all.subject_id',
                          bem_generation, 'subject_id')

    # II mode: make_watershed_bem of MNE Python package
    # (defined as an alternative to bem_generation; not connected below)
    def mne_watershed_bem(sbj_dir, sbj_id):

        from mne.bem import make_watershed_bem

        print('call make_watershed_bem')
        make_watershed_bem(sbj_id, sbj_dir, overwrite=True)

    call_mne_watershed_bem = pe.Node(interface=Function(input_names=['sbj_dir', 'sbj_id'],
                                                        output_names=['sbj_id'],
                                                        function=mne_watershed_bem),
                                     name='call_mne_watershed_bem')

    # copy the generated meshes from bem/watershed to bem/ and change the names
    # according to MNE
    def copy_surfaces(sbj_id, mesh_files):
        import os
        import os.path as op
        from smri_params import sbj_dir
        from mne.report import Report

        report = Report()

        surf_names = ['brain_surface', 'inner_skull_surface',
                      'outer_skull_surface',  'outer_skin_surface']
        new_surf_names = ['brain.surf', 'inner_skull.surf',
                          'outer_skull.surf', 'outer_skin.surf']

        bem_dir = op.join(sbj_dir, sbj_id, 'bem')
        surface_dir = op.join(sbj_dir, sbj_id, 'bem/watershed')

        for i in range(len(surf_names)):
            os.system('cp %s %s' % (op.join(surface_dir, sbj_id + '_' + surf_names[i]),
                                    op.join(bem_dir, new_surf_names[i])))
                                    #op.join(bem_dir, sbj_id + '-' + new_surf_names[i])))

        report.add_bem_to_section(subject=sbj_id, subjects_dir=sbj_dir)
        report_filename = op.join(bem_dir, "BEM_report.html")
        print('*** REPORT file %s written ***' % report_filename)
        report.save(report_filename, open_browser=False, overwrite=True)

        return sbj_id

    copy_bem_surf = pe.Node(interface=Function(input_names=['sbj_id', 'mesh_files'],
                                               output_names=['sbj_id'],
                                               function=copy_surfaces),
                            name='copy_bem_surf')

    main_workflow.connect(infosource, 'subject_id', copy_bem_surf, 'sbj_id')
    main_workflow.connect(bem_generation, 'mesh_files',
                          copy_bem_surf, 'mesh_files')

    return main_workflow
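
# Hypothetical driver sketch: module-level parameters such as subjects_list,
# MRI_path and sbj_dir are expected to come from smri_params, as the import in
# copy_surfaces suggests. Plugin settings are illustrative.
if __name__ == '__main__':
    main_workflow = create_main_workflow_FS_segmentation()
    main_workflow.write_graph(graph2use='colored')  # optional visual check
    main_workflow.run(plugin='MultiProc', plugin_args={'n_procs': 4})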