Example #1
def process_subject_anatomy(t1):
    reconall = ReconAll()
    reconall.inputs.subject_id = t1.split('/')[-3]
    reconall.inputs.directive = 'all'
    reconall.inputs.subjects_dir = subjects_dir
    reconall.inputs.T1_files = t1
    reconall.run()
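This excerpt assumes a module-level subjects_dir and the ReconAll import; a minimal sketch of that assumed context (illustrative path, not from the original project):

from nipype.interfaces.freesurfer import ReconAll

subjects_dir = '/data/freesurfer/subjects'  # assumed to be defined at module level
# The subject ID is taken from the third-to-last path component, so
# process_subject_anatomy('/data/sub-01/anat/T1.nii') would write its
# recon-all output to /data/freesurfer/subjects/sub-01.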
Example #2
def process_subject_anatomy(subject, t1, subjects_dir='/cluster/transcend/MRI/WMA/recons'):
    reconall = ReconAll()
    reconall.inputs.subject_id = subject
    reconall.inputs.directive = 'all'
    reconall.inputs.subjects_dir = subjects_dir
    reconall.inputs.T1_files = t1
    reconall.run()
Example #3
def run_recon_all(subject=None, subjects_dir=subjects_dir, openmp=1):
    reconall = ReconAll()
    if not os.path.isdir(subjects_dir):
        print(
            f"Subjects directory {subjects_dir} does not exist, creating it.")
        os.mkdir(subjects_dir)
    if not os.path.isdir(os.path.join(subjects_dir, subject, "mri")):
        anafolder = os.path.join(args.folder, subject)
        # catch error, if no .nii exists
        try:
            nii_file = glob.glob(anafolder + "/*.nii*")[0]
            reconall.inputs.subject_id = subject
            reconall.inputs.T1_files = nii_file
            reconall.inputs.directive = 'all'
            reconall.inputs.subjects_dir = subjects_dir
            reconall.inputs.openmp = openmp
            reconall.inputs.flags = "-3T"
            print(f"Now running recon-all for subject {subject}")
            print(f".nii-file used was: {nii_file}")
            reconall.run()
        except Exception as e:
            print(e)
    else:
        print(
            f"A freesurfer segmentation of subject {subject} already exists in {subjects_dir} - aborting"
        )
Example #4
    def _nii_to_freesurfer(self):
        reconall = ReconAll()
        if not self.subject.startswith("sub-"):
            self.subject = "sub-" + self.subject
        # check if freesurfer segmentation was already performed
        freesurfered = os.path.join(self.FS_SUBJECTS_DIR, self.subject)
        if not os.path.isdir(freesurfered):
            nii_file = glob.glob(opj(self.mri_folder, "*.nii*"))[0]
            reconall.inputs.subject_id = self.subject
            reconall.inputs.T1_files = nii_file
            reconall.inputs.directive = 'all'
            reconall.inputs.subjects_dir = self.FS_SUBJECTS_DIR
            reconall.inputs.openmp = self.n_jobs
            reconall.inputs.flags = "-3T"
            reconall.run()
        else:
            print(
                f"A freesurfer segmentation of subject {self.subject} already exists in {self.FS_SUBJECTS_DIR}"
            )
Example #5
def make_w_masking():
    w_mask = Workflow('masking')

    n_in = Node(
        IdentityInterface(fields=[
            'T1w',
            'subject',  # without sub-
            'freesurfer2func',
            'func',
        ]),
        name='input')

    n_out = Node(IdentityInterface(fields=[
        'func',
    ]), name='output')

    n_fl = Node(FLIRT(), name='flirt')
    n_fl.inputs.output_type = 'NIFTI_GZ'
    n_fl.inputs.apply_xfm = True
    n_fl.inputs.interp = 'nearestneighbour'

    n_conv = Node(MRIConvert(), name='convert')
    n_conv.inputs.out_type = 'niigz'

    reconall = Node(ReconAll(), name='reconall')
    reconall.inputs.directive = 'all'
    reconall.inputs.subjects_dir = '/Fridge/R01_BAIR/freesurfer'

    w_mask.connect(n_in, 'T1w', reconall, 'T1_files')
    w_mask.connect(n_in, 'subject', reconall, 'subject_id')

    n_mul = Node(interface=BinaryMaths(), name='mul')
    n_mul.inputs.operation = 'mul'

    w_mask.connect(reconall, ('ribbon', select_ribbon), n_conv, 'in_file')
    w_mask.connect(n_conv, 'out_file', n_fl, 'in_file')
    w_mask.connect(n_in, 'func', n_fl, 'reference')
    w_mask.connect(n_in, 'freesurfer2func', n_fl, 'in_matrix_file')

    w_mask.connect(n_in, 'func', n_mul, 'in_file')
    w_mask.connect(n_fl, 'out_file', n_mul, 'operand_file')

    w_mask.connect(n_mul, 'out_file', n_out, 'func')

    return w_mask
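The select_ribbon helper connected above is not included in this excerpt; a minimal sketch of what it presumably does (picking the whole-brain ribbon volume out of ReconAll's list-valued 'ribbon' output), with the exact filtering logic being an assumption:

def select_ribbon(ribbon_files):
    # recon-all exposes lh.ribbon.mgz, rh.ribbon.mgz and ribbon.mgz through the
    # 'ribbon' output; keep only the combined (whole-brain) volume.
    from os.path import basename
    return [f for f in ribbon_files if basename(f) == 'ribbon.mgz'][0]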
Example #6
def FreeSurfer_Reconall(subject_list, base_directory, out_directory):
    #==============================================================
    # Loading required packages
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    import nipype.interfaces.utility as util
    from nipype.interfaces.freesurfer import ReconAll
    from nipype import SelectFiles
    import os
    nodes = list()

    #====================================
    # Defining the nodes for the workflow

    # Getting the subject ID
    infosource = pe.Node(
        interface=util.IdentityInterface(fields=['subject_id']),
        name='infosource')
    infosource.iterables = ('subject_id', subject_list)

    # Getting the relevant structural (T1-weighted) data
    templates = dict(T1='{subject_id}/anat/{subject_id}_T1w.nii.gz')

    selectfiles = pe.Node(SelectFiles(templates), name="selectfiles")
    selectfiles.inputs.base_directory = os.path.abspath(base_directory)
    nodes.append(selectfiles)

    reconall = pe.Node(interface=ReconAll(), name='reconall')
    reconall.inputs.directive = 'autorecon2'
    reconall.inputs.subjects_dir = out_directory
    reconall.inputs.flags = '-no-isrunning'
    reconall.inputs.ignore_exception = True

    # Setting up the workflow
    fs_reconall = pe.Workflow(name='fs_reconall')

    # Reading in files
    fs_reconall.connect(infosource, 'subject_id', selectfiles, 'subject_id')
    fs_reconall.connect(selectfiles, 'T1', reconall, 'T1_files')
    fs_reconall.connect(infosource, 'subject_id', reconall, 'subject_id')

    # Running the workflow
    fs_reconall.base_dir = os.path.abspath(out_directory)
    fs_reconall.write_graph()
    fs_reconall.run('PBSGraph')
Example #7
class ReconAllRunner(AnatomicalPreprocessing):
    """
    Automate ReconAll execution over a provided queryset of
    :class:`~django_mri.models.scan.Scan` instances.
    """

    #: :class:`~django_analyses.models.analysis.Analysis` instance title.
    ANALYSIS_TITLE = "ReconAll"

    #: :class:`~django_analyses.models.analysis_version.AnalysisVersion`
    #: instance title.
    ANALYSIS_VERSION_TITLE = ReconAll().version

    #: :class:`~django_analyses.models.pipeline.node.Node` instance
    #: configuration.
    ANALYSIS_CONFIGURATION = {}

    #: Input definition key.
    INPUT_KEY = "T1_files"

    def get_instance_representation(self, instance: Scan) -> List[str]:
        """
        ReconAll expects a list of T1-weighted scans.

        Parameters
        ----------
        instance : Scan
            Scan to create the input representation for

        Returns
        -------
        List[str]
            A list containing the provided scan's path
        """
        nii_path = super().get_instance_representation(instance)
        return [nii_path]
Example #8
experiment_dir = '/home/luiscp/Documents/Data/ADRC_90Plus/output'
output_dir = 'seg_analysis'

subject_list = ['233','234','235','236','237','238','239','240','242','243','244','245','246','248','249','250','251','253','254','256','257','259']
#subject_list = ['259']
###############################
#specify nodes
###############################
smooth = Node(SUSAN(fwhm = 8.0,
                    output_type =u'NIFTI_GZ',
                    brightness_threshold=20),
                    name="smooth")

#freesurfer recon-all segmentation
recon_all = Node(ReconAll(subject_id ='subject_id',
                 directive = u'all'),
                 name="recon_all")

mr_convertT1=Node(MRIConvert(out_type=u'niigz'),
                name="mr_convertT1")
mr_convertaseg=Node(MRIConvert(out_type=u'niigz'),
                name="mr_convertaseg")
mr_convertaparc_aseg=MapNode(MRIConvert(out_type=u'niigz'),
                name="mr_convertaparc_aseg",iterfield='in_file')
mr_convertbrainmask=Node(MRIConvert(out_type=u'niigz'),
                name="mr_convertbrainmask")
mr_convertbrain=Node(MRIConvert(out_type=u'niigz'),
                name="mr_convertbrain")
mr_convertwmparc=Node(MRIConvert(out_type=u'niigz'),
                name="mr_convertwmparc")
mr_convertwm=Node(MRIConvert(out_type=u'niigz'),
                name="mr_convertwm")
Example #9
from nipype.interfaces.freesurfer import ReconAll
import os
import shutil
import sys

##################################################
FREESURFER_HOME = os.environ.get('FREESURFER_HOME')
reconall = ReconAll()

# path to the subject T1 MRI image in .nii format
# this file must be in .nii format
nii_file_path = 'nii_file_path'

# Subject ID: this should be a string which defines the folder name
# that will be created automatically for the subject in /usr/local/freesurfer/subjects/
subject_id = 'subject_id'

print('Data preprocessing started for {subject} in {path}'.format(
    subject=subject_id, path=nii_file_path))

reconall.inputs.subject_id = subject_id
reconall.inputs.directive = 'all'
reconall.inputs.subjects_dir = FREESURFER_HOME + '/subjects'
reconall.inputs.T1_files = nii_file_path
reconall.run()
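Since os.environ.get returns None when FREESURFER_HOME is unset, the subjects_dir concatenation above would fail with a TypeError; a minimal guard (not part of the original snippet) could be added right after reading the variable:

if FREESURFER_HOME is None:
    raise RuntimeError('FREESURFER_HOME is not set; source the FreeSurfer setup script first')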
Example #10
T1_identifier = 'struct.nii.gz'  # Name of T1-weighted image

# Create the output folder - FreeSurfer can only run if this folder exists
os.system('mkdir -p %s' % fs_folder)
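# equivalent pure-Python alternative (Python 3): os.makedirs(fs_folder, exist_ok=True)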

# Create the pipeline that runs the recon-all command
reconflow = Workflow(name="reconflow")
reconflow.base_dir = opj(experiment_dir, 'workingdir_reconflow')

# Some magical stuff happens here (not important for now)
infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = ('subject_id', subject_list)
# This node represents the actual recon-all command
reconall = Node(
    ReconAll(
        directive='all',
        #flags='-nuintensitycor- 3T',
        subjects_dir=fs_folder),
    name="reconall")


# This function returns for each subject the path to struct.nii.gz
def pathfinder(subject, foldername, filename):
    from os.path import join as opj
    struct_path = opj(foldername, subject, filename)
    return struct_path


# This section connects all the nodes of the pipeline to each other
reconflow.connect([
    (infosource, reconall, [('subject_id', 'subject_id')]),
    (infosource, reconall, [(('subject_id', pathfinder, data_dir,
                              T1_identifier), 'T1_files')]),
])
Example #11
def main():
    """
    Create the help message
    """

    pipeline_description = textwrap.dedent('''
    Pipeline to run the complete FreeSurfer pipeline on structural images
    ''')
    """
    Create the parser
    """

    parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter,
                            description=pipeline_description)

    # Input images
    parser.add_argument('--input_t1w',
                        dest='input_t1w',
                        metavar='FILE',
                        help='List of T1w Nifti file(s) to process',
                        nargs='+',
                        required=True)
    parser.add_argument('--input_t2w',
                        dest='input_t2w',
                        metavar='FILE',
                        help='Optional list of T2w Nifti file(s) to process',
                        nargs='+')
    parser.add_argument('--input_sid',
                        dest='input_sid',
                        metavar='FILE',
                        help='Optional list of subject IDs',
                        nargs='+')

    # Output directory
    parser.add_argument('-o',
                        '--output_dir',
                        dest='output_dir',
                        metavar='DIR',
                        help='Output directory where to save the results',
                        default=os.getcwd())
    """
    Add default arguments in the parser
    """

    default_parser_argument(parser)
    """
    Parse the input arguments
    """

    args = parser.parse_args()
    """
    Create the output folder if it does not exist
    """

    result_dir = os.path.abspath(args.output_dir)
    if not os.path.exists(result_dir):
        os.mkdir(result_dir)
    """
    Check the length of the input lists
    """
    if args.input_t2w is not None:
        if len(args.input_t1w) != len(args.input_t2w):
            raise Exception('The numbers of T1w and T2w files differ')
    if args.input_sid is not None:
        if len(args.input_t1w) != len(args.input_sid):
            raise Exception('The numbers of T1w files and subject ID differ')
    """
    Create the workflow that runs the FreeSurfer recon-all segmentation for each subject
    """
    workflow = pe.Workflow(name='freesurfer')
    workflow.base_output_dir = 'freesurfer'
    input_node = pe.Node(interface=niu.IdentityInterface(
        fields=['T1_files', 'T2_files', 'subject_id']),
                         name='input_node')

    input_node.inputs.T1_files = [os.path.abspath(f) for f in args.input_t1w]
    if args.input_t2w is not None:
        input_node.inputs.T2_files = [
            os.path.abspath(f) for f in args.input_t2w
        ]
    if args.input_sid is not None:
        input_node.inputs.subject_id = args.input_sid
    recon = None
    if args.input_t2w is not None and args.input_sid is not None:
        recon = pe.MapNode(interface=ReconAll(),
                           iterfield=['T1_files', 'T2_file', 'subject_id'],
                           name='recon')
        workflow.connect(input_node, 'T2_files', recon, 'T2_file')
        workflow.connect(input_node, 'subject_id', recon, 'subject_id')
        recon.inputs.use_T2 = True
    elif args.input_t2w is not None:
        recon = pe.MapNode(interface=ReconAll(),
                           iterfield=['T1_files', 'T2_file'],
                           name='recon')
        workflow.connect(input_node, 'T2_files', recon, 'T2_file')
        recon.inputs.use_T2 = True
    elif args.input_sid is not None:
        recon = pe.MapNode(interface=ReconAll(),
                           iterfield=['T1_files', 'subject_id'],
                           name='recon')
        workflow.connect(input_node, 'subject_id', recon, 'subject_id')
    else:
        # only T1w images were provided
        recon = pe.MapNode(interface=ReconAll(),
                           iterfield=['T1_files'],
                           name='recon')
    workflow.connect(input_node, 'T1_files', recon, 'T1_files')
    recon.inputs.subjects_dir = result_dir
    recon.inputs.openmp = args.openmp_core
    """
    output the graph if required
    """

    if args.graph is True:
        generate_graph(workflow=workflow)
        sys.exit(0)
    """
    Edit the qsub arguments based on the input arguments
    """

    qsubargs_time = '48:00:00'
    qsubargs_mem = '5.9G'
    if args.use_qsub is True and args.openmp_core > 1:
        qsubargs_mem = str(max(0.95, 5.9 / args.openmp_core)) + 'G'

    qsubargs = '-l s_stack=10240 -j y -b y -S /bin/csh -V'
    qsubargs = qsubargs + ' -l h_rt=' + qsubargs_time
    qsubargs = qsubargs + ' -l tmem=' + qsubargs_mem + ' -l h_vmem=' + qsubargs_mem + ' -l vf=' + qsubargs_mem
    """
    Run the workflow
    """

    run_workflow(workflow=workflow, qsubargs=qsubargs, parser=args)
Example #12
    def _run_interface(self, runtime):
        from additional_interfaces import DipyDenoiseT1
        from additional_interfaces import FSRename
        from nipype.interfaces.ants import N4BiasFieldCorrection
        from nipype.interfaces.ants.segmentation import BrainExtraction
        from nipype.interfaces.freesurfer import MRIConvert
        from nipype.interfaces.freesurfer import ReconAll
        import nipype.interfaces.fsl as fsl
        import nipype.pipeline.engine as pe
        import os

        subject_id = self.inputs.subject_id
        T1 = self.inputs.T1
        template_directory = self.inputs.template_directory
        out_directory = self.inputs.out_directory
        subjects_dir = out_directory + '/FreeSurfer/'

        if not os.path.isdir(subjects_dir):
            os.mkdir(subjects_dir)

        os.environ['SUBJECTS_DIR'] = subjects_dir

        # Getting a better field of view
        robustfov = pe.Node(interface=fsl.RobustFOV(), name='robustfov')
        robustfov.inputs.in_file = T1

        # Denoising
        T1_denoise = pe.Node(interface=DipyDenoiseT1(), name='T1_denoise')

        # Brain extraction
        brainextraction = pe.Node(interface=fsl.BET(), name='brainextraction')

        # Renaming files for FreeSurfer
        rename = pe.Node(FSRename(), name='rename')

        # Running FreeSurfer
        autorecon1 = pe.Node(interface=ReconAll(), name='autorecon1')
        autorecon1.inputs.subject_id = subject_id
        autorecon1.inputs.directive = 'autorecon1'
        autorecon1.inputs.args = '-noskullstrip'
        autorecon1.inputs.subjects_dir = subjects_dir

        autorecon2 = pe.Node(interface=ReconAll(), name='autorecon2')
        autorecon2.inputs.directive = 'autorecon2'

        autorecon3 = pe.Node(interface=ReconAll(), name='autorecon3')
        autorecon3.inputs.directive = 'autorecon3'

        wm_convert = pe.Node(interface=MRIConvert(), name='wm_convert')
        wm_convert.inputs.out_file = subjects_dir + '/' + subject_id + '/mri/' + 'wm.nii'
        wm_convert.inputs.out_type = 'nii'

        T1_convert = pe.Node(interface=MRIConvert(), name='T1_convert')
        T1_convert.inputs.out_file = subjects_dir + '/' + subject_id + '/mri/' + 'T1.nii.gz'
        T1_convert.inputs.out_type = 'niigz'

        mask_convert = pe.Node(interface=MRIConvert(), name='mask_convert')
        mask_convert.inputs.out_file = subjects_dir + '/' + subject_id + '/mri/' + 'brainmask.nii.gz'
        mask_convert.inputs.out_type = 'niigz'

        # Connecting the pipeline
        T1_preproc = pe.Workflow(name='t1_preproc')

        T1_preproc.connect(robustfov, 'out_roi', T1_denoise, 'in_file')
        T1_preproc.connect(T1_denoise, 'out_file', brainextraction, 'in_file')
        T1_preproc.connect(
            brainextraction, 'out_file', autorecon1, 'T1_files')
        T1_preproc.connect(
            autorecon1, 'subject_id', autorecon2, 'subject_id')
        T1_preproc.connect(
            autorecon1, 'subjects_dir', autorecon2, 'subjects_dir')
        T1_preproc.connect(
            autorecon1, 'subject_id', rename, 'subject_id')
        T1_preproc.connect(
            autorecon1, 'subjects_dir', rename, 'subjects_dir')
        T1_preproc.connect(
            autorecon2, 'subject_id', autorecon3, 'subject_id')
        T1_preproc.connect(
            autorecon2, 'subjects_dir', autorecon3, 'subjects_dir')
        T1_preproc.connect(autorecon3, 'wm', wm_convert, 'in_file')
        T1_preproc.connect(autorecon3, 'T1', T1_convert, 'in_file')
        T1_preproc.connect(
            autorecon3, 'brainmask', mask_convert, 'in_file')

        # ==============================================================
        # Running the workflow
        T1_preproc.base_dir = os.path.abspath(self.inputs.out_directory + '/_subject_id_' + self.inputs.subject_id)
        T1_preproc.write_graph()
        T1_preproc.run()

        return runtime
Example #13
#: within the *ANALYSIS_INTERFACES* setting.
interfaces = {
    "apply_topup": {ApplyTOPUP().version: ApplyTOPUP},
    "binary_maths": {BinaryMaths().version: BinaryMaths},
    "BET": {BET().version: BET},
    "CAT12 Segmentation": {"12.7": Cat12Segmentation},
    "fslmerge": {Merge().version: Merge},
    "fslreorient2std": {Reorient2Std().version: Reorient2Std},
    "fslroi": {ExtractROI().version: ExtractROI},
    "FAST": {FastWrapper.version: FastWrapper},
    "FLIRT": {FLIRT().version: FLIRT},
    "FNIRT": {FNIRT().version: FNIRT},
    "FSL Anatomical Processing Script": {FslAnat.__version__: FslAnat},
    "mean_image": {MeanImage().version: MeanImage},
    "robustfov": {RobustFOV().version: RobustFOV},
    "ReconAll": {ReconAll().version: ReconAll},
    "SUSAN": {SUSAN().version: SUSAN},
    "topup": {TopupWrapper.version: TopupWrapper},
    "eddy": {Eddy().version: Eddy},
    "denoise": {DWIDenoise().version: DWIDenoise},
    "degibbs": {MRDeGibbs().version: MRDeGibbs},
    "bias_correct": {DWIBiasCorrect().version: DWIBiasCorrect},
    "dwifslpreproc": {DwiFslPreproc.__version__: DwiFslPreproc},
    "mrconvert": {MRConvert.__version__: MRConvert},
    "dwi2fod": {
        ConstrainedSphericalDeconvolution().version: ConstrainedSphericalDeconvolution  # noqa: E501
    },
    "dwi2response": {ResponseSD().version: ResponseSD},
    "5ttgen": {Generate5tt().version: Generate5tt},
    "dwi2tensor": {Dwi2Tensor.__version__: Dwi2Tensor},
    "tensor2metric": {Tensor2metric.__version__: Tensor2metric},
Example #14
def WorkupT1T2(processingLevel,
               mountPrefix,
               ScanDir,
               subject_data_file,
               atlas_fname_wpath,
               BCD_model_path,
               Version=110,
               InterpolationMode="Linear",
               Mode=10,
               DwiList=[]):
    processingLevel = int(processingLevel)
    """
  Run autoworkup on a single subject's data.

  This is the main function to call when processing a single subject worth of
  data.  ScanDir is the base of the directory to place results, T1Images & T2Images
  are the lists of images to be used in the auto-workup. atlas_fname_wpath is
  the path and filename of the atlas to use.
  """

    subjData = csv.reader(open(subject_data_file, 'rb'),
                          delimiter=',',
                          quotechar='"')
    myDB = dict()
    multiLevel = AutoVivification()
    for row in subjData:
        currDict = dict()
        validEntry = True
        if len(row) == 5:
            site = row[0]
            subj = row[1]
            session = row[2]
            T1s = eval(row[3])
            T2s = eval(row[4])
            fullT1s = [mountPrefix + i for i in T1s]
            fullT2s = [mountPrefix + i for i in T2s]
            currDict['T1s'] = fullT1s
            currDict['T2s'] = fullT2s
            currDict['site'] = site
            currDict['subj'] = subj
            if len(fullT1s) < 1:
                print("Invalid Entry!  {0}".format(currDict))
                validEntry = False
            if len(fullT2s) < 1:
                print("Invalid Entry!  {0}".format(currDict))
                validEntry = False
            for i in fullT1s:
                if not os.path.exists(i):
                    print("Missing File: {0}".format(i))
                    validEntry = False
            for i in fullT2s:
                if not os.path.exists(i):
                    print("Missing File: {0}".format(i))
                    validEntry = False

            if validEntry == True:
                myDB[session] = currDict
                UNIQUE_ID = site + "_" + subj + "_" + session
                multiLevel[UNIQUE_ID] = currDict
    from cPickle import dump
    dump(multiLevel, open('db.tmp', 'w'))

    ########### PIPELINE INITIALIZATION #############
    baw200 = pe.Workflow(name="BAW_20120104_workflow")
    baw200.config['execution'] = {
        'plugin': 'Linear',
        #'stop_on_first_crash':'True',
        'stop_on_first_crash': 'False',
        'stop_on_first_rerun': 'False',
        'hash_method': 'timestamp',
        'single_thread_matlab': 'True',
        'remove_unnecessary_outputs': 'False',
        'use_relative_paths': 'False',
        'remove_node_directories': 'False',
        'local_hash_check': 'True'
    }
    baw200.config['logging'] = {
        'workflow_level': 'DEBUG',
        'filemanip_level': 'DEBUG',
        'interface_level': 'DEBUG',
        'log_directory': ScanDir
    }
    baw200.base_dir = ScanDir

    ########################################################
    # Run ACPC Detect on first T1 Image - Base Image
    ########################################################
    """TODO: Determine if we want to pass subjectID and scanID, always require full
  paths, get them from the output path, or something else.
  """
    siteSource = pe.Node(interface=IdentityInterface(fields=['uid']),
                         name='99_siteSource')
    siteSource.iterables = ('uid', multiLevel.keys())

    def getFirstT1(uid, dbfile):
        from cPickle import load
        with open(dbfile) as fp:
            db = load(fp)
        print("uid:= {0}, dbfile: {1}".format(uid, dbfile))
        print("result:= {0}".format(db[uid]["T1s"]))
        return db[uid]["T1s"][0]

    def sessionImages(dbfile, uid):
        from cPickle import load
        with open(dbfile) as fp:
            db = load(fp)
        return db[uid]["T1s"], db[uid]["T2s"]

    T1andT2ImageListNode = pe.Node(interface=Function(
        function=sessionImages,
        input_names=['dbfile', 'uid'],
        output_names=['T1List', 'T2List']),
                                   run_without_submitting=True,
                                   name="99_nestedImageList")
    T1andT2ImageListNode.inputs.dbfile = os.path.join(os.getcwd(),
                                                      'db.tmp')  #multiLevel
    baw200.connect(siteSource, 'uid', T1andT2ImageListNode, 'uid')

    ########################################################
    # Run ACPC Detect on First T1 Image
    ########################################################
    BCD = pe.Node(interface=BRAINSConstellationDetector(), name="01_BCD")
    ##  Use program default BCD.inputs.inputTemplateModel = T1ACPCModelFile
    ##BCD.inputs.outputVolume =   "BCD_OUT" + "_ACPC_InPlace.nii.gz"                #$# T1AcpcImageList
    BCD.inputs.outputResampledVolume = "BCD_OUT" + "_ACPC.nii.gz"
    BCD.inputs.outputTransform = "BCD_OUT" + "_ACPC_transform.mat"
    BCD.inputs.outputLandmarksInInputSpace = "BCD_OUT" + "_ACPC_Original.fcsv"
    BCD.inputs.outputLandmarksInACPCAlignedSpace = "BCD_OUT" + "_ACPC_Landmarks.fcsv"
    BCD.inputs.outputMRML = "BCD_OUT" + "_ACPC_Scene.mrml"
    BCD.inputs.outputLandmarkWeights = "BCD_OUT" + "_ACPC_Landmarks.wgts"
    BCD.inputs.interpolationMode = InterpolationMode
    BCD.inputs.houghEyeDetectorMode = 1
    BCD.inputs.acLowerBound = 80
    BCD.inputs.llsModel = os.path.join(BCD_model_path, 'LLSModel.hdf5')
    BCD.inputs.inputTemplateModel = os.path.join(BCD_model_path, 'T1.mdl')

    # Entries below are of the form:
    baw200.connect([
        (siteSource, BCD, [(('uid', getFirstT1,
                             os.path.join(os.getcwd(),
                                          'db.tmp')), 'inputVolume')]),
    ])

    ########################################################
    # Run BROIA to make T1 image as small as possible
    ########################################################
    BROIA = pe.Node(interface=BRAINSROIAuto(), name="03_BROIA")
    BROIA.inputs.ROIAutoDilateSize = 10
    BROIA.inputs.outputVolumePixelType = "short"
    BROIA.inputs.maskOutput = True
    BROIA.inputs.cropOutput = True
    BROIA.inputs.outputVolume = "BROIA_OUT" + "_ACPC_InPlace_cropped.nii.gz"
    BROIA.inputs.outputROIMaskVolume = "BROIA_OUT" + "_ACPC_InPlace_foreground_seg.nii.gz"

    baw200.connect([(BCD, BROIA, [('outputResampledVolume', 'inputVolume')])])

    if processingLevel > 1:
        ########################################################
        # Run BABC on Multi-modal images
        ########################################################

        BLI = pe.Node(interface=BRAINSLandmarkInitializer(), name="05_BLI")
        BLI.inputs.outputTransformFilename = "landmarkInitializer_atlas_to_subject_transform.mat"

        BAtlas = MakeAtlasNode(
            atlas_fname_wpath)  ## Call function to create node

        baw200.connect([
            (BCD, BLI, [('outputLandmarksInACPCAlignedSpace',
                         'inputFixedLandmarkFilename')]),
        ])
        baw200.connect([(BAtlas, BLI, [('template_landmarks_fcsv',
                                        'inputMovingLandmarkFilename')]),
                        (BAtlas, BLI, [('template_landmark_weights_csv',
                                        'inputWeightFilename')])])

        def MakeOneFileList(T1List, T2List, altT1):
            full_list = T1List
            full_list.extend(T2List)
            full_list[0] = altT1  # The ACPC ROIcropped T1 replacement image.
            return full_list

        makeImagePathList = pe.Node(Function(
            function=MakeOneFileList,
            input_names=['T1List', 'T2List', 'altT1'],
            output_names=['imagePathList']),
                                    run_without_submitting=True,
                                    name="99_makeImagePathList")
        baw200.connect(T1andT2ImageListNode, 'T1List', makeImagePathList,
                       'T1List')
        baw200.connect(T1andT2ImageListNode, 'T2List', makeImagePathList,
                       'T2List')
        # -- Alternate mode
        baw200.connect(BROIA, 'outputVolume', makeImagePathList, 'altT1')

        # -- Standard mode to make 256^3 images
        #baw200.connect( BCD,    'outputResampledVolume', makeImagePathList, 'altT1' )

        def MakeOneFileTypeList(T1List, T2List):
            input_types = ["T1"] * len(T1List)
            input_types.extend(["T2"] * len(T2List))
            return ",".join(input_types)

        makeImageTypeList = pe.Node(Function(function=MakeOneFileTypeList,
                                             input_names=['T1List', 'T2List'],
                                             output_names=['imageTypeList']),
                                    run_without_submitting=True,
                                    name="99_makeImageTypeList")
        baw200.connect(T1andT2ImageListNode, 'T1List', makeImageTypeList,
                       'T1List')
        baw200.connect(T1andT2ImageListNode, 'T2List', makeImageTypeList,
                       'T2List')

        def MakeOutFileList(T1List, T2List):
            def GetExtBaseName(filename):
                '''
            Get the filename without the extension.  Works for .ext and .ext.gz
            '''
                import os
                currBaseName = os.path.basename(filename)
                currExt = os.path.splitext(currBaseName)[1]
                currBaseName = os.path.splitext(currBaseName)[0]
                if currExt == ".gz":
                    currBaseName = os.path.splitext(currBaseName)[0]
                    currExt = os.path.splitext(currBaseName)[1]
                return currBaseName

            all_files = T1List
            all_files.extend(T2List)
            out_corrected_names = []
            for i in all_files:
                out_name = GetExtBaseName(i) + "_corrected.nii.gz"
                out_corrected_names.append(out_name)
            return out_corrected_names

        makeOutImageList = pe.Node(Function(function=MakeOutFileList,
                                            input_names=['T1List', 'T2List'],
                                            output_names=['outImageList']),
                                   run_without_submitting=True,
                                   name="99_makeOutImageList")
        baw200.connect(T1andT2ImageListNode, 'T1List', makeOutImageList,
                       'T1List')
        baw200.connect(T1andT2ImageListNode, 'T2List', makeOutImageList,
                       'T2List')

        BABC = pe.Node(interface=BRAINSABC(), name="11_BABC")
        baw200.connect(makeImagePathList, 'imagePathList', BABC,
                       'inputVolumes')
        baw200.connect(makeImageTypeList, 'imageTypeList', BABC,
                       'inputVolumeTypes')
        baw200.connect(makeOutImageList, 'outImageList', BABC, 'outputVolumes')
        BABC.inputs.debuglevel = 0
        BABC.inputs.maxIterations = 3
        BABC.inputs.maxBiasDegree = 4
        BABC.inputs.filterIteration = 3
        BABC.inputs.filterMethod = 'GradientAnisotropicDiffusion'
        BABC.inputs.gridSize = [28, 20, 24]
        BABC.inputs.outputFormat = "NIFTI"
        BABC.inputs.outputLabels = "brain_label_seg.nii.gz"
        BABC.inputs.outputDirtyLabels = "volume_label_seg.nii.gz"
        BABC.inputs.posteriorTemplate = "POSTERIOR_%s.nii.gz"
        BABC.inputs.atlasToSubjectTransform = "atlas_to_subject.mat"
        BABC.inputs.implicitOutputs = [
            't1_average_BRAINSABC.nii.gz', 't2_average_BRAINSABC.nii.gz'
        ]

        BABC.inputs.resamplerInterpolatorType = InterpolationMode
        ##
        BABC.inputs.outputDir = './'

        baw200.connect(BAtlas, 'AtlasPVDefinition_xml', BABC,
                       'atlasDefinition')

        baw200.connect(BLI, 'outputTransformFilename', BABC,
                       'atlasToSubjectInitialTransform')
        """
      Get the first T1 and T2 corrected images from BABC
      """
        bfc_files = pe.Node(Function(
            input_names=['in_files', 'T1_count'],
            output_names=['t1_corrected', 't2_corrected'],
            function=get_first_T1_and_T2),
                            name='99_bfc_files')

        bfc_files.inputs.T1_count = len(T1andT2ImageListNode.outputs.T1List)

        baw200.connect(BABC, 'outputVolumes', bfc_files, 'in_files')
        """
      ResampleNACLabels
      """
        ResampleAtlasNACLabels = pe.Node(interface=BRAINSResample(),
                                         name="13_ResampleAtlasNACLabels")
        ResampleAtlasNACLabels.inputs.interpolationMode = "NearestNeighbor"
        ResampleAtlasNACLabels.inputs.outputVolume = "atlasToSubjectNACLabels.nii.gz"

        baw200.connect(BABC, 'atlasToSubjectTransform', ResampleAtlasNACLabels,
                       'warpTransform')
        baw200.connect(bfc_files, 't1_corrected', ResampleAtlasNACLabels,
                       'referenceVolume')
        baw200.connect(BAtlas, 'template_nac_lables', ResampleAtlasNACLabels,
                       'inputVolume')
        """
      BRAINSMush
      """
        BMUSH = pe.Node(interface=BRAINSMush(), name="15_BMUSH")
        BMUSH.inputs.outputVolume = "MushImage.nii.gz"
        BMUSH.inputs.outputMask = "MushMask.nii.gz"
        BMUSH.inputs.lowerThresholdFactor = 1.2
        BMUSH.inputs.upperThresholdFactor = 0.55

        baw200.connect(bfc_files, 't1_corrected', BMUSH, 'inputFirstVolume')
        baw200.connect(bfc_files, 't2_corrected', BMUSH, 'inputSecondVolume')
        baw200.connect(BABC, 'outputLabels', BMUSH, 'inputMaskVolume')
        """
      BRAINSROIAuto
      """
        BROI = pe.Node(interface=BRAINSROIAuto(), name="17_BRAINSROIAuto")
        BROI.inputs.closingSize = 12
        BROI.inputs.otsuPercentileThreshold = 0.01
        BROI.inputs.thresholdCorrectionFactor = 1.0
        BROI.inputs.outputROIMaskVolume = "temproiAuto_t1_ACPC_corrected_BRAINSABC.nii.gz"
        baw200.connect(bfc_files, 't1_corrected', BROI, 'inputVolume')
        """
      Split the implicit outputs of BABC
      """
        SplitAvgBABC = pe.Node(Function(
            input_names=['in_files', 'T1_count'],
            output_names=['avgBABCT1', 'avgBABCT2'],
            function=get_first_T1_and_T2),
                               run_without_submitting=True,
                               name="99_SplitAvgBABC")
        SplitAvgBABC.inputs.T1_count = 1  ## There is only 1 average T1 image.

        baw200.connect(BABC, 'implicitOutputs', SplitAvgBABC, 'in_files')
        """
      Gradient Anisotropic Diffusion images for BRAINSCut
      """
        GADT1 = pe.Node(interface=GradientAnisotropicDiffusionImageFilter(),
                        name="27_GADT1")
        GADT1.inputs.timeStep = 0.025
        GADT1.inputs.conductance = 1
        GADT1.inputs.numberOfIterations = 5
        GADT1.inputs.outputVolume = "GADT1.nii.gz"

        baw200.connect(SplitAvgBABC, 'avgBABCT1', GADT1, 'inputVolume')

        GADT2 = pe.Node(interface=GradientAnisotropicDiffusionImageFilter(),
                        name="27_GADT2")
        GADT2.inputs.timeStep = 0.025
        GADT2.inputs.conductance = 1
        GADT2.inputs.numberOfIterations = 5
        GADT2.inputs.outputVolume = "GADT2.nii.gz"

        def printFullPath(outFileFullPath):
            print("=" * 80)
            print("=" * 80)
            print("=" * 80)
            print("=" * 80)
            print("{0}".format(outFileFullPath))
            return outFileFullPath

        printOutImage = pe.Node(Function(function=printFullPath,
                                         input_names=['outFileFullPath'],
                                         output_names=['genoutFileFullPath']),
                                run_without_submitting=True,
                                name="99_printOutImage")
        baw200.connect(GADT2, 'outputVolume', printOutImage, 'outFileFullPath')

        baw200.connect(SplitAvgBABC, 'avgBABCT2', GADT2, 'inputVolume')
        """
      Sum the gradient images for BRAINSCut
      """
        SGI = pe.Node(interface=GenerateSummedGradientImage(), name="27_SGI")
        SGI.inputs.outputFileName = "SummedGradImage.nii.gz"

        baw200.connect(GADT1, 'outputVolume', SGI, 'inputVolume1')
        baw200.connect(GADT2, 'outputVolume', SGI, 'inputVolume2')

        if processingLevel > 1:
            """
          Load the BRAINSCut models & probability maps.
          """
            BCM_outputs = [
                'phi', 'rho', 'theta', 'r_probabilityMaps',
                'l_probabilityMaps', 'models'
            ]
            BCM_Models = pe.Node(interface=nio.DataGrabber(
                input_names=['structures'], outfields=BCM_outputs),
                                 name='10_BCM_Models')
            BCM_Models.inputs.base_directory = atlas_fname_wpath
            BCM_Models.inputs.template_args['phi'] = [[
                'spatialImages', 'phi', 'nii.gz'
            ]]
            BCM_Models.inputs.template_args['rho'] = [[
                'spatialImages', 'rho', 'nii.gz'
            ]]
            BCM_Models.inputs.template_args['theta'] = [[
                'spatialImages', 'theta', 'nii.gz'
            ]]
            BCM_Models.inputs.template_args['r_probabilityMaps'] = [[
                'structures'
            ]]
            BCM_Models.inputs.template_args['l_probabilityMaps'] = [[
                'structures'
            ]]
            BCM_Models.inputs.template_args['models'] = [['structures']]

            BRAINSCut_structures = [
                'caudate', 'thalamus', 'putamen', 'hippocampus'
            ]
            #BRAINSCut_structures = ['caudate','thalamus']
            BCM_Models.iterables = ('structures', BRAINSCut_structures)
            BCM_Models.inputs.template = '%s/%s.%s'
            BCM_Models.inputs.field_template = dict(
                r_probabilityMaps='probabilityMaps/r_%s_ProbabilityMap.nii.gz',
                l_probabilityMaps='probabilityMaps/l_%s_ProbabilityMap.nii.gz',
                models='modelFiles/%sModel*',
            )
            """
          The xml creation and BRAINSCut need to be their own mini-pipeline that gets
          executed once for each of the structures in BRAINSCut_structures.  This can be
          accomplished with a map node and a new pipeline.
          """
            """
          Create xml file for BRAINSCut
          """

            BFitAtlasToSubject = pe.Node(interface=BRAINSFit(),
                                         name="30_BFitAtlasToSubject")
            BFitAtlasToSubject.inputs.costMetric = "MMI"
            BFitAtlasToSubject.inputs.maskProcessingMode = "ROI"
            BFitAtlasToSubject.inputs.numberOfSamples = 100000
            BFitAtlasToSubject.inputs.numberOfIterations = [1500, 1500]
            BFitAtlasToSubject.inputs.numberOfHistogramBins = 50
            BFitAtlasToSubject.inputs.maximumStepLength = 0.2
            BFitAtlasToSubject.inputs.minimumStepLength = [0.005, 0.005]
            BFitAtlasToSubject.inputs.transformType = ["Affine", "BSpline"]
            BFitAtlasToSubject.inputs.maxBSplineDisplacement = 7
            BFitAtlasToSubject.inputs.maskInferiorCutOffFromCenter = 65
            BFitAtlasToSubject.inputs.splineGridSize = [28, 20, 24]
            BFitAtlasToSubject.inputs.outputVolume = "Trial_Initializer_Output.nii.gz"
            BFitAtlasToSubject.inputs.outputTransform = "Trial_Initializer_Output.mat"
            baw200.connect(SplitAvgBABC, 'avgBABCT1', BFitAtlasToSubject,
                           'fixedVolume')
            baw200.connect(BABC, 'outputLabels', BFitAtlasToSubject,
                           'fixedBinaryVolume')
            baw200.connect(BAtlas, 'template_t1', BFitAtlasToSubject,
                           'movingVolume')
            baw200.connect(BAtlas, 'template_brain', BFitAtlasToSubject,
                           'movingBinaryVolume')
            baw200.connect(BLI, 'outputTransformFilename', BFitAtlasToSubject,
                           'initialTransform')

            CreateBRAINSCutXML = pe.Node(Function(
                input_names=[
                    'rho', 'phi', 'theta', 'model', 'r_probabilityMap',
                    'l_probabilityMap', 'atlasT1', 'atlasBrain', 'subjT1',
                    'subjT2', 'subjT1GAD', 'subjT2GAD', 'subjSGGAD',
                    'subjBrain', 'atlasToSubj', 'output_dir'
                ],
                output_names=['xml_filename'],
                function=create_BRAINSCut_XML),
                                         overwrite=True,
                                         name="CreateBRAINSCutXML")

            ## HACK  Make better directory
            CreateBRAINSCutXML.inputs.output_dir = "./"  #os.path.join(baw200.base_dir, "BRAINSCut_output")
            baw200.connect(BCM_Models, 'models', CreateBRAINSCutXML, 'model')
            baw200.connect(BCM_Models, 'rho', CreateBRAINSCutXML, 'rho')
            baw200.connect(BCM_Models, 'phi', CreateBRAINSCutXML, 'phi')
            baw200.connect(BCM_Models, 'theta', CreateBRAINSCutXML, 'theta')
            baw200.connect(BCM_Models, 'r_probabilityMaps', CreateBRAINSCutXML,
                           'r_probabilityMap')
            baw200.connect(BCM_Models, 'l_probabilityMaps', CreateBRAINSCutXML,
                           'l_probabilityMap')
            baw200.connect(BAtlas, 'template_t1', CreateBRAINSCutXML,
                           'atlasT1')
            baw200.connect(BAtlas, 'template_brain', CreateBRAINSCutXML,
                           'atlasBrain')
            baw200.connect(SplitAvgBABC, 'avgBABCT1', CreateBRAINSCutXML,
                           'subjT1')
            baw200.connect(SplitAvgBABC, 'avgBABCT2', CreateBRAINSCutXML,
                           'subjT2')
            baw200.connect(GADT1, 'outputVolume', CreateBRAINSCutXML,
                           'subjT1GAD')
            baw200.connect(GADT2, 'outputVolume', CreateBRAINSCutXML,
                           'subjT2GAD')
            baw200.connect(SGI, 'outputFileName', CreateBRAINSCutXML,
                           'subjSGGAD')
            baw200.connect(BABC, 'outputLabels', CreateBRAINSCutXML,
                           'subjBrain')
            baw200.connect(BFitAtlasToSubject, 'outputTransform',
                           CreateBRAINSCutXML, 'atlasToSubj')
            #CreateBRAINSCutXML.inputs.atlasToSubj = "INTERNAL_REGISTER.mat"
            #baw200.connect(BABC,'atlasToSubjectTransform',CreateBRAINSCutXML,'atlasToSubj')

        if 1 == 1:
            """
          BRAINSCut
          """
            BRAINSCUT = pe.Node(interface=BRAINSCut(),
                                name="BRAINSCUT",
                                input_names=['netConfiguration'])
            BRAINSCUT.inputs.applyModel = True
            baw200.connect(CreateBRAINSCutXML, 'xml_filename', BRAINSCUT,
                           'netConfiguration')
            """
          BRAINSTalairach
          Not implemented yet.
          """

        ## Make deformed Atlas image space
        if processingLevel > 2:
            print(
                """
         Run ANTS Registration at processingLevel={0}
         """.format(processingLevel))
            ComputeAtlasToSubjectTransform = pe.Node(
                interface=ANTSWrapper(),
                name="19_ComputeAtlasToSubjectTransform")
            ComputeAtlasToSubjectTransform.inputs.output_prefix = "ANTS_"

            baw200.connect(SplitAvgBABC, 'avgBABCT1',
                           ComputeAtlasToSubjectTransform, "fixed_T1_image")
            baw200.connect(SplitAvgBABC, 'avgBABCT2',
                           ComputeAtlasToSubjectTransform, "fixed_T2_image")
            baw200.connect(BAtlas, 'template_t1',
                           ComputeAtlasToSubjectTransform, "moving_T1_image")
            baw200.connect(BAtlas, 'template_t1',
                           ComputeAtlasToSubjectTransform, "moving_T2_image")

            WarpAtlas = pe.Node(interface=WarpAllAtlas(), name="19_WarpAtlas")
            WarpAtlas.inputs.moving_atlas = atlas_fname_wpath
            WarpAtlas.inputs.deformed_atlas = "./"
            baw200.connect(ComputeAtlasToSubjectTransform, 'output_affine',
                           WarpAtlas, "affine_transform")
            baw200.connect(ComputeAtlasToSubjectTransform, 'output_warp',
                           WarpAtlas, "deformation_field")
            baw200.connect(SplitAvgBABC, 'avgBABCT1', WarpAtlas,
                           'reference_image')

        if processingLevel > 3:
            print(
                """
          Run Freesurfer ReconAll at processingLevel={0}
          """.format(processingLevel))
            subj_id = os.path.basename(
                os.path.dirname(os.path.dirname(baw200.base_dir)))
            scan_id = os.path.basename(os.path.dirname(baw200.base_dir))
            reconall = pe.Node(interface=ReconAll(), name="41_FS510")
            reconall.inputs.subject_id = subj_id + '_' + scan_id
            reconall.inputs.directive = 'all'
            reconall.inputs.subjects_dir = '.'
            baw200.connect(SplitAvgBABC, 'avgBABCT1', reconall, 'T1_files')
        else:
            print "Skipping freesurfer"

    print "Start Processing"
    #baw200.run(plugin='MultiProc', plugin_args={'n_procs' : 4})
    baw200.run(
        plugin='SGE',
        plugin_args=dict(
            template=GLOBAL_SGE_SCRIPT,
            qsub_args=
            "-S /bin/bash -q all.q -pe smp1 2-4 -o /dev/null -e /dev/null "))
Example #15
def main():
    arguments = docopt(__doc__)
    study = arguments['<study>']
    use_server = arguments['--log-to-server']
    debug = arguments['--debug']

    config = load_config(study)

    if use_server:
        add_server_handler(config)
    if debug:
        logger.setLevel(logging.DEBUG)
    ## setup some paths
    study_base_dir = config.get_study_base()
    fs_dir = config.get_path('freesurfer')
    data_dir = config.get_path('nii')
    # not sure where to put this. Potentially it could be very large
    # keeping it means existing subjects don't get re-run.
    # it could be deleted, but then extra code would be needed to determine
    # if subjects have been run.
    working_dir = os.path.join(study_base_dir,
                               'pipelines/workingdir_reconflow')

    ## These are overrides, for testing
    base_dir = '/external/rprshnas01/tigrlab/'
    fs_dir = os.path.join(base_dir, 'scratch/twright/pipelines/freesurfer',
                          study)

    working_dir = os.path.join(
        base_dir, 'scratch/twright/pipelines/workingdir_reconflow')

    # freesurfer fails if the subjects dir doesn't exist
    check_folder_exists(fs_dir)
    # get the list of subjects that are not phantoms and have been qc'd
    subject_list = config.get_subject_metadata()
    subject_list = [
        subject for subject in subject_list
        if not dm_scanid.is_phantom(subject)
    ]

    # Need to determine if the study has T2 (or FLAIR) scans,
    # do this by looking in the study_config.yml for expected scantypes.
    # Current pipelines add T2 files if they exist on a per-subject basis
    # Nipype expects each run of the pipeline to be the same across all subjects
    # it is possible to set some parameters on a per-subject basis (see nu-iter setting)
    # but is this desirable?
    scan_types = get_common_scan_types(config)

    if not 'T1' in scan_types:
        msg = 'Study {} does not have T1 scans, aborting.'.format(study)
        sys.exit(msg)

    templates = {'T1': '{dm_subject_id}/{dm_subject_id}_??_T1_??*.nii.gz'}
    if 'T2' in scan_types:
        templates['T2'] = '{dm_subject_id}/{dm_subject_id}_??_T2_??*.nii.gz'
    if 'FLAIR' in scan_types:
        logger.debug('FLAIR processing not yet implemented')
        #templates = {'T2': '{dm_subject_id}/{dm_subject_id}_??_FLAIR _??*.nii.gz'}

    # setup the nipype nodes
    # infosource justs iterates through the list of subjects
    infosource = Node(IdentityInterface(fields=['subject_id']),
                      name="infosource")
    # For testing
    subject_list = ['DTI_CMH_H001_02']
    infosource.iterables = ('subject_id', subject_list)

    # sf finds the files for each subject. The dmSelectFiles class
    # overrides the nipype.SelectFiles adding checks that the numbers
    # of files matches those defined in study_config.yml
    sf = Node(dmSelectFiles(templates), name="selectFiles")

    sf.inputs.base_directory = data_dir

    # set_nuiter implements a simple function to set the iteration count
    # on a subject by subject basis
    set_nuiter = Node(Function(input_names=['subject_id'],
                               output_names=['nu_iter'],
                               function=get_nuiter_settings),
                      name='get_nuiter')

    # reconall is the interface for the recon-all freesurfer function
    # currently we seem unable to specify multiple directives
    #    (e.g. -qcache and -notal-check)
    reconall = Node(ReconAll(directive='all',
                             parallel=True,
                             subjects_dir=fs_dir),
                    name='recon-all')
    # if this is running on a cluster, we can specify node specific requirements
    #  i.e. reconall runs well with lots of cores.
    reconall.plugin_args = {
        'qsub_args': '-l nodes=1:ppn=24',
        'overwrite': True
    }
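    # Note (assumption, not from the original script): when more than one extra
    # recon-all option is needed, the interface's `flags` input can typically
    # carry them (other examples in this collection use flags="-3T" or
    # flags=['-cw256']), e.g. reconall.inputs.flags = ['-qcache', '-notal-check'].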

    # get_summary extracts the summary information from the output of reconall
    get_summary = Node(EnigmaSummaryTask(), name='Enigma_Summaries')

    ## Create the workflow
    reconflow = Workflow(name='reconflow')
    reconflow.base_dir = working_dir

    # need a different connection pattern and param for the reconall node
    # if T2 files exist
    sf_ra_conx = [('T1', 'T1_files')]

    if 'T2' in scan_types:
        reconall.inputs.use_T2 = True
        sf_ra_conx.append(('T2', 'T2_file'))

    ## Connect the outputs from each node to the corresponding inputs
    # Basically we link the defined outputs from each node, to the inputs of the next node
    #   Each item in the list is [node1, node2, [(output_node1, input_node2)]]

    # Problem here due to incompatibilities between freesurfer 5 & 6
    # this pattern works for freesurfer 5.3.0 (without the parallel flag for reconall)
    # but fails for 6.0.0, which doesn't support the nuiterations flag.
    # reconflow.connect([(infosource, sf, [('subject_id', 'dm_subject_id')]),
    #                    (infosource, set_nuiter, [('subject_id', 'subject_id')]),
    #                    (sf, reconall, sf_ra_conx),
    #                    (set_nuiter, reconall, [('nu_iter', 'flags')])])

    # this is the freesurfer 6 compatible version
    reconflow.connect([(infosource, sf, [('subject_id', 'dm_subject_id')]),
                       (infosource, reconall, [('subject_id', 'subject_id')]),
                       (sf, reconall, sf_ra_conx),
                       (reconall, get_summary,
                        [('subjects_dir', 'subjects_dir'),
                         ('subject_id', 'subject_id'),
                         ('subjects_dir', 'output_path')])])

    # need to use a job template to ensure the environment is set correctly
    # on the running nodes.
    # Not sure why the current env isn't being passed
    job_template = os.path.join(os.path.dirname(__file__),
                                'job_template_scc.sh')

    ## run the actual workflow.
    # the pbsGraph plugin creates jobs for each node on a PBS torque using
    # torque scheduling to keep them in order.
    # Use plugin='SGEGraph' to run on lab cluster (not sure what will happen
    #   to the reconflow node if we don't have any 24 core machines).
    # Don't specify a plugin to run on a single machine
    reconflow.run(plugin='PBSGraph', plugin_args=dict(template=job_template))
Example #16
def create_grvx_workflow(parameters):

    parameters['paths']['output'].mkdir(exist_ok=True, parents=True)

    parameters['timestamp'] = datetime.now().isoformat()
    parameters_json = parameters['paths']['output'] / 'parameters.json'
    with parameters_json.open('w') as f:
        json_dump(parameters, f, indent=2, cls=JSONEncoder_path)

    bids = bids_node(parameters)

    node_reconall = Node(ReconAll(), name='freesurfer')
    node_reconall.inputs.subjects_dir = str(parameters['paths']['freesurfer_subjects_dir'])
    node_reconall.inputs.flags = ['-cw256', ]

    node_corr = Node(function_corr, name='corr_fmri_ecog')
    node_corr.inputs.pvalue = parameters['corr']['pvalue']

    node_corr_allfreq = Node(function_corr_allfreq, name='corr_fmri_ecog_allfreq')
    node_corr_allfreq.inputs.pvalue = parameters['corr']['pvalue']
    node_corr_allfreq.inputs.min_n_sign_elec = parameters['corr']['min_n_sign_elec']

    node_corr_summary = JoinNode(
        function_corr_summary,
        name='corr_fmri_ecog_summary',
        joinsource='bids',
        joinfield=('in_files', 'ecog_files', 'fmri_files'),
        )

    w_fmri = workflow_fmri(parameters)
    w_ieeg = workflow_ieeg(parameters)

    w = Workflow('workflow')
    w.base_dir = str(parameters['paths']['output'])

    if parameters['fmri']['graymatter']:
        w.connect(bids, 'subject', node_reconall, 'subject_id')  # we might use freesurfer for other stuff too
        w.connect(bids, 'anat', node_reconall, 'T1_files')
        w.connect(node_reconall, 'ribbon', w_fmri, 'graymatter.ribbon')

    w.connect(bids, 'ieeg', w_ieeg, 'read.ieeg')
    w.connect(bids, 'elec', w_ieeg, 'read.electrodes')

    w.connect(bids, 'anat', w_fmri, 'bet.in_file')
    w.connect(bids, 'func', w_fmri, 'feat_design.func')

    w.connect(bids, 'elec', w_fmri, 'at_elec.electrodes')

    w.connect(w_ieeg, 'ecog_compare.tsv_compare', node_corr, 'ecog_file')
    w.connect(w_fmri, 'at_elec.fmri_vals', node_corr, 'fmri_file')

    w.connect(w_ieeg, 'ecog_compare_allfreq.compare', node_corr_allfreq, 'ecog_file')
    w.connect(w_fmri, 'at_elec.fmri_vals', node_corr_allfreq, 'fmri_file')

    w.connect(node_corr, 'out_file', node_corr_summary, 'in_files')
    w.connect(w_ieeg, 'ecog_compare.tsv_compare', node_corr_summary, 'ecog_files')
    w.connect(w_fmri, 'at_elec.fmri_vals', node_corr_summary, 'fmri_files')

    w.write_graph(graph2use='flat')
    log_dir = parameters['paths']['output'] / 'log'

    config.update_config({
        'logging': {
            'log_directory': log_dir,
            'log_to_file': True,
            },
        })

    rmtree(log_dir, ignore_errors=True)
    log_dir.mkdir()
    logging.update_logging(config)

    return w
Example #17
sessionIterator = ['01']  # what session numbers are checked

# create identity interface node
infosource = Node(IdentityInterface(fields=['subject_id']),
                  name="infosource")
infosource.iterables = [('subject_id', subject_list,)]

# build the anat-file template for each session of the subject
# (only the template from the last session in sessionIterator is kept)
for session in sessionIterator:
    anat_file = opj('sub-{subject_id}', 'ses-' + session, 'anat', 'sub-{subject_id}_ses-' + session + '_T1w.nii')

templates = {'anat': anat_file}

selectfiles = Node(SelectFiles(templates, base_directory='/mnt/Filbey/Evan/examples/BIDS'), name="selectfiles")

# Datasink - creates output folder for important outputs
datasink = Node(DataSink(base_directory="/mnt/Filbey/Evan/tmp/sinker",
                         container="datasink"),
                name="datasink")

# reconAll node
recon_all = Node(ReconAll(), name="reconAll")

wf_sub = Workflow(name="choosing_subjects")

wf_sub.connect(infosource, "subject_id", selectfiles, "subject_id")
wf_sub.connect(selectfiles, "anat", datasink, "anat_files")
wf_sub.connect(selectfiles, "anat", recon_all, 'input.inputspec.T1_files')
wf_sub.connect(infosource, "subject_id", recon_all, 'input.inputspec.subject_id')
wf_sub.run()
Example #18
def create_main_workflow_FS_segmentation():

    # check environment variables
    if not os.environ.get('FREESURFER_HOME'):
        raise RuntimeError('FREESURFER_HOME environment variable not set')

    if not os.environ.get('MNE_ROOT'):
        raise RuntimeError('MNE_ROOT environment variable not set')
        
    if not os.environ.get('SUBJECTS_DIR'):
        os.environ["SUBJECTS_DIR"] = sbj_dir
        
        if not op.exists(sbj_dir):
            os.mkdir(sbj_dir)
    

    print('SUBJECTS_DIR %s ' % os.environ["SUBJECTS_DIR"])

    # (1) iterate over subjects to define paths with templates -> Infosource
    #     and DataGrabber
    #     Node: SubjectData - we use IdentityInterface to create our own node,
    #     to specify the list of subjects the pipeline should be executed on
    infosource = pe.Node(interface=IdentityInterface(fields=['subject_id']),
                         name="infosource")
    infosource.iterables = ('subject_id', subjects_list)

    # Grab data
    #   the template can be filled by other inputs
    #   Here we define an input field for datagrabber called subject_id.
    #   This is then used to set the template (see %s in the template).

    # we can look for DICOM files or .nii ones
    if is_nii:
        datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                       outfields=['struct']),
                             name='datasource')
        datasource.inputs.template = '%s/*/anat/%s*.nii.gz'  
        datasource.inputs.template_args = dict(struct=[['subject_id',
                                                        'subject_id']])
    else:
        datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                                       outfields=['dcm_file']),
                             name='datasource')
        datasource.inputs.template = '%s*/*.dcm'
        datasource.inputs.template_args = dict(dcm_file=[['subject_id']])

    datasource.inputs.base_directory = MRI_path  # dir where the MRI files are
    datasource.inputs.sort_filelist = True

    # get the path of the first dicom file
    def get_first_file(dcm_files):
        return dcm_files[0]

    # return the path of the struct filename in the MRI sbj dir that will be
    # the  input of MRI convert routine
    def get_MRI_sbj_dir(dcm_file):
        from nipype.utils.filemanip import split_filename as split_f
        import os.path as op

        MRI_sbj_dir, basename, ext = split_f(dcm_file)
        struct_filename = op.join(MRI_sbj_dir, 'struct.nii.gz')
        return struct_filename

    get_firstfile = pe.Node(interface=Function(input_names=['dcm_files'],
                                               output_names=['dcm_file'],
                            function=get_first_file), name='get_firstfile')

    get_MRI_sbjdir = pe.Node(interface=Function(input_names=['dcm_file'],
                                                output_names=['struct_filename'],
                             function=get_MRI_sbj_dir), name='get_MRI_sbjdir')

    # MRI_convert Node
    # We use it if we don't have a .nii.gz file
    # The output of mriconvert is the input of recon-all
    mri_convert = pe.Node(interface=MRIConvert(), name='mri_convert')

    # (2) ReconAll Node to generate surfaces and parcellations of structural
    #     data from anatomical images of a subject.
    recon_all = pe.Node(interface=ReconAll(), name='recon_all')
    recon_all.inputs.subjects_dir = sbj_dir
    recon_all.inputs.directive = 'all'

    # reconall_workflow will be a node of the main workflow
    reconall_workflow = pe.Workflow(name=FS_WF_name)

    reconall_workflow.base_dir = MRI_path

    reconall_workflow.connect(infosource, 'subject_id',
                              recon_all, 'subject_id')

    reconall_workflow.connect(infosource, 'subject_id',
                              datasource,  'subject_id')

    if is_nii:
        reconall_workflow.connect(datasource, 'struct', recon_all, 'T1_files')
    else:
        reconall_workflow.connect(datasource,   'dcm_file',
                                  get_firstfile,  'dcm_files')
        reconall_workflow.connect(get_firstfile, 'dcm_file',
                                  get_MRI_sbjdir, 'dcm_file')

        reconall_workflow.connect(get_firstfile, 'dcm_file',
                                  mri_convert, 'in_file')
        reconall_workflow.connect(get_MRI_sbjdir, 'struct_filename',
                                  mri_convert, 'out_file')

        reconall_workflow.connect(mri_convert, 'out_file',
                                  recon_all, 'T1_files')

    # (3) BEM generation by the watershed algo of MNE C
    main_workflow = pe.Workflow(name=MAIN_WF_name)
    main_workflow.base_dir = sbj_dir

    # I mode: WatershedBEM Interface of nipype
    bem_generation = pe.Node(interface=WatershedBEM(), name='bem_generation')
    bem_generation.inputs.subjects_dir = sbj_dir
    bem_generation.inputs.atlas_mode = True

    main_workflow.connect(reconall_workflow, 'recon_all.subject_id',
                          bem_generation, 'subject_id')

    # II mode: make_watershed_bem of MNE Python package
    def mne_watershed_bem(sbj_dir, sbj_id):
        from mne.bem import make_watershed_bem

        print('call make_watershed_bem')
        make_watershed_bem(sbj_id, sbj_dir, overwrite=True)
        return sbj_id

    call_mne_watershed_bem = pe.Node(interface=Function(input_names=['sbj_dir', 'sbj_id'],
                                                        output_names=['sbj_id'],
                                                        function=mne_watershed_bem),
                                     name='call_mne_watershed_bem')

    # copy the generated meshes from bem/watershed to bem/ and change the names
    # according to MNE
    def copy_surfaces(sbj_id, mesh_files):
        import os
        import os.path as op
        from smri_params import sbj_dir
        from mne.report import Report

        report = Report()

        surf_names = ['brain_surface', 'inner_skull_surface',
                      'outer_skull_surface',  'outer_skin_surface']
        new_surf_names = ['brain.surf', 'inner_skull.surf',
                          'outer_skull.surf', 'outer_skin.surf']

        bem_dir = op.join(sbj_dir, sbj_id, 'bem')
        surface_dir = op.join(sbj_dir, sbj_id, 'bem/watershed')

        for surf_name, new_name in zip(surf_names, new_surf_names):
            os.system('cp %s %s' % (op.join(surface_dir, sbj_id + '_' + surf_name),
                                    op.join(bem_dir, new_name)))

        report.add_bem_to_section(subject=sbj_id, subjects_dir=sbj_dir)
        report_filename = op.join(bem_dir, "BEM_report.html")
        print('*** REPORT file %s written ***' % report_filename)
        report.save(report_filename, open_browser=False, overwrite=True)

        return sbj_id

    copy_bem_surf = pe.Node(interface=Function(input_names=['sbj_id', 'mesh_files'], 
                                               output_names=['sbj_id'],
                                               function = copy_surfaces),
                            name='copy_bem_surf')

    main_workflow.connect(infosource, 'subject_id', copy_bem_surf, 'sbj_id')
    main_workflow.connect(bem_generation, 'mesh_files',
                          copy_bem_surf, 'mesh_files')

    return main_workflow
Example #19
#convert dicom to nifti
test_subject.get_seqs()
test_subject.to_bids(out_path)
"""
# create array of subject objects with path to raw data

subjectList = [Subject]  # Subject includes pointers to BIDS-format data and a to_bids function
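# Hedged sketch (assumption, not the project's real class): from the attribute
# accesses below, the Subject object is assumed to expose roughly these fields.
# The hypothetical name _SubjectSketch avoids shadowing the real Subject above.
from dataclasses import dataclass
from types import SimpleNamespace


@dataclass
class _SubjectSketch:
    subjectID: str                 # e.g. "sub-01"
    bidsPath: str                  # BIDS root, used as subjects_dir below
    seqs: SimpleNamespace          # seqs.anat points to the subject's T1w NIfTI

    def to_bids(self, out_path):
        """Placeholder for the real DICOM-to-BIDS conversion."""
        raise NotImplementedError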


# recon-all FreeSurfer - build command

for subject in subjectList:
    # initialize reconAll interface
    reconall = ReconAll()
    # add the various attributes to the interface
    reconall.inputs.subject_id = subject.subjectID
    reconall.inputs.directive = 'all'
    reconall.inputs.subjects_dir = subject.bidsPath
    reconall.inputs.T1_files = subject.seqs.anat
    # build the shell command from the above attributes (cmdline only assembles the
    # string; reconall.run() would actually execute it)
    # TODO: check if output exists prior to execution, or catch the error from the
    # command line (see the sketch below)
    print(reconall.cmdline)

# add recon-all & ants to subject object

# need to add new attributes to subject class. subclass? add to existing? what is "pythonic"? how do I enforce order in
# processing and handle process failure or error? could try hashing the output prior to running the command. this
# checks if the object exists. shell script may have the check already but the goal is to not reprocess anything.
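# Hedged sketch (assumption): FreeSurfer writes <subjects_dir>/<subject>/scripts/
# recon-all.done when a run finishes, so checking for that file (or for the mri/
# folder, as other snippets above do) is one way to avoid reprocessing:
import os


def needs_recon_all(subjects_dir, subject_id):
    """Return True if no finished recon-all output exists for this subject."""
    done_flag = os.path.join(subjects_dir, subject_id, "scripts", "recon-all.done")
    return not os.path.isfile(done_flag)


for subject in subjectList:
    if needs_recon_all(subject.bidsPath, subject.subjectID):
        reconall = ReconAll()
        reconall.inputs.subject_id = subject.subjectID
        reconall.inputs.directive = 'all'
        reconall.inputs.subjects_dir = subject.bidsPath
        reconall.inputs.T1_files = subject.seqs.anat
        reconall.run()
    else:
        print("recon-all output already exists for %s, skipping" % subject.subjectID)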
def create_main_workflow_FS_segmentation():

    # Check environment variables
    if not os.environ.get('FREESURFER_HOME'):
        raise RuntimeError('FREESURFER_HOME environment variable not set')

    if not os.environ.get('SUBJECTS_DIR'):
        os.environ["SUBJECTS_DIR"] = subjects_dir

        if not op.exists(subjects_dir):
            os.mkdir(subjects_dir)

    print('SUBJECTS_DIR %s ' % os.environ["SUBJECTS_DIR"])

    main_workflow = pe.Workflow(name=MAIN_WF_name)
    main_workflow.base_dir = subjects_dir

    # (1) we create a node to pass input filenames to DataGrabber from nipype
    #     iterate over subjects
    infosource = create_iterator(['subject_id'], [subject_ids])

    # # and a node to grab data. The template_args in this node iterate upon
    # the values in the infosource node
    # Here we define an input field for datagrabber called subject_id.
    # This is then used to set the template (see %s in the template).
    # we look for .nii files
    template_path = '%s/anatomy/highres001.nii.gz'
    template_args = [['subject_id']]
    infields = ['subject_id']
    datasource = create_datagrabber(data_path,
                                    template_path,
                                    template_args,
                                    infields=infields)

    # (2) ReconAll Node to generate surfaces and parcellations of structural
    #     data from anatomical images of a subject.
    recon_all = pe.Node(interface=ReconAll(), name='recon_all')
    recon_all.inputs.subjects_dir = subjects_dir
    recon_all.inputs.directive = 'all'

    # reconall_workflow will be a node of the main workflow
    reconall_workflow = pe.Workflow(name=FS_WF_name)
    reconall_workflow.base_dir = wf_path

    reconall_workflow.connect(infosource, 'subject_id', recon_all,
                              'subject_id')

    reconall_workflow.connect(infosource, 'subject_id', datasource,
                              'subject_id')

    reconall_workflow.connect(datasource, 'raw_file', recon_all, 'T1_files')

    # (3) BEM generation by make_watershed_bem of the MNE-Python package
    #     (_create_bem_sol is defined elsewhere; a hedged sketch follows after this
    #     function)
    bem_generation = pe.Node(interface=Function(
        input_names=['subjects_dir', 'sbj_id'],
        output_names=['sbj_id'],
        function=_create_bem_sol),
                             name='call_mne_watershed_bem')
    bem_generation.inputs.subjects_dir = subjects_dir
    main_workflow.connect(reconall_workflow, 'recon_all.subject_id',
                          bem_generation, 'sbj_id')

    return main_workflow
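# Hedged sketch (assumption): _create_bem_sol is not shown in this snippet. Based on
# its input/output names and on the MNE-Python BEM helpers used in the earlier
# example, a minimal version could look like this:
def _create_bem_sol(subjects_dir, sbj_id):
    """Create watershed BEM surfaces and a single-layer BEM solution for one subject."""
    import os.path as op
    import mne

    # generate the watershed surfaces (brain, inner/outer skull, outer skin)
    mne.bem.make_watershed_bem(sbj_id, subjects_dir, overwrite=True)

    # build and save a single-layer (MEG) BEM solution
    surfaces = mne.make_bem_model(sbj_id, conductivity=[0.3],
                                  subjects_dir=subjects_dir)
    bem = mne.make_bem_solution(surfaces)
    mne.write_bem_solution(op.join(subjects_dir, sbj_id, 'bem',
                                   '%s-bem-sol.fif' % sbj_id), bem)
    return sbj_id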
Example #21
        BET().version: BET
    },
    "CAT12 Segmentation": {
        "12.6": Cat12Segmentation
    },
    "fslreorient2std": {
        Reorient2Std().version: Reorient2Std
    },
    "FAST": {
        FAST().version: FastWrapper
    },
    "FLIRT": {
        FLIRT().version: FLIRT
    },
    "FNIRT": {
        FNIRT().version: FNIRT
    },
    "FSL Anatomical Processing Script": {
        FslAnat.__version__: FslAnat
    },
    "SUSAN": {
        SUSAN().version: SUSAN
    },
    "ReconAll": {
        ReconAll().version: ReconAll
    },
    "robustfov": {
        RobustFOV().version: RobustFOV
    },
}
Example #22
cursor = connection.cursor()

num_threads = 12

base_dir = "/Shared/sinapse/CACHE/20161010_AtrophySimulation_Baseline_CACHE"
for row in cursor.execute(
        "SELECT t1_image_file, t2_image_file, session_id FROM input"):
    session_id = str(row[2])
    t1_file = str(row[0])
    t2_file = str(row[1])

    wf = Workflow(name="FreeSurfer_{0}".format(session_id))

    subject_directory = os.path.dirname(os.path.dirname(t1_file))

    recon_all = Node(ReconAll(), "ReconAll")
    recon_all.inputs.T1_files = [t1_file]
    recon_all.inputs.T2_file = t2_file
    recon_all.inputs.openmp = num_threads
    recon_all.inputs.subject_id = "FreeSurfer"
    recon_all.inputs.flags = "-no-isrunning"
    recon_all.inputs.subjects_dir = os.path.join(
        "/Shared/sinapse/CACHE/20161010_AtrophySimulation_Baseline",
        session_id)
    recon_all.plugin_args = {
        "qsub_args": "-q HJ,UI,all.q,COE -pe smp {0}".format(num_threads),
        "overwrite": True
    }

    hncma_atlas = os.path.join(subject_directory, "WarpedAtlas2Subject",
                               "hncma_atlas.nii.gz")
Example #23
File: fmri.py Project: gpiantoni/boavus
def workflow_fmri(PARAMETERS, FREESURFER_PATH):
    """TODO: input and output"""

    input = Node(
        IdentityInterface(fields=['subject', 'T1w', 'bold', 'electrodes']),
        name='input')

    node_bet = Node(BET(), name='bet')
    node_bet.inputs.frac = 0.5
    node_bet.inputs.vertical_gradient = 0
    node_bet.inputs.robust = True

    node_featdesign = Node(function_prepare_design, name='feat_design')

    node_feat = Node(FEAT(), name='feat')

    node_compare = Node(function_fmri_compare, name='fmri_compare')
    node_compare.inputs.measure = PARAMETERS['fmri_compare']['measure']
    node_compare.inputs.normalize_to_mean = PARAMETERS['fmri_compare'][
        'normalize_to_mean']

    node_upsample = Node(
        FLIRT(), name='upsample')  # not perfect, there is a small offset
    node_upsample.inputs.apply_isoxfm = UPSAMPLE_RESOLUTION
    node_upsample.inputs.interp = 'nearestneighbour'

    node_downsample = Node(
        FLIRT(), name='downsample')  # not perfect, there is a small offset
    node_downsample.inputs.apply_xfm = True
    node_downsample.inputs.uses_qform = True
    # node_downsample.inputs.apply_isoxfm = DOWNSAMPLE_RESOLUTION
    node_downsample.inputs.interp = 'nearestneighbour'

    node_threshold = Node(Threshold(), name='threshold')
    node_threshold.inputs.thresh = GRAYMATTER_THRESHOLD
    node_threshold.inputs.args = '-bin'

    node_graymatter = Node(function_fmri_graymatter, name='graymatter')

    node_realign_gm = Node(FLIRT(), name='realign_gm')
    node_realign_gm.inputs.apply_xfm = True
    node_realign_gm.inputs.uses_qform = True

    kernel_sizes = arange(
        PARAMETERS['at_elec']['kernel_start'],
        PARAMETERS['at_elec']['kernel_end'],
        PARAMETERS['at_elec']['kernel_step'],
    )
    node_atelec = Node(function_fmri_atelec, name='at_elec')
    node_atelec.inputs.distance = PARAMETERS['at_elec']['distance']
    node_atelec.inputs.kernel_sizes = list(kernel_sizes)
    node_atelec.inputs.graymatter = PARAMETERS['graymatter']

    w = Workflow('fmri')

    w.connect(input, 'T1w', node_bet, 'in_file')
    w.connect(input, 'bold', node_featdesign, 'func')
    w.connect(input, 'electrodes', node_atelec, 'electrodes')

    w.connect(node_bet, 'out_file', node_featdesign, 'anat')

    w.connect(node_featdesign, 'fsf_file', node_feat, 'fsf_file')
    w.connect(node_feat, 'feat_dir', node_compare, 'feat_path')

    if PARAMETERS['upsample']:
        w.connect(node_compare, 'out_file', node_upsample, 'in_file')
        w.connect(node_compare, 'out_file', node_upsample, 'reference')
        w.connect(node_upsample, 'out_file', node_atelec, 'in_file')
    else:
        w.connect(node_compare, 'out_file', node_atelec, 'in_file')

    if PARAMETERS['graymatter']:

        node_reconall = Node(ReconAll(), name='freesurfer')
        node_reconall.inputs.subjects_dir = str(FREESURFER_PATH)
        node_reconall.inputs.flags = [
            '-cw256',
        ]

        w.connect(input, 'T1w', node_reconall, 'T1_files')
        w.connect(input, 'subject', node_reconall, 'subject_id')

        if PARAMETERS['upsample']:
            w.connect(node_graymatter, 'out_file', node_realign_gm, 'in_file')
            w.connect(node_upsample, 'out_file', node_realign_gm, 'reference')
            w.connect(node_realign_gm, 'out_file', node_threshold, 'in_file')
        else:
            w.connect(node_graymatter, 'out_file', node_downsample, 'in_file')
            w.connect(node_compare, 'out_file', node_downsample, 'reference')
            w.connect(node_downsample, 'out_file', node_threshold, 'in_file')

        w.connect(node_threshold, 'out_file', node_atelec, 'graymatter')

        w.connect(node_reconall, 'ribbon', node_graymatter, 'ribbon')

    return w
Example #24
info = dict(T1=[['subject_id']])

infosource = Node(IdentityInterface(fields=['subject_id']), name='infosource')
infosource.iterables = ('subject_id', sids)

# Create a datasource node to get the T1 file
datasource = Node(DataGrabber(infields=['subject_id'], outfields=info.keys()),
                  name='datasource')
datasource.inputs.template = '%s/%s'
datasource.inputs.base_directory = os.path.abspath(data_dir)
datasource.inputs.field_template = dict(T1='%s/s1/anatomy/T1_002.nii.gz')
datasource.inputs.template_args = info
datasource.inputs.sort_filelist = True

reconall_node = Node(ReconAll(), name='reconall_node')
reconall_node.inputs.openmp = 2
reconall_node.inputs.args = '-hippocampal-subfields-T1'
reconall_node.inputs.subjects_dir = '/home/data/madlab/surfaces/emuR01'
reconall_node.plugin_args = {
    'sbatch_args': ('-p investor --qos pq_madlab -n 2'),
    'overwrite': True
}

wf = Workflow(name='fsrecon')

wf.connect(infosource, 'subject_id', datasource, 'subject_id')
wf.connect(infosource, 'subject_id', reconall_node, 'subject_id')
wf.connect(datasource, 'T1', reconall_node, 'T1_files')

wf.base_dir = os.path.abspath('/scratch/madlab/emu/')
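# Hedged sketch (assumption): the sbatch-style plugin_args above suggest this
# workflow is meant to be submitted through nipype's SLURM execution plugin:
wf.run(plugin='SLURM')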
         "title": "12.7",
         "description": "",
         "fixed_run_method_kwargs": {
             "verbose_output_dict": True
         },
         "input": CAT12_SEGMENTATION_INPUT_SPECIFICATION,
         "output": CAT12_SEGMENTATION_OUTPUT_SPECIFICATION,
     }],
 },
 {
     "title":
     "ReconAll",
     "description":
     "Performs all, or any part of, the FreeSurfer cortical reconstruction process.",  # noqa: E501
     "versions": [{
         "title": ReconAll().version or "1.0",
         "description":
         f"Default FreeSurfer ReconAll version for nipype {_NIPYPE_VERSION}.",  # noqa: E501
         "input": RECON_ALL_INPUT_SPECIFICATION,
         "output": RECON_ALL_OUTPUT_SPECIFICATION,
         "nested_results_attribute": "outputs.get_traitsfree",
         "max_parallel": 10,
     }],
 },
 {
     "title":
     "mrconvert",
     "description":
     "Performs conversion between different file types and optionally extract a subset of the input image",  # noqa: E501
     "versions": [{
         "title": MRConvert.__version__ or "1.0",