Example #1
def create_fs_compatible_logb_workflow(name="LOGISMOSB",
                                       plugin_args=None,
                                       config=None):
    """Create a workflow to run LOGISMOS-B from FreeSurfer Inputs"""

    if not config:
        config = read_json_config("fs_logb_config.json")

    wf = Workflow(name)

    inputspec = Node(IdentityInterface([
        't1_file', 't2_file', 'white', 'aseg', 'hemi', 'recoding_file',
        'gm_proba', 'wm_proba', 'lut_file', 'hncma_atlas'
    ]),
                     name="inputspec")

    # convert the white mesh to a vtk file with scanner coordinates
    to_vtk = Node(MRIsConvert(), name="WhiteVTK")
    to_vtk.inputs.out_file = "white.vtk"
    to_vtk.inputs.to_scanner = True

    wf.connect(inputspec, 'white', to_vtk, 'in_file')

    # convert brainslabels to nifti
    aseg_to_nifti = Node(MRIConvert(), "ABCtoNIFTI")
    aseg_to_nifti.inputs.out_file = "aseg.nii.gz"
    aseg_to_nifti.inputs.out_orientation = "LPS"
    wf.connect(inputspec, 'aseg', aseg_to_nifti, 'in_file')

    # create brainslabels from aseg
    aseg2brains = Node(Function(['in_file', 'recode_file', 'out_file'],
                                ['out_file'], recode_labelmap),
                       name="ConvertAseg2BRAINSLabels")
    aseg2brains.inputs.out_file = "brainslabels.nii.gz"

    wf.connect([(inputspec, aseg2brains, [('recoding_file', 'recode_file')]),
                (aseg_to_nifti, aseg2brains, [('out_file', 'in_file')])])

    t1_to_nifti = Node(MRIConvert(), "T1toNIFTI")
    t1_to_nifti.inputs.out_file = "t1.nii.gz"
    t1_to_nifti.inputs.out_orientation = "LPS"
    wf.connect(inputspec, 't1_file', t1_to_nifti, 'in_file')

    def t2_convert(in_file=None, reference_file=None, out_file=None):
        import os
        from nipype.interfaces.freesurfer import MRIConvert
        from nipype.interfaces.traits_extension import Undefined
        from nipype import Node
        if in_file:
            t2_to_nifti = Node(MRIConvert(), "T2toNIFTI")
            t2_to_nifti.inputs.in_file = in_file
            t2_to_nifti.inputs.out_file = os.path.abspath(out_file)
            t2_to_nifti.inputs.out_orientation = "LPS"
            if reference_file:
                t2_to_nifti.inputs.reslice_like = reference_file
            result = t2_to_nifti.run()
            out_file = os.path.abspath(result.outputs.out_file)
        else:
            out_file = Undefined
        return out_file
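    # Design note: t2_convert runs MRIConvert inside the Function node so the
    # conversion only happens when a T2 image is actually provided; returning
    # Undefined leaves the downstream t2_file input unset when there is no T2.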

    t2_node = Node(Function(['in_file', 'reference_file', 'out_file'],
                            ['out_file'], t2_convert),
                   name="T2Convert")
    t2_node.inputs.out_file = "t2.nii.gz"
    wf.connect(inputspec, 't2_file', t2_node, 'in_file')
    wf.connect(t1_to_nifti, 'out_file', t2_node, 'reference_file')

    # convert raw t1 to lia
    t1_to_ras = Node(MRIConvert(), "T1toRAS")
    t1_to_ras.inputs.out_orientation = "LIA"
    t1_to_ras.inputs.out_file = "t1_lia.mgz"
    wf.connect(inputspec, 't1_file', t1_to_ras, 'in_file')

    # Create ones image for use when masking the white matter
    ones = Node(Function(['in_volume', 'out_file'], ['out_file'],
                         create_ones_image),
                name="Ones_Image")
    ones.inputs.out_file = "ones.mgz"

    wf.connect(t1_to_ras, 'out_file', ones, 'in_volume')

    # use the ones image to obtain a white matter mask
    surfmask = Node(SurfaceMask(), name="WhiteMask")
    surfmask.inputs.out_file = "white_ras.mgz"

    wf.connect(ones, 'out_file', surfmask, 'in_volume')
    wf.connect(inputspec, 'white', surfmask, 'in_surface')

    surfmask_to_nifti = Node(MRIConvert(), "MasktoNIFTI")
    surfmask_to_nifti.inputs.out_file = "white.nii.gz"
    surfmask_to_nifti.inputs.out_orientation = "LPS"

    wf.connect(surfmask, 'out_file', surfmask_to_nifti, 'in_file')

    # create hemi masks

    split = Node(SplitLabels(), name="SplitLabelMask")
    split.inputs.out_file = "HemiBrainLabels.nii.gz"
    wf.connect([(aseg2brains, split, [('out_file', 'in_file')]),
                (inputspec, split, [('lut_file', 'lookup_table')]),
                (aseg_to_nifti, split, [('out_file', 'labels_file')]),
                (inputspec, split, [('hemi', 'hemi')])])

    dilate = Node(MultiLabelDilation(), "DilateLabels")
    dilate.inputs.out_file = "DilatedBrainLabels.nii.gz"
    dilate.inputs.radius = 1
    wf.connect(split, 'out_file', dilate, 'in_file')

    convert_label_map = Node(MRIConvert(), "ConvertLabelMapToMatchT1")
    convert_label_map.inputs.resample_type = "nearest"
    convert_label_map.inputs.out_file = "BrainLabelsFromAsegInT1Space.nii.gz"
    wf.connect(t1_to_nifti, 'out_file', convert_label_map, 'reslice_like')
    wf.connect(dilate, 'out_file', convert_label_map, 'in_file')

    logb = Node(LOGISMOSB(), name="LOGISMOS-B")
    logb.inputs.smoothnessConstraint = config['LOGISMOSB'][
        'smoothnessConstraint']
    logb.inputs.nColumns = config['LOGISMOSB']['nColumns']
    logb.inputs.columnChoice = config['LOGISMOSB']['columnChoice']
    logb.inputs.columnHeight = config['LOGISMOSB']['columnHeight']
    logb.inputs.nodeSpacing = config['LOGISMOSB']['nodeSpacing']
    logb.inputs.w = config['LOGISMOSB']['w']
    logb.inputs.a = config['LOGISMOSB']['a']
    logb.inputs.nPropagate = config['LOGISMOSB']['nPropagate']
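    # A hypothetical sketch of the 'LOGISMOSB' section expected in
    # fs_logb_config.json (key names are taken from the assignments above;
    # the values are placeholders, not recommended settings):
    #
    # {
    #     "LOGISMOSB": {
    #         "smoothnessConstraint": 5,
    #         "nColumns": 100,
    #         "columnChoice": "opposite",
    #         "columnHeight": 100,
    #         "nodeSpacing": 1.0,
    #         "w": 0.5,
    #         "a": 0.25,
    #         "nPropagate": 1
    #     }
    # }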

    if plugin_args:
        logb.plugin_args = plugin_args

    wf.connect([(t1_to_nifti, logb, [('out_file', 't1_file')]),
                (t2_node, logb, [('out_file', 't2_file')]),
                (inputspec, logb, [('hemi', 'basename'),
                                   ('hncma_atlas', 'atlas_file'),
                                   ('wm_proba', 'wm_proba_file'),
                                   ('gm_proba', 'gm_proba_file')]),
                (to_vtk, logb, [('converted', 'mesh_file')]),
                (surfmask_to_nifti, logb, [('out_file', 'wm_file')]),
                (convert_label_map, logb, [('out_file', 'brainlabels_file')])])

    outputspec = Node(IdentityInterface(['gmsurface_file', 'wmsurface_file']),
                      name="outputspec")

    wf.connect([(logb, outputspec, [('gmsurface_file', 'gmsurface_file'),
                                    ('wmsurface_file', 'wmsurface_file')])])

    return wf
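# A hypothetical usage sketch for the workflow above (base_dir and the input
# file names are placeholders):
#
# logb_wf = create_fs_compatible_logb_workflow(name="LOGISMOSB")
# logb_wf.base_dir = "/tmp/logb_workingdir"
# logb_wf.inputs.inputspec.t1_file = "T1.mgz"
# logb_wf.inputs.inputspec.white = "lh.white"
# logb_wf.inputs.inputspec.hemi = "lh"
# logb_wf.run()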
output_dir = 'output_stimulation_no_voxels_CA3'
working_dir = 'workingdir_stimulation_no_voxels_CA3'

no_voxels_CA3 = Workflow(name='no_voxels_CA3')
no_voxels_CA3.base_dir = opj(experiment_dir, working_dir)

# ==========================================================================================================================================================
# In[3]:
# To prevent nipype from iterating over the anat image with each func run, you need separate
# nodes to select the files,
# and this solves a problem I have had for almost 6 months.
# Note that for the sessions you also have to iterate over subject_id to get the {subject_id} var.

# Infosource - a function-free node to iterate over the list of subject names

infosource = Node(IdentityInterface(fields=['frequencies', 'subjects']),
                  name="infosource")
infosource.iterables = [('frequencies', frequency_list),
                        ('subjects', subject_list)]
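# With these iterables, nipype expands infosource into one run per
# (frequencies, subjects) combination. For example, with hypothetical lists
# frequency_list = ['10Hz', '20Hz'] and subject_list = ['003', '005'], four
# parameterized runs are generated (10Hz/003, 10Hz/005, 20Hz/003, 20Hz/005).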

# ==========================================================================================================================================================
# In[4]:

template_brain = '/media/amr/Amr_4TB/Work/October_Acquistion/anat_temp_enhanced_3.nii.gz'
template_mask = '/media/amr/Amr_4TB/Work/October_Acquistion/anat_template_enhanced_mask_2.nii.gz'
left_side_mask = '/media/amr/Amr_4TB/Work/October_Acquistion/left_anat_tem_3_enh_mask.nii.gz'

templates = {
    'thresh_zstat':
    '/media/amr/Amr_4TB/Work/stimulation/Stimulation_2nd_level_WorkingDir_{frequencies}_CA3/stimulation_2nd_level_{frequencies}_CA3/_subject_id_{subjects}/cluster_copes1/thresh_zstat1.nii.gz',
    'anat_2_temp':
def CreateMeasurementWorkflow(WFname, LABELS_CONFIG_FILE):
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    ###### UTILITY FUNCTIONS #######
    # This function returns a label map that only covers the FOV of the input DWI scan
    def CreateDWILabelMap(T2LabelMapVolume,DWIBrainMask):
        import os
        import SimpleITK as sitk
        T2LabelMapVolume = sitk.ReadImage(T2LabelMapVolume,sitk.sitkUInt16) #FreeSurfer labelmap needs uint-16
        DWIBrainMask = sitk.ReadImage(DWIBrainMask)
        # 1- Dilate input DWI mask
        dilateFilter = sitk.BinaryDilateImageFilter()
        dilateFilter.SetKernelRadius(1)
        dilated_mask = dilateFilter.Execute( DWIBrainMask )
        # 2- Resample dilated mask to the space of T2LabelMap (1x1x1)
        # Use Linear interpolation + thresholding
        resFilt = sitk.ResampleImageFilter()
        resFilt.SetReferenceImage(T2LabelMapVolume)
        resFilt.SetOutputPixelType(sitk.sitkFloat32)
        resFilt.SetInterpolator(sitk.sitkLinear)
        resampled_dilated_mask = resFilt.Execute(dilated_mask)
        # Threshold: voxels above zero (lower threshold 0.0001) become 1, all others 0
        threshFilt = sitk.BinaryThresholdImageFilter()
        thresh_resampled_dilated_mask = threshFilt.Execute(resampled_dilated_mask,0.0001,1.0,1,0)
        # 3- Cast the thresholded image to uInt-16
        castFilt = sitk.CastImageFilter()
        castFilt.SetOutputPixelType(sitk.sitkUInt16)
        casted_thresh_resampled_dilated_mask = castFilt.Execute(thresh_resampled_dilated_mask)
        # 4- Multiply this binary mask to the T2 labelmap volume
        mulFilt = sitk.MultiplyImageFilter()
        DWILabelMapVolume = mulFilt.Execute(casted_thresh_resampled_dilated_mask,T2LabelMapVolume)
        # write the output label map
        outputVolume = os.path.realpath('DWILabelMapVolume.nrrd')
        sitk.WriteImage(DWILabelMapVolume, outputVolume)
        return outputVolume

    def MakeResamplerInFileList(FAImage,MDImage,RDImage,FrobeniusNormImage,Lambda1Image,Lambda2Image,Lambda3Image):
        RISsList = [FAImage,MDImage,RDImage,FrobeniusNormImage,Lambda1Image,Lambda2Image,Lambda3Image]
        return RISsList

    # This function computes statistics of each input RIS volume over all input labels
    # and writes the results as a CSV file
    def ComputeStatistics(inputVolume,T2LabelMapVolume,DWILabelMapVolume,labelCodesFile):
        import os
        import SimpleITK as sitk
        #### Util Funcs ####
        def createLabelsDictionary(labelCodesFile):
            import csv
            labelsDictionary={}
            with open(labelCodesFile) as lf:
                reader = csv.reader(lf, delimiter=',')
                for line in reader:
                    if line[0][0] == "#":
                        continue
                    else:
                        labelsDictionary[line[0]] = line[1]
            return labelsDictionary

        def computeVoxelVolume(inputVolume):
            import operator
            from functools import reduce  # needed in Python 3, where reduce is not a builtin
            return reduce(operator.mul, inputVolume.GetSpacing())

        def ReturnStatisticsList(labelID,voxelVolume,resampledRISVolume,DWILabelMap,T2LabelMap):
            statFilter = sitk.LabelStatisticsImageFilter()
            # RIS stats over input label ID
            statFilter.Execute(resampledRISVolume, DWILabelMap)
            mean = statFilter.GetMean(labelID)
            std = statFilter.GetSigma(labelID)
            maximum = statFilter.GetMaximum(labelID)
            minimum = statFilter.GetMinimum(labelID)
            median = statFilter.GetMedian(labelID)
            effectiveVolume = statFilter.GetCount(labelID)*voxelVolume
            # compute total volume of input label ID in the non-cropped labelmap (T2LabelMap)
            statFilter.Execute(resampledRISVolume, T2LabelMap)
            totalVolume = statFilter.GetCount(labelID)*voxelVolume
            # If effectiveVolume is 0, the label is either missed in the DWI scan or does not
            # exist in the current labelmaps; in both cases the confidence coefficient must be zero.
            if effectiveVolume == 0:
                confidence_coeficient = 0
                maximum = 0
                minimum = 0
            else:
                if totalVolume == 0:
                    raise ValueError('Label {0} is not found in the T2 label map, but exists in the DWI label map!'.format(labelID))
                confidence_coeficient = effectiveVolume / totalVolume
            # Now create statistics list
            statsList = [format(mean,'.4f'),
                         format(std,'.4f'),
                         format(maximum,'.4f'),
                         format(minimum,'.4f'),
                         format(median,'.4f'),
                         effectiveVolume,
                         totalVolume,
                         format(confidence_coeficient,'.3f')]
            return statsList, totalVolume

        def writeLabelStatistics(filename,statisticsDictionary):
            import csv
            with open(filename, 'wb') as lf:
                headerdata = [['#Label', 'mean', 'std', 'max', 'min', 'median', 'effective_volume', 'total_volume', 'confidence_coeficient']]
                wr = csv.writer(lf, delimiter=',')
                wr.writerows(headerdata)
                for key, value in sorted(statisticsDictionary.items()):
                    wr.writerows([[key] + value])
        #### #### #### ####
        resampledRISVolume = sitk.ReadImage(inputVolume)
        T2LabelMap = sitk.ReadImage(T2LabelMapVolume)
        DWILabelMap = sitk.ReadImage(DWILabelMapVolume)
        labelsDictionary = createLabelsDictionary(labelCodesFile)
        statisticsDictionary={}
        voxelVolume=computeVoxelVolume(resampledRISVolume)
        for key in labelsDictionary:
            labelID = int(key)
            [statisticsList, total_volume] = ReturnStatisticsList(labelID,voxelVolume,resampledRISVolume,DWILabelMap,T2LabelMap)
            if total_volume != 0:
                statisticsDictionary[labelsDictionary[key]] = statisticsList
        # Create output file name
        inputBaseName = os.path.basename(inputVolume)
        inputName = os.path.splitext(inputBaseName)[0]
        RISName = inputName.split('_',1)[0]
        CSVStatisticsFile = os.path.realpath(RISName + '_statistics.csv')
        writeLabelStatistics(CSVStatisticsFile,statisticsDictionary)
        return CSVStatisticsFile

    # This function helps to pick the desired output from the output list
    def pickFromList(inputlist,item):
        return inputlist[item]
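    # In the connections below, a tuple such as ('CSVStatisticsFile', pickFromList, 0)
    # tells nipype to call pickFromList(CSVStatisticsFile, 0) on the MapNode output,
    # i.e. to select the statistics CSV of the first resampled RIS volume (FA).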

    def ResampleRISVolumes(referenceVolume, inputVolume):
        import os
        import SimpleITK as sitk
        refVolume = sitk.ReadImage(referenceVolume)
        RISVolume = sitk.ReadImage(inputVolume)
        # 1- voxel-wise square root of input volume
        sqrtFilt = sitk.SqrtImageFilter()
        RIS_sqrt = sqrtFilt.Execute(RISVolume)
        # 2-resample squared image using cubic BSpline
        resFilt = sitk.ResampleImageFilter()
        resFilt.SetReferenceImage(refVolume)
        resFilt.SetInterpolator(sitk.sitkBSpline)
        RIS_sqrt_res = resFilt.Execute(RIS_sqrt)
        # 3- square the resampled RIS volume voxel-wise
        squarFilt = sitk.SquareImageFilter()
        RIS_resampled = squarFilt.Execute(RIS_sqrt_res)
        # Create output file name
        inputBaseName = os.path.basename(inputVolume)
        RISName = os.path.splitext(inputBaseName)[0]
        outputVolume = os.path.realpath(RISName + '_res.nrrd')
        sitk.WriteImage(RIS_resampled,outputVolume)
        assert os.path.isfile(outputVolume), "Resampled RIS file is not found: %s" % outputVolume
        return outputVolume
    #################################
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    MeasurementWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(interface=IdentityInterface(fields=['T2LabelMapVolume','DWIBrainMask','LabelsConfigFile',
                                                             'FAImage','MDImage','RDImage','FrobeniusNormImage',
                                                             'Lambda1Image','Lambda2Image','Lambda3Image']),
                         name='inputsSpec')
    inputsSpec.inputs.LabelsConfigFile = LABELS_CONFIG_FILE

    outputsSpec = pe.Node(interface=IdentityInterface(fields=['FA_stats','MD_stats','RD_stats','FrobeniusNorm_stats',
                                                              'Lambda1_stats','Lambda2_stats','Lambda3_stats']),
                          name='outputsSpec')

    # Step1: Create the labelmap volume for DWI scan
    CreateDWILabelMapNode = pe.Node(interface=Function(function = CreateDWILabelMap,
                                                       input_names=['T2LabelMapVolume','DWIBrainMask'],
                                                       output_names=['DWILabelMapVolume']),
                                    name="CreateDWILabelMap")
    MeasurementWF.connect(inputsSpec, 'T2LabelMapVolume', CreateDWILabelMapNode, 'T2LabelMapVolume')
    MeasurementWF.connect(inputsSpec, 'DWIBrainMask', CreateDWILabelMapNode, 'DWIBrainMask')

    # Now we have two labelmap volumes (both have 1x1x1 voxel lattice):
    # (1) T2LabelMap: Used to compute total_volume for each label
    # (2) DWILabelMap: It is probably cropped and may miss parts of some labels,
    #                  and is used to compute all stats like [mean,std,max,min,median,effective_volume].

    # Step2: Resample each RIS to T2LabelmapVolume voxel lattice
    MakeResamplerInFilesListNode = pe.Node(Function(function=MakeResamplerInFileList,
                                                    input_names=['FAImage','MDImage','RDImage','FrobeniusNormImage',
                                                                 'Lambda1Image','Lambda2Image','Lambda3Image'],
                                                    output_names=['RISsList']),
                                           name="MakeResamplerInFilesListNode")
    MeasurementWF.connect([(inputsSpec, MakeResamplerInFilesListNode, [('FAImage','FAImage'),
                                                                       ('MDImage','MDImage'),
                                                                       ('RDImage','RDImage'),
                                                                       ('FrobeniusNormImage','FrobeniusNormImage'),
                                                                       ('Lambda1Image','Lambda1Image'),
                                                                       ('Lambda2Image','Lambda2Image'),
                                                                       ('Lambda3Image','Lambda3Image')])])
    # To resample RIS volumes we should consider that the output of resampling
    # should not have any negative intensity value, because negative values have no
    # meaning in rotationally invariant scalar measures.
    # There are 3 options:
    # 1- Use linear interpolation (commented-out part using BRAINSResample)
    # 2- Use Gaussian interpolation
    # 3- Use cubic BSpline interpolation
    # The third option is chosen here, but with some considerations:
    # "voxel-wise square root of intensity values" +
    # cubic BSpline interpolation +
    # "voxel-wise square of intensity values"
    ResampleRISsNode = pe.MapNode(interface=Function(function = ResampleRISVolumes,
                                                     input_names=['referenceVolume','inputVolume'],
                                                     output_names=['outputVolume']),
                                  name="ResampleRISs",
                                  iterfield=['inputVolume'])
    MeasurementWF.connect(inputsSpec,'T2LabelMapVolume',ResampleRISsNode,'referenceVolume')
    MeasurementWF.connect(MakeResamplerInFilesListNode,'RISsList',ResampleRISsNode,'inputVolume')
    '''
    ResampleRISsNode = pe.MapNode(interface=BRAINSResample(), name="ResampleRISs",
                                  iterfield=['inputVolume', 'outputVolume'])
    ResampleRISsNode.inputs.interpolationMode = 'Linear'
    ResampleRISsNode.inputs.pixelType = 'float'
    ResampleRISsNode.inputs.outputVolume = ['FA_res.nrrd','MD_res.nrrd','RD_res.nrrd','frobenius_norm_res.nrrd',
                                            'lambda1_res.nrrd','lambda2_res.nrrd','lambda3_res.nrrd']
    MeasurementWF.connect(inputsSpec,'T2LabelMapVolume',ResampleRISsNode,'referenceVolume')
    MeasurementWF.connect(MakeResamplerInFilesListNode,'RISsList',ResampleRISsNode,'inputVolume')
    '''
    # Step3: Computes statistics of each resampled RIS over all input labels
    # and writes the results as a CSV file (a csv file for each RIS)
    ComputeStatisticsNode = pe.MapNode(interface=Function(function = ComputeStatistics,
                                                          input_names=['inputVolume','T2LabelMapVolume','DWILabelMapVolume','labelCodesFile'],
                                                          output_names=['CSVStatisticsFile']),
                                       name="ComputeStatistics",
                                       iterfield=['inputVolume'])
    MeasurementWF.connect(ResampleRISsNode, 'outputVolume', ComputeStatisticsNode, 'inputVolume')
    MeasurementWF.connect(inputsSpec, 'T2LabelMapVolume', ComputeStatisticsNode, 'T2LabelMapVolume')
    MeasurementWF.connect(CreateDWILabelMapNode, 'DWILabelMapVolume', ComputeStatisticsNode, 'DWILabelMapVolume')
    MeasurementWF.connect(inputsSpec, 'LabelsConfigFile', ComputeStatisticsNode, 'labelCodesFile')
    MeasurementWF.connect(ComputeStatisticsNode, ('CSVStatisticsFile', pickFromList, 0), outputsSpec, 'FA_stats')
    MeasurementWF.connect(ComputeStatisticsNode, ('CSVStatisticsFile', pickFromList, 1), outputsSpec, 'MD_stats')
    MeasurementWF.connect(ComputeStatisticsNode, ('CSVStatisticsFile', pickFromList, 2), outputsSpec, 'RD_stats')
    MeasurementWF.connect(ComputeStatisticsNode, ('CSVStatisticsFile', pickFromList, 3), outputsSpec, 'FrobeniusNorm_stats')
    MeasurementWF.connect(ComputeStatisticsNode, ('CSVStatisticsFile', pickFromList, 4), outputsSpec, 'Lambda1_stats')
    MeasurementWF.connect(ComputeStatisticsNode, ('CSVStatisticsFile', pickFromList, 5), outputsSpec, 'Lambda2_stats')
    MeasurementWF.connect(ComputeStatisticsNode, ('CSVStatisticsFile', pickFromList, 6), outputsSpec, 'Lambda3_stats')

    return MeasurementWF
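# A hypothetical usage sketch for the workflow above (file names are placeholders):
#
# dwi_measure_wf = CreateMeasurementWorkflow('MeasurementWF', 'labels_config.csv')
# dwi_measure_wf.inputs.inputsSpec.T2LabelMapVolume = 't2_labelmap.nrrd'
# dwi_measure_wf.inputs.inputsSpec.DWIBrainMask = 'dwi_brain_mask.nrrd'
# dwi_measure_wf.inputs.inputsSpec.FAImage = 'FA.nrrd'
# dwi_measure_wf.run()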
Example #4
    def create_workflow(self):
        """Create the Niype workflow of the super-resolution pipeline.

        It is composed of a succession of Nodes and their corresponding parameters,
        where the output of node i goes to the input of node i+1.

        """
        sub_ses = self.subject
        if self.session is not None:
            sub_ses = ''.join([sub_ses, '_', self.session])

        if self.session is None:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk",
                                                   __version__]), self.subject)
        else:
            wf_base_dir = os.path.join(
                self.output_dir, '-'.join(["nipype", __nipype_version__]),
                self.subject, self.session, "rec-{}".format(self.sr_id))
            final_res_dir = os.path.join(self.output_dir,
                                         '-'.join(["pymialsrtk", __version__]),
                                         self.subject, self.session)

        if not os.path.exists(wf_base_dir):
            os.makedirs(wf_base_dir)
        print("Process directory: {}".format(wf_base_dir))

        # Initialization (Not sure we can control the name of nipype log)
        if os.path.isfile(os.path.join(wf_base_dir, "pypeline.log")):
            os.unlink(os.path.join(wf_base_dir, "pypeline.log"))

        self.wf = Workflow(name=self.pipeline_name, base_dir=wf_base_dir)

        config.update_config({
            'logging': {
                'log_directory': os.path.join(wf_base_dir),
                'log_to_file': True
            },
            'execution': {
                'remove_unnecessary_outputs': False,
                'stop_on_first_crash': True,
                'stop_on_first_rerun': False,
                'crashfile_format': "txt",
                'use_relative_paths': True,
                'write_provenance': False
            }
        })

        # Update nipype logging with config
        nipype_logging.update_logging(config)
        # config.enable_provenance()

        if self.use_manual_masks:
            dg = Node(interface=DataGrabber(outfields=['T2ws', 'masks']),
                      name='data_grabber')
            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            if self.session is not None:
                t2ws_template = os.path.join(
                    self.subject, self.session, 'anat',
                    '_'.join([sub_ses, '*run-*', '*T2w.nii.gz']))
                if self.m_masks_desc is not None:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, self.session, 'anat',
                        '_'.join([sub_ses, '*run-*', '*mask.nii.gz']))
            else:
                t2ws_template = os.path.join(self.subject, 'anat',
                                             sub_ses + '*_run-*_T2w.nii.gz')

                if self.m_masks_desc is not None:
                    # No session: masks live directly under the subject's anat folder
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, 'anat', '_'.join([
                            sub_ses, '*_run-*', '_desc-' + self.m_masks_desc,
                            '*mask.nii.gz'
                        ]))
                else:
                    masks_template = os.path.join(
                        'derivatives', self.m_masks_derivatives_dir,
                        self.subject, 'anat', sub_ses + '*_run-*_*mask.nii.gz')

            dg.inputs.field_template = dict(T2ws=t2ws_template,
                                            masks=masks_template)
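            # With a session, the templates above match BIDS-style paths such as
            # (hypothetical subject/session/run):
            #   sub-01/ses-01/anat/sub-01_ses-01_run-1_T2w.nii.gz
            #   derivatives/<masks_dir>/sub-01/ses-01/anat/sub-01_ses-01_run-1_mask.nii.gz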

            brainMask = MapNode(
                interface=IdentityInterface(fields=['out_file']),
                name='brain_masks_bypass',
                iterfield=['out_file'])

            if self.m_stacks is not None:
                custom_masks_filter = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='custom_masks_filter')
                custom_masks_filter.inputs.stacks_id = self.m_stacks

        else:
            dg = Node(interface=DataGrabber(outfields=['T2ws']),
                      name='data_grabber')

            dg.inputs.base_directory = self.bids_dir
            dg.inputs.template = '*'
            dg.inputs.raise_on_empty = False
            dg.inputs.sort_filelist = True

            dg.inputs.field_template = dict(
                T2ws=os.path.join(self.subject, 'anat', sub_ses +
                                  '*_run-*_T2w.nii.gz'))
            if self.session is not None:
                dg.inputs.field_template = dict(T2ws=os.path.join(
                    self.subject, self.session, 'anat', '_'.join(
                        [sub_ses, '*run-*', '*T2w.nii.gz'])))

            if self.m_stacks is not None:
                t2ws_filter_prior_masks = Node(
                    interface=preprocess.FilteringByRunid(),
                    name='t2ws_filter_prior_masks')
                t2ws_filter_prior_masks.inputs.stacks_id = self.m_stacks

            brainMask = MapNode(interface=preprocess.BrainExtraction(),
                                name='brainExtraction',
                                iterfield=['in_file'])

            brainMask.inputs.bids_dir = self.bids_dir
            brainMask.inputs.in_ckpt_loc = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_localization",
                             "Unet.ckpt-88000.index")).split('.index')[0]
            brainMask.inputs.threshold_loc = 0.49
            brainMask.inputs.in_ckpt_seg = pkg_resources.resource_filename(
                "pymialsrtk",
                os.path.join("data", "Network_checkpoints",
                             "Network_checkpoints_segmentation",
                             "Unet.ckpt-20000.index")).split('.index')[0]
            brainMask.inputs.threshold_seg = 0.5

        t2ws_filtered = Node(interface=preprocess.FilteringByRunid(),
                             name='t2ws_filtered')
        masks_filtered = Node(interface=preprocess.FilteringByRunid(),
                              name='masks_filtered')

        if not self.m_skip_stacks_ordering:
            stacksOrdering = Node(interface=preprocess.StacksOrdering(),
                                  name='stackOrdering')
        else:
            stacksOrdering = Node(
                interface=IdentityInterface(fields=['stacks_order']),
                name='stackOrdering')
            stacksOrdering.inputs.stacks_order = self.m_stacks

        if not self.m_skip_nlm_denoising:
            nlmDenoise = MapNode(interface=preprocess.BtkNLMDenoising(),
                                 name='nlmDenoise',
                                 iterfield=['in_file', 'in_mask'])
            nlmDenoise.inputs.bids_dir = self.bids_dir

            # Without the mask, the first slice intensity correction...
            srtkCorrectSliceIntensity01_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity01_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity01_nlm.inputs.bids_dir = self.bids_dir
            srtkCorrectSliceIntensity01_nlm.inputs.out_postfix = '_uni'

        srtkCorrectSliceIntensity01 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity01',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity01.inputs.bids_dir = self.bids_dir
        srtkCorrectSliceIntensity01.inputs.out_postfix = '_uni'

        srtkSliceBySliceN4BiasFieldCorrection = MapNode(
            interface=preprocess.MialsrtkSliceBySliceN4BiasFieldCorrection(),
            name='srtkSliceBySliceN4BiasFieldCorrection',
            iterfield=['in_file', 'in_mask'])
        srtkSliceBySliceN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        srtkSliceBySliceCorrectBiasField = MapNode(
            interface=preprocess.MialsrtkSliceBySliceCorrectBiasField(),
            name='srtkSliceBySliceCorrectBiasField',
            iterfield=['in_file', 'in_mask', 'in_field'])
        srtkSliceBySliceCorrectBiasField.inputs.bids_dir = self.bids_dir

        # 4-module sequence to be defined as a stage.
        if not self.m_skip_nlm_denoising:
            srtkCorrectSliceIntensity02_nlm = MapNode(
                interface=preprocess.MialsrtkCorrectSliceIntensity(),
                name='srtkCorrectSliceIntensity02_nlm',
                iterfield=['in_file', 'in_mask'])
            srtkCorrectSliceIntensity02_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization01_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization01_nlm')
            srtkIntensityStandardization01_nlm.inputs.bids_dir = self.bids_dir

            srtkHistogramNormalization_nlm = Node(
                interface=preprocess.MialsrtkHistogramNormalization(),
                name='srtkHistogramNormalization_nlm')
            srtkHistogramNormalization_nlm.inputs.bids_dir = self.bids_dir

            srtkIntensityStandardization02_nlm = Node(
                interface=preprocess.MialsrtkIntensityStandardization(),
                name='srtkIntensityStandardization02_nlm')
            srtkIntensityStandardization02_nlm.inputs.bids_dir = self.bids_dir

        # 4-module sequence to be defined as a stage.
        srtkCorrectSliceIntensity02 = MapNode(
            interface=preprocess.MialsrtkCorrectSliceIntensity(),
            name='srtkCorrectSliceIntensity02',
            iterfield=['in_file', 'in_mask'])
        srtkCorrectSliceIntensity02.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization01 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization01')
        srtkIntensityStandardization01.inputs.bids_dir = self.bids_dir

        srtkHistogramNormalization = Node(
            interface=preprocess.MialsrtkHistogramNormalization(),
            name='srtkHistogramNormalization')
        srtkHistogramNormalization.inputs.bids_dir = self.bids_dir

        srtkIntensityStandardization02 = Node(
            interface=preprocess.MialsrtkIntensityStandardization(),
            name='srtkIntensityStandardization02')
        srtkIntensityStandardization02.inputs.bids_dir = self.bids_dir

        srtkMaskImage01 = MapNode(interface=preprocess.MialsrtkMaskImage(),
                                  name='srtkMaskImage01',
                                  iterfield=['in_file', 'in_mask'])
        srtkMaskImage01.inputs.bids_dir = self.bids_dir

        srtkImageReconstruction = Node(
            interface=reconstruction.MialsrtkImageReconstruction(),
            name='srtkImageReconstruction')
        srtkImageReconstruction.inputs.bids_dir = self.bids_dir
        srtkImageReconstruction.inputs.sub_ses = sub_ses
        srtkImageReconstruction.inputs.no_reg = self.m_skip_svr

        srtkTVSuperResolution = Node(
            interface=reconstruction.MialsrtkTVSuperResolution(),
            name='srtkTVSuperResolution')
        srtkTVSuperResolution.inputs.bids_dir = self.bids_dir
        srtkTVSuperResolution.inputs.sub_ses = sub_ses
        srtkTVSuperResolution.inputs.in_loop = self.primal_dual_loops
        srtkTVSuperResolution.inputs.in_deltat = self.deltatTV
        srtkTVSuperResolution.inputs.in_lambda = self.lambdaTV
        srtkTVSuperResolution.inputs.use_manual_masks = self.use_manual_masks

        srtkN4BiasFieldCorrection = Node(
            interface=postprocess.MialsrtkN4BiasFieldCorrection(),
            name='srtkN4BiasFieldCorrection')
        srtkN4BiasFieldCorrection.inputs.bids_dir = self.bids_dir

        if self.m_do_refine_hr_mask:
            srtkHRMask = Node(
                interface=postprocess.MialsrtkRefineHRMaskByIntersection(),
                name='srtkHRMask')
            srtkHRMask.inputs.bids_dir = self.bids_dir
        else:
            srtkHRMask = Node(interface=postprocess.BinarizeImage(),
                              name='srtkHRMask')

        srtkMaskImage02 = Node(interface=preprocess.MialsrtkMaskImage(),
                               name='srtkMaskImage02')
        srtkMaskImage02.inputs.bids_dir = self.bids_dir

        # Build the workflow: connect the nodes
        # All nodes are ready; link them now
        if self.use_manual_masks:
            if self.m_stacks is not None:
                self.wf.connect(dg, "masks", custom_masks_filter,
                                "input_files")
                self.wf.connect(custom_masks_filter, "output_files", brainMask,
                                "out_file")
            else:
                self.wf.connect(dg, "masks", brainMask, "out_file")
        else:
            if self.m_stacks is not None:
                self.wf.connect(dg, "T2ws", t2ws_filter_prior_masks,
                                "input_files")
                self.wf.connect(t2ws_filter_prior_masks, "output_files",
                                brainMask, "in_file")
            else:
                self.wf.connect(dg, "T2ws", brainMask, "in_file")

        if not self.m_skip_stacks_ordering:
            self.wf.connect(brainMask, "out_file", stacksOrdering,
                            "input_masks")

        self.wf.connect(stacksOrdering, "stacks_order", t2ws_filtered,
                        "stacks_id")
        self.wf.connect(dg, "T2ws", t2ws_filtered, "input_files")

        self.wf.connect(stacksOrdering, "stacks_order", masks_filtered,
                        "stacks_id")
        self.wf.connect(brainMask, "out_file", masks_filtered, "input_files")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(t2ws_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), nlmDenoise,
                            "in_mask")  ## Comment to match docker process

            self.wf.connect(nlmDenoise, ("out_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity01_nlm, "in_mask")

        self.wf.connect(t2ws_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity01, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkCorrectSliceIntensity01_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        else:
            self.wf.connect(srtkCorrectSliceIntensity01,
                            ("out_file", utils.sort_ascending),
                            srtkSliceBySliceN4BiasFieldCorrection, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceN4BiasFieldCorrection, "in_mask")

        self.wf.connect(srtkCorrectSliceIntensity01,
                        ("out_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_file")
        self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                        ("out_fld_file", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_field")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkSliceBySliceCorrectBiasField, "in_mask")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkSliceBySliceN4BiasFieldCorrection,
                            ("out_im_file", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkCorrectSliceIntensity02_nlm, "in_mask")
            self.wf.connect(srtkCorrectSliceIntensity02_nlm,
                            ("out_file", utils.sort_ascending),
                            srtkIntensityStandardization01_nlm, "input_images")
            self.wf.connect(srtkIntensityStandardization01_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkHistogramNormalization_nlm, "input_masks")
            self.wf.connect(srtkHistogramNormalization_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkIntensityStandardization02_nlm, "input_images")

        self.wf.connect(srtkSliceBySliceCorrectBiasField,
                        ("out_im_file", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_file")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkCorrectSliceIntensity02, "in_mask")
        self.wf.connect(srtkCorrectSliceIntensity02,
                        ("out_file", utils.sort_ascending),
                        srtkIntensityStandardization01, "input_images")

        self.wf.connect(srtkIntensityStandardization01,
                        ("output_images", utils.sort_ascending),
                        srtkHistogramNormalization, "input_images")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkHistogramNormalization, "input_masks")
        self.wf.connect(srtkHistogramNormalization,
                        ("output_images", utils.sort_ascending),
                        srtkIntensityStandardization02, "input_images")

        if not self.m_skip_nlm_denoising:
            self.wf.connect(srtkIntensityStandardization02_nlm,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")
        else:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkMaskImage01, "in_file")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending),
                            srtkMaskImage01, "in_mask")

        self.wf.connect(srtkMaskImage01, "out_im_file",
                        srtkImageReconstruction, "input_images")
        self.wf.connect(masks_filtered, "output_files",
                        srtkImageReconstruction, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order",
                        srtkImageReconstruction, "stacks_order")

        self.wf.connect(srtkIntensityStandardization02, "output_images",
                        srtkTVSuperResolution, "input_images")
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending),
                        srtkTVSuperResolution, "input_transforms")
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        srtkTVSuperResolution, "input_masks")
        self.wf.connect(stacksOrdering, "stacks_order", srtkTVSuperResolution,
                        "stacks_order")

        self.wf.connect(srtkImageReconstruction, "output_sdi",
                        srtkTVSuperResolution, "input_sdi")

        if self.m_do_refine_hr_mask:
            self.wf.connect(srtkIntensityStandardization02,
                            ("output_images", utils.sort_ascending),
                            srtkHRMask, "input_images")
            self.wf.connect(masks_filtered,
                            ("output_files", utils.sort_ascending), srtkHRMask,
                            "input_masks")
            self.wf.connect(srtkImageReconstruction,
                            ("output_transforms", utils.sort_ascending),
                            srtkHRMask, "input_transforms")
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_sr")
        else:
            self.wf.connect(srtkTVSuperResolution, "output_sr", srtkHRMask,
                            "input_image")

        self.wf.connect(srtkTVSuperResolution, "output_sr", srtkMaskImage02,
                        "in_file")
        self.wf.connect(srtkHRMask, "output_srmask", srtkMaskImage02,
                        "in_mask")

        self.wf.connect(srtkTVSuperResolution, "output_sr",
                        srtkN4BiasFieldCorrection, "input_image")
        self.wf.connect(srtkHRMask, "output_srmask", srtkN4BiasFieldCorrection,
                        "input_mask")

        # Datasinker
        finalFilenamesGeneration = Node(
            interface=postprocess.FilenamesGeneration(), name='filenames_gen')
        finalFilenamesGeneration.inputs.sub_ses = sub_ses
        finalFilenamesGeneration.inputs.sr_id = self.sr_id
        finalFilenamesGeneration.inputs.use_manual_masks = self.use_manual_masks

        self.wf.connect(stacksOrdering, "stacks_order",
                        finalFilenamesGeneration, "stacks_order")

        datasink = Node(interface=DataSink(), name='data_sinker')
        datasink.inputs.base_directory = final_res_dir

        if not self.m_skip_stacks_ordering:
            self.wf.connect(stacksOrdering, "report_image", datasink,
                            'figures.@stackOrderingQC')
            self.wf.connect(stacksOrdering, "motion_tsv", datasink,
                            'anat.@motionTSV')
        self.wf.connect(masks_filtered, ("output_files", utils.sort_ascending),
                        datasink, 'anat.@LRmasks')
        self.wf.connect(srtkIntensityStandardization02,
                        ("output_images", utils.sort_ascending), datasink,
                        'anat.@LRsPreproc')
        self.wf.connect(srtkImageReconstruction,
                        ("output_transforms", utils.sort_ascending), datasink,
                        'xfm.@transforms')
        self.wf.connect(finalFilenamesGeneration, "substitutions", datasink,
                        "substitutions")
        self.wf.connect(srtkMaskImage01, ("out_im_file", utils.sort_ascending),
                        datasink, 'anat.@LRsDenoised')
        self.wf.connect(srtkImageReconstruction, "output_sdi", datasink,
                        'anat.@SDI')
        self.wf.connect(srtkN4BiasFieldCorrection, "output_image", datasink,
                        'anat.@SR')
        self.wf.connect(srtkTVSuperResolution, "output_json_path", datasink,
                        'anat.@SRjson')
        self.wf.connect(srtkTVSuperResolution, "output_sr_png", datasink,
                        'figures.@SRpng')
        self.wf.connect(srtkHRMask, "output_srmask", datasink, 'anat.@SRmask')
tasks = ['other', 'self_run-1', 'self_run-2']
base_dir = '/home/lsnoek1/SharedStates'
out_dir = op.join(base_dir, 'firstlevel')
sub_ids = sorted([
    op.basename(f)
    for f in glob(op.join(base_dir, 'preproc', 'fmriprep', 'sub-???'))
])

meta_wf = Workflow('firstlevel_spynoza')

concat_iterables_node = Node(
    interface=ConcatenateIterables(fields=['sub_id', 'task']),
    name='concat_iterables')

input_node = Node(IdentityInterface(fields=['sub_id', 'task']),
                  name='inputspec')
input_node.iterables = [('sub_id', sub_ids), ('task', tasks)]

meta_wf.connect(input_node, 'sub_id', concat_iterables_node, 'sub_id')
meta_wf.connect(input_node, 'task', concat_iterables_node, 'task')

templates = {
    'func': '{sub_id}/func/{sub_id}_task-{task}*_preproc.nii.gz',
    'func_mask': '{sub_id}/func/{sub_id}_task-{task}*_brainmask.nii.gz',
    'T1': '{sub_id}/anat/*preproc.nii.gz',
    'events': 'LOGS/{sub_id}_task-{task}_events.tsv',
    'confounds': '{sub_id}/func/{sub_id}_task-{task}*_confounds.tsv'
}
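# With the iterables above, SelectFiles fills in the template fields per run;
# e.g. for a hypothetical subject/task pair the 'func' template resolves to
# sub-001/func/sub-001_task-other*_preproc.nii.gz.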

select_files = Node(SelectFiles(templates=templates), name='selectfiles')
Example #6
def create_fs_reg_workflow(name='registration'):
    """Create a FEAT preprocessing workflow together with freesurfer

    Parameters
    ----------

        name : name of workflow (default: 'registration')

    Inputs::

        inputspec.source_files : files (filename or list of filenames to register)
        inputspec.mean_image : reference image to use
        inputspec.target_image : registration target

    Outputs::

        outputspec.func2anat_transform : FLIRT transform
        outputspec.anat2target_transform : FLIRT+FNIRT transform
        outputspec.transformed_files : transformed files in target space
        outputspec.transformed_mean : mean image in target space
    """

    register = Workflow(name=name)

    inputnode = Node(interface=IdentityInterface(fields=[
        'source_files', 'mean_image', 'subject_id', 'subjects_dir',
        'target_image'
    ]),
                     name='inputspec')

    outputnode = Node(interface=IdentityInterface(fields=[
        'func2anat_transform', 'out_reg_file', 'anat2target_transform',
        'transforms', 'transformed_mean', 'transformed_files', 'min_cost_file',
        'anat2target', 'aparc', 'mean2anat_mask'
    ]),
                      name='outputspec')

    # Get the subject's freesurfer source directory
    fssource = Node(FreeSurferSource(), name='fssource')
    fssource.run_without_submitting = True
    register.connect(inputnode, 'subject_id', fssource, 'subject_id')
    register.connect(inputnode, 'subjects_dir', fssource, 'subjects_dir')

    convert = Node(freesurfer.MRIConvert(out_type='nii'), name="convert")
    register.connect(fssource, 'T1', convert, 'in_file')

    # Coregister the median to the surface
    bbregister = Node(freesurfer.BBRegister(registered_file=True),
                      name='bbregister')
    bbregister.inputs.init = 'fsl'
    bbregister.inputs.contrast_type = 't2'
    bbregister.inputs.out_fsl_file = True
    bbregister.inputs.epi_mask = True
    register.connect(inputnode, 'subject_id', bbregister, 'subject_id')
    register.connect(inputnode, 'mean_image', bbregister, 'source_file')
    register.connect(inputnode, 'subjects_dir', bbregister, 'subjects_dir')

    # Create a mask of the median coregistered to the anatomical image
    mean2anat_mask = Node(fsl.BET(mask=True), name='mean2anat_mask')
    register.connect(bbregister, 'registered_file', mean2anat_mask, 'in_file')
    """
    use aparc+aseg's brain mask
    """

    binarize = Node(fs.Binarize(min=0.5, out_type="nii.gz", dilate=1),
                    name="binarize_aparc")
    register.connect(fssource, ("aparc_aseg", get_aparc_aseg), binarize,
                     "in_file")

    stripper = Node(fsl.ApplyMask(), name='stripper')
    register.connect(binarize, "binary_file", stripper, "mask_file")
    register.connect(convert, 'out_file', stripper, 'in_file')
    """
    Apply inverse transform to aparc file
    """

    aparcxfm = Node(freesurfer.ApplyVolTransform(inverse=True,
                                                 interp='nearest'),
                    name='aparc_inverse_transform')
    register.connect(inputnode, 'subjects_dir', aparcxfm, 'subjects_dir')
    register.connect(bbregister, 'out_reg_file', aparcxfm, 'reg_file')
    register.connect(fssource, ('aparc_aseg', get_aparc_aseg), aparcxfm,
                     'target_file')
    register.connect(inputnode, 'mean_image', aparcxfm, 'source_file')
    """
    Convert the BBRegister transformation to ANTS ITK format
    """

    convert2itk = Node(C3dAffineTool(), name='convert2itk')
    convert2itk.inputs.fsl2ras = True
    convert2itk.inputs.itk_transform = True
    register.connect(bbregister, 'out_fsl_file', convert2itk, 'transform_file')
    register.connect(inputnode, 'mean_image', convert2itk, 'source_file')
    register.connect(stripper, 'out_file', convert2itk, 'reference_file')
    """
    Compute registration between the subject's structural and MNI template
    This is currently set to perform a very quick registration. However, the
    registration can be made significantly more accurate for cortical
    structures by increasing the number of iterations
    All parameters are set using the example from:
    #https://github.com/stnava/ANTs/blob/master/Scripts/newAntsExample.sh
    """

    reg = Node(ants.Registration(), name='antsRegister')
    reg.inputs.output_transform_prefix = "output_"
    reg.inputs.transforms = ['Rigid', 'Affine', 'SyN']
    reg.inputs.transform_parameters = [(0.1, ), (0.1, ), (0.2, 3.0, 0.0)]
    reg.inputs.number_of_iterations = [[10000, 11110, 11110]] * 2 + [[
        100, 30, 20
    ]]
    reg.inputs.dimension = 3
    reg.inputs.write_composite_transform = True
    reg.inputs.collapse_output_transforms = True
    reg.inputs.initial_moving_transform_com = True
    reg.inputs.metric = ['Mattes'] * 2 + [['Mattes', 'CC']]
    reg.inputs.metric_weight = [1] * 2 + [[0.5, 0.5]]
    reg.inputs.radius_or_number_of_bins = [32] * 2 + [[32, 4]]
    reg.inputs.sampling_strategy = ['Regular'] * 2 + [[None, None]]
    reg.inputs.sampling_percentage = [0.3] * 2 + [[None, None]]
    reg.inputs.convergence_threshold = [1.e-8] * 2 + [-0.01]
    reg.inputs.convergence_window_size = [20] * 2 + [5]
    reg.inputs.smoothing_sigmas = [[4, 2, 1]] * 2 + [[1, 0.5, 0]]
    reg.inputs.sigma_units = ['vox'] * 3
    reg.inputs.shrink_factors = [[3, 2, 1]] * 2 + [[4, 2, 1]]
    reg.inputs.use_estimate_learning_rate_once = [True] * 3
    reg.inputs.use_histogram_matching = [False] * 2 + [True]
    reg.inputs.winsorize_lower_quantile = 0.005
    reg.inputs.winsorize_upper_quantile = 0.995
    reg.inputs.float = True
    reg.inputs.output_warped_image = 'output_warped_image.nii.gz'
    reg.inputs.num_threads = 4
    reg.plugin_args = {
        'qsub_args': '-pe orte 4',
        'sbatch_args': '--mem=6G -c 4'
    }
    register.connect(stripper, 'out_file', reg, 'moving_image')
    register.connect(inputnode, 'target_image', reg, 'fixed_image')
    """
    Concatenate the affine and ants transforms into a list
    """

    pickfirst = lambda x: x[0]

    merge = Node(Merge(2), iterfield=['in2'], name='mergexfm')
    register.connect(convert2itk, 'itk_transform', merge, 'in2')
    register.connect(reg, 'composite_transform', merge, 'in1')
    """
    Transform the mean image. First to anatomical and then to target
    """

    warpmean = Node(ants.ApplyTransforms(), name='warpmean')
    warpmean.inputs.input_image_type = 0
    warpmean.inputs.interpolation = 'Linear'
    warpmean.inputs.invert_transform_flags = [False, False]
    warpmean.inputs.terminal_output = 'file'
    warpmean.inputs.args = '--float'
    # warpmean.inputs.num_threads = 4
    # warpmean.plugin_args = {'sbatch_args': '--mem=4G -c 4'}
    """
    Transform the remaining images. First to anatomical and then to target
    """

    warpall = pe.MapNode(ants.ApplyTransforms(),
                         iterfield=['input_image'],
                         name='warpall')
    warpall.inputs.input_image_type = 0
    warpall.inputs.interpolation = 'Linear'
    warpall.inputs.invert_transform_flags = [False, False]
    warpall.inputs.terminal_output = 'file'
    warpall.inputs.args = '--float'
    warpall.inputs.num_threads = 2
    warpall.plugin_args = {'sbatch_args': '--mem=6G -c 2'}
    """
    Assign all the output files
    """

    register.connect(warpmean, 'output_image', outputnode, 'transformed_mean')
    register.connect(warpall, 'output_image', outputnode, 'transformed_files')

    register.connect(inputnode, 'target_image', warpmean, 'reference_image')
    register.connect(inputnode, 'mean_image', warpmean, 'input_image')
    register.connect(merge, 'out', warpmean, 'transforms')
    register.connect(inputnode, 'target_image', warpall, 'reference_image')
    register.connect(inputnode, 'source_files', warpall, 'input_image')
    register.connect(merge, 'out', warpall, 'transforms')
    """
    Assign all the output files
    """

    register.connect(reg, 'warped_image', outputnode, 'anat2target')
    register.connect(aparcxfm, 'transformed_file', outputnode, 'aparc')
    register.connect(bbregister, 'out_fsl_file', outputnode,
                     'func2anat_transform')
    register.connect(bbregister, 'out_reg_file', outputnode, 'out_reg_file')
    register.connect(bbregister, 'min_cost_file', outputnode, 'min_cost_file')
    register.connect(mean2anat_mask, 'mask_file', outputnode, 'mean2anat_mask')
    register.connect(reg, 'composite_transform', outputnode,
                     'anat2target_transform')
    register.connect(merge, 'out', outputnode, 'transforms')

    return register
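# A hypothetical usage sketch for the registration workflow above (paths are
# placeholders):
#
# reg_wf = create_fs_reg_workflow()
# reg_wf.inputs.inputspec.subject_id = 'sub-01'
# reg_wf.inputs.inputspec.subjects_dir = '/path/to/freesurfer/subjects'
# reg_wf.inputs.inputspec.mean_image = 'mean_func.nii.gz'
# reg_wf.inputs.inputspec.source_files = ['run1.nii.gz', 'run2.nii.gz']
# reg_wf.inputs.inputspec.target_image = 'MNI152_T1_2mm.nii.gz'
# reg_wf.run()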
MasterDWIWorkflow.write_graph()

SGEFlavor = 'SGE'

import nipype
from nipype.interfaces.base import CommandLine, CommandLineInputSpec, TraitedSpec, File, Directory
from nipype.interfaces.base import traits, isdefined, BaseInterface
from nipype.interfaces.utility import Merge, Split, Function, Rename, IdentityInterface
import nipype.interfaces.io as nio   # Data i/o
import nipype.pipeline.engine as pe  # pypeline engine
from nipype.interfaces.freesurfer import ReconAll

from SEMTools import BRAINSFit,gtractResampleDWIInPlace,dtiestim,dtiprocess

inputsSpec = pe.Node(interface=IdentityInterface(fields=['SESSION_TUPLE']), name='inputspec')
inputsSpec.iterables = ('SESSION_TUPLE',SESSION_TUPLE)

GetFileNamesNode = pe.Node(interface=Function(function=GetDWIReferenceImagesFromSessionID,
                           input_names=['SESSION_TUPLE', 'BASE_STRUCT', 'BASE_DWI'],
                           output_names=['PROJ_ID', 'SUBJ_ID', 'SESSION_ID', 'FixImage', 'FixMaskImage', 'MovingDWI']),
                           run_without_submitting=True, name='99_GetDWIReferenceImagesFromSessionID')
GetFileNamesNode.inputs.BASE_STRUCT = BASE_STRUCT
GetFileNamesNode.inputs.BASE_DWI = BASE_DWI

MasterDWIWorkflow.connect(inputsSpec, 'SESSION_TUPLE', GetFileNamesNode, 'SESSION_TUPLE')

outputsSpec = pe.Node(interface=IdentityInterface(fields=['FAImage', 'MDImage', 'RDImage', 'FrobeniusNormImage',
                                                          'Lambda1Image', 'Lambda2Image', 'Lambda3Image', 'tensor_image']),
                      name='outputspec')
subject_list = ["1002", "1003", "1004"]  # list of subject identifiers
session_list = ['anat']  # list of session identifiers

output_dir = 'output_anatbet_3'  # name of output folder
working_dir = 'workingdir_firstSteps'  # name of working directory

print("bruh")

# Create Node
bet = Node(BET(), name='bet_node')

# Create a preprocessing workflow
preproc = Workflow(name='preproc')
preproc.base_dir = opj(experiment_dir, working_dir)

infosource = Node(IdentityInterface(fields=['subject_id', 'session_id']),
                  name="infosource")

infosource.iterables = [('subject_id', subject_list),
                        ('session_id', session_list)]

# SelectFiles
templates = {'func': '{subject_id}/Struct.nii.gz'}
selectfiles = Node(SelectFiles(templates, base_directory=experiment_dir),
                   name="selectfiles")

print("checking")
# Datasink
datasink = Node(DataSink(base_directory=experiment_dir, container=output_dir),
                name="datasink")
def CreateCorrectionWorkflow(WFname):

    ###### UTILITY FUNCTIONS #######
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\
    # remove the skull from the T2 volume
    def ExtractBRAINFromHead(RawScan, BrainLabels):
        import os
        import SimpleITK as sitk
        # Remove skull from the head scan
        assert os.path.exists(RawScan), "File not found: %s" % RawScan
        assert os.path.exists(BrainLabels), "File not found: %s" % BrainLabels
        headImage = sitk.ReadImage(RawScan)
        labelsMap = sitk.ReadImage(BrainLabels)
        label_mask = labelsMap>0
        brainImage = sitk.Cast(headImage,sitk.sitkInt16) * sitk.Cast(label_mask,sitk.sitkInt16)
        outputVolume = os.path.realpath('T2Stripped.nrrd')
        sitk.WriteImage(brainImage, outputVolume)
        return outputVolume

    def MakeResamplerInFileList(inputT2, inputLabelMap):
        imagesList = [inputT2, inputLabelMap]
        return imagesList

    # This function helps to pick desirable output from the output list
    def pickFromList(inlist,item):
        return inlist[item]

    # Create registration mask for ANTs from resampled label map image
    def CreateAntsRegistrationMask(brainMask):
        import os
        import SimpleITK as sitk
        assert os.path.exists(brainMask), "File not found: %s" % brainMask
        labelsMap = sitk.ReadImage(brainMask)
        label_mask = labelsMap>0
        # dilate the label mask
        dilateFilter = sitk.BinaryDilateImageFilter()
        dilateFilter.SetKernelRadius(12)
        dilated_mask = dilateFilter.Execute( label_mask )
        regMask = dilated_mask
        registrationMask = os.path.realpath('registrationMask.nrrd')
        sitk.WriteImage(regMask, registrationMask)
        return registrationMask

    # Save direction cosine for the input volume
    def SaveDirectionCosineToMatrix(inputVolume):
        import os
        import SimpleITK as sitk
        assert os.path.exists(inputVolume), "File not found: %s" % inputVolume
        t2 = sitk.ReadImage(inputVolume)
        directionCosine = t2.GetDirection()
        return directionCosine

    def MakeForceDCFilesList(inputB0, inputT2, inputLabelMap):
        import os
        assert os.path.exists(inputB0), "File not found: %s" % inputB0
        assert os.path.exists(inputT2), "File not found: %s" % inputT2
        assert os.path.exists(inputLabelMap), "File not found: %s" % inputLabelMap
        imagesList = [inputB0, inputT2, inputLabelMap]
        return imagesList

    # Force DC to ID
    def ForceDCtoID(inputVolume):
        import os
        import SimpleITK as sitk
        inImage = sitk.ReadImage(inputVolume)
        inImage.SetDirection((1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0))
        outputVolume = os.path.realpath('IDDC_'+ os.path.basename(inputVolume))
        sitk.WriteImage(inImage, outputVolume)
        return outputVolume

    def RestoreDCFromSavedMatrix(inputVolume, inputDirectionCosine):
        import os
        import SimpleITK as sitk
        inImage = sitk.ReadImage(inputVolume)
        inImage.SetDirection(inputDirectionCosine)
        outputVolume = os.path.realpath('CorrectedDWI.nrrd')
        sitk.WriteImage(inImage, outputVolume)
        return outputVolume

    def GetRigidTransformInverse(inputTransform):
        import os
        import SimpleITK as sitk
        inputTx = sitk.ReadTransform(inputTransform)
        versorRigidTx = sitk.VersorRigid3DTransform()
        versorRigidTx.SetFixedParameters(inputTx.GetFixedParameters())
        versorRigidTx.SetParameters(inputTx.GetParameters())
        invTx = versorRigidTx.GetInverse()
        inverseTransform = os.path.realpath('Inverse_'+ os.path.basename(inputTransform))
        sitk.WriteTransform(invTx, inverseTransform)
        return inverseTransform
    #################################
    #\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/
    CorrectionWF = pe.Workflow(name=WFname)

    inputsSpec = pe.Node(interface=IdentityInterface(fields=['T2Volume', 'DWIVolume','LabelMapVolume']),
                         name='inputsSpec')

    outputsSpec = pe.Node(interface=IdentityInterface(fields=['CorrectedDWI','CorrectedDWI_in_T2Space','DWIBrainMask']),
                          name='outputsSpec')

    # Step0: remove the skull from the T2 volume
    ExtractBRAINFromHeadNode = pe.Node(interface=Function(function = ExtractBRAINFromHead,
                                                          input_names=['RawScan','BrainLabels'],
                                                          output_names=['outputVolume']),
                                       name="ExtractBRAINFromHead")

    CorrectionWF.connect(inputsSpec, 'T2Volume', ExtractBRAINFromHeadNode, 'RawScan')
    CorrectionWF.connect(inputsSpec, 'LabelMapVolume', ExtractBRAINFromHeadNode, 'BrainLabels')

    # Step1: extract B0 from DWI volume
    EXTRACT_B0 = pe.Node(interface=extractNrrdVectorIndex(),name="EXTRACT_B0")
    EXTRACT_B0.inputs.vectorIndex = 0
    EXTRACT_B0.inputs.outputVolume = 'B0_Image.nrrd'
    CorrectionWF.connect(inputsSpec,'DWIVolume',EXTRACT_B0,'inputVolume')

    # Step2: Register T2 to B0 space using BRAINSFit
    BFit_T2toB0 = pe.Node(interface=BRAINSFit(), name="BFit_T2toB0")
    BFit_T2toB0.inputs.costMetric = "MMI"
    BFit_T2toB0.inputs.numberOfSamples = 100000
    BFit_T2toB0.inputs.numberOfIterations = [1500]
    BFit_T2toB0.inputs.numberOfHistogramBins = 50
    BFit_T2toB0.inputs.maximumStepLength = 0.2
    BFit_T2toB0.inputs.minimumStepLength = [0.00005]
    BFit_T2toB0.inputs.useRigid = True
    BFit_T2toB0.inputs.useAffine = True
    BFit_T2toB0.inputs.maskInferiorCutOffFromCenter = 65
    BFit_T2toB0.inputs.maskProcessingMode = "ROIAUTO"
    BFit_T2toB0.inputs.ROIAutoDilateSize = 13
    BFit_T2toB0.inputs.backgroundFillValue = 0.0
    BFit_T2toB0.inputs.initializeTransformMode = 'useCenterOfHeadAlign'
    BFit_T2toB0.inputs.strippedOutputTransform = "T2ToB0_RigidTransform.h5"
    BFit_T2toB0.inputs.writeOutputTransformInFloat = True
    CorrectionWF.connect(EXTRACT_B0, 'outputVolume', BFit_T2toB0, 'fixedVolume')
    CorrectionWF.connect(ExtractBRAINFromHeadNode, 'outputVolume', BFit_T2toB0, 'movingVolume')

    # Step3: Use T_rigid to "resample" T2 and label map images to B0 image space
    MakeResamplerInFilesListNode = pe.Node(Function(function=MakeResamplerInFileList,
                                                    input_names=['inputT2','inputLabelMap'],
                                                    output_names=['imagesList']),
                                           name="MakeResamplerInFilesListNode")
    CorrectionWF.connect([(ExtractBRAINFromHeadNode,MakeResamplerInFilesListNode,[('outputVolume','inputT2')]),
                          (inputsSpec,MakeResamplerInFilesListNode,[('LabelMapVolume','inputLabelMap')])])

    ResampleToB0Space = pe.MapNode(interface=BRAINSResample(), name="ResampleToB0Space",
                                   iterfield=['inputVolume', 'pixelType', 'outputVolume'])
    ResampleToB0Space.inputs.interpolationMode = 'Linear'
    ResampleToB0Space.inputs.outputVolume = ['T2toB0.nrrd','BRAINMaskToB0.nrrd']
    ResampleToB0Space.inputs.pixelType = ['ushort','binary']
    CorrectionWF.connect(BFit_T2toB0,'strippedOutputTransform',ResampleToB0Space,'warpTransform')
    CorrectionWF.connect(EXTRACT_B0,'outputVolume',ResampleToB0Space,'referenceVolume')
    CorrectionWF.connect(MakeResamplerInFilesListNode,'imagesList',ResampleToB0Space,'inputVolume')

    # Step4: Create registration mask from resampled label map image
    CreateRegistrationMask = pe.Node(interface=Function(function = CreateAntsRegistrationMask,
                                                        input_names=['brainMask'],
                                                        output_names=['registrationMask']),
                                     name="CreateAntsRegistrationMask")
    CorrectionWF.connect(ResampleToB0Space, ('outputVolume', pickFromList, 1),
                        CreateRegistrationMask, 'brainMask')

    # Step5: Save direction cosine for the resampled T2 image
    SaveDirectionCosineToMatrixNode = pe.Node(interface=Function(function = SaveDirectionCosineToMatrix,
                                                                 input_names=['inputVolume'],
                                                                 output_names=['directionCosine']),
                                              name="SaveDirectionCosineToMatrix")
    CorrectionWF.connect(ResampleToB0Space, ('outputVolume', pickFromList, 0),
                         SaveDirectionCosineToMatrixNode, 'inputVolume')


    # Step6: Force DC to ID
    MakeForceDCFilesListNode = pe.Node(Function(function=MakeForceDCFilesList,
                                                input_names=['inputB0','inputT2','inputLabelMap'],
                                                output_names=['imagesList']),
                                       name="MakeForceDCFilesListNode")
    CorrectionWF.connect([(EXTRACT_B0,MakeForceDCFilesListNode,[('outputVolume','inputB0')]),
                          (ResampleToB0Space,MakeForceDCFilesListNode,[(('outputVolume', pickFromList, 0),'inputT2')]),
                          (CreateRegistrationMask,MakeForceDCFilesListNode,[('registrationMask','inputLabelMap')])])

    ForceDCtoIDNode = pe.MapNode(interface=Function(function = ForceDCtoID,
                                                    input_names=['inputVolume'],
                                                    output_names=['outputVolume']),
                                 name="ForceDCtoID",
                                 iterfield=['inputVolume'])
    CorrectionWF.connect(MakeForceDCFilesListNode, 'imagesList', ForceDCtoIDNode, 'inputVolume')

    # Step7: Run antsRegistration in one direction
    antsReg_B0ToTransformedT2 = pe.Node(interface=ants.Registration(), name="antsReg_B0ToTransformedT2")
    antsReg_B0ToTransformedT2.inputs.dimension = 3
    antsReg_B0ToTransformedT2.inputs.transforms = ["SyN"]
    antsReg_B0ToTransformedT2.inputs.transform_parameters = [(0.25, 3.0, 0.0)]
    antsReg_B0ToTransformedT2.inputs.metric = ['MI']
    antsReg_B0ToTransformedT2.inputs.sampling_strategy = [None]
    antsReg_B0ToTransformedT2.inputs.sampling_percentage = [1.0]
    antsReg_B0ToTransformedT2.inputs.metric_weight = [1.0]
    antsReg_B0ToTransformedT2.inputs.radius_or_number_of_bins = [32]
    antsReg_B0ToTransformedT2.inputs.number_of_iterations = [[70, 50, 40]]
    antsReg_B0ToTransformedT2.inputs.convergence_threshold = [1e-6]
    antsReg_B0ToTransformedT2.inputs.convergence_window_size = [10]
    antsReg_B0ToTransformedT2.inputs.use_histogram_matching = [True]
    antsReg_B0ToTransformedT2.inputs.shrink_factors = [[3, 2, 1]]
    antsReg_B0ToTransformedT2.inputs.smoothing_sigmas = [[2, 1, 0]]
    antsReg_B0ToTransformedT2.inputs.sigma_units = ["vox"]
    antsReg_B0ToTransformedT2.inputs.use_estimate_learning_rate_once = [False]
    antsReg_B0ToTransformedT2.inputs.write_composite_transform = True
    antsReg_B0ToTransformedT2.inputs.collapse_output_transforms = False
    antsReg_B0ToTransformedT2.inputs.initialize_transforms_per_stage = False
    antsReg_B0ToTransformedT2.inputs.output_transform_prefix = 'Tsyn'
    antsReg_B0ToTransformedT2.inputs.winsorize_lower_quantile = 0.01
    antsReg_B0ToTransformedT2.inputs.winsorize_upper_quantile = 0.99
    antsReg_B0ToTransformedT2.inputs.float = True
    antsReg_B0ToTransformedT2.inputs.args = '--restrict-deformation 0x1x0'
    CorrectionWF.connect(ForceDCtoIDNode, ('outputVolume', pickFromList, 1), antsReg_B0ToTransformedT2, 'fixed_image')
    CorrectionWF.connect(ForceDCtoIDNode, ('outputVolume', pickFromList, 2), antsReg_B0ToTransformedT2, 'fixed_image_mask')
    CorrectionWF.connect(ForceDCtoIDNode, ('outputVolume', pickFromList, 0), antsReg_B0ToTransformedT2, 'moving_image')

    # Step8: Now that all necessary transforms are acquired, transform the
    #        input DWI image into T2 image space:
    # {DWI} --> ForceDCtoID --> gtractResampleDWIInPlace (using SyN transform)
    # --> Restore DirectionCosine From Saved Matrix --> gtractResampleDWIInPlace (inverse of T_rigid from BFit)
    # --> {CorrectedDWI_in_T2Space}
    DWI_ForceDCtoIDNode = pe.Node(interface=Function(function = ForceDCtoID,
                                                     input_names=['inputVolume'],
                                                     output_names=['outputVolume']),
                                  name='DWI_ForceDCtoIDNode')
    CorrectionWF.connect(inputsSpec,'DWIVolume',DWI_ForceDCtoIDNode,'inputVolume')

    gtractResampleDWI_SyN = pe.Node(interface=gtractResampleDWIInPlace(),
                                    name="gtractResampleDWI_SyN")
    CorrectionWF.connect(DWI_ForceDCtoIDNode,'outputVolume',
                         gtractResampleDWI_SyN,'inputVolume')
    CorrectionWF.connect(antsReg_B0ToTransformedT2,'composite_transform',
                         gtractResampleDWI_SyN,'warpDWITransform')
    CorrectionWF.connect(ForceDCtoIDNode,('outputVolume', pickFromList, 1),
                         gtractResampleDWI_SyN,'referenceVolume') # fixed image of antsRegistration
    gtractResampleDWI_SyN.inputs.outputVolume = 'IDDC_correctedDWI.nrrd'

    RestoreDCFromSavedMatrixNode = pe.Node(interface=Function(function = RestoreDCFromSavedMatrix,
                                                              input_names=['inputVolume','inputDirectionCosine'],
                                                              output_names=['outputVolume']),
                                           name='RestoreDCFromSavedMatrix')
    CorrectionWF.connect(gtractResampleDWI_SyN,'outputVolume',RestoreDCFromSavedMatrixNode,'inputVolume')
    CorrectionWF.connect(SaveDirectionCosineToMatrixNode,'directionCosine',RestoreDCFromSavedMatrixNode,'inputDirectionCosine')
    CorrectionWF.connect(RestoreDCFromSavedMatrixNode,'outputVolume', outputsSpec, 'CorrectedDWI')

    GetRigidTransformInverseNode = pe.Node(interface=Function(function = GetRigidTransformInverse,
                                                              input_names=['inputTransform'],
                                                              output_names=['inverseTransform']),
                                           name='GetRigidTransformInverse')
    CorrectionWF.connect(BFit_T2toB0,'strippedOutputTransform',GetRigidTransformInverseNode,'inputTransform')


    gtractResampleDWIInPlace_Trigid = pe.Node(interface=gtractResampleDWIInPlace(),
                                              name="gtractResampleDWIInPlace_Trigid")
    CorrectionWF.connect(RestoreDCFromSavedMatrixNode,'outputVolume',
                         gtractResampleDWIInPlace_Trigid,'inputVolume')
    CorrectionWF.connect(GetRigidTransformInverseNode,'inverseTransform',
                         gtractResampleDWIInPlace_Trigid,'inputTransform') #Inverse of rigid transform from BFit
    gtractResampleDWIInPlace_Trigid.inputs.outputVolume = 'CorrectedDWI_in_T2Space_estimate.nrrd'
    gtractResampleDWIInPlace_Trigid.inputs.outputResampledB0 = 'CorrectedDWI_in_T2Space_estimate_B0.nrrd'

    # Step9: An extra registration step to tune the alignment between the CorrectedDWI_in_T2Space image and the T2 image.
    BFit_TuneRegistration = pe.Node(interface=BRAINSFit(), name="BFit_TuneRegistration")
    BFit_TuneRegistration.inputs.costMetric = "MMI"
    BFit_TuneRegistration.inputs.numberOfSamples = 100000
    BFit_TuneRegistration.inputs.numberOfIterations = [1500]
    BFit_TuneRegistration.inputs.numberOfHistogramBins = 50
    BFit_TuneRegistration.inputs.maximumStepLength = 0.2
    BFit_TuneRegistration.inputs.minimumStepLength = [0.00005]
    BFit_TuneRegistration.inputs.useRigid = True
    BFit_TuneRegistration.inputs.useAffine = True
    BFit_TuneRegistration.inputs.maskInferiorCutOffFromCenter = 65
    BFit_TuneRegistration.inputs.maskProcessingMode = "ROIAUTO"
    BFit_TuneRegistration.inputs.ROIAutoDilateSize = 13
    BFit_TuneRegistration.inputs.backgroundFillValue = 0.0
    BFit_TuneRegistration.inputs.initializeTransformMode = 'useCenterOfHeadAlign'
    BFit_TuneRegistration.inputs.strippedOutputTransform = "CorrectedB0inT2Space_to_T2_RigidTransform.h5"
    BFit_TuneRegistration.inputs.writeOutputTransformInFloat = True
    CorrectionWF.connect(ExtractBRAINFromHeadNode, 'outputVolume', BFit_TuneRegistration, 'fixedVolume') #T2 brain volume
    CorrectionWF.connect(gtractResampleDWIInPlace_Trigid, 'outputResampledB0', BFit_TuneRegistration, 'movingVolume') # CorrectedB0_in_T2Space

    gtractResampleDWIInPlace_TuneRigidTx = pe.Node(interface=gtractResampleDWIInPlace(),
                                                   name="gtractResampleDWIInPlace_TuneRigidTx")
    CorrectionWF.connect(gtractResampleDWIInPlace_Trigid,'outputVolume',gtractResampleDWIInPlace_TuneRigidTx,'inputVolume')
    CorrectionWF.connect(BFit_TuneRegistration,'strippedOutputTransform',gtractResampleDWIInPlace_TuneRigidTx,'inputTransform')
    gtractResampleDWIInPlace_TuneRigidTx.inputs.outputVolume = 'CorrectedDWI_in_T2Space.nrrd'
    gtractResampleDWIInPlace_TuneRigidTx.inputs.outputResampledB0 = 'CorrectedDWI_in_T2Space_B0.nrrd'

    # Finally we pass the outputs of the gtractResampleDWIInPlace_TuneRigidTx to the outputsSpec
    CorrectionWF.connect(gtractResampleDWIInPlace_TuneRigidTx, 'outputVolume', outputsSpec, 'CorrectedDWI_in_T2Space')

    # Step10: Create brain mask from the input labelmap
    DWIBRAINMASK = pe.Node(interface=BRAINSResample(), name='DWIBRAINMASK')
    DWIBRAINMASK.inputs.interpolationMode = 'Linear'
    DWIBRAINMASK.inputs.outputVolume = 'BrainMaskForDWI.nrrd'
    DWIBRAINMASK.inputs.pixelType = 'binary'
    CorrectionWF.connect(gtractResampleDWIInPlace_TuneRigidTx,'outputResampledB0',DWIBRAINMASK,'referenceVolume')
    CorrectionWF.connect(inputsSpec,'LabelMapVolume',DWIBRAINMASK,'inputVolume')
    CorrectionWF.connect(DWIBRAINMASK, 'outputVolume', outputsSpec, 'DWIBrainMask')

    return CorrectionWF
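
# --- Hedged usage sketch (not part of the original function) ---
# Driving the correction workflow defined above; the file paths are
# hypothetical, the field names come from the 'inputsSpec' node.
DWICorrectionWF = CreateCorrectionWorkflow('CorrectionWF')
DWICorrectionWF.base_dir = '/tmp/dwi_correction'                        # hypothetical working dir
DWICorrectionWF.inputs.inputsSpec.T2Volume = 't2_average.nrrd'          # hypothetical inputs
DWICorrectionWF.inputs.inputsSpec.DWIVolume = 'dwi_raw.nrrd'
DWICorrectionWF.inputs.inputsSpec.LabelMapVolume = 'brain_labels.nrrd'
# DWICorrectionWF.run()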
Exemple #10
0
from nipype.pipeline.engine import Node, Workflow
from nipype.interfaces.utility import IdentityInterface
from nipype.algorithms.modelgen import SpecifySPMModel
from nipype.interfaces.fsl import Level1Design as fsl_design
from nipype.interfaces.fsl.maths import MathsCommand
from nipype.interfaces.fsl import (
    FEATModel,
    FILMGLS,
    )

input_node = Node(IdentityInterface(fields=[
    'bold',
    'events',
    ]), name='input')

output_node = Node(IdentityInterface(fields=[
    'T_image',
    ]), name='output')


# node design matrix
model = Node(interface=SpecifySPMModel(), name='design_matrix')
model.inputs.input_units = 'secs'
model.inputs.output_units = 'secs'
model.inputs.high_pass_filter_cutoff = 128.
model.inputs.time_repetition = .85
model.inputs.bids_condition_column = 'trial_name'
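
# --- Hedged wiring sketch (not in the original snippet) ---
# One plausible way the identity input node could feed the design-matrix node:
# 'bold' as the list of preprocessed functional runs and 'events' as a BIDS
# events file ('bids_event_file' pairs with the 'bids_condition_column' set above).
tp_wf = Workflow(name='temporalpatterns_sketch')
tp_wf.connect(input_node, 'bold', model, 'functional_runs')
tp_wf.connect(input_node, 'events', model, 'bids_event_file')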


def create_workflow_temporalpatterns_fsl():
    pass  # function body truncated in this excerpt


minipigWF.config["execution"] = {
    "job_finished_timeout": 45,
}
minipigWF.config["logging"] = {
    "workflow_level": "DEBUG",
    "filemanip_level": "DEBUG",
    "interface_level": "DEBUG",
    "log_directory": ExperimentInfo["Atlas"]["LOG_DIR"],
}

input_spec = pe.Node(
    interface=IdentityInterface(
        fields=[
            "Raw_Atlas",
            "Raw_T1",
            "Cropped_T1",
            "Raw_T2",
            "Raw_BM",
            "DomesticLUT",
            "Domestic_LabelMap",
        ]
    ),
    run_without_submitting=True,
    name="inputspec",
)

input_spec.inputs.Raw_T1 = ExperimentInfo["Subject"]["Raw_T1"]
input_spec.inputs.Raw_T2 = ExperimentInfo["Subject"]["Raw_T2"]
input_spec.inputs.Raw_BM = ExperimentInfo["Subject"]["Raw_BM"]
input_spec.inputs.Cropped_T1 = ExperimentInfo["Subject"]["Cropped_T1"]

input_spec.inputs.Raw_Atlas = ExperimentInfo["Atlas"]["IntensityImage"]
def attach_spm_pet_grouptemplate(main_wf, wf_name="spm_pet_template"):
    """ Attach a PET pre-processing workflow that uses SPM12 to `main_wf`.
    This workflow picks all spm_pet_preproc outputs 'pet_output.warped_files' in `main_wf`
    to create a group template.

    Parameters
    ----------
    main_wf: nipype Workflow

    wf_name: str
        Name of the preprocessing workflow

    Nipype Inputs for `main_wf`
    ---------------------------
    Note: The `main_wf` workflow is expected to have an `input_files` and a `datasink` nodes.

    pet_output.warped_files: input node

    datasink: nipype Node

    spm_pet_preproc: nipype Workflow

    Nipype Outputs
    --------------
    group_template.pet_template: file
        The path to the PET group template.

    Nipype Workflow Dependencies
    ----------------------------
    This workflow depends on:
    - spm_pet_preproc
    - spm_anat_preproc if `spm_pet_template.do_petpvc` is True.

    Returns
    -------
    main_wf: nipype Workflow
    """
    # Dependency workflows
    pet_wf = main_wf.get_node("spm_pet_preproc")

    in_files = get_input_node(main_wf)
    datasink = get_datasink(main_wf, name='datasink')

    # The base name of the 'pet' file for the substitutions
    pet_fbasename = remove_ext(op.basename(get_input_file_name(in_files, 'pet')))

    # the group template datasink
    base_outdir  = datasink.inputs.base_directory
    grp_datasink = pe.Node(io.DataSink(parameterization=False,
                                       base_directory=base_outdir,),
                                       name='{}_grouptemplate_datasink'.format(pet_fbasename))
    grp_datasink.inputs.container = '{}_grouptemplate'.format(pet_fbasename)

    # the list of the raw pet subjects
    warped_pets = pe.JoinNode(interface=IdentityInterface(fields=["warped_pets"]),
                              joinsource="infosrc",
                              joinfield="warped_pets",
                              name="warped_pets")

    # the group template workflow
    template_wf = spm_create_group_template_wf(wf_name)

    # output node
    output = setup_node(IdentityInterface(fields=["pet_template"]), name="group_template")

    # group dataSink output substitutions
    regexp_subst = [
                     (r"/wgrptemplate{pet}_merged_mean_smooth.nii$",  "/{pet}_grouptemplate_mni.nii"),
                     (r"/w{pet}_merged_mean_smooth.nii$",             "/{pet}_grouptemplate_mni.nii"),
                   ]
    regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    grp_datasink.inputs.regexp_substitutions = extend_trait_list(grp_datasink.inputs.regexp_substitutions,
                                                                 regexp_subst)

    # Connect the nodes
    main_wf.connect([
                     # warped pets file list input
                     (pet_wf,       warped_pets, [("warp_output.warped_files",    "warped_pets")]),

                     # group template wf
                     (warped_pets,  template_wf, [(("warped_pets", flatten_list), "grptemplate_input.in_files")]),

                     # output node
                     (template_wf, output,       [("grptemplate_output.template", "pet_template")]),

                     # template output
                     (output,      grp_datasink, [("pet_template",                "@pet_group_template")]),
                   ])

    # Now we start with the correction and registration of each subject to the group template
    do_petpvc = get_config_setting('spm_pet_template.do_petpvc')
    if do_petpvc:
        if main_wf.get_node('spm_anat_preproc') is None:
            raise AttributeError("Expected `spm_anat_preproc` workflow node to attach PETPVC.")

        preproc_wf_name = "spm_mrpet_grouptemplate_preproc"
        main_wf = attach_spm_mrpet_preprocessing(main_wf, wf_name=preproc_wf_name, do_group_template=True)

        preproc_wf = main_wf.get_node(preproc_wf_name)
        main_wf.connect([(output, preproc_wf, [("pet_template", "pet_input.pet_template")])])
    else:
        # add the pet template to the preproc workflow
        reg_wf = spm_register_to_template_wf(wf_name="spm_pet_register_to_grouptemplate")
        main_wf.connect([
                         (output,   reg_wf,  [("pet_template",  "reg_input.template")]),
                         (in_files, reg_wf,  [("pet",           "reg_input.in_file"),]),

                         (reg_wf,   datasink, [("reg_output.warped",     "pet.grp_template.@warped"),
                                               ("reg_output.warp_field", "pet.grp_template.@warp_field"),
                                              ]),
                         ])

    # per-subject datasink output substitutions
    regexp_subst = [
                     (r"/{pet}_sn.mat$",           "/{pet}_grptemplate_params.mat"),
                     (r"/wgrptemplate_{pet}.nii$", "/{pet}_grptemplate.nii"),
                     (r"/w{pet}.nii",              "/{pet}_grptemplate.nii"),
                   ]
    regexp_subst = format_pair_list(regexp_subst, pet=pet_fbasename)
    regexp_subst += extension_duplicates(regexp_subst)
    datasink.inputs.regexp_substitutions = extend_trait_list(datasink.inputs.regexp_substitutions,
                                                             regexp_subst)

    return main_wf
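
# --- Hedged usage sketch (not part of the original function) ---
# Attaching the group-template workflow to an existing main workflow; per the
# docstring above, `main_wf` must already contain the 'input_files' and
# 'datasink' nodes and the 'spm_pet_preproc' sub-workflow. `main_wf` here is
# hypothetical.
# main_wf = attach_spm_pet_grouptemplate(main_wf, wf_name="spm_pet_template")
# main_wf.run(plugin="MultiProc")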
Exemple #13
0
template_brain = studyhome + '/templates/MNI152_T1_2mm_brain.nii'

# ROIs for connectivity analysis
Lamyg = roi_dir + '/L_amyg_anatomical.nii'
Ramyg = roi_dir + '/R_amyg_anatomical.nii'

ROIs = [Lamyg, Ramyg]
rois = ['L_amyg', 'R_amyg']

min_clust_size = 25

# In[2]:

## File handling
# Identity node- select subjects
infosource = Node(IdentityInterface(fields=['subject_id', 'ROIs']),
                  name='infosource')
infosource.iterables = [('subject_id', subjects_list), ('ROIs', ROIs)]

# Data grabber- select fMRI and ROIs
templates = {
    'orig_func':
    preproc_dir + '/smoothed_filt_func/{subject_id}/func_filtered_smooth.nii'
}
selectfiles = Node(SelectFiles(templates), name='selectfiles')

# Datasink- where our select outputs will go
datasink = Node(DataSink(), name='datasink')
datasink.inputs.base_directory = output_dir
datasink.inputs.container = output_dir
substitutions = [('_subject_id_', ''),
Exemple #14
0
def create_logb_workflow(name="LOGISMOSB_WF",
                         master_config=None,
                         plugin_args=None):
    logb_wf = Workflow(name=name)

    config = read_json_config("config.json")
    config['atlas_info'] = get_local_file_location(config['atlas_info'])

    inputs_node = Node(IdentityInterface(fields=[
        't1_file', 't2_file', 'posterior_files', 'joint_fusion_file',
        'brainlabels_file', 'hncma_atlas'
    ]),
                       name="inputspec")
    inputs_node.run_without_submitting = True

    # ensure that t1 and t2 are in the same voxel lattice
    input_t2 = Node(BRAINSResample(), "ResampleInputT2Volume")
    input_t2.inputs.outputVolume = "t2_resampled.nii.gz"
    input_t2.inputs.pixelType = 'ushort'
    input_t2.inputs.interpolationMode = "Linear"

    logb_wf.connect([(inputs_node, input_t2, [('t1_file', 'referenceVolume'),
                                              ('t2_file', 'inputVolume')])])

    white_matter_masking_node = Node(interface=WMMasking(), name="WMMasking")
    white_matter_masking_node.inputs.dilation = config['WMMasking']['dilation']
    white_matter_masking_node.inputs.csf_threshold = config['WMMasking'][
        'csf_threshold']
    if master_config and master_config['labelmap_colorlookup_table']:
        white_matter_masking_node.inputs.atlas_info = master_config[
            'labelmap_colorlookup_table']
    else:
        white_matter_masking_node.inputs.atlas_info = config['atlas_info']

    logb_wf.connect([(inputs_node, white_matter_masking_node,
                      [("posterior_files", "posterior_files"),
                       ("joint_fusion_file", "atlas_file"),
                       ("brainlabels_file", "brainlabels_file"),
                       ("hncma_atlas", "hncma_file")])])

    gm_labels = Node(interface=CreateGMLabelMap(), name="GM_Labelmap")
    gm_labels.inputs.atlas_info = config['atlas_info']
    logb_wf.connect([(inputs_node, gm_labels, [('joint_fusion_file',
                                                'atlas_file')])])

    logismosb_output_node = create_output_spec(
        ["wmsurface_file", "gmsurface_file"],
        config["hemisphere_names"],
        name="outputspec")

    for hemisphere in config["hemisphere_names"]:
        genus_zero_filter = Node(
            interface=GenusZeroImageFilter(),
            name="{0}_GenusZeroImageFilter".format(hemisphere))
        genus_zero_filter.inputs.connectivity = config['GenusZeroImageFilter'][
            'connectivity']
        genus_zero_filter.inputs.biggestComponent = config[
            'GenusZeroImageFilter']['biggestComponent']
        genus_zero_filter.inputs.connectedComponent = config[
            'GenusZeroImageFilter']['connectedComponent']
        genus_zero_filter.inputs.out_mask = "{0}_genus_zero_white_matter.nii.gz".format(
            hemisphere)

        logb_wf.connect([(white_matter_masking_node, genus_zero_filter,
                          [('{0}_wm'.format(hemisphere), 'in_file')])])

        surface_generation = Node(
            interface=BRAINSSurfaceGeneration(),
            name="{0}_BRAINSSurfaceGeneration".format(hemisphere))
        surface_generation.inputs.smoothSurface = config[
            'BRAINSSurfaceGeneration']['smoothSurface']
        surface_generation.inputs.numIterations = config[
            'BRAINSSurfaceGeneration']['numIterations']
        surface_generation.inputs.out_file = "{0}_white_matter_surface.vtk".format(
            hemisphere)

        logb_wf.connect([(genus_zero_filter, surface_generation,
                          [('out_file', 'in_file')])])

        logismosb = Node(interface=LOGISMOSB(),
                         name="{0}_LOGISMOSB".format(hemisphere))
        logismosb.inputs.smoothnessConstraint = config['LOGISMOSB'][
            'smoothnessConstraint']
        logismosb.inputs.nColumns = config['LOGISMOSB']['nColumns']
        logismosb.inputs.columnChoice = config['LOGISMOSB']['columnChoice']
        logismosb.inputs.columnHeight = config['LOGISMOSB']['columnHeight']
        logismosb.inputs.nodeSpacing = config['LOGISMOSB']['nodeSpacing']
        logismosb.inputs.w = config['LOGISMOSB']['w']
        logismosb.inputs.a = config['LOGISMOSB']['a']
        logismosb.inputs.nPropagate = config['LOGISMOSB']['nPropagate']
        logismosb.inputs.basename = hemisphere
        if config['LOGISMOSB']['thickRegions']:
            logismosb.inputs.thick_regions = config['LOGISMOSB'][
                'thickRegions']
        else:
            logismosb.inputs.useHNCMALabels = True

        if plugin_args:
            logismosb.plugin_args = plugin_args

        logb_wf.connect([
            (inputs_node, logismosb, [("t1_file", "t1_file"),
                                      ('hncma_atlas', 'atlas_file')]),
            (input_t2, logismosb, [("outputVolume", "t2_file")]),
            (genus_zero_filter, logismosb, [("out_file", "wm_file")]),
            (surface_generation, logismosb, [("out_file", "mesh_file")]),
            (white_matter_masking_node, logismosb,
             [('{0}_boundary'.format(hemisphere), 'brainlabels_file')]),
            (logismosb, logismosb_output_node,
             [("gmsurface_file", "{0}_gmsurface_file".format(hemisphere)),
              ("wmsurface_file", "{0}_wmsurface_file".format(hemisphere))])
        ])

    return logb_wf
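
# --- Hedged usage sketch (not part of the original function) ---
# Driving the LOGISMOS-B workflow defined above; all paths are hypothetical,
# the field names come from the 'inputspec' node.
logb = create_logb_workflow(name="LOGISMOSB_WF")
logb.base_dir = "/tmp/logismosb_work"                               # hypothetical working dir
logb.inputs.inputspec.t1_file = "t1_average.nii.gz"                 # hypothetical inputs
logb.inputs.inputspec.t2_file = "t2_average.nii.gz"
logb.inputs.inputspec.posterior_files = ["posteriors/WM.nii.gz"]    # format depends on WMMasking
logb.inputs.inputspec.joint_fusion_file = "joint_fusion_labels.nii.gz"
logb.inputs.inputspec.brainlabels_file = "brainlabels.nii.gz"
logb.inputs.inputspec.hncma_atlas = "hncma_atlas.nii.gz"
# logb.run(plugin="MultiProc")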
idvi_nonbaseline_list = ["%04d_%02d-%s" % idvi for idvi in zip(subjID,blsavi_integer,blsavi_decimal)]

# Map each nonbaseline to its baseline (add column to data_table_nonbaseline)
basevisNo = data_table_nonbaseline['baseblsavi'].values.tolist()
baseblsavi_integer = [ math.floor(x) for x in basevisNo ]
baseblsavi_decimal = [ str(x).split('.')[1] for x in basevisNo ]
idvi_basenonbaseline_list = ["%04d_%02d-%s" % idvi for idvi in zip(subjID,baseblsavi_integer,baseblsavi_decimal)]
nonbaseline_to_baseline = dict(zip(idvi_nonbaseline_list, idvi_basenonbaseline_list))

# Save spreadsheet of nonbaseline-to-baseline mappings
data_table_nonbaseline['idvi_basenonbaseline_list'] = idvi_basenonbaseline_list
data_table_nonbaseline.to_csv(os.path.join(output_dir,'baseline_mapping_mprage15T.csv'),index=False)

## ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ INPUTS ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ~ ##
# placeholder Node to enable iteration over scans
infosource_baseline = Node(interface=IdentityInterface(fields=['idvi']), name="infosource_baseline")
infosource_baseline.iterables = ('idvi', idvi_baseline_list)

# get full path to MRI corresponding to idvi
getmusemri_baseline = Node(Function(input_names=['key','dict'],output_names=['musemri'],function=get_value),
                           name='getmusemri_baseline')
getmusemri_baseline.inputs.dict = musemri_dict

# Step 1: Spatial normalization of baseline 1.5T MRI onto study-specific template

# Reorient: this simply applies 90, 180, or 270 degree rotations about each axis to make the image orientation
# the same as the FSL standard
reorient = Node(interface=fsl.Reorient2Std(output_type='NIFTI'), name="reorient")

# Use antsRegistration to compute registration between subject's baseline 1.5T MRI and study-specific template
antsreg = Node(ants.Registration(args='--float',
Exemple #16
0
 def test_summary(self):
     # Create working dirs
     # Create XnatSource node
     repository = XnatRepo(server=SERVER, cache_dir=self.cache_dir)
     analysis = DummyAnalysis(self.SUMMARY_STUDY_NAME,
                              repository.dataset(self.project),
                              SingleProc('ad'),
                              inputs=[
                                  FilesetFilter('source1', 'source1',
                                                text_format),
                                  FilesetFilter('source2', 'source2',
                                                text_format),
                                  FilesetFilter('source3', 'source3',
                                                text_format)
                              ])
     # TODO: Should test out other file formats as well.
     source_files = ['source1', 'source2', 'source3']
     inputnode = pe.Node(IdentityInterface(['subject_id', 'visit_id']),
                         'inputnode')
     inputnode.inputs.subject_id = self.SUBJECT
     inputnode.inputs.visit_id = self.VISIT
     source = pe.Node(RepositorySource(
         [analysis.bound_spec(f).slice for f in source_files]),
                      name='source')
     subject_sink_files = ['subject_sink']
     dummy_pipeline = analysis.dummy_pipeline()
     dummy_pipeline.cap()
     subject_sink = pe.Node(RepositorySink(
         [analysis.bound_spec(f).slice for f in subject_sink_files],
         dummy_pipeline),
                            name='subject_sink')
     subject_sink.inputs.name = 'subject_summary'
     subject_sink.inputs.desc = (
         "Tests the sinking of subject-wide filesets")
     # Test visit sink
     visit_sink_files = ['visit_sink']
     visit_sink = pe.Node(RepositorySink(
         [analysis.bound_spec(f).slice for f in visit_sink_files],
         dummy_pipeline),
                          name='visit_sink')
     visit_sink.inputs.name = 'visit_summary'
     visit_sink.inputs.desc = ("Tests the sinking of visit-wide filesets")
     # Test project sink
     analysis_sink_files = ['analysis_sink']
     analysis_sink = pe.Node(RepositorySink(
         [analysis.bound_spec(f).slice for f in analysis_sink_files],
         dummy_pipeline),
                             name='analysis_sink')
     analysis_sink.inputs.name = 'project_summary'
     analysis_sink.inputs.desc = (
         "Tests the sinking of project-wide filesets")
     # Create workflow connecting them together
     workflow = pe.Workflow('summary_unittest', base_dir=self.work_dir)
     workflow.add_nodes((source, subject_sink, visit_sink, analysis_sink))
     workflow.connect(inputnode, 'subject_id', source, 'subject_id')
     workflow.connect(inputnode, 'visit_id', source, 'visit_id')
     workflow.connect(inputnode, 'subject_id', subject_sink, 'subject_id')
     workflow.connect(inputnode, 'visit_id', visit_sink, 'visit_id')
     workflow.connect(source, 'source1' + PATH_SUFFIX, subject_sink,
                      'subject_sink' + PATH_SUFFIX)
     workflow.connect(source, 'source2' + PATH_SUFFIX, visit_sink,
                      'visit_sink' + PATH_SUFFIX)
     workflow.connect(source, 'source3' + PATH_SUFFIX, analysis_sink,
                      'analysis_sink' + PATH_SUFFIX)
     workflow.run()
     analysis.clear_caches()  # Refreshed cached repository tree object
     with self._connect() as login:
         # Check subject summary directories were created properly in cache
         expected_subj_filesets = ['subject_sink']
         subject_dir = self.session_cache(
             visit=XnatRepo.SUMMARY_NAME,
             from_analysis=self.SUMMARY_STUDY_NAME)
         self.assertEqual(filter_scans(os.listdir(subject_dir)),
                          [(e + '-' + e) for e in expected_subj_filesets])
         # and on XNAT
         subject_fileset_names = filter_scans(
             login.projects[self.project].experiments[self.session_label(
                 visit=XnatRepo.SUMMARY_NAME,
                 from_analysis=self.SUMMARY_STUDY_NAME)].scans.keys())
         self.assertEqual(expected_subj_filesets, subject_fileset_names)
         # Check visit summary directories were created properly in
         # cache
         expected_visit_filesets = ['visit_sink']
         visit_dir = self.session_cache(
             subject=XnatRepo.SUMMARY_NAME,
             from_analysis=self.SUMMARY_STUDY_NAME)
         self.assertEqual(filter_scans(os.listdir(visit_dir)),
                          [(e + '-' + e) for e in expected_visit_filesets])
         # and on XNAT
         visit_fileset_names = filter_scans(
             login.projects[self.project].experiments[self.session_label(
                 subject=XnatRepo.SUMMARY_NAME,
                 from_analysis=self.SUMMARY_STUDY_NAME)].scans.keys())
         self.assertEqual(expected_visit_filesets, visit_fileset_names)
         # Check project summary directories were created properly in cache
         expected_proj_filesets = ['analysis_sink']
         project_dir = self.session_cache(
             subject=XnatRepo.SUMMARY_NAME,
             visit=XnatRepo.SUMMARY_NAME,
             from_analysis=self.SUMMARY_STUDY_NAME)
         self.assertEqual(filter_scans(os.listdir(project_dir)),
                          [(e + '-' + e) for e in expected_proj_filesets])
         # and on XNAT
         project_fileset_names = filter_scans(
             login.projects[self.project].experiments[self.session_label(
                 subject=XnatRepo.SUMMARY_NAME,
                 visit=XnatRepo.SUMMARY_NAME,
                 from_analysis=self.SUMMARY_STUDY_NAME)].scans.keys())
         self.assertEqual(expected_proj_filesets, project_fileset_names)
     # Reload the data from the summary directories
     reloadinputnode = pe.Node(
         IdentityInterface(['subject_id', 'visit_id']), 'reload_inputnode')
     reloadinputnode.inputs.subject_id = self.SUBJECT
     reloadinputnode.inputs.visit_id = self.VISIT
     reloadsource_per_subject = pe.Node(RepositorySource(
         analysis.bound_spec(f).slice for f in subject_sink_files),
                                        name='reload_source_per_subject')
     reloadsource_per_visit = pe.Node(RepositorySource(
         analysis.bound_spec(f).slice for f in visit_sink_files),
                                      name='reload_source_per_visit')
     reloadsource_per_dataset = pe.Node(RepositorySource(
         analysis.bound_spec(f).slice for f in analysis_sink_files),
                                        name='reload_source_per_dataset')
     reloadsink = pe.Node(RepositorySink(
         (analysis.bound_spec(f).slice
          for f in ['resink1', 'resink2', 'resink3']), dummy_pipeline),
                          name='reload_sink')
     reloadsink.inputs.name = 'reload_summary'
     reloadsink.inputs.desc = (
         "Tests the reloading of subject and project summary filesets")
     reloadworkflow = pe.Workflow('reload_summary_unittest',
                                  base_dir=self.work_dir)
     for node in (reloadsource_per_subject, reloadsource_per_visit,
                  reloadsource_per_dataset, reloadsink):
         for iterator in ('subject_id', 'visit_id'):
             reloadworkflow.connect(reloadinputnode, iterator, node,
                                    iterator)
     reloadworkflow.connect(reloadsource_per_subject,
                            'subject_sink' + PATH_SUFFIX, reloadsink,
                            'resink1' + PATH_SUFFIX)
     reloadworkflow.connect(reloadsource_per_visit,
                            'visit_sink' + PATH_SUFFIX, reloadsink,
                            'resink2' + PATH_SUFFIX)
     reloadworkflow.connect(reloadsource_per_dataset,
                            'analysis_sink' + PATH_SUFFIX, reloadsink,
                            'resink3' + PATH_SUFFIX)
     reloadworkflow.run()
     # Check that the filesets
     self.assertEqual(
         filter_scans(
             os.listdir(
                 self.session_cache(
                     from_analysis=self.SUMMARY_STUDY_NAME))),
         ['resink1-resink1', 'resink2-resink2', 'resink3-resink3'])
     # and on XNAT
     with self._connect() as login:
         resinked_fileset_names = filter_scans(
             login.projects[self.project].experiments[self.session_label(
                 from_analysis=self.SUMMARY_STUDY_NAME)].scans.keys())
         self.assertEqual(sorted(resinked_fileset_names),
                          ['resink1', 'resink2', 'resink3'])
def CreateFreeSurferWorkflow_custom(projectid, subjectid, sessionid, WFname, CLUSTER_QUEUE, CLUSTER_QUEUE_LONG,
                                    RunAllFSComponents=True, RunMultiMode=True,
                                    constructed_FS_SUBJECTS_DIR='/never_use_this'):
    freesurferWF = pe.Workflow(name=GenerateWFName(projectid, subjectid, sessionid, WFname))

    inputsSpec = pe.Node(interface=IdentityInterface(fields=['subj_session_id', 'T1_files', 'T2_files', 'subjects_dir',
                                                             'wm_prob', 'label_file', 'mask_file']), name='inputspec')
    outputsSpec = pe.Node(
        interface=IdentityInterface(fields=['full_path_FS_output', 'processed_output_name', 'cnr_optimal_image']),
        name='outputspec')

    ### HACK: the nipype interface requires that this environmental variable is set before running
    print(("HACK SETTING SUBJECTS_DIR {0}".format(constructed_FS_SUBJECTS_DIR)))
    os.environ['SUBJECTS_DIR'] = constructed_FS_SUBJECTS_DIR
    inputsSpec.inputs.subjects_dir = constructed_FS_SUBJECTS_DIR  # HACK

    if RunMultiMode:
        mergeT1T2 = pe.Node(interface=Merge(2), name="Merge_T1T2")
        freesurferWF.connect(inputsSpec, 'T1_files', mergeT1T2, 'in1')
        freesurferWF.connect(inputsSpec, 'T2_files', mergeT1T2, 'in2')

        # Some constants based on assumptions about the label_file from BRAINSABC
        white_label = 1
        grey_label = 2

        msLDA_GenerateWeights = pe.Node(interface=MS_LDA(), name="MS_LDA")
        MSLDA_sge_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 2, 1, 1), 'overwrite': True}
        msLDA_GenerateWeights.plugin_args = MSLDA_sge_options_dictionary
        msLDA_GenerateWeights.inputs.lda_labels = [white_label, grey_label]
        msLDA_GenerateWeights.inputs.weight_file = 'weights.txt'
        msLDA_GenerateWeights.inputs.use_weights = False
        msLDA_GenerateWeights.inputs.vol_synth_file = 'synth_out.nii.gz'
        # msLDA_GenerateWeights.inputs.vol_synth_file = 'synth_out.nii.gz'
        # msLDA_GenerateWeights.inputs.shift = 0 # value to shift by

        freesurferWF.connect(mergeT1T2, 'out', msLDA_GenerateWeights, 'images')
        # freesurferWF.connect(inputsSpec,'subjects_dir',  msLDA_GenerateWeights,'subjects_dir')
        freesurferWF.connect(inputsSpec, 'label_file', msLDA_GenerateWeights, 'label_file')
        # freesurferWF.connect(inputsSpec,'mask_file',  msLDA_GenerateWeights,'mask_file') ## Mask file MUST be unsigned char
        freesurferWF.connect(msLDA_GenerateWeights, 'vol_synth_file', outputsSpec, 'cnr_optimal_image')

    if RunAllFSComponents:
        print("Running FreeSurfer ReconAll")
        fs_reconall = pe.Node(interface=fswrap.FSScript(), name="FS52_cross_" + str(sessionid))
        freesurfer_sge_options_dictionary = {'qsub_args': modify_qsub_args(CLUSTER_QUEUE, 8, 4, 4), 'overwrite': True}
        fs_reconall.plugin_args = freesurfer_sge_options_dictionary
        fs_reconall.inputs.subcommand = 'autorecon'
        # fs_reconall.inputs.directive = 'all'
        # fs_reconall.inputs.fs_env_script = '' # NOTE: NOT NEEDED HERE 'FreeSurferEnv.sh'
        # fs_reconall.inputs.fs_home = ''       # NOTE: NOT NEEDED HERE
        freesurferWF.connect(inputsSpec, 'subj_session_id', fs_reconall, 'subj_session_id')
        if RunMultiMode:
            ## Use the output of the synthesized T1 with maximized contrast
            ## HACK:  REMOVE FOR NOW - NEEDS FURTHER TESTING
            ## freesurferWF.connect(msLDA_GenerateWeights, 'vol_synth_file', fs_reconall, 'T1_files')
            freesurferWF.connect(inputsSpec, 'T1_files', fs_reconall, 'T1_files')
            ## END HACK
        else:
            ## Use the output of the T1 only image
            freesurferWF.connect(inputsSpec, 'T1_files', fs_reconall, 'T1_files')

        freesurferWF.connect(inputsSpec, 'label_file', fs_reconall, 'brainmask')
        freesurferWF.connect(inputsSpec, 'subjects_dir', fs_reconall, 'subjects_dir')
        freesurferWF.connect(fs_reconall, 'outDir', outputsSpec, 'full_path_FS_output')
        freesurferWF.connect(fs_reconall, 'processed_output_name', outputsSpec, 'processed_output_name')
    return freesurferWF
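
# --- Hedged usage sketch (not part of the original function) ---
# Building the FreeSurfer workflow; all identifiers, queue strings and paths
# are hypothetical, the field names come from the 'inputspec' node above.
fsWF = CreateFreeSurferWorkflow_custom('proj01', 'subj01', 'sess01', 'FS',
                                       CLUSTER_QUEUE='-q all.q',
                                       CLUSTER_QUEUE_LONG='-q all.q',
                                       constructed_FS_SUBJECTS_DIR='/tmp/subjects_dir')
fsWF.inputs.inputspec.subj_session_id = 'subj01_sess01'      # hypothetical inputs
fsWF.inputs.inputspec.T1_files = ['t1_average.nii.gz']
fsWF.inputs.inputspec.T2_files = ['t2_average.nii.gz']
fsWF.inputs.inputspec.label_file = 'brain_labels.nii.gz'
# fsWF.run()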
Exemple #18
0
    def test_repository_roundtrip(self):

        # Create working dirs
        # Create DarisSource node
        repository = XnatRepo(server=SERVER, cache_dir=self.cache_dir)
        dataset = repository.dataset(self.project)
        analysis = DummyAnalysis(self.STUDY_NAME,
                                 dataset=dataset,
                                 processor=SingleProc('a_dir'),
                                 inputs=[
                                     FilesetFilter('source1', 'source1',
                                                   text_format),
                                     FilesetFilter('source2', 'source2',
                                                   text_format),
                                     FilesetFilter('source3', 'source3',
                                                   text_format),
                                     FilesetFilter('source4', 'source4',
                                                   text_format)
                                 ])
        # TODO: Should test out other file formats as well.
        source_files = ['source1', 'source2', 'source3', 'source4']
        sink_files = ['sink1', 'sink3', 'sink4']
        inputnode = pe.Node(IdentityInterface(['subject_id', 'visit_id']),
                            'inputnode')
        inputnode.inputs.subject_id = str(self.SUBJECT)
        inputnode.inputs.visit_id = str(self.VISIT)
        source = pe.Node(RepositorySource(
            analysis.bound_spec(f).slice for f in source_files),
                         name='source')
        dummy_pipeline = analysis.dummy_pipeline()
        dummy_pipeline.cap()
        sink = pe.Node(RepositorySink((analysis.bound_spec(f).slice
                                       for f in sink_files), dummy_pipeline),
                       name='sink')
        sink.inputs.name = 'repository-roundtrip-unittest'
        sink.inputs.desc = (
            "A test session created by repository roundtrip unittest")
        # Create workflow connecting them together
        workflow = pe.Workflow('source-sink-unit-test', base_dir=self.work_dir)
        workflow.add_nodes((source, sink))
        workflow.connect(inputnode, 'subject_id', source, 'subject_id')
        workflow.connect(inputnode, 'visit_id', source, 'visit_id')
        workflow.connect(inputnode, 'subject_id', sink, 'subject_id')
        workflow.connect(inputnode, 'visit_id', sink, 'visit_id')
        for source_name in source_files:
            if source_name != 'source2':
                sink_name = source_name.replace('source', 'sink')
                workflow.connect(source, source_name + PATH_SUFFIX, sink,
                                 sink_name + PATH_SUFFIX)
        workflow.run()
        # Check cache was created properly
        self.assertEqual(filter_scans(os.listdir(self.session_cache())), [
            'source1-source1', 'source2-source2', 'source3-source3',
            'source4-source4'
        ])
        expected_sink_filesets = ['sink1', 'sink3', 'sink4']
        self.assertEqual(
            filter_scans(
                os.listdir(self.session_cache(from_analysis=self.STUDY_NAME))),
            [(e + '-' + e) for e in expected_sink_filesets])
        with self._connect() as login:
            fileset_names = filter_scans(login.experiments[self.session_label(
                from_analysis=self.STUDY_NAME)].scans.keys())
        self.assertEqual(fileset_names, expected_sink_filesets)
Exemple #19
0
def create_dti():
    # main workflow for preprocessing diffusion data
    # fsl output type
    fsl.FSLCommand.set_default_output_type('NIFTI_GZ')
    # Initiation of a workflow
    dwi_preproc = Workflow(name="dwi_preproc")
    # inputnode
    inputnode = Node(IdentityInterface(fields=[
        'subject_id',
        'freesurfer_dir',
        'aseg',
        'dwi',
        'dwi_ap',
        'dwi_pa',
        'bvals',
        'bvecs'
    ]),
        name='inputnode')
    # output node
    outputnode = Node(IdentityInterface(fields=[
        'dwi_denoised',
        "dwi_unringed",
        "topup_corr",
        "topup_field",
        "bo_brain",
        "bo_brainmask",
        "topup_fieldcoef",
        "eddy_corr",
        "eddy_params",
        "rotated_bvecs",
        "total_movement_rms",
        "shell_alignment_parameters",
        'outlier_report',
        "cnr_maps",
        "residuals",
        'dti_fa',
        'dti_md',
        'dti_l1',
        'dti_l2',
        'dti_l3',
        'dti_v1',
        'dti_v2',
        'dti_v3',
        'fa2anat',
        'fa2anat_mat',
        'fa2anat_dat'
    ]),
        name='outputnode')

    
    '''
    workflow to run distortion correction
    -------------------------------------
    '''
    distor_corr = create_distortion_correct()


    '''
    tensor fitting
    --------------
    '''
    dti = Node(fsl.DTIFit(), name='dti')

    #connecting the nodes
    dwi_preproc.connect([

        (inputnode, distor_corr, [('dwi', 'inputnode.dwi')]),
        (inputnode, distor_corr, [('dwi_ap', 'inputnode.dwi_ap')]),
        (inputnode, distor_corr, [('dwi_pa', 'inputnode.dwi_pa')]),
        (inputnode, distor_corr, [("bvals", "inputnode.bvals")]),
        (inputnode, distor_corr, [("bvecs", "inputnode.bvecs")]),
        (inputnode, dti, [("bvals", "bvals")]),
        (distor_corr, outputnode, [('outputnode.bo_brain', 'bo_brain')]),
        (distor_corr, outputnode, [('outputnode.bo_brainmask', 'bo_brainmask')]),
        (distor_corr, outputnode, [('outputnode.noise', 'noise')]),
        (distor_corr, outputnode, [('outputnode.dwi_denoised', 'dwi_denoised')]),
        (distor_corr, outputnode, [('outputnode.dwi_unringed', 'dwi_unringed')]),
        (distor_corr, outputnode, [('outputnode.topup_corr', 'topup_corr')]),
        (distor_corr, outputnode, [('outputnode.topup_field', 'topup_field')]),
        (distor_corr, outputnode, [('outputnode.topup_fieldcoef', 'topup_fieldcoef')]),
        (distor_corr, outputnode, [('outputnode.eddy_corr', 'eddy_corr')]),
        (distor_corr, outputnode, [('outputnode.rotated_bvecs', 'rotated_bvecs')]),
        (distor_corr, outputnode, [('outputnode.total_movement_rms', 'total_movement_rms')]),
        (distor_corr, outputnode, [('outputnode.outlier_report', 'outlier_report')]),
        (distor_corr, outputnode, [('outputnode.shell_params', "shell_alignment_parameters",)]),
        (distor_corr, outputnode, [('outputnode.cnr_maps', 'cnr_maps')]),
        (distor_corr, outputnode, [('outputnode.residuals', 'residuals')]),
        (distor_corr, outputnode, [('outputnode.eddy_params', 'eddy_params')]),
        (distor_corr, dti, [("outputnode.rotated_bvecs", "bvecs")]),
        (distor_corr, dti, [('outputnode.bo_brainmask', 'mask')]),
        #(distor_corr, flirt, [('outputnode.eddy_corr', 'in_file')]),
        #(distor_corr, flirt, [('outputnode.eddy_corr', 'reference')]),
        #(flirt, dti, [('out_file', 'dwi')]),
        (distor_corr, dti, [('outputnode.eddy_corr', 'dwi')]),
        (dti, outputnode, [('FA', 'dti_fa')]),
        (dti, outputnode, [('MD', 'dti_md')]),
        (dti, outputnode, [('L1', 'dti_l1')]),
        (dti, outputnode, [('L2', 'dti_l2')]),
        (dti, outputnode, [('L3', 'dti_l3')]),
        (dti, outputnode, [('V1', 'dti_v1')]),
        (dti, outputnode, [('V2', 'dti_v2')]),
        (dti, outputnode, [('V3', 'dti_v3')])

    ])


    '''
    coregistration of FA and T1
    ------------------------------------
    '''
    # have to rename subject for follow-up (but now the structural workflow is connected
    # to the DWI workflow, so it is not necessary to rename again)
    #def rename_subject_for_fu(input_id):
    #    output_id=input_id+"_fu"
    #    return output_id

    #modify subject name so it can be saved in the same folder as other LIFE- freesurfer data
    #rename=Node(util.Function(input_names=['input_id'],
    #                        output_names=['output_id'],
    #                        function = rename_subject_for_fu), name="rename")
    
    # linear registration with bbregister
    bbreg = Node(fs.BBRegister(contrast_type='t1',
                               out_fsl_file='fa2anat.mat',
                               out_reg_file='fa2anat.dat',
                               registered_file='fa2anat_bbreg.nii.gz',
                               init='fsl'),
                 name='bbregister')

    # connecting the nodes
    dwi_preproc.connect([

        (inputnode, bbreg, [('subject_id', 'subject_id')]),
        (inputnode, bbreg, [('freesurfer_dir', 'subjects_dir')]),
        (dti, bbreg, [("FA", "source_file")]),
        (bbreg, outputnode, [('out_fsl_file', 'fa2anat_mat'),
                             ('out_reg_file', 'fa2anat_dat'),
                             ('registered_file', 'fa2anat')])

    ])

    return dwi_preproc
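
# --- Hedged usage sketch (not part of the original function) ---
# Running the diffusion preprocessing workflow; all paths are hypothetical,
# the field names come from the 'inputnode' node above.
dwi_wf = create_dti()
dwi_wf.base_dir = '/tmp/dwi_work'                        # hypothetical working dir
dwi_wf.inputs.inputnode.subject_id = 'sub-01'            # hypothetical inputs
dwi_wf.inputs.inputnode.freesurfer_dir = '/data/freesurfer'
dwi_wf.inputs.inputnode.dwi = 'dwi.nii.gz'
dwi_wf.inputs.inputnode.dwi_ap = 'dwi_ap.nii.gz'
dwi_wf.inputs.inputnode.dwi_pa = 'dwi_pa.nii.gz'
dwi_wf.inputs.inputnode.bvals = 'dwi.bval'
dwi_wf.inputs.inputnode.bvecs = 'dwi.bvec'
# dwi_wf.run(plugin='MultiProc', plugin_args={'n_procs': 4})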
Exemple #20
0
#     if s.endswith('_roi.mat'):
#         mat_to_nii(s)
#         s = s.replace('_roi.mat', '.nii')

# Nuisance variables
nuisance_masks = [
    '/data/mridata/SeeleyToolbox/SeeleyFirstLevel/proc/csf_ant_post_bilateral.nii',
    '/data/mridata/SeeleyToolbox/SeeleyFirstLevel/proc/avg152T1_white_mask.nii'
]

# TR
TR = 2.0

## CREATE NODES
# For distributing subject paths
infosource = Node(IdentityInterface(fields=['subject_path', 'seed']),
                  name="infosource")
infosource.iterables = [('subject_path', subjdir), ('seed', all_seeds)]

info = dict(
    func=[['subject_path',
           '/processedfmri_TRCNnSFmDI/images/swua_filteredf*.nii']],
    motion=[['subject_path',
             '/processedfmri_TRCNnSFmDI/motion_params_filtered.txt']])

selectfiles = Node(DataGrabber(infields=['subject_path'],
                               outfields=['func', 'motion'],
                               base_directory='/',
                               template='%s/%s',
# subject_list = ['229', '230', '365', '274']

subject_list = ['230']

output_dir = 'Open_Field_output'
working_dir = 'Open_Field_workingdir'

Open_Field_workflow = Workflow(name='Open_Field_workflow')
Open_Field_workflow.base_dir = opj(experiment_dir, working_dir)

# -----------------------------------------------------------------------------------------------------
# In[3]:

# Infosource - a function-free node to iterate over the list of subject names
infosource = Node(IdentityInterface(fields=['subject_id']), name="infosource")
infosource.iterables = [('subject_id', subject_list)]

# -----------------------------------------------------------------------------------------------------
# In[4]:

templates = {'open_field': 'Data/{subject_id}/open_field_{subject_id}.mp4'}

selectfiles = Node(SelectFiles(templates, base_directory=experiment_dir),
                   name="selectfiles")

# -----------------------------------------------------------------------------------------------------
# In[5]:

# datasink = Node(DataSink(base_directory=experiment_dir,
#                          container=output_dir),
def create_AutoRecon3(name="AutoRecon3",
                      qcache=False,
                      plugin_args=None,
                      th3=True,
                      exvivo=True,
                      entorhinal=True,
                      fsvernum=5.3):

    # AutoRecon3
    # Workflow
    ar3_wf = pe.Workflow(name=name)

    # Input Node
    inputspec = pe.Node(IdentityInterface(fields=[
        'lh_inflated', 'rh_inflated', 'lh_smoothwm', 'rh_smoothwm', 'lh_white',
        'rh_white', 'lh_white_H', 'rh_white_H', 'lh_white_K', 'rh_white_K',
        'lh_cortex_label', 'rh_cortex_label', 'lh_orig', 'rh_orig', 'lh_sulc',
        'rh_sulc', 'lh_area', 'rh_area', 'lh_curv', 'rh_curv', 'lh_orig_nofix',
        'rh_orig_nofix', 'aseg_presurf', 'brain_finalsurfs', 'wm', 'filled',
        'brainmask', 'transform', 'orig_mgz', 'rawavg', 'norm', 'lh_atlas',
        'rh_atlas', 'lh_classifier1', 'rh_classifier1', 'lh_classifier2',
        'rh_classifier2', 'lh_classifier3', 'rh_classifier3', 'lookup_table',
        'wm_lookup_table', 'src_subject_id', 'src_subject_dir', 'color_table',
        'num_threads'
    ]),
                        name='inputspec')

    ar3_lh_wf1 = pe.Workflow(name="AutoRecon3_Left_1")
    ar3_rh_wf1 = pe.Workflow(name="AutoRecon3_Right_1")
    for hemisphere, hemi_wf in [('lh', ar3_lh_wf1), ('rh', ar3_rh_wf1)]:
        hemi_inputspec1 = pe.Node(IdentityInterface(fields=[
            'inflated', 'smoothwm', 'white', 'cortex_label', 'orig',
            'aseg_presurf', 'brain_finalsurfs', 'wm', 'filled', 'sphere',
            'sulc', 'area', 'curv', 'classifier', 'atlas', 'num_threads'
        ]),
                                  name="inputspec")

        # Spherical Inflation

        # Inflates the orig surface into a sphere while minimizing metric distortion.
        # This step is necessary in order to register the surface to the spherical
        # atlas (also known as the spherical morph). Calls mris_sphere. Creates
        # surf/?h.sphere. The -autorecon3 stage begins here.

        ar3_sphere = pe.Node(Sphere(), name="Spherical_Inflation")
        ar3_sphere.inputs.seed = 1234
        ar3_sphere.inputs.out_file = '{0}.sphere'.format(hemisphere)
        if plugin_args:
            ar3_sphere.plugin_args = plugin_args
        hemi_wf.connect([(hemi_inputspec1,
                          ar3_sphere, [('inflated', 'in_file'),
                                       ('smoothwm', 'in_smoothwm'),
                                       ('num_threads', 'num_threads')])])

        # Ipsilateral Surface Registation (Spherical Morph)

        # Registers the orig surface to the spherical atlas through surf/?h.sphere.
        # The surfaces are first coarsely registered by aligning the large scale
        # folding patterns found in ?h.sulc and then fine tuned using the small-scale
        # patterns as in ?h.curv. Calls mris_register. Creates surf/?h.sphere.reg.

        ar3_surfreg = pe.Node(Register(), name="Surface_Registration")
        ar3_surfreg.inputs.out_file = '{0}.sphere.reg'.format(hemisphere)
        ar3_surfreg.inputs.curv = True
        hemi_wf.connect([(ar3_sphere, ar3_surfreg, [('out_file', 'in_surf')]),
                         (hemi_inputspec1, ar3_surfreg,
                          [('smoothwm', 'in_smoothwm'), ('sulc', 'in_sulc'),
                           ('atlas', 'target')])])

        # Jacobian

        # Computes how much the white surface was distorted in order to register to
        # the spherical atlas during the -surfreg step.

        ar3_jacobian = pe.Node(Jacobian(), name="Jacobian")
        ar3_jacobian.inputs.out_file = '{0}.jacobian_white'.format(hemisphere)
        hemi_wf.connect([
            (hemi_inputspec1, ar3_jacobian, [('white', 'in_origsurf')]),
            (ar3_surfreg, ar3_jacobian, [('out_file', 'in_mappedsurf')])
        ])

        # Average Curvature

        # Resamples the average curvature from the atlas to that of the subject.
        # Allows the user to display activity on the surface of an individual
        # with the folding pattern (ie, anatomy) of a group.

        ar3_paint = pe.Node(Paint(), name="Average_Curvature")
        ar3_paint.inputs.averages = 5
        ar3_paint.inputs.template_param = 6
        ar3_paint.inputs.out_file = "{0}.avg_curv".format(hemisphere)
        hemi_wf.connect([(ar3_surfreg, ar3_paint, [('out_file', 'in_surf')]),
                         (hemi_inputspec1, ar3_paint, [('atlas', 'template')])
                         ])

        # Cortical Parcellation

        # Assigns a neuroanatomical label to each location on the cortical
        # surface. Incorporates both geometric information derived from the
        # cortical model (sulcus and curvature), and neuroanatomical convention.

        ar3_parcellation = pe.Node(MRIsCALabel(), "Cortical_Parcellation")
        ar3_parcellation.inputs.seed = 1234
        ar3_parcellation.inputs.hemisphere = hemisphere
        ar3_parcellation.inputs.copy_inputs = True
        ar3_parcellation.inputs.out_file = "{0}.aparc.annot".format(hemisphere)
        if plugin_args:
            ar3_parcellation.plugin_args = plugin_args
        hemi_wf.connect([(hemi_inputspec1, ar3_parcellation, [
            ('smoothwm', 'smoothwm'), ('cortex_label', 'label'),
            ('aseg_presurf', 'aseg'), ('classifier', 'classifier'),
            ('curv', 'curv'), ('sulc', 'sulc'), ('num_threads', 'num_threads')
        ]), (ar3_surfreg, ar3_parcellation, [('out_file', 'canonsurf')])])

        # Pial Surface

        ar3_pial = pe.Node(MakeSurfaces(), name="Make_Pial_Surface")
        ar3_pial.inputs.mgz = True
        ar3_pial.inputs.hemisphere = hemisphere
        ar3_pial.inputs.copy_inputs = True

        if fsvernum < 6:
            ar3_pial.inputs.white = 'NOWRITE'
            hemi_wf.connect(hemi_inputspec1, 'white', ar3_pial, 'in_white')
        else:
            ar3_pial.inputs.no_white = True
            hemi_wf.connect([(hemi_inputspec1,
                              ar3_pial, [('white', 'orig_pial'),
                                         ('white', 'orig_white')])])

        hemi_wf.connect([
            (hemi_inputspec1, ar3_pial, [('wm', 'in_wm'), ('orig', 'in_orig'),
                                         ('filled', 'in_filled'),
                                         ('brain_finalsurfs', 'in_T1'),
                                         ('aseg_presurf', 'in_aseg')]),
            (ar3_parcellation, ar3_pial, [('out_file', 'in_label')])
        ])

        # Surface Volume
        """
        Creates the ?h.volume file: the ?h.mid.area file is first created by adding
        ?h.area (the white-surface area) to ?h.area.pial and dividing by two; ?h.volume
        is then created by multiplying ?h.mid.area by ?h.thickness.
        """

        ar3_add = pe.Node(MRIsCalc(), name="Add_Pial_Area")
        ar3_add.inputs.action = "add"
        ar3_add.inputs.out_file = '{0}.area.mid'.format(hemisphere)
        hemi_wf.connect([
            (ar3_pial, ar3_add, [('out_area', 'in_file2')]),
            (hemi_inputspec1, ar3_add, [('area', 'in_file1')]),
        ])

        ar3_divide = pe.Node(MRIsCalc(), name="Mid_Pial")
        ar3_divide.inputs.action = "div"
        ar3_divide.inputs.in_int = 2
        ar3_divide.inputs.out_file = '{0}.area.mid'.format(hemisphere)
        hemi_wf.connect([
            (ar3_add, ar3_divide, [('out_file', 'in_file1')]),
        ])

        ar3_volume = pe.Node(MRIsCalc(), name="Calculate_Volume")
        ar3_volume.inputs.action = "mul"
        ar3_volume.inputs.out_file = '{0}.volume'.format(hemisphere)
        hemi_wf.connect([
            (ar3_divide, ar3_volume, [('out_file', 'in_file1')]),
            (ar3_pial, ar3_volume, [('out_thickness', 'in_file2')]),
        ])

        # Connect the inputs
        ar3_wf.connect([(inputspec, hemi_wf, [
            ('{0}_inflated'.format(hemisphere), 'inputspec.inflated'),
            ('{0}_smoothwm'.format(hemisphere), 'inputspec.smoothwm'),
            ('{0}_white'.format(hemisphere), 'inputspec.white'),
            ('{0}_cortex_label'.format(hemisphere), 'inputspec.cortex_label'),
            ('{0}_orig'.format(hemisphere), 'inputspec.orig'),
            ('{0}_sulc'.format(hemisphere), 'inputspec.sulc'),
            ('{0}_area'.format(hemisphere), 'inputspec.area'),
            ('{0}_curv'.format(hemisphere), 'inputspec.curv'),
            ('aseg_presurf', 'inputspec.aseg_presurf'),
            ('brain_finalsurfs', 'inputspec.brain_finalsurfs'),
            ('wm', 'inputspec.wm'), ('filled', 'inputspec.filled'),
            ('{0}_atlas'.format(hemisphere), 'inputspec.atlas'),
            ('{0}_classifier1'.format(hemisphere), 'inputspec.classifier'),
            ('num_threads', 'inputspec.num_threads')
        ])])

        # Workflow1 Outputs
        hemi_outputs1 = [
            'sphere', 'sphere_reg', 'jacobian_white', 'avg_curv',
            'aparc_annot', 'area_pial', 'curv_pial', 'pial', 'thickness_pial',
            'area_mid', 'volume'
        ]
        hemi_outputspec1 = pe.Node(IdentityInterface(fields=hemi_outputs1),
                                   name="outputspec")
        hemi_wf.connect([
            (ar3_pial, hemi_outputspec1, [('out_pial', 'pial'),
                                          ('out_curv', 'curv_pial'),
                                          ('out_area', 'area_pial'),
                                          ('out_thickness', 'thickness_pial')
                                          ]),
            (ar3_divide, hemi_outputspec1, [('out_file', 'area_mid')]),
            (ar3_volume, hemi_outputspec1, [('out_file', 'volume')]),
            (ar3_parcellation, hemi_outputspec1, [('out_file', 'aparc_annot')
                                                  ]),
            (ar3_jacobian, hemi_outputspec1, [('out_file', 'jacobian_white')]),
            (ar3_paint, hemi_outputspec1, [('out_file', 'avg_curv')]),
            (ar3_surfreg, hemi_outputspec1, [('out_file', 'sphere_reg')]),
            (ar3_sphere, hemi_outputspec1, [('out_file', 'sphere')])
        ])

    # Cortical Ribbon Mask
    """
    Creates binary volume masks of the cortical ribbon
    ie, each voxel is either a 1 or 0 depending upon whether it falls in the ribbon or not.
    """
    volume_mask = pe.Node(VolumeMask(), name="Mask_Ribbon")
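    # Label values follow the FreeSurfer color LUT: 2/41 are left/right cerebral white
    # matter, and 3/42 (left/right cerebral cortex) mark the ribbon voxels.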
    volume_mask.inputs.left_whitelabel = 2
    volume_mask.inputs.left_ribbonlabel = 3
    volume_mask.inputs.right_whitelabel = 41
    volume_mask.inputs.right_ribbonlabel = 42
    volume_mask.inputs.save_ribbon = True
    volume_mask.inputs.copy_inputs = True

    ar3_wf.connect([
        (inputspec, volume_mask, [('lh_white', 'lh_white'),
                                  ('rh_white', 'rh_white')]),
        (ar3_lh_wf1, volume_mask, [('outputspec.pial', 'lh_pial')]),
        (ar3_rh_wf1, volume_mask, [('outputspec.pial', 'rh_pial')]),
    ])

    if fsvernum >= 6:
        ar3_wf.connect([(inputspec, volume_mask, [('aseg_presurf', 'in_aseg')])
                        ])
    else:
        ar3_wf.connect([(inputspec, volume_mask, [('aseg_presurf', 'aseg')])])

    ar3_lh_wf2 = pe.Workflow(name="AutoRecon3_Left_2")
    ar3_rh_wf2 = pe.Workflow(name="AutoRecon3_Right_2")

    for hemisphere, hemiwf2 in [('lh', ar3_lh_wf2), ('rh', ar3_rh_wf2)]:
        if hemisphere == 'lh':
            hemiwf1 = ar3_lh_wf1
        else:
            hemiwf1 = ar3_rh_wf1

        hemi_inputs2 = [
            'wm',
            'lh_white',
            'rh_white',
            'transform',
            'brainmask',
            'aseg_presurf',
            'cortex_label',
            'lh_pial',
            'rh_pial',
            'thickness',
            'aparc_annot',
            'ribbon',
            'smoothwm',
            'sphere_reg',
            'orig_mgz',
            'rawavg',
            'curv',
            'sulc',
            'classifier2',
            'classifier3',
        ]

        hemi_inputspec2 = pe.Node(IdentityInterface(fields=hemi_inputs2),
                                  name="inputspec")

        # Parcellation Statistics
        """
        Runs mris_anatomical_stats to create a summary table of cortical parcellation statistics for each structure, including
        structure name
        number of vertices
        total surface area (mm^2)
        total gray matter volume (mm^3)
        average cortical thickness (mm)
        standard error of cortical thickness (mm)
        integrated rectified mean curvature
        integrated rectified Gaussian curvature
        folding index
        intrinsic curvature index.
        """
        parcellation_stats_white = pe.Node(
            ParcellationStats(),
            name="Parcellation_Stats_{0}_White".format(hemisphere))
        parcellation_stats_white.inputs.mgz = True
        parcellation_stats_white.inputs.th3 = th3
        parcellation_stats_white.inputs.tabular_output = True
        parcellation_stats_white.inputs.surface = 'white'
        parcellation_stats_white.inputs.hemisphere = hemisphere
        parcellation_stats_white.inputs.out_color = 'aparc.annot.ctab'
        parcellation_stats_white.inputs.out_table = '{0}.aparc.stats'.format(
            hemisphere)
        parcellation_stats_white.inputs.copy_inputs = True

        hemiwf2.connect([
            (hemi_inputspec2, parcellation_stats_white, [
                ('wm', 'wm'),
                ('lh_white', 'lh_white'),
                ('rh_white', 'rh_white'),
                ('transform', 'transform'),
                ('brainmask', 'brainmask'),
                ('aseg_presurf', 'aseg'),
                ('cortex_label', 'in_cortex'),
                ('cortex_label', 'cortex_label'),
                ('lh_pial', 'lh_pial'),
                ('rh_pial', 'rh_pial'),
                ('thickness', 'thickness'),
                ('aparc_annot', 'in_annotation'),
                ('ribbon', 'ribbon'),
            ]),
        ])

        parcellation_stats_pial = pe.Node(
            ParcellationStats(),
            name="Parcellation_Stats_{0}_Pial".format(hemisphere))
        parcellation_stats_pial.inputs.mgz = True
        parcellation_stats_pial.inputs.th3 = th3
        parcellation_stats_pial.inputs.tabular_output = True
        parcellation_stats_pial.inputs.surface = 'pial'
        parcellation_stats_pial.inputs.hemisphere = hemisphere
        parcellation_stats_pial.inputs.copy_inputs = True
        parcellation_stats_pial.inputs.out_color = 'aparc.annot.ctab'
        parcellation_stats_pial.inputs.out_table = '{0}.aparc.pial.stats'.format(
            hemisphere)

        hemiwf2.connect([
            (hemi_inputspec2, parcellation_stats_pial, [
                ('wm', 'wm'),
                ('lh_white', 'lh_white'),
                ('rh_white', 'rh_white'),
                ('transform', 'transform'),
                ('brainmask', 'brainmask'),
                ('aseg_presurf', 'aseg'),
                ('cortex_label', 'cortex_label'),
                ('cortex_label', 'in_cortex'),
                ('lh_pial', 'lh_pial'),
                ('rh_pial', 'rh_pial'),
                ('thickness', 'thickness'),
                ('aparc_annot', 'in_annotation'),
                ('ribbon', 'ribbon'),
            ]),
        ])

        # Cortical Parcellation 2
        cortical_parcellation_2 = pe.Node(
            MRIsCALabel(),
            name="Cortical_Parcellation_{0}_2".format(hemisphere))
        cortical_parcellation_2.inputs.out_file = '{0}.aparc.a2009s.annot'.format(
            hemisphere)
        cortical_parcellation_2.inputs.seed = 1234
        cortical_parcellation_2.inputs.copy_inputs = True
        cortical_parcellation_2.inputs.hemisphere = hemisphere

        hemiwf2.connect([(hemi_inputspec2, cortical_parcellation_2,
                          [('smoothwm', 'smoothwm'), ('aseg_presurf', 'aseg'),
                           ('cortex_label', 'label'),
                           ('sphere_reg', 'canonsurf'), ('curv', 'curv'),
                           ('sulc', 'sulc'), ('classifier2', 'classifier')])])

        # Parcellation Statistics 2
        parcellation_stats_white_2 = parcellation_stats_white.clone(
            name="Parcellation_Statistics_{0}_2".format(hemisphere))
        parcellation_stats_white_2.inputs.hemisphere = hemisphere
        parcellation_stats_white_2.inputs.out_color = 'aparc.annot.a2009s.ctab'
        parcellation_stats_white_2.inputs.out_table = '{0}.aparc.a2009s.stats'.format(
            hemisphere)
        hemiwf2.connect([(hemi_inputspec2, parcellation_stats_white_2, [
            ('wm', 'wm'),
            ('lh_white', 'lh_white'),
            ('rh_white', 'rh_white'),
            ('transform', 'transform'),
            ('brainmask', 'brainmask'),
            ('aseg_presurf', 'aseg'),
            ('cortex_label', 'cortex_label'),
            ('cortex_label', 'in_cortex'),
            ('lh_pial', 'lh_pial'),
            ('rh_pial', 'rh_pial'),
            ('thickness', 'thickness'),
            ('ribbon', 'ribbon'),
        ]),
                         (cortical_parcellation_2, parcellation_stats_white_2,
                          [('out_file', 'in_annotation')])])

        # Cortical Parcellation 3
        cortical_parcellation_3 = pe.Node(
            MRIsCALabel(),
            name="Cortical_Parcellation_{0}_3".format(hemisphere))
        cortical_parcellation_3.inputs.out_file = '{0}.aparc.DKTatlas40.annot'.format(
            hemisphere)
        cortical_parcellation_3.inputs.hemisphere = hemisphere
        cortical_parcellation_3.inputs.seed = 1234
        cortical_parcellation_3.inputs.copy_inputs = True
        hemiwf2.connect([(hemi_inputspec2, cortical_parcellation_3,
                          [('smoothwm', 'smoothwm'), ('aseg_presurf', 'aseg'),
                           ('cortex_label', 'label'),
                           ('sphere_reg', 'canonsurf'), ('curv', 'curv'),
                           ('sulc', 'sulc'), ('classifier3', 'classifier')])])

        # Parcellation Statistics 3
        parcellation_stats_white_3 = parcellation_stats_white.clone(
            name="Parcellation_Statistics_{0}_3".format(hemisphere))
        parcellation_stats_white_3.inputs.out_color = 'aparc.annot.DKTatlas40.ctab'
        parcellation_stats_white_3.inputs.out_table = '{0}.aparc.DKTatlas40.stats'.format(
            hemisphere)
        parcellation_stats_white_3.inputs.hemisphere = hemisphere

        hemiwf2.connect([(hemi_inputspec2, parcellation_stats_white_3, [
            ('wm', 'wm'),
            ('lh_white', 'lh_white'),
            ('rh_white', 'rh_white'),
            ('transform', 'transform'),
            ('brainmask', 'brainmask'),
            ('aseg_presurf', 'aseg'),
            ('cortex_label', 'cortex_label'),
            ('cortex_label', 'in_cortex'),
            ('lh_pial', 'lh_pial'),
            ('rh_pial', 'rh_pial'),
            ('thickness', 'thickness'),
            ('ribbon', 'ribbon'),
        ]),
                         (cortical_parcellation_3, parcellation_stats_white_3,
                          [('out_file', 'in_annotation')])])

        # WM/GM Contrast
        contrast = pe.Node(Contrast(),
                           name="WM_GM_Contrast_{0}".format(hemisphere))
        contrast.inputs.hemisphere = hemisphere
        contrast.inputs.copy_inputs = True

        hemiwf2.connect([
            (hemi_inputspec2, contrast, [
                ('orig_mgz', 'orig'),
                ('rawavg', 'rawavg'),
                ('{0}_white'.format(hemisphere), 'white'),
                ('cortex_label', 'cortex'),
                ('aparc_annot', 'annotation'),
                ('thickness', 'thickness'),
            ]),
        ])

        hemi_outputs2 = [
            'aparc_annot_ctab',
            'aparc_stats',
            'aparc_pial_stats',
            'aparc_a2009s_annot',
            'aparc_a2009s_annot_ctab',
            'aparc_a2009s_annot_stats',
            'aparc_DKTatlas40_annot',
            'aparc_DKTatlas40_annot_ctab',
            'aparc_DKTatlas40_annot_stats',
            'wg_pct_mgh',
            'wg_pct_stats',
            'pctsurfcon_log',
        ]
        hemi_outputspec2 = pe.Node(IdentityInterface(fields=hemi_outputs2),
                                   name="outputspec")

        hemiwf2.connect([
            (contrast, hemi_outputspec2, [('out_contrast', 'wg_pct_mgh'),
                                          ('out_stats', 'wg_pct_stats'),
                                          ('out_log', 'pctsurfcon_log')]),
            (parcellation_stats_white_3, hemi_outputspec2,
             [('out_color', 'aparc_DKTatlas40_annot_ctab'),
              ('out_table', 'aparc_DKTatlas40_annot_stats')]),
            (cortical_parcellation_3, hemi_outputspec2,
             [('out_file', 'aparc_DKTatlas40_annot')]),
            (parcellation_stats_white_2, hemi_outputspec2,
             [('out_color', 'aparc_a2009s_annot_ctab'),
              ('out_table', 'aparc_a2009s_annot_stats')]),
            (cortical_parcellation_2, hemi_outputspec2,
             [('out_file', 'aparc_a2009s_annot')]),
            (parcellation_stats_white,
             hemi_outputspec2, [('out_color', 'aparc_annot_ctab'),
                                ('out_table', 'aparc_stats')]),
            (parcellation_stats_pial, hemi_outputspec2,
             [('out_table', 'aparc_pial_stats')]),
        ])
        # connect inputs to hemisphere2 workflow
        ar3_wf.connect([
            (inputspec, hemiwf2, [
                ('wm', 'inputspec.wm'),
                ('lh_white', 'inputspec.lh_white'),
                ('rh_white', 'inputspec.rh_white'),
                ('transform', 'inputspec.transform'),
                ('brainmask', 'inputspec.brainmask'),
                ('aseg_presurf', 'inputspec.aseg_presurf'),
                ('{0}_cortex_label'.format(hemisphere),
                 'inputspec.cortex_label'),
                ('{0}_smoothwm'.format(hemisphere), 'inputspec.smoothwm'),
                ('orig_mgz', 'inputspec.orig_mgz'),
                ('rawavg', 'inputspec.rawavg'),
                ('{0}_curv'.format(hemisphere), 'inputspec.curv'),
                ('{0}_sulc'.format(hemisphere), 'inputspec.sulc'),
                ('{0}_classifier2'.format(hemisphere),
                 'inputspec.classifier2'),
                ('{0}_classifier3'.format(hemisphere),
                 'inputspec.classifier3'),
            ]),
            (ar3_lh_wf1, hemiwf2, [('outputspec.pial', 'inputspec.lh_pial')]),
            (ar3_rh_wf1, hemiwf2, [('outputspec.pial', 'inputspec.rh_pial')]),
            (hemiwf1, hemiwf2,
             [('outputspec.thickness_pial', 'inputspec.thickness'),
              ('outputspec.aparc_annot', 'inputspec.aparc_annot'),
              ('outputspec.sphere_reg', 'inputspec.sphere_reg')]),
            (volume_mask, hemiwf2, [('out_ribbon', 'inputspec.ribbon')]),
        ])
        # End hemisphere2 workflow

    # APARC to ASEG
    # Adds information from the ribbon into the aseg.mgz (volume parcellation).
    aparc_2_aseg = pe.Node(Aparc2Aseg(), name="Aparc2Aseg")
    aparc_2_aseg.inputs.volmask = True
    aparc_2_aseg.inputs.copy_inputs = True
    aparc_2_aseg.inputs.out_file = "aparc+aseg.mgz"
    ar3_wf.connect([(inputspec, aparc_2_aseg, [
        ('lh_white', 'lh_white'),
        ('rh_white', 'rh_white'),
    ]),
                    (ar3_lh_wf1, aparc_2_aseg, [
                        ('outputspec.pial', 'lh_pial'),
                        ('outputspec.aparc_annot', 'lh_annotation'),
                    ]),
                    (ar3_rh_wf1, aparc_2_aseg, [
                        ('outputspec.pial', 'rh_pial'),
                        ('outputspec.aparc_annot', 'rh_annotation'),
                    ]),
                    (volume_mask, aparc_2_aseg, [
                        ('rh_ribbon', 'rh_ribbon'),
                        ('lh_ribbon', 'lh_ribbon'),
                        ('out_ribbon', 'ribbon'),
                    ])])
    if fsvernum < 6:
        ar3_wf.connect([(inputspec, aparc_2_aseg, [('aseg_presurf', 'aseg')])])
    else:
        # Relabel Hypointensities
        relabel_hypos = pe.Node(RelabelHypointensities(),
                                name="Relabel_Hypointensities")
        relabel_hypos.inputs.out_file = 'aseg.presurf.hypos.mgz'
        ar3_wf.connect([(inputspec, relabel_hypos, [('aseg_presurf', 'aseg'),
                                                    ('lh_white', 'lh_white'),
                                                    ('rh_white', 'rh_white')])
                        ])
        ar3_wf.connect([(relabel_hypos, aparc_2_aseg, [('out_file', 'aseg')])])

    aparc_2_aseg_2009 = pe.Node(Aparc2Aseg(), name="Aparc2Aseg_2009")
    aparc_2_aseg_2009.inputs.volmask = True
    aparc_2_aseg_2009.inputs.a2009s = True
    aparc_2_aseg_2009.inputs.copy_inputs = True
    aparc_2_aseg_2009.inputs.out_file = "aparc.a2009s+aseg.mgz"
    ar3_wf.connect([(inputspec, aparc_2_aseg_2009, [
        ('lh_white', 'lh_white'),
        ('rh_white', 'rh_white'),
    ]), (ar3_lh_wf1, aparc_2_aseg_2009, [
        ('outputspec.pial', 'lh_pial'),
    ]),
                    (ar3_lh_wf2, aparc_2_aseg_2009,
                     [('outputspec.aparc_a2009s_annot', 'lh_annotation')]),
                    (ar3_rh_wf2, aparc_2_aseg_2009,
                     [('outputspec.aparc_a2009s_annot', 'rh_annotation')]),
                    (ar3_rh_wf1, aparc_2_aseg_2009, [
                        ('outputspec.pial', 'rh_pial'),
                    ]),
                    (volume_mask, aparc_2_aseg_2009,
                     [('rh_ribbon', 'rh_ribbon'), ('lh_ribbon', 'lh_ribbon'),
                      ('out_ribbon', 'ribbon')])])

    if fsvernum >= 6:
        apas_2_aseg = pe.Node(Apas2Aseg(), name="Apas_2_Aseg")
        ar3_wf.connect([(aparc_2_aseg, apas_2_aseg, [('out_file', 'in_file')]),
                        (relabel_hypos, aparc_2_aseg_2009, [('out_file',
                                                             'aseg')])])
    else:
        # aseg.mgz gets edited in place, so we'll copy and pass it to the
        # outputspec once aparc_2_aseg has completed
        def out_aseg(in_aparcaseg, in_aseg, out_file):
            import shutil
            import os
            out_file = os.path.abspath(out_file)
            shutil.copy(in_aseg, out_file)
            return out_file

        apas_2_aseg = pe.Node(Function(['in_aparcaseg', 'in_aseg', 'out_file'],
                                       ['out_file'], out_aseg),
                              name="Aseg")
        ar3_wf.connect([
            (aparc_2_aseg, apas_2_aseg, [('out_file', 'in_aparcaseg')]),
            (inputspec, apas_2_aseg, [('aseg_presurf', 'in_aseg')]),
            (inputspec, aparc_2_aseg_2009, [('aseg_presurf', 'aseg')])
        ])

    apas_2_aseg.inputs.out_file = "aseg.mgz"

    # Segmentation Stats
    """
    Computes statistics on the segmented subcortical structures found in
    mri/aseg.mgz. Writes output to file stats/aseg.stats.
    """

    segstats = pe.Node(SegStatsReconAll(), name="Segmentation_Statistics")
    segstats.inputs.empty = True
    segstats.inputs.brain_vol = 'brain-vol-from-seg'
    segstats.inputs.exclude_ctx_gm_wm = True
    segstats.inputs.supratent = True
    segstats.inputs.subcort_gm = True
    segstats.inputs.etiv = True
    segstats.inputs.wm_vol_from_surf = True
    segstats.inputs.cortex_vol_from_surf = True
    segstats.inputs.total_gray = True
    segstats.inputs.euler = True
    segstats.inputs.exclude_id = 0
    segstats.inputs.intensity_units = "MR"
    segstats.inputs.summary_file = 'aseg.stats'
    segstats.inputs.copy_inputs = True

    ar3_wf.connect([
        (apas_2_aseg, segstats, [('out_file', 'segmentation_file')]),
        (inputspec, segstats, [
            ('lh_white', 'lh_white'),
            ('rh_white', 'rh_white'),
            ('transform', 'transform'),
            ('norm', 'in_intensity'),
            ('norm', 'partial_volume_file'),
            ('brainmask', 'brainmask_file'),
            ('lh_orig_nofix', 'lh_orig_nofix'),
            ('rh_orig_nofix', 'rh_orig_nofix'),
            ('lookup_table', 'color_table_file'),
        ]),
        (volume_mask, segstats, [('out_ribbon', 'ribbon')]),
        (ar3_lh_wf1, segstats, [
            ('outputspec.pial', 'lh_pial'),
        ]),
        (ar3_rh_wf1, segstats, [
            ('outputspec.pial', 'rh_pial'),
        ]),
    ])

    if fsvernum >= 6:
        ar3_wf.connect(inputspec, 'aseg_presurf', segstats, 'presurf_seg')
    else:
        ar3_wf.connect(inputspec, 'aseg_presurf', segstats, 'aseg')

    # White Matter Parcellation

    # Adds WM parcellation info into the aseg and computes statistics.

    wm_parcellation = pe.Node(Aparc2Aseg(), name="WM_Parcellation")
    wm_parcellation.inputs.volmask = True
    wm_parcellation.inputs.label_wm = True
    wm_parcellation.inputs.hypo_wm = True
    wm_parcellation.inputs.rip_unknown = True
    wm_parcellation.inputs.copy_inputs = True
    wm_parcellation.inputs.out_file = "wmparc.mgz"

    ar3_wf.connect([(inputspec, wm_parcellation, [
        ('lh_white', 'lh_white'),
        ('rh_white', 'rh_white'),
    ]),
                    (ar3_lh_wf1, wm_parcellation, [
                        ('outputspec.pial', 'lh_pial'),
                        ('outputspec.aparc_annot', 'lh_annotation'),
                    ]),
                    (ar3_rh_wf1, wm_parcellation, [
                        ('outputspec.pial', 'rh_pial'),
                        ('outputspec.aparc_annot', 'rh_annotation'),
                    ]),
                    (volume_mask, wm_parcellation, [
                        ('rh_ribbon', 'rh_ribbon'),
                        ('lh_ribbon', 'lh_ribbon'),
                        ('out_ribbon', 'ribbon'),
                    ]), (apas_2_aseg, wm_parcellation, [('out_file', 'aseg')]),
                    (aparc_2_aseg, wm_parcellation, [('out_file', 'ctxseg')])])

    if fsvernum < 6:
        ar3_wf.connect([(inputspec, wm_parcellation, [('filled', 'filled')])])

    # White Matter Segmentation Stats

    wm_segstats = pe.Node(SegStatsReconAll(),
                          name="WM_Segmentation_Statistics")
    wm_segstats.inputs.intensity_units = "MR"
    wm_segstats.inputs.wm_vol_from_surf = True
    wm_segstats.inputs.etiv = True
    wm_segstats.inputs.copy_inputs = True
    wm_segstats.inputs.exclude_id = 0
    wm_segstats.inputs.summary_file = "wmparc.stats"

    ar3_wf.connect([
        (wm_parcellation, wm_segstats, [('out_file', 'segmentation_file')]),
        (inputspec, wm_segstats, [
            ('lh_white', 'lh_white'),
            ('rh_white', 'rh_white'),
            ('transform', 'transform'),
            ('norm', 'in_intensity'),
            ('norm', 'partial_volume_file'),
            ('brainmask', 'brainmask_file'),
            ('lh_orig_nofix', 'lh_orig_nofix'),
            ('rh_orig_nofix', 'rh_orig_nofix'),
            ('wm_lookup_table', 'color_table_file'),
        ]),
        (volume_mask, wm_segstats, [('out_ribbon', 'ribbon')]),
        (ar3_lh_wf1, wm_segstats, [
            ('outputspec.pial', 'lh_pial'),
        ]),
        (ar3_rh_wf1, wm_segstats, [
            ('outputspec.pial', 'rh_pial'),
        ]),
    ])

    if fsvernum >= 6:
        ar3_wf.connect(inputspec, 'aseg_presurf', wm_segstats, 'presurf_seg')
    else:
        ar3_wf.connect(inputspec, 'aseg_presurf', wm_segstats, 'aseg')

    # add Brodmann area maps to the workflow
    ba_WF, ba_outputs = create_ba_maps_wf(th3=th3,
                                          exvivo=exvivo,
                                          entorhinal=entorhinal)

    ar3_wf.connect([
        (ar3_lh_wf1, ba_WF, [
            ('outputspec.sphere_reg', 'inputspec.lh_sphere_reg'),
            ('outputspec.thickness_pial', 'inputspec.lh_thickness'),
            ('outputspec.pial', 'inputspec.lh_pial'),
        ]),
        (ar3_rh_wf1, ba_WF, [
            ('outputspec.sphere_reg', 'inputspec.rh_sphere_reg'),
            ('outputspec.thickness_pial', 'inputspec.rh_thickness'),
            ('outputspec.pial', 'inputspec.rh_pial'),
        ]),
        (inputspec, ba_WF, [
            ('lh_white', 'inputspec.lh_white'),
            ('rh_white', 'inputspec.rh_white'),
            ('transform', 'inputspec.transform'),
            ('aseg_presurf', 'inputspec.aseg'),
            ('brainmask', 'inputspec.brainmask'),
            ('wm', 'inputspec.wm'),
            ('lh_orig', 'inputspec.lh_orig'),
            ('rh_orig', 'inputspec.rh_orig'),
            ('lh_cortex_label', 'inputspec.lh_cortex_label'),
            ('rh_cortex_label', 'inputspec.rh_cortex_label'),
            ('src_subject_dir', 'inputspec.src_subject_dir'),
            ('src_subject_id', 'inputspec.src_subject_id'),
            ('color_table', 'inputspec.color_table'),
        ]), (volume_mask, ba_WF, [('out_ribbon', 'inputspec.ribbon')])
    ])

    if qcache:
        source_inputs = ['lh_sphere_reg', 'rh_sphere_reg']
        source_subject = pe.Node(DataGrabber(outfields=source_inputs),
                                 name="{0}_srcsubject".format(hemisphere))
        source_subject.inputs.template = '*'
        source_subject.inputs.sort_filelist = False
        source_subject.inputs.field_template = dict(
            lh_sphere_reg='surf/lh.sphere.reg',
            rh_sphere_reg='surf/rh.sphere.reg')

        qcache_wf = pe.Workflow("QCache")

        measurements = [
            'thickness', 'area', 'area.pial', 'volume', 'curv', 'sulc',
            'white.K', 'white.H', 'jacobian_white', 'w-g.pct.mgh'
        ]

        qcache_inputs = list()
        for source_file in source_inputs:
            qcache_inputs.append('source_' + source_file)
        qcache_config = dict()
        qcache_outputs = list()
        for hemisphere in ['lh', 'rh']:
            qcache_config[hemisphere] = dict()
            for meas_name in measurements:
                qcache_config[hemisphere][meas_name] = dict()

                if meas_name == 'thickness':
                    meas_file = hemisphere + '_' + meas_name + '_pial'
                else:
                    meas_file = hemisphere + '_' + meas_name.replace(
                        '.', '_').replace('-', '')
                qcache_inputs.append(meas_file)

                preproc_name = "Preproc_{0}".format(meas_file)
                preproc_out = '{0}.{1}.{2}.mgh'.format(
                    hemisphere, meas_name, config['src_subject_id'])
                preproc_out_name = preproc_out.replace('.', '_')
                qcache_config[hemisphere][meas_name]['preproc'] = dict(
                    infile=meas_file,
                    name=preproc_name,
                    out=preproc_out,
                    out_name=preproc_out_name)
                qcache_outputs.append(preproc_out_name)

                qcache_config[hemisphere][meas_name]['smooth'] = dict()
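                # smoothing kernels of 0, 5, 10, 15, 20 and 25 mm FWHM, matching the
                # set that recon-all -qcache conventionally precomputes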
                for value in range(0, 26, 5):
                    smooth_name = "Smooth_{0}_{1}".format(meas_file, value)
                    smooth_out = "{0}.{1}.fwhm{2}.{3}.mgh".format(
                        hemisphere, meas_name, value, config['src_subject_id'])
                    smooth_out_name = smooth_out.replace('.', '_')
                    qcache_config[hemisphere][meas_name]['smooth'][
                        value] = dict(name=smooth_name,
                                      out=smooth_out,
                                      out_name=smooth_out_name)
                    qcache_outputs.append(smooth_out_name)

            qcache_inputs.append(hemisphere + '_sphere_reg')

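        # For illustration (an assumption about one resulting entry): after the loops
        # above, qcache_config['lh']['thickness'] looks roughly like
        #   {'preproc': {'infile': 'lh_thickness_pial',
        #                'name': 'Preproc_lh_thickness_pial',
        #                'out': 'lh.thickness.<src_subject_id>.mgh',
        #                'out_name': 'lh_thickness_<src_subject_id>_mgh'},
        #    'smooth': {0: {...}, 5: {...}, 10: {...}, 15: {...}, 20: {...}, 25: {...}}}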
        qcache_inputspec = pe.Node(IdentityInterface(fields=qcache_inputs),
                                   name="inputspec")

        qcache_outputspec = pe.Node(IdentityInterface(fields=qcache_outputs),
                                    name="outputspec")

        for hemi in qcache_config:
            for meas_config in qcache_config[hemi].values():
                preprocess = pe.Node(MRISPreprocReconAll(),
                                     name=meas_config['preproc']['name'])
                target_id = config['src_subject_id']
                preprocess.inputs.out_file = meas_config['preproc']['out']
                preprocess.inputs.target = target_id
                preprocess.inputs.hemi = hemi
                preprocess.inputs.copy_inputs = True

                qcache_merge = pe.Node(Merge(2),
                                       name="Merge{0}".format(
                                           meas_config['preproc']['name']))

                qcache_wf.connect([
                    (qcache_inputspec, qcache_merge, [('lh_sphere_reg', 'in1'),
                                                      ('rh_sphere_reg', 'in2')
                                                      ]),
                    (qcache_inputspec, preprocess,
                     [(meas_config['preproc']['infile'], 'surf_measure_file'),
                      ('source_lh_sphere_reg', 'lh_surfreg_target'),
                      ('source_rh_sphere_reg', 'rh_surfreg_target')]),
                    (qcache_merge, preprocess, [('out', 'surfreg_files')]),
                    (preprocess, qcache_outputspec,
                     [('out_file', meas_config['preproc']['out_name'])]),
                ])

                for value, val_config in meas_config['smooth'].items():
                    surf2surf = pe.Node(SurfaceSmooth(),
                                        name=val_config['name'])
                    surf2surf.inputs.fwhm = value
                    surf2surf.inputs.cortex = True
                    surf2surf.inputs.subject_id = target_id
                    surf2surf.inputs.hemi = hemi
                    surf2surf.inputs.out_file = val_config['out']
                    qcache_wf.connect([
                        (preprocess, surf2surf, [('out_file', 'in_file')]),
                        (surf2surf, qcache_outputspec,
                         [('out_file', val_config['out_name'])])
                    ])

        # connect qcache inputs
        ar3_wf.connect([
            (inputspec, qcache_wf, [('lh_curv', 'inputspec.lh_curv'),
                                    ('rh_curv', 'inputspec.rh_curv'),
                                    ('lh_sulc', 'inputspec.lh_sulc'),
                                    ('rh_sulc', 'inputspec.rh_sulc'),
                                    ('lh_white_K', 'inputspec.lh_white_K'),
                                    ('rh_white_K', 'inputspec.rh_white_K'),
                                    ('lh_area', 'inputspec.lh_area'),
                                    ('rh_area', 'inputspec.rh_area')]),
            (ar3_lh_wf1, qcache_wf,
             [('outputspec.thickness_pial', 'inputspec.lh_thickness_pial'),
              ('outputspec.area_pial', 'inputspec.lh_area_pial'),
              ('outputspec.volume', 'inputspec.lh_volume'),
              ('outputspec.jacobian_white', 'inputspec.lh_jacobian_white'),
              ('outputspec.sphere_reg', 'inputspec.lh_sphere_reg')]),
            (ar3_lh_wf2, qcache_wf, [('outputspec.wg_pct_mgh',
                                      'inputspec.lh_wg_pct_mgh')]),
            (ar3_rh_wf1, qcache_wf,
             [('outputspec.thickness_pial', 'inputspec.rh_thickness_pial'),
              ('outputspec.area_pial', 'inputspec.rh_area_pial'),
              ('outputspec.volume', 'inputspec.rh_volume'),
              ('outputspec.jacobian_white', 'inputspec.rh_jacobian_white'),
              ('outputspec.sphere_reg', 'inputspec.rh_sphere_reg')]),
            (ar3_rh_wf2, qcache_wf, [('outputspec.wg_pct_mgh',
                                      'inputspec.rh_wg_pct_mgh')]),
        ])
        for source_file in source_inputs:
            ar3_wf.connect([(inputspec, source_subject, [('source_subject_dir',
                                                          'base_directory')]),
                            (source_subject, qcache_wf,
                             [(source_file, 'inputspec.source_' + source_file)
                              ])])
        # end qcache workflow

    # Add outputs to outputspec
    ar3_outputs = [
        'aseg', 'wmparc', 'wmparc_stats', 'aseg_stats', 'aparc_a2009s_aseg',
        'aparc_aseg', 'aseg_presurf_hypos', 'ribbon', 'rh_ribbon', 'lh_ribbon'
    ]
    for output in hemi_outputs1 + hemi_outputs2:
        for hemi in ('lh_', 'rh_'):
            ar3_outputs.append(hemi + output)
    if qcache:
        ar3_outputs.extend(qcache_outputs)

    ar3_outputs.extend(ba_outputs)

    outputspec = pe.Node(IdentityInterface(fields=ar3_outputs),
                         name="outputspec")

    ar3_wf.connect([
        (apas_2_aseg, outputspec, [('out_file', 'aseg')]),
        (wm_parcellation, outputspec, [('out_file', 'wmparc')]),
        (wm_segstats, outputspec, [('summary_file', 'wmparc_stats')]),
        (segstats, outputspec, [('summary_file', 'aseg_stats')]),
        (aparc_2_aseg_2009, outputspec, [('out_file', 'aparc_a2009s_aseg')]),
        (aparc_2_aseg, outputspec, [('out_file', 'aparc_aseg')]),
        (volume_mask, outputspec, [('out_ribbon', 'ribbon'),
                                   ('lh_ribbon', 'lh_ribbon'),
                                   ('rh_ribbon', 'rh_ribbon')])
    ])
    if fsvernum >= 6:
        ar3_wf.connect([(relabel_hypos, outputspec, [('out_file',
                                                      'aseg_presurf_hypos')])])

    for i, outputs in enumerate([hemi_outputs1, hemi_outputs2]):
        if i == 0:
            lhwf = ar3_lh_wf1
            rhwf = ar3_rh_wf1
        else:
            lhwf = ar3_lh_wf2
            rhwf = ar3_rh_wf2
        for output in outputs:
            ar3_wf.connect([
                (lhwf, outputspec, [('outputspec.' + output, 'lh_' + output)]),
                (rhwf, outputspec, [('outputspec.' + output, 'rh_' + output)])
            ])

    for output in ba_outputs:
        ar3_wf.connect([(ba_WF, outputspec, [('outputspec.' + output, output)])
                        ])

    if qcache:
        for output in qcache_outputs:
            ar3_wf.connect([(qcache_wf, outputspec, [('outputspec.' + output,
                                                      output)])])

    return ar3_wf, ar3_outputs
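
# Usage sketch (an assumption, not part of the original module): the factory can be
# instantiated directly and its inputspec wired from the outputs of the earlier
# recon-all stages. The FreeSurfer version and the file names below are placeholders.
#
#     ar3_wf, ar3_outputs = create_AutoRecon3(name="AutoRecon3",
#                                             qcache=False,
#                                             th3=True,
#                                             fsvernum=6.0)
#     ar3_wf.base_dir = '/tmp/autorecon3_work'
#     ar3_wf.inputs.inputspec.lh_white = 'lh.white'
#     ar3_wf.inputs.inputspec.rh_white = 'rh.white'
#     # ... remaining inputspec fields (surfaces, volumes, atlases, classifiers)
#     # come from the AutoRecon1/2 outputs ...
#     ar3_wf.run()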
Exemple #23
0
    def test_summary(self):
        study = DummyStudy(
            self.SUMMARY_STUDY_NAME, self.archive, LinearRunner('ad'),
            inputs=[DatasetMatch('source1', nifti_gz_format, 'source1'),
                    DatasetMatch('source2', nifti_gz_format, 'source2'),
                    DatasetMatch('source3', nifti_gz_format, 'source3')])
        # TODO: Should test out other file formats as well.
        source_files = [study.input(n)
                        for n in ('source1', 'source2', 'source3')]
        inputnode = pe.Node(
            IdentityInterface(['subject_id', 'visit_id']), 'inputnode')
        inputnode.inputs.subject_id = self.SUBJECT
        inputnode.inputs.visit_id = self.VISIT
        source = self.archive.source(source_files)
        # Test subject sink
        subject_sink_files = [
            study.bound_data_spec('subject_sink')]
        subject_sink = self.archive.sink(subject_sink_files,
                                         frequency='per_subject',
                                         study_name=self.SUMMARY_STUDY_NAME)
        subject_sink.inputs.name = 'subject_summary'
        subject_sink.inputs.desc = (
            "Tests the sinking of subject-wide datasets")
        # Test visit sink
        visit_sink_files = [study.bound_data_spec('visit_sink')]
        visit_sink = self.archive.sink(visit_sink_files,
                                       frequency='per_visit',
                                       study_name=self.SUMMARY_STUDY_NAME)
        visit_sink.inputs.name = 'visit_summary'
        visit_sink.inputs.desc = (
            "Tests the sinking of visit-wide datasets")
        # Test project sink
        project_sink_files = [
            study.bound_data_spec('project_sink')]
        project_sink = self.archive.sink(project_sink_files,
                                         frequency='per_project',
                                         study_name=self.SUMMARY_STUDY_NAME)

        project_sink.inputs.name = 'project_summary'
        project_sink.inputs.desc = (
            "Tests the sinking of project-wide datasets")
        # Create workflow connecting them together
        workflow = pe.Workflow('summary_unittest', base_dir=self.work_dir)
        workflow.add_nodes((source, subject_sink, visit_sink,
                            project_sink))
        workflow.connect(inputnode, 'subject_id', source, 'subject_id')
        workflow.connect(inputnode, 'visit_id', source, 'visit_id')
        workflow.connect(inputnode, 'subject_id', subject_sink, 'subject_id')
        workflow.connect(inputnode, 'visit_id', visit_sink, 'visit_id')
        workflow.connect(
            source, 'source1' + PATH_SUFFIX,
            subject_sink, 'subject_sink' + PATH_SUFFIX)
        workflow.connect(
            source, 'source2' + PATH_SUFFIX,
            visit_sink, 'visit_sink' + PATH_SUFFIX)
        workflow.connect(
            source, 'source3' + PATH_SUFFIX,
            project_sink, 'project_sink' + PATH_SUFFIX)
        workflow.run()
        # Check local summary directories were created properly
        subject_dir = self.get_session_dir(frequency='per_subject')
        self.assertEqual(sorted(os.listdir(subject_dir)),
                         [self.SUMMARY_STUDY_NAME + '_subject_sink.nii.gz'])
        visit_dir = self.get_session_dir(frequency='per_visit')
        self.assertEqual(sorted(os.listdir(visit_dir)),
                         [self.SUMMARY_STUDY_NAME + '_visit_sink.nii.gz'])
        project_dir = self.get_session_dir(frequency='per_project')
        self.assertEqual(sorted(os.listdir(project_dir)),
                         [self.SUMMARY_STUDY_NAME + '_project_sink.nii.gz'])
        # Reload the data from the summary directories
        reloadinputnode = pe.Node(IdentityInterface(['subject_id',
                                                     'visit_id']),
                                  'reload_inputnode')
        reloadinputnode.inputs.subject_id = self.SUBJECT
        reloadinputnode.inputs.visit_id = self.VISIT
        reloadsource = self.archive.source(
            (source_files + subject_sink_files + visit_sink_files +
             project_sink_files),
            name='reload_source',
            study_name=self.SUMMARY_STUDY_NAME)
        reloadsink = self.archive.sink(
            [study.bound_data_spec(n)
             for n in ('resink1', 'resink2', 'resink3')],
            study_name=self.SUMMARY_STUDY_NAME)
        reloadsink.inputs.name = 'reload_summary'
        reloadsink.inputs.desc = (
            "Tests the reloading of subject and project summary datasets")
        reloadworkflow = pe.Workflow('reload_summary_unittest',
                                     base_dir=self.work_dir)
        reloadworkflow.connect(reloadinputnode, 'subject_id',
                               reloadsource, 'subject_id')
        reloadworkflow.connect(reloadinputnode, 'visit_id',
                               reloadsource, 'visit_id')
        reloadworkflow.connect(reloadinputnode, 'subject_id',
                               reloadsink, 'subject_id')
        reloadworkflow.connect(reloadinputnode, 'visit_id',
                               reloadsink, 'visit_id')
        reloadworkflow.connect(reloadsource,
                               'subject_sink' + PATH_SUFFIX,
                               reloadsink,
                               'resink1' + PATH_SUFFIX)
        reloadworkflow.connect(reloadsource,
                               'visit_sink' + PATH_SUFFIX,
                               reloadsink,
                               'resink2' + PATH_SUFFIX)
        reloadworkflow.connect(reloadsource,
                               'project_sink' + PATH_SUFFIX,
                               reloadsink,
                               'resink3' + PATH_SUFFIX)
        reloadworkflow.run()
        outputs = [
            f for f in sorted(os.listdir(self.session_dir))
            if f != FIELDS_FNAME]
        self.assertEqual(outputs,
                         [self.SUMMARY_STUDY_NAME + '_resink1.nii.gz',
                          self.SUMMARY_STUDY_NAME + '_resink2.nii.gz',
                          self.SUMMARY_STUDY_NAME + '_resink3.nii.gz',
                          'source1.nii.gz', 'source2.nii.gz',
                          'source3.nii.gz', 'source4.nii.gz'])
Exemple #24
0
    'run008'
]

frequency_list = ['05Hz', '10Hz', '20Hz', '40Hz']
# frequency_list = ['40Hz']

output_dir = 'Stimulation_1st_level_OutputDir_CA3'
working_dir = 'Stimulation_1st_level_WorkingDir_CA3'

stimulation_1st_level = Workflow(name='stimulation_1st_level_CA3')
stimulation_1st_level.base_dir = opj(experiment_dir, working_dir)

# ============================================================================================================================
# In[3]:
infosource = Node(
    IdentityInterface(fields=['subject_id', 'session_id', 'frequency_id']),
    name="infosource")

infosource.iterables = [('subject_id', subject_list),
                        ('session_id', session_list),
                        ('frequency_id', frequency_list)]

# ============================================================================================================================
# In[4]:
# sub-001_task-MGT_run-02_bold.nii.gz, sub-001_task-MGT_run-02_sbref.nii.gz
# /media/amr/Amr_4TB/MGT_poldrack/output_MGT_poldrack_preproc_preproc/preproc_img/run-04sub-119/afni_2d_smoothed_all_maths_filt_maths.nii.gz
# functional runs
templates = {
    'preproc_img':
    '/media/amr/Amr_4TB/Work/stimulation/Stimulation_Preproc_OutputDir_CA3/preproc_img/{frequency_id}_{session_id}_subj_{subject_id}/afni_2d_smoothed_maths_filt_maths.nii.gz',
    'bold_brain':
Exemple #25
0
    'true',  # remove any interface outputs not needed by the workflow
    'use_relative_paths':
    'false',  # relative paths should be on; requires a hash update when changed.
    'remove_node_directories': 'false',  # Experimental
    'local_hash_check': 'true',
    'job_finished_timeout': 45
}
minipigWF.config['logging'] = {
    'workflow_level': 'DEBUG',
    'filemanip_level': 'DEBUG',
    'interface_level': 'DEBUG',
    'log_directory': ExperimentInfo["Atlas"]["LOG_DIR"]
}

input_spec = pe.Node(interface=IdentityInterface(fields=[
    'Raw_Atlas', 'Raw_T1', 'Cropped_T1', 'Raw_T2', 'Raw_BM', 'DomesticLUT',
    'Domestic_LabelMap'
]),
                     run_without_submitting=True,
                     name='inputspec')

input_spec.inputs.Raw_T1 = ExperimentInfo["Subject"]["Raw_T1"]
input_spec.inputs.Raw_T2 = ExperimentInfo["Subject"]["Raw_T2"]
input_spec.inputs.Raw_BM = ExperimentInfo["Subject"]["Raw_BM"]
input_spec.inputs.Cropped_T1 = ExperimentInfo["Subject"]["Cropped_T1"]

input_spec.inputs.Raw_Atlas = ExperimentInfo["Atlas"]["IntensityImage"]
input_spec.inputs.DomesticLUT = ExperimentInfo["Atlas"]["LabelMapLUT"]
input_spec.inputs.Domestic_LabelMap = ExperimentInfo["Atlas"]["LabelMapImage"]


def ChangeDynamicRangeOfImage(inFN, outFN, winMin, winMax, outMin, outMax):
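    # Body sketch only -- the original example is truncated at this point. This version
    # assumes SimpleITK and simply windows the input intensities from [winMin, winMax]
    # into [outMin, outMax] before writing the result.
    import SimpleITK as sitk
    image = sitk.ReadImage(inFN)
    rescaled = sitk.IntensityWindowing(sitk.Cast(image, sitk.sitkFloat32),
                                       windowMinimum=float(winMin),
                                       windowMaximum=float(winMax),
                                       outputMinimum=float(outMin),
                                       outputMaximum=float(outMax))
    sitk.WriteImage(rescaled, outFN)
    return outFN

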
def writeCVSubsetFile(environment, experiment, pipeline, cluster, csv_file,
                      test_size, hasHeader):

    from utilities.misc import add_dict
    master_config = {}
    for configDict in [environment, experiment, pipeline, cluster]:
        master_config = add_dict(master_config, configDict)
    """
    read in csv file
    """
    import csv
    csv_data = []
    with open(csv_file, mode='r') as infile:
        reader = csv.DictReader(infile, skipinitialspace=True)
        for row in reader:
            csv_data.append(row)
    print(csv_data)

    totalSampleSize = len(csv_data)
    print(totalSampleSize)
    cv_subsets = subsample_crossValidationSet(totalSampleSize, test_size)
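    # For illustration (inferred from how `subset` is used below): each element of
    # cv_subsets is expected to be a dict of the form
    #   {'train': [indices into csv_data], 'test': [indices into csv_data]}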
    """
    global variable
    """
    ## HACK: hard-coded for now; this should come from the .config file
    BASE_DATA_GRABBER_DIR = '/Shared/johnsonhj/HDNI/ReferenceData/Neuromorphometrics/2012Subscription'
    #master_config = {'queue':'HJ',
    #    'long_q':'HJ'}
    """
    workflow
    """
    import nipype.pipeline.engine as pe
    import nipype.interfaces.io as nio
    from .WorkupJointFusion import CreateJointFusionWorkflow
    CV_JointFusion_WF = pe.Workflow(name="CV_JointFusion")
    CV_JointFusion_WF.base_dir = master_config['cachedir']

    subset_no = 1
    for subset in cv_subsets:
        print("-" * 80)
        print(" Create subset workflow Set " + str(subset_no))
        print("-" * 80)
        trainData = [csv_data[i] for i in subset['train']]
        testData = [csv_data[i] for i in subset['test']]

        print([(trainData[i])['id'] for i in range(len(trainData))])

        for testSession in testData:
            JointFusionWFName = "JointFusion_Set{0}_{1}".format(
                subset_no, testSession['id'])
            myJointFusion = CreateJointFusionWorkflow(
                JointFusionWFName,
                master_config,
                [(trainData[i])['id'] for i in range(len(trainData))],
                BASE_DATA_GRABBER_DIR,
                runFixFusionLabelMap=False)

            testSessionName = "testSessionSpec_Set{0}_{1}".format(
                subset_no, testSession['id'])
            testSessionSpec = pe.Node(interface=IdentityInterface(fields=[
                't1_average', 'tissueLabel', 'template_leftHemisphere',
                'landmarkInACPCAlignedSpace', 'template_weights_50Lmks_wts',
                'labelFilename'
            ]),
                                      run_without_submitting=True,
                                      name=testSessionName)

            CV_JointFusion_WF.connect(testSessionSpec, 't1_average',
                                      myJointFusion, 'inputspec.subj_t1_image')
            CV_JointFusion_WF.connect(testSessionSpec, 'tissueLabel',
                                      myJointFusion,
                                      'inputspec.subj_fixed_head_labels')

            CV_JointFusion_WF.connect(testSessionSpec,
                                      'template_leftHemisphere', myJointFusion,
                                      'inputspec.subj_left_hemisphere')
            CV_JointFusion_WF.connect(testSessionSpec,
                                      'landmarkInACPCAlignedSpace',
                                      myJointFusion, 'inputspec.subj_lmks')
            CV_JointFusion_WF.connect(testSessionSpec,
                                      'template_weights_50Lmks_wts',
                                      myJointFusion,
                                      'inputspec.atlasWeightFilename')
            CV_JointFusion_WF.connect(testSessionSpec, 'labelFilename',
                                      myJointFusion,
                                      'inputspec.labelBaseFilename')
            """ set test image information
            """
            print(testSession)
            testSessionSpec.inputs.t1_average = testSession['t1']
            testSessionSpec.inputs.tissueLabel = testSession[
                'fixed_head_label']
            testSessionSpec.inputs.template_leftHemisphere = testSession[
                'warpedAtlasLeftHemisphere']
            testSessionSpec.inputs.landmarkInACPCAlignedSpace = testSession[
                'lmk']
            testSessionSpec.inputs.template_weights_50Lmks_wts = "/Shared/sinapse/scratch/eunyokim/src/NamicExternal/build_Mac_201501/bin/Atlas/Atlas_20131115/20141004_BCD/template_landmarks_50Lmks.fcsv"
            testSessionSpec.inputs.labelFilename = 'FS_wmparc.nii.gz'
            """
            DataSink
            """
            dsName = "DataSink_DS_Set{0}_{1}".format(subset_no,
                                                     testSession['id'])
            DataSink = pe.Node(name=dsName, interface=nio.DataSink())
            DataSink.overwrite = master_config['ds_overwrite']
            DataSink.inputs.container = 'CV_Set{0}/{1}'.format(
                subset_no, testSession['id'])
            DataSink.inputs.base_directory = master_config['resultdir']

            CV_JointFusion_WF.connect(
                myJointFusion, 'outputspec.JointFusion_neuro2012_labelmap',
                DataSink, 'Segmentation.@JointFusion_neuro2012_labelmap')

        subset_no = subset_no + 1  # advance once per cross-validation subset

    #CV_JointFusion_WF.write_graph()
    CV_JointFusion_WF.run(plugin=master_config['plugin_name'],
                          plugin_args=master_config['plugin_args'])
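
# A minimal, hypothetical invocation sketch (the config dicts, CSV path and test
# size below are placeholders, not part of the original example):
# writeCVSubsetFile(environment=env_cfg, experiment=exp_cfg, pipeline=pipe_cfg,
#                   cluster=cluster_cfg, csv_file='cv_sessions.csv',
#                   test_size=2, hasHeader=True)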
Exemple #27
0
# imports required by this snippet (omitted from the original extract);
# experiment_dir is assumed to be defined earlier in the original script
import os
from os.path import join as opj
from nipype import Workflow, Node
from nipype.interfaces.utility import IdentityInterface

# the output directory
output_dir = 'output_dir'
# working_dir name
working_dir = 'derivatives/nipype_working_dir_ADNI_pp'

#other things to be set up
ses_list = ['ses-01', 'ses-02', 'ses-03']
subject_list = sorted(os.listdir(experiment_dir + 'data/'))

#####################

wf = Workflow(name='Workflow_preprocess_ADNI')
wf.base_dir = os.path.join(experiment_dir, working_dir)

# create infosource to iterate over iterables
infosource = Node(IdentityInterface(fields=['subject', 'ses']),
                  name="infosource")
infosource.iterables = [('subject', subject_list), ('ses', ses_list)]


templates = {#tse
             'tse' : '{subject}/{ses}/anat/{subject}_{ses}*run-1_T2w.nii.gz',
             #mprage
             'mprage' : '{subject}/{ses}/anat/{subject}_{ses}*run-1_T1w.nii.gz',
             }
# change and add more strings to include all necessary templates for histmatch
histmatch_files = {
    'ashs_t1_template': 'template/template.nii.gz',
    'ashs_t2_template': 'train/train000/tse.nii.gz',
}
preproc = Workflow(name='preproc')
preproc.base_dir = opj(experiment_dir, working_dir)
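
# The template dicts above are presumably consumed by SelectFiles nodes further
# down in the original script; a minimal, hypothetical sketch of such a node
# (the node name and base_directory are assumptions):
# from nipype import SelectFiles
# selectfiles = Node(SelectFiles(templates,
#                                base_directory=opj(experiment_dir, 'data'),
#                                sort_filelist=True),
#                    name='selectfiles')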


#=====================================================================================================
# In[3]:
#to prevent nipype from iterating over the anat image with each func run, you need separate
#nodes to select the files
#this solves the problem I had for almost 6 months
#but notice that for the sessions, you also have to iterate over subject_id to get the {subject_id} var



# Infosource - a function free node to iterate over the list of subject names
infosource_anat = Node(IdentityInterface(fields=['subject_id']),
                  name="infosource_anat")
infosource_anat.iterables = [('subject_id', subject_list)]



infosource_func = Node(IdentityInterface(fields=['subject_id','session_id']),
                  name="infosource_func")
infosource_func.iterables = [('subject_id', subject_list),
                             ('session_id', session_list)]


#========================================================================================================
# In[4]:
# sub-001_task-MGT_run-02_bold.nii.gz, sub-001_task-MGT_run-02_sbref.nii.gz
"""
Define the workflow directories
"""

# imports needed by this snippet (omitted from the original extract); tutorial_dir
# is assumed to be defined earlier in the original script
import os
import nipype.pipeline.engine as pe
import nipype.interfaces.io as nio
from nipype.interfaces.utility import IdentityInterface

subject_list = ['s1', 's3']
data_dir = os.path.abspath('data')
subjects_dir = os.path.join(tutorial_dir, 'subjects_dir')
if not os.path.exists(subjects_dir):
    os.mkdir(subjects_dir)

wf = pe.Workflow(name="l1workflow")
wf.base_dir = os.path.join(tutorial_dir, 'workdir')
"""
Create inputspec
"""

inputspec = pe.Node(interface=IdentityInterface(['subject_id']),
                    name="inputspec")
inputspec.iterables = ("subject_id", subject_list)
"""
Grab data
"""

datasource = pe.Node(interface=nio.DataGrabber(infields=['subject_id'],
                                               outfields=['struct']),
                     name='datasource')
datasource.inputs.base_directory = data_dir
datasource.inputs.template = '%s/%s.nii'
datasource.inputs.template_args = dict(struct=[['subject_id', 'struct']])
datasource.inputs.subject_id = subject_list
datasource.inputs.sort_filelist = True
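
# Note: subject_id is set directly on the DataGrabber above; a common alternative
# nipype pattern (shown only as a hedged sketch, not taken from the original) is
# to drive it from the inputspec iterable instead:
# wf.connect(inputspec, 'subject_id', datasource, 'subject_id')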
Exemple #30
0
# imports required by this helper (not included in the original extract)
from nipype import Node
from nipype.interfaces.utility import IdentityInterface


def create_output_spec(outputs, hemisphere_names, name):
    final_output_names = list()
    for output in outputs:
        for hemisphere in hemisphere_names:
            final_output_names.append("{0}_".format(hemisphere) + output)
    return Node(IdentityInterface(final_output_names), name)
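

# Example usage (hypothetical argument values), following the logic above:
# out_spec = create_output_spec(['white', 'pial'], ['lh', 'rh'], 'outputspec')
# -> an IdentityInterface node exposing lh_white, rh_white, lh_pial and rh_pial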