Example #1
def getOutputDirDict(configurationFilename):
    print("""getOutputDirDict""")
    import ConfigurationParser

    configurationMap = ConfigurationParser.ConfigurationSectionMap(
        configurationFilename)
    listFiles = configurationMap["ListFiles"]

    from XMLConfigurationGenerator import combineCSVs

    applyDict = combineCSVs(
        listFiles["applySubjectListFilename".lower()],
        listFiles["applyFeatureListFileDictionary".lower()],
    )
    outputDirDict = {}
    print(applyDict)
    for sessionID in list(applyDict.keys()):
        outputDirDict[sessionID] = "apply_" + sessionID
    print(outputDirDict)
    return outputDirDict
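For context, a minimal sketch of how this helper might be called, assuming the ConfigurationParser module is importable and the configuration path (a placeholder here) points at a BRAINSCut-style config file:

outputDirDict = getOutputDirDict("/full/path/to/apply.config")
for sessionID, outputDir in outputDirDict.items():
    print(sessionID, "->", outputDir)  # each session maps to "apply_" + sessionID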
Example #2
import unittest

class TestParse(unittest.TestCase):

    cp = ConfigurationParser()
    expected_names = ['CUSTOMER_A', 'CUSTOMER_B']
    expected_vlans = [100, 101]

    def test_parse_customer_name(self):
        parsed_names = self.cp.parseCustomerNames()
        self.assertEqual(list, type(parsed_names))
        self.assertEqual(self.expected_names, parsed_names)

    def validate_result(customer, expected_vlan):
        # Decorator factory: call the decorated test with ``customer`` and
        # assert that it returns the matching ``expected_vlan``.
        def decorator(function):
            def wrapper(self):
                self.assertEqual(expected_vlan, function(self, customer))

            return wrapper

        return decorator

    @validate_result(expected_names[0], expected_vlans[0])
    def test_parse_customer_a_vlan(self, customer_name):
        return self.cp.parseCustomerVlan(customer_name)

    @validate_result(expected_names[1], expected_vlans[1])
    def test_parse_customer_b_vlan(self, customer_name):
        return self.cp.parseCustomerVlan(customer_name)
Example #3
def main():

    this_stations = parser.xml_parsing()

    # create the network from the parsed station parameters
    this_network = network(stations=this_stations)

    # with all parameters in place, start the simulation
    simulation(this_network)
Example #4
    def readConfig(self, configFile):
        parser = ConfigurationParser.ConfigurationParser()
        parser.addTopLevelHandler('SiteConfig', self)
        parser.addTopLevelHandler('Content', self.contentConfig)
        parser.addTopLevelHandler('Template', self.templateConfig)

        self.currentDirective = []
        # a context manager ensures the file is closed even if parsing fails
        with open(configFile, 'r') as confFile:
            parser.parse(confFile)

        self.contentConfig.configFinished()
        self.templateConfig.configFinished()
Example #5
def getOutputDirDict(configurationFilename):
    print( """getOutputDirDict""")
    import ConfigurationParser
    configurationMap = ConfigurationParser.ConfigurationSectionMap(configurationFilename)
    listFiles = configurationMap['ListFiles']

    from XMLConfigurationGenerator import combineCSVs
    applyDict = combineCSVs(listFiles['applySubjectListFilename'.lower()],
                            listFiles['applyFeatureListFileDictionary'.lower()])
    outputDirDict = {}
    print(applyDict)
    for sessionID in applyDict.iterkeys():
        outputDirDict[sessionID] = 'apply_' + sessionID
    print(outputDirDict)
    return outputDirDict
Example #6
def similarityComputeWorkflow(ResultDir,
                              OutputDir,
                              ExperimentalConfigurationFile,
                              runOption,
                              PythonBinDir,
                              BRAINSToolsSrcDir,
                              BRAINSToolsBuildDir):

    import sys
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe
    #
    # get normalization option from experimental configuration file
    #
    import ConfigurationParser as configParser
    import analysis as this
    configMap = configParser.ConfigurationSectionMap(ExperimentalConfigurationFile)
    normalizationOptions = configMap['Options']['normalization']
    print(""" Normalization Option:::
          {str}
          """.format( str=normalizationOptions ))

    #
    # get methods
    #
    import ast
    methodOptionsDictList = configMap['Options']['modelParameter'.lower()]
    methodOptions = []
    print(methodOptionsDictList)
    for option in methodOptionsDictList:
        methodStr = 'TreeDepth' + str(option['--randomTreeDepth']) + '_TreeNumber' + str(option['--numberOfTrees'])
        methodOptions.append(methodStr)
    print(""" Method Option:::
          {str}
          """.format( str=methodStr ))

    #
    # get roiList
    #
    roiList = configMap['Options']['roiBooleanCreator'.lower()].keys()
    print(""" ROIList:::
          {str}
          """.format( str=roiList ))

    #
    # get sessionList and manualDict
    #
    import XMLConfigurationGenerator
    subjectListFilename = configMap['ListFiles']['subjectListFilename'.lower()]
    manualDict = XMLConfigurationGenerator.combineCSVs(subjectListFilename, {})
    sessionList = manualDict.keys()

    #
    # workflow
    #
    workFlowName = 'outputDataCollector'
    workflow = pe.Workflow(name=workFlowName)
    workflow.base_dir = OutputDir

    from nipype.interfaces.utility import Function
    experimentalND = pe.Node(name='experimentalND',
                             interface=Function(input_names=['resultDir',
                                                             'outputCSVFilename',
                                                             'normalization',
                                                             'methodParameter',
                                                             'manualDict',
                                                             'roiList',
                                                             'sessionIDList',
                                                             'doRawComparison'],
                                                output_names='outputCSVFilename',
                                                function=this.experimentAnalysis
                                                )
                             )
    experimentalND.inputs.resultDir = ResultDir
    experimentalND.inputs.outputCSVFilename = 'experimentalResult.csv'
    experimentalND.inputs.roiList = roiList
    experimentalND.inputs.manualDict = manualDict
    experimentalND.inputs.sessionIDList = sessionList
    # experimentalND.inputs.doRawComparison = doRawComparison
    experimentalND.iterables = [('normalization', normalizationOptions),
                                ('methodParameter', methodOptions),
                                ('doRawComparison', [True, False])
                                ]
    workflow.add_nodes([experimentalND])

    summaryND = pe.Node(name='summaryND',
                        interface=Function(input_names=['inputCSVFilename',
                                                        'outputCSVPrefix'
                                                        ],
                                           output_names=['outputCSVList'],
                                           function=computeSimilarityWF.computeSummaryFromCSV)
                        )

    summaryND.inputs.outputCSVPrefix = 'summaryOutput'
    workflow.connect(experimentalND, 'outputCSVFilename',
                     summaryND, 'inputCSVFilename')

    if runOption == "cluster":
        ############################################
        # Platform specific information
        #     Prepend the python search paths
        pythonPath = BRAINSToolsSrcDir + "/BRAINSCut/BRAINSFeatureCreators/RobustStatisticComputations:" + BRAINSToolsSrcDir + "/AutoWorkup/:" + BRAINSToolsSrcDir + "/AutoWorkup/BRAINSTools/:" + BRAINSToolsBuildDir + "/SimpleITK-build/bin:" + \
            BRAINSToolsBuildDir + "/SimpleITK-build/lib:" + PythonBinDir
        binPath = BRAINSToolsBuildDir + "/bin:" + BRAINSToolsBuildDir + "/lib"

        PYTHON_AUX_PATHS = pythonPath
        PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(':')
        PYTHON_AUX_PATHS.extend(sys.path)
        sys.path = PYTHON_AUX_PATHS
        # print sys.path
        import SimpleITK as sitk
        #     Prepend the shell environment search paths
        PROGRAM_PATHS = binPath
        PROGRAM_PATHS = PROGRAM_PATHS.split(':')
        import os
        PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
        os.environ['PATH'] = ':'.join(PROGRAM_PATHS)

        Cluster_Script = get_global_sge_script(PYTHON_AUX_PATHS,
                                               PROGRAM_PATHS,
                                               {}
                                               )
        workflow.run(plugin='SGE',
                     plugin_args=dict(template=Cluster_Script,
                                      qsub_args="-S /bin/bash -pe smp 4-8 -o /dev/null "))
    else:
        workflow.run()
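A minimal sketch of a local run, with placeholder paths; for any runOption other than "cluster" the tool directories are unused, so dummies like "NA" suffice:

similarityComputeWorkflow(ResultDir="/full/path/to/resultDir",
                          OutputDir="/full/path/to/outputDir",
                          ExperimentalConfigurationFile="/full/path/to/experiment.config",
                          runOption="local",
                          PythonBinDir="NA",
                          BRAINSToolsSrcDir="NA",
                          BRAINSToolsBuildDir="NA")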
Example #7
def similarityComputeWorkflow(
    ResultDir,
    OutputDir,
    ExperimentalConfigurationFile,
    runOption,
    PythonBinDir,
    BRAINSToolsSrcDir,
    BRAINSToolsBuildDir,
):

    from collections import (
        OrderedDict,
    )  # Need OrderedDict internally to ensure consistent ordering
    import sys
    import nipype.interfaces.io as nio
    import nipype.pipeline.engine as pe

    #
    # get normalization option from experimental configuration file
    #
    import ConfigurationParser as configParser
    import analysis as this

    configMap = configParser.ConfigurationSectionMap(
        ExperimentalConfigurationFile)
    normalizationOptions = configMap["Options"]["normalization"]
    print((""" Normalization Option:::
          {str}
          """.format(str=normalizationOptions)))

    #
    # get methods
    #
    import ast

    methodOptionsDictList = configMap["Options"]["modelParameter".lower()]
    methodOptions = []
    print(methodOptionsDictList)
    for option in methodOptionsDictList:
        methodStr = ("TreeDepth" + str(option["--randomTreeDepth"]) +
                     "_TreeNumber" + str(option["--numberOfTrees"]))
        methodOptions.append(methodStr)
    print((""" Method Option:::
          {str}
          """.format(str=methodStr)))

    #
    # get roiList
    #
    roiList = list(configMap["Options"]["roiBooleanCreator".lower()].keys())
    print((""" ROIList:::
          {str}
          """.format(str=roiList)))

    #
    # get sessionList and manualDict
    #
    import XMLConfigurationGenerator

    subjectListFilename = configMap["ListFiles"]["subjectListFilename".lower()]
    manualDict = XMLConfigurationGenerator.combineCSVs(subjectListFilename, {})
    sessionList = list(manualDict.keys())

    #
    # workflow
    #
    workFlowName = "outputDataCollector"
    workflow = pe.Workflow(name=workFlowName)
    workflow.base_dir = OutputDir

    from nipype.interfaces.utility import Function

    experimentalND = pe.Node(
        name="experimentalND",
        interface=Function(
            input_names=[
                "resultDir",
                "outputCSVFilename",
                "normalization",
                "methodParameter",
                "manualDict",
                "roiList",
                "sessionIDList",
                "doRawComparison",
            ],
            output_names="outputCSVFilename",
            function=this.experimentAnalysis,
        ),
    )
    experimentalND.inputs.resultDir = ResultDir
    experimentalND.inputs.outputCSVFilename = "experimentalResult.csv"
    experimentalND.inputs.roiList = roiList
    experimentalND.inputs.manualDict = manualDict
    experimentalND.inputs.sessionIDList = sessionList
    # experimentalND.inputs.doRawComparison = doRawComparison
    experimentalND.iterables = [
        ("normalization", normalizationOptions),
        ("methodParameter", methodOptions),
        ("doRawComparison", [True, False]),
    ]
    workflow.add_nodes([experimentalND])

    summaryND = pe.Node(
        name="summaryND",
        interface=Function(
            input_names=["inputCSVFilename", "outputCSVPrefix"],
            output_names=["outputCSVList"],
            function=computeSimilarityWF.computeSummaryFromCSV,
        ),
    )

    summaryND.inputs.outputCSVPrefix = "summaryOutput"
    workflow.connect(experimentalND, "outputCSVFilename", summaryND,
                     "inputCSVFilename")

    if runOption == "cluster":
        ############################################
        # Platform specific information
        #     Prepend the python search paths
        pythonPath = (
            BRAINSToolsSrcDir +
            "/BRAINSCut/BRAINSFeatureCreators/RobustStatisticComputations:" +
            BRAINSToolsSrcDir + "/AutoWorkup/:" + BRAINSToolsSrcDir +
            "/AutoWorkup/BRAINSTools/:" + BRAINSToolsBuildDir +
            "/SimpleITK-build/bin/" + BRAINSToolsBuildDir +
            "/SimpleITK-build/lib:" + PythonBinDir)
        binPath = BRAINSToolsBuildDir + "/bin:" + BRAINSToolsBuildDir + "/lib"

        PYTHON_AUX_PATHS = pythonPath
        PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(":")
        PYTHON_AUX_PATHS.extend(sys.path)
        sys.path = PYTHON_AUX_PATHS
        # print sys.path
        import SimpleITK as sitk

        #     Prepend the shell environment search paths
        PROGRAM_PATHS = binPath
        PROGRAM_PATHS = PROGRAM_PATHS.split(":")
        import os

        PROGRAM_PATHS.extend(os.environ["PATH"].split(":"))
        os.environ["PATH"] = ":".join(PROGRAM_PATHS)

        Cluster_Script = get_global_sge_script(PYTHON_AUX_PATHS, PROGRAM_PATHS,
                                               {})
        workflow.run(
            plugin="SGE",
            plugin_args=OrderedDict(
                template=Cluster_Script,
                qsub_args="-S /bin/bash -pe smp 4-8 -o /dev/null ",
            ),
        )
    else:
        workflow.run()
Example #8
def unitWorkUp(configurationFilename,
               doApply=False,
               baseDir="."):
    import os
    import sys
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import ConfigurationParser
    import crossValidationUnit as this

    from nipype import config
    config.enable_debug_mode()

    workflow = pe.Workflow(name='balancedTraining')
    workflow.base_dir = baseDir

    configurationMap = ConfigurationParser.ConfigurationSectionMap(configurationFilename)
    Options = configurationMap['Options']
    roiDict = Options['roiBooleanCreator'.lower()]

    #
    #-------------------------------- filenameGeneratorND is dummy node
    # to create proper probability file location for nipype
    #

    filenameGeneratorND = pe.Node(name="filenameGeneratorND",
                                  interface=Function(
                                      input_names=['roiList',
                                                   'gaussianSigma'],
                                      output_names=['probabilityMapFilename'],
                                      function=this.getProbabilityMapFilename)
                                  )
    filenameGeneratorND.inputs.roiList = list(roiDict.keys())

    #
    #--------------------------------  start from generate probability
    #
    probabilityMapGeneratorND = pe.Node(name="probabilityMapGeneratorND",
                                        interface=Function(
                                             input_names=['configurationFilename',
                                                          'probabilityMapDict',
                                                          'gaussianSigma',
                                                          'outputXmlFilename'],
                                             output_names=['probabilityMapDict',
                                                           'outputXmlFilename',
                                                           'outputConfigurationFilename'],
                                             function=ConfigurationParser.BRAINSCutGenerateProbabilityMap)
                                        )

    probabilityMapGeneratorND.inputs.outputXmlFilename = 'netConfiguration.xml'
    probabilityMapGeneratorND.inputs.configurationFilename = configurationFilename
    probabilityMapGeneratorND.inputs.gaussianSigma = Options['gaussianSigma'.lower()]

    workflow.connect(filenameGeneratorND, 'probabilityMapFilename',
                     probabilityMapGeneratorND, 'probabilityMapDict')

    #
    #--------------------------------  create vectors for each ROI
    #
    configFileND = pe.Node(name="configFileND",
                           interface=Function(
                                input_names=['originalFilename',
                                             'editedFilenamePrefix'],
                                output_names=['editedFilenames'],
                                function=ConfigurationParser.ConfigurationFileEditor)
                           )

    configFileND.inputs.originalFilename = configurationFilename
    configFileND.inputs.editedFilenamePrefix = 'ROI'
    workflow.add_nodes([configFileND])

    vectorCreatorND = pe.MapNode(name="vectorCreatorND",
                                 interface=Function(
                                      input_names=['configurationFilename',
                                                   'probabilityMapDict',
                                                   'normalization',
                                                   'outputXmlFilename',
                                                   'outputVectorFilename'],
                                      output_names=['outputVectorFilename',
                                                    'outputVectorHdrFilename',
                                                    'outputNormalization',
                                                    'outputXmlFilename'],
                                      function=ConfigurationParser.BRAINSCutCreateVector),
                                 iterfield=['configurationFilename']
                                 )
    vectorCreatorND.inputs.outputVectorFilename = 'oneROIVectorFile.txt'
    vectorCreatorND.inputs.outputXmlFilename = 'oneROICreateVectorNetConfiguration.xml'
    import ast
    normalizationOption = Options['normalization'.lower()]
    # normalizationOption = ast.literal_eval( Options[ 'normalization'.lower()]  )
    print(( """Normalization Option: {str}
           """.format( str=normalizationOption ) ))
    vectorCreatorND.iterables = ('normalization', normalizationOption)
    #
    #--------------------------------  workflow connections
    #
    workflow.connect(configFileND, 'editedFilenames',
                     vectorCreatorND, 'configurationFilename')
    workflow.connect(probabilityMapGeneratorND, 'probabilityMapDict',
                     vectorCreatorND, 'probabilityMapDict')

    #
    #--------------------------------  balance and combine each ROI vectors
    #
    balaceND = pe.Node(name="balanceND",
                       interface=Function(
                            input_names=['inputVectorFilenames'],
                            output_names=['outputVectorFilenames',
                                          'outputVectorHdrFilenames'],
                            function=ConfigurationParser.BalanceInputVectors)
                       )
    workflow.connect(vectorCreatorND, 'outputVectorFilename',
                     balanceND, 'inputVectorFilenames')

    combineND = pe.Node(name="combineND",
                        interface=Function(
                            input_names=['inputVectorFilenames',
                                         'outputVectorFilename'],
                            output_names=['outputVectorFilename',
                                          'outputVectorHdrFilename'],
                            function=ConfigurationParser.CombineInputVectors)
                        )
    workflow.connect(balanceND, 'outputVectorFilenames',
                     combineND, 'inputVectorFilenames')

    combineND.inputs.outputVectorFilename = 'allCombinedVector.txtANN'

    #
    #--------------------------------  train
    #
    trainND = pe.Node(name="trainND",
                      interface=Function(
                           input_names=['configurationFilename',
                                        'inputVectorFilename',
                                        'outputModelFilenamePrefix',
                                        'outputXmlFilename',
                                        'methodParameter'],
                           output_names=['outputTrainedModelFilename',
                                         'outputMethodParameter'],
                           function=ConfigurationParser.BRAINSCutTrainModel),
                      )
    # methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
    import ast
    methodFromConfiguFile = Options['modelParameter'.lower()]
    trainND.iterables = ('methodParameter', methodFromConfiguFile)
    trainND.inputs.outputXmlFilename = 'trainNetConfiguration.xml'
    trainND.inputs.outputModelFilenamePrefix = 'trainModelFile.txt'

    workflow.connect(probabilityMapGeneratorND, 'outputConfigurationFilename',
                     trainND, 'configurationFilename')
    workflow.connect(combineND, 'outputVectorFilename',
                     trainND, 'inputVectorFilename')

    #
    #--------------------------------  apply
    #
    # make output dir for each subject as a
    if doApply:
        applyND = pe.Node(name="applyND",
                          interface=Function(
                               input_names=['configurationFilename',
                                            'probabilityMapDict',
                                            'normalization',
                                            'inputModelFilename',
                                            'methodParameter',
                                            'outputXmlFilename'
                                            ],
                               output_names=['outputLabelDict'],
                               function=ConfigurationParser.BRAINSCutApplyModel)
                          )
        # methodParameter = { '--method': 'RandomForest',
        #                    '--numberOfTrees': 60,
        #                    '--randomTreeDepth ': 60 }
        applyND.inputs.outputXmlFilename = 'applyConfiguration.xml'
        workflow.connect(probabilityMapGeneratorND, 'outputConfigurationFilename',
                         applyND, 'configurationFilename')
        workflow.connect(vectorCreatorND, 'outputNormalization',
                         applyND, 'normalization')
        workflow.connect(probabilityMapGeneratorND, 'probabilityMapDict',
                         applyND, 'probabilityMapDict')
        workflow.connect(trainND, 'outputTrainedModelFilename',
                         applyND, 'inputModelFilename')
        workflow.connect(trainND, 'outputMethodParameter',
                         applyND, 'methodParameter')
        #
        # analysis
        #
        # analysisND = pe.Node( name = "analysisND",
        #                      interface = Function(
        #                          input_names['inputImageDict',
        #                                      'inputManualDict',
        #                                      'outputCSVFilename'],
        #                          output_names['outputCSVFilename'],
        #                          function = analysis.similarityFromApplyOutput )
        #                    )

    #
    #
    # workflow.run(updatehash=True)
    workflow.run()
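A minimal sketch of driving this workup directly (the configuration path is a placeholder):

unitWorkUp("/full/path/to/study.config", doApply=True, baseDir="./unitTestDir")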
Example #9
def main(argv=None):
    import os
    import sys
    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import ConfigurationParser

    from nipype import config
    config.enable_debug_mode()

    workflow = pe.Workflow(name='crossValidation')
    workflow.base_dir = '.'

    #-------------------------------- argument parser
    import argparse
    argParser = argparse.ArgumentParser(
        description="""****************************
        10-cross validation command line argument parser
        """)
    # workup arguments
    argWfGrp = argParser.add_argument_group(
        'argWfGrp', """****************************
        auto workflow arguments for cross validation
        """)
    argWfGrp.add_argument('--crossValidationConfigurationFilename',
                          help="""configurationFilename
        Configuration file name with FULL PATH""",
                          dest='crossValidationConfigurationFilename',
                          required=True)
    argWfGrp.add_argument('--baseDir',
                          help="""baseDir
        """,
                          dest='baseDir',
                          required=False,
                          default=".")
    argWfGrp.add_argument('--runOption',
                          help="""runOption [local/cluster]
        """,
                          dest='runOption',
                          required=False,
                          default="local")
    argWfGrp.add_argument('--PythonBinDir',
                          help="""PythonBinDir
        Python binary directory (used for cluster runs)
        """,
                          dest='PythonBinDir',
                          required=False,
                          default="NA")
    argWfGrp.add_argument('--BRAINSToolsSrcDir',
                          help="""BRAINSToolsSrcDir
        BRAINSTools source directory (used for cluster runs)
        """,
                          dest='BRAINSToolsSrcDir',
                          required=False,
                          default="NA")
    argWfGrp.add_argument('--BRAINSToolsBuildDir',
                          help="""BRAINSToolsBuildDir
        BRAINSTools build directory (used for cluster runs)
        """,
                          dest='BRAINSToolsBuildDir',
                          required=False,
                          default="NA")

    # test arguments
    argTestGrp = argParser.add_argument_group(
        'argTestGrp', """****************************
        arguments for testing
        """)
    argTestGrp.add_argument('--unitTest',
                            action='store_true',
                            dest='unitTest',
                            help="""****************************
        Run the built-in unit tests instead of the workflow
        """)
    args = argParser.parse_args()

    #--------------------------------
    if not args.unitTest:
        crossValidationWorkUp(args.crossValidationConfigurationFilename,
                              args.baseDir, args.runOption, args.PythonBinDir,
                              args.BRAINSToolsSrcDir, args.BRAINSToolsBuildDir)

    #--------------------------------
    if args.unitTest:
        testElementPerSubject = [3, 4, 5]
        getStartAndEndIndex(0, testElementPerSubject)
        getStartAndEndIndex(1, testElementPerSubject)
        getStartAndEndIndex(2, testElementPerSubject)

        featureDict = {
            'GadSG': 'testGadFeatureList.csv',
            't2': 't2FeatureList.csv'
        }

        sessionList = [
            "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "s9", "s10", "s11",
            "s12"
        ]
        getRandomizedSessionOrder(sessionList)
        myTag = getTags(sessionList, 2, testElementPerSubject)
        featureFilenameDict = {'f1': 'f1.csv', 'f2': 'f2.csv'}
        configFilename, mainFilenameDict, featureFilenameDict = generateNewFilenames(
            3, featureFilenameDict.keys(), "outputPrefix")
        import ConfigurationParser
        m_configurationMap = ConfigurationParser.ConfigurationSectionMap(
            args.crossValidationConfigurationFilename)

        listFiles = m_configurationMap['ListFiles']
        mainListFilename = listFiles['subjectListFilename'.lower()]
        sessionDict = readListFileBySessionID(mainListFilename)
        myTag = getTags(sessionDict.keys(), 2,
                        listFiles['numberOfElementInSubset'.lower()])
        writeListFile(sessionDict, mainFilenameDict, myTag)
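For reference, a hypothetical invocation routed through main(); the arguments mirror the parser above and the paths are placeholders:

import sys
sys.argv = ["crossValidation.py",
            "--crossValidationConfigurationFilename", "/full/path/to/crossValidation.config",
            "--runOption", "local"]
main()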
Example #10
def crossValidationWorkUp(crossValidationConfigurationFilename, baseDir,
                          runOption, PythonBinDir, BRAINSToolsSrcDir,
                          BRAINSToolsBuildDir):
    print("""****************************
          crossValidationWorkUp
          """)
    from nipype import config
    config.enable_debug_mode()

    import crossValidation as this
    import ConfigurationParser
    myConfigurationMap = ConfigurationParser.ConfigurationSectionMap(
        crossValidationConfigurationFilename)

    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import ast
    print(""" before
           createeachvalidationunitnd
           """)
    createConfigurationFiles = pe.Node(
        name="createConfigurationFiles",
        interface=Function(
            input_names=[
                'inputConfigurationFilename',
                'outputConfigurationFilenamePrefix'
            ],
            output_names=['outputConfigFilenameDict'],
            function=this.createConfigurationFileForCrossValidationUnitTest))

    preprocessing = pe.Workflow(name='Preprocessing')
    preprocessing.base_dir = baseDir + "/PreprocessingDir"

    createConfigurationFiles.inputs.inputConfigurationFilename = crossValidationConfigurationFilename
    createConfigurationFiles.inputs.outputConfigurationFilenamePrefix = 'createConfigurationFiles'

    extractConfigurationFileListND = pe.Node(
        name="extractConfigurationFileListND",
        interface=Function(input_names=['configurationFiledict'],
                           output_names=['configurationFileList'],
                           function=this.extractConfigFile))
    preprocessing.connect(createConfigurationFiles, 'outputConfigFilenameDict',
                          extractConfigurationFileListND,
                          'configurationFiledict')

    preprocessing.run()

    #------------------------------------------------------------------------------------
    # Data graber for outputs
    #
    import nipype.interfaces.io as nio
    dg = nio.DataGrabber()
    dg.inputs.base_directory = baseDir + "/PreprocessingDir/Preprocessing/createConfigurationFiles/"
    dg.inputs.template = "*config"
    mainConfigFiles = dg.run()

    print(mainConfigFiles.outputs.outfiles)

    #------------------------------------------------------------------------------------
    workflow = pe.Workflow(name='crossValidationWF')
    workflow.base_dir = baseDir

    #------------------------------------------------------------------------------------
    # Generate Probability Map
    #
    Options = myConfigurationMap['Options']
    roiDict = Options['roiBooleanCreator'.lower()]

    #-------------------------------- probMapFilenameGenerator is dummy node
    # to create proper probability file location for nipype
    #
    print("""************************
          probMapFilenameGenerator
          """)

    probMapFilenameGenerator = pe.Node(
        name="probMapFilenameGenerator",
        interface=Function(input_names=['roiList'],
                           output_names=['probabilityMapFilename'],
                           function=this.getProbabilityMapFilename))
    print(roiDict)
    probMapFilenameGenerator.inputs.roiList = roiDict.keys()
    print("""************************
          probabilityMapGeneratorND
          """)

    #
    #--------------------------------  start from generate probability
    #
    probabilityMapGeneratorND = pe.Node(
        name="probabilityMapGeneratorND",
        interface=Function(
            input_names=[
                'configurationFilename', 'probabilityMapDict', 'gaussianSigma',
                'outputXmlFilename'
            ],
            output_names=[
                'probabilityMapDict', 'outputXmlFilename',
                'outputConfigurationFilename'
            ],
            function=ConfigurationParser.BRAINSCutGenerateProbabilityMap))

    probabilityMapGeneratorND.inputs.outputXmlFilename = 'netConfiguration.xml'

    gaussianSigmaParam = ast.literal_eval(Options['gaussianSigma'.lower()])
    print(gaussianSigmaParam)
    probabilityMapGeneratorND.iterables = ('configurationFilename',
                                           mainConfigFiles.outputs.outfiles)
    probabilityMapGeneratorND.inputs.gaussianSigma = gaussianSigmaParam

    workflow.connect(probMapFilenameGenerator, 'probabilityMapFilename',
                     probabilityMapGeneratorND, 'probabilityMapDict')

    #
    #--------------------------------  create vectors for each ROI
    #
    print("""************************
          configFileND
          """)
    configFileND = pe.Node(
        name="configFileND",
        interface=Function(
            input_names=['originalFilename', 'editedFilenamePrefix'],
            output_names=['editedFilenames'],
            function=ConfigurationParser.ConfigurationFileEditor))

    configFileND.inputs.editedFilenamePrefix = 'ROI'
    workflow.connect(probabilityMapGeneratorND, 'outputConfigurationFilename',
                     configFileND, 'originalFilename')

    vectorCreatorND = pe.MapNode(
        name="vectorCreatorND",
        interface=Function(input_names=[
            'configurationFilename', 'probabilityMapDict', 'normalization',
            'outputXmlFilename', 'outputVectorFilename'
        ],
                           output_names=[
                               'outputVectorFilename',
                               'outputVectorHdrFilename',
                               'outputNormalization', 'outputXmlFilename'
                           ],
                           function=ConfigurationParser.BRAINSCutCreateVector),
        iterfield=['configurationFilename'])
    vectorCreatorND.inputs.outputVectorFilename = 'oneROIVectorFile.txt'
    vectorCreatorND.inputs.outputXmlFilename = 'oneROICreateVectorNetConfiguration.xml'
    normalizationOption = Options['normalization'.lower()]
    print("""Normalization Option: {str}
           """.format(str=normalizationOption))
    vectorCreatorND.iterables = ('normalization', normalizationOption)
    #
    #--------------------------------  workflow connections
    #
    workflow.connect(configFileND, 'editedFilenames', vectorCreatorND,
                     'configurationFilename')
    workflow.connect(probabilityMapGeneratorND, 'probabilityMapDict',
                     vectorCreatorND, 'probabilityMapDict')

    #
    #--------------------------------  balance and combine each ROI vectors
    #
    print("""************************
          balanceND
          """)
    balanceND = pe.Node(
        name="balanceND",
        interface=Function(
            input_names=['inputVectorFilenames'],
            output_names=['outputVectorFilenames', 'outputVectorHdrFilenames'],
            function=ConfigurationParser.BalanceInputVectors))
    workflow.connect(vectorCreatorND, 'outputVectorFilename', balanceND,
                     'inputVectorFilenames')

    combineND = pe.Node(
        name="combineND",
        interface=Function(
            input_names=['inputVectorFilenames', 'outputVectorFilename'],
            output_names=['outputVectorFilename', 'outputVectorHdrFilename'],
            function=ConfigurationParser.CombineInputVectors))
    workflow.connect(balanceND, 'outputVectorFilenames', combineND,
                     'inputVectorFilenames')

    combineND.inputs.outputVectorFilename = 'allCombinedVector.txtANN'
    #
    #--------------------------------  train
    #
    print("""************************
          trainND
          """)
    trainND = pe.Node(
        name="trainND",
        interface=Function(input_names=[
            'configurationFilename', 'inputVectorFilename',
            'outputModelFilenamePrefix', 'outputXmlFilename', 'methodParameter'
        ],
                           output_names=[
                               'outputTrainedModelFilename',
                               'outputMethodParameter'
                           ],
                           function=ConfigurationParser.BRAINSCutTrainModel))
    # methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
    methodFromConfiguFile = Options['modelParameter'.lower()]
    trainND.iterables = ('methodParameter', methodFromConfiguFile)

    trainND.inputs.outputXmlFilename = 'trainNetConfiguration.xml'
    trainND.inputs.outputModelFilenamePrefix = 'trainModelFile.txt'

    workflow.connect(probabilityMapGeneratorND, 'outputConfigurationFilename',
                     trainND, 'configurationFilename')
    workflow.connect(combineND, 'outputVectorFilename', trainND,
                     'inputVectorFilename')
    #
    #--------------------------------  apply
    #
    applyND = pe.Node(
        name="applyND",
        interface=Function(input_names=[
            'configurationFilename', 'probabilityMapDict', 'normalization',
            'inputModelFilename', 'methodParameter', 'outputXmlFilename'
        ],
                           output_names=['outputLabelDict'],
                           function=ConfigurationParser.BRAINSCutApplyModel))
    # methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
    applyND.inputs.outputXmlFilename = 'applyConfiguration.xml'
    workflow.connect(probabilityMapGeneratorND, 'outputConfigurationFilename',
                     applyND, 'configurationFilename')
    workflow.connect(vectorCreatorND, 'outputNormalization', applyND,
                     'normalization')
    workflow.connect(probabilityMapGeneratorND, 'probabilityMapDict', applyND,
                     'probabilityMapDict')
    workflow.connect(trainND, 'outputTrainedModelFilename', applyND,
                     'inputModelFilename')
    workflow.connect(trainND, 'outputMethodParameter', applyND,
                     'methodParameter')

    #####################################################################################
    # Data Sink
    #
    import os
    LabelsDS = pe.Node(nio.DataSink(), name='LabelDS')
    LabelsDS.inputs.base_directory = os.path.join(baseDir, "Result")
    LabelsDS.inputs.regexp_substitutions = [
        ('/_', '/'), ('configurationFilename.*_Test', 'Test'),
        ('_configuration.config/normalization_', '/'),
        ('methodParameter_--method', ''), ('RandomForest', 'RF/'),
        ('.--randomTreeDepth', 'TreeDepth'),
        ('.--numberOfTrees', '_TreeNumber'),
        (r'ANNContinuousPrediction(?P<roi>.+)(?P<session>\d\d\d\d\d).nii.gz',
         r'\g<session>_\g<roi>_ANNContinuous.nii.gz')
    ]
    # ANNContinuousPredictionl_accumben77478

    workflow.connect([(applyND, LabelsDS, [
        (('outputLabelDict', getDictionaryValues), 'Labels')
    ])])

    #####################################################################################
    # analysis
    #

    #####################################################################################
    # Running
    #
    if runOption == "cluster":
        ############################################
        # Platform specific information
        #     Prepend the python search paths
        pythonPath = BRAINSToolsSrcDir + "/BRAINSCut/BRAINSFeatureCreators/RobustStatisticComputations:" + BRAINSToolsSrcDir + "/AutoWorkup/:" + BRAINSToolsSrcDir + "/AutoWorkup/BRAINSTools/:" + BRAINSToolsBuildDir + "/SimpleITK-build/bin:" + \
            BRAINSToolsBuildDir + "/SimpleITK-build/lib:" + PythonBinDir
        binPath = BRAINSToolsBuildDir + "/bin:" + BRAINSToolsBuildDir + "/lib"

        import sys
        PYTHON_AUX_PATHS = pythonPath
        PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(':')
        PYTHON_AUX_PATHS.extend(sys.path)
        sys.path = PYTHON_AUX_PATHS
        # print sys.path
        import SimpleITK as sitk
        #     Prepend the shell environment search paths
        PROGRAM_PATHS = binPath
        PROGRAM_PATHS = PROGRAM_PATHS.split(':')
        import os
        PROGRAM_PATHS.extend(os.environ['PATH'].split(':'))
        os.environ['PATH'] = ':'.join(PROGRAM_PATHS)

        Cluster_Script = get_global_sge_script(PYTHON_AUX_PATHS, PROGRAM_PATHS,
                                               {})
        workflow.run(plugin='SGE',
                     plugin_args=dict(
                         template=Cluster_Script,
                         qsub_args="-S /bin/bash -pe smp1 4-8 -o /dev/null "))
    else:
        print("""************************
              run
              """)
        try:
            workflow.write_graph(graph2use='flat')
        except Exception:
            pass
        workflow.run()
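Both variants of this workflow pass a getDictionaryValues helper to the DataSink connection, but the helper itself is not shown in these examples. A plausible minimal sketch, assuming outputLabelDict maps ROI names to label filenames (an assumption, not the repo's confirmed implementation):

def getDictionaryValues(inputDict):
    # Assumed behavior: flatten the {roi: filename} dict into a list of
    # filenames so the DataSink's 'Labels' input receives plain paths.
    return list(inputDict.values())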
Example #11
def createConfigurationFileForCrossValidationUnitTest(
        inputConfigurationFilename, outputConfigurationFilenamePrefix):
    print("""****************************
          createConfigurationFileForCrossValidationUnitTest
          """)
    import os.path
    outputConfigurationFilenamePrefix = os.path.abspath(
        outputConfigurationFilenamePrefix)

    import ConfigurationParser
    m_configurationMap = ConfigurationParser.ConfigurationSectionMap(
        inputConfigurationFilename)

    # get list filenames
    import crossValidation as this
    listFilenames = m_configurationMap['ListFiles']
    mainListFilename = listFilenames['subjectListFilename'.lower()]
    featureListFilenamesDict = listFilenames[
        'featureListFileDictionary'.lower()]
    numberOfElementsInSubset = listFilenames['numberOfElementInSubset'.lower()]
    numberOfTotalSession = sum(numberOfElementsInSubset)

    # read files into sessionID -> data
    mainSessionDict = this.readListFileBySessionID(mainListFilename,
                                                   numberOfTotalSession)
    featureSessionDict = {}
    if len(featureListFilenamesDict) > 0:
        for ft in featureListFilenamesDict.iterkeys():
            featureSessionDict[ft] = this.readListFileBySessionID(
                featureListFilenamesDict[ft], numberOfTotalSession)

    # iterate through the subsets
    outputConfigFilenameDict = {}
    for nTest in range(0, len(numberOfElementsInSubset)):
        trainApplyTagList = this.getTags(mainSessionDict.keys(), nTest,
                                         numberOfElementsInSubset)
        newConfigFilename, newMainFilename, newFeatureFilenameDict = this.generateNewFilenames(
            nTest, featureListFilenamesDict.keys(),
            outputConfigurationFilenamePrefix)
        this.writeListFile(mainSessionDict, newMainFilename, trainApplyTagList)
        trainFeatureStr = {}
        applyFeatureStr = {}
        print(
            "++++++++++++++++++++++++++++++++newFeatureFilenameDict++++++++++++++++++++++++++++++++"
        )
        print(newFeatureFilenameDict)
        if len(featureSessionDict) > 0:
            for ft in featureSessionDict.iterkeys():
                this.writeListFile(featureSessionDict[ft],
                                   newFeatureFilenameDict[ft],
                                   trainApplyTagList)
                trainFeatureStr[ft] = newFeatureFilenameDict[ft]['Train']
                applyFeatureStr[ft] = newFeatureFilenameDict[ft]['Apply']

        print(newMainFilename['Train'])
        print(newMainFilename['Apply'])
        print(trainFeatureStr)
        print(applyFeatureStr)
        this.writeConfigFile(
            inputConfigurationFilename, newConfigFilename, {
                'subjectListFilename': newMainFilename['Train'],
                'applySubjectListFilename': newMainFilename['Apply'],
                'featureListFileDictionary': str(trainFeatureStr),
                'applyFeatureListFileDictionary': str(applyFeatureStr)
            })
        outputConfigFilenameDict["Test" + str(nTest)] = os.path.abspath(
            newConfigFilename)
    return outputConfigFilenameDict
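A hypothetical call with placeholder arguments; per the code above, the returned dict is keyed 'Test0', 'Test1', ... and holds absolute config filenames:

outputConfigFilenameDict = createConfigurationFileForCrossValidationUnitTest(
    "/full/path/to/crossValidation.config", "unitTestConfig")
print(outputConfigFilenameDict)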
Example #12
def crossValidationWorkUp(
    crossValidationConfigurationFilename,
    baseDir,
    runOption,
    PythonBinDir,
    BRAINSToolsSrcDir,
    BRAINSToolsBuildDir,
):
    print(
        """****************************
          crossValidationWorkUp
          """
    )
    from collections import (
        OrderedDict,
    )  # Need OrderedDict internally to ensure consistent ordering
    from nipype import config

    config.enable_debug_mode()

    import crossValidation as this
    import ConfigurationParser

    myConfigurationMap = ConfigurationParser.ConfigurationSectionMap(
        crossValidationConfigurationFilename
    )

    import nipype.pipeline.engine as pe
    from nipype.interfaces.utility import Function
    import ast

    print(
        """ before
           createeachvalidationunitnd
           """
    )
    createConfigurationFiles = pe.Node(
        name="createConfigurationFiles",
        interface=Function(
            input_names=[
                "inputConfigurationFilename",
                "outputConfigurationFilenamePrefix",
            ],
            output_names=["outputConfigFilenameDict"],
            function=this.createConfigurationFileForCrossValidationUnitTest,
        ),
    )

    preprocessing = pe.Workflow(name="Preprocessing")
    preprocessing.base_dir = baseDir + "/PreprocessingDir"

    createConfigurationFiles.inputs.inputConfigurationFilename = (
        crossValidationConfigurationFilename
    )
    createConfigurationFiles.inputs.outputConfigurationFilenamePrefix = (
        "createConfigurationFiles"
    )

    extractConfigurationFileListND = pe.Node(
        name="extractConfigurationFileListND",
        interface=Function(
            input_names=["configurationFiledict"],
            output_names=["configurationFileList"],
            function=this.extractConfigFile,
        ),
    )
    preprocessing.connect(
        createConfigurationFiles,
        "outputConfigFilenameDict",
        extractConfigurationFileListND,
        "configurationFiledict",
    )

    preprocessing.run()

    # ------------------------------------------------------------------------------------
    # Data graber for outputs
    #
    import nipype.interfaces.io as nio

    dg = nio.DataGrabber()
    dg.inputs.base_directory = (
        baseDir + "/PreprocessingDir/Preprocessing/createConfigurationFiles/"
    )
    dg.inputs.template = "*config"
    mainConfigFiles = dg.run()

    print(mainConfigFiles.outputs.outfiles)

    # ------------------------------------------------------------------------------------
    workflow = pe.Workflow(name="crossValidationWF")
    workflow.base_dir = baseDir

    # ------------------------------------------------------------------------------------
    # Generate Probability Map
    #
    Options = myConfigurationMap["Options"]
    roiDict = Options["roiBooleanCreator".lower()]

    # -------------------------------- probMapFilenameGenerator is dummy node
    # to create proper probability file location for nipype
    #
    print(
        """************************
          probMapFilenameGenerator
          """
    )

    probMapFilenameGenerator = pe.Node(
        name="probMapFilenameGenerator",
        interface=Function(
            input_names=["roiList"],
            output_names=["probabilityMapFilename"],
            function=this.getProbabilityMapFilename,
        ),
    )
    print(roiDict)
    probMapFilenameGenerator.inputs.roiList = list(roiDict.keys())
    print(
        """************************
          probabilityMapGeneratorND
          """
    )

    #
    # --------------------------------  start from generate probability
    #
    probabilityMapGeneratorND = pe.Node(
        name="probabilityMapGeneratorND",
        interface=Function(
            input_names=[
                "configurationFilename",
                "probabilityMapDict",
                "gaussianSigma",
                "outputXmlFilename",
            ],
            output_names=[
                "probabilityMapDict",
                "outputXmlFilename",
                "outputConfigurationFilename",
            ],
            function=ConfigurationParser.BRAINSCutGenerateProbabilityMap,
        ),
    )

    probabilityMapGeneratorND.inputs.outputXmlFilename = "netConfiguration.xml"

    gaussianSigmaParam = ast.literal_eval(Options["gaussianSigma".lower()])
    print(gaussianSigmaParam)
    probabilityMapGeneratorND.iterables = (
        "configurationFilename",
        mainConfigFiles.outputs.outfiles,
    )
    probabilityMapGeneratorND.inputs.gaussianSigma = gaussianSigmaParam

    workflow.connect(
        probMapFilenameGenerator,
        "probabilityMapFilename",
        probabilityMapGeneratorND,
        "probabilityMapDict",
    )

    #
    # --------------------------------  create vectors for each ROI
    #
    print(
        """************************
          configFileND
          """
    )
    configFileND = pe.Node(
        name="configFileND",
        interface=Function(
            input_names=["originalFilename", "editedFilenamePrefix"],
            output_names=["editedFilenames"],
            function=ConfigurationParser.ConfigurationFileEditor,
        ),
    )

    configFileND.inputs.editedFilenamePrefix = "ROI"
    workflow.connect(
        probabilityMapGeneratorND,
        "outputConfigurationFilename",
        configFileND,
        "originalFilename",
    )

    vectorCreatorND = pe.MapNode(
        name="vectorCreatorND",
        interface=Function(
            input_names=[
                "configurationFilename",
                "probabilityMapDict",
                "normalization",
                "outputXmlFilename",
                "outputVectorFilename",
            ],
            output_names=[
                "outputVectorFilename",
                "outputVectorHdrFilename",
                "outputNormalization",
                "outputXmlFilename",
            ],
            function=ConfigurationParser.BRAINSCutCreateVector,
        ),
        iterfield=["configurationFilename"],
    )
    vectorCreatorND.inputs.outputVectorFilename = "oneROIVectorFile.txt"
    vectorCreatorND.inputs.outputXmlFilename = "oneROICreateVectorNetConfiguration.xml"
    normalizationOption = Options["normalization".lower()]
    print(
        """Normalization Option: {str}
           """.format(str=normalizationOption)
    )
    vectorCreatorND.iterables = ("normalization", normalizationOption)
    #
    # --------------------------------  workflow connections
    #
    workflow.connect(
        configFileND, "editedFilenames", vectorCreatorND, "configurationFilename"
    )
    workflow.connect(
        probabilityMapGeneratorND,
        "probabilityMapDict",
        vectorCreatorND,
        "probabilityMapDict",
    )

    #
    # --------------------------------  balance and combine each ROI vectors
    #
    print(
        """************************
          balanceND
          """
    )
    balanceND = pe.Node(
        name="balanceND",
        interface=Function(
            input_names=["inputVectorFilenames"],
            output_names=["outputVectorFilenames", "outputVectorHdrFilenames"],
            function=ConfigurationParser.BalanceInputVectors,
        ),
    )
    workflow.connect(
        vectorCreatorND, "outputVectorFilename", balaceND, "inputVectorFilenames"
    )

    combineND = pe.Node(
        name="combineND",
        interface=Function(
            input_names=["inputVectorFilenames", "outputVectorFilename"],
            output_names=["outputVectorFilename", "outputVectorHdrFilename"],
            function=ConfigurationParser.CombineInputVectors,
        ),
    )
    workflow.connect(
        balaceND, "outputVectorFilenames", combineND, "inputVectorFilenames"
    )

    combineND.inputs.outputVectorFilename = "allCombinedVector.txtANN"
    #
    # --------------------------------  train
    #
    print(
        """************************
          trainND
          """
    )
    trainND = pe.Node(
        name="trainND",
        interface=Function(
            input_names=[
                "configurationFilename",
                "inputVectorFilename",
                "outputModelFilenamePrefix",
                "outputXmlFilename",
                "methodParameter",
            ],
            output_names=["outputTrainedModelFilename", "outputMethodParameter"],
            function=ConfigurationParser.BRAINSCutTrainModel,
        ),
    )
    # methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
    methodFromConfiguFile = Options["modelParameter".lower()]
    trainND.iterables = ("methodParameter", methodFromConfiguFile)

    trainND.inputs.outputXmlFilename = "trianNetConfiguration.xml"
    trainND.inputs.outputModelFilenamePrefix = "trainModelFile.txt"

    workflow.connect(
        probabilityMapGeneratorND,
        "outputConfigurationFilename",
        trainND,
        "configurationFilename",
    )
    workflow.connect(combineND, "outputVectorFilename", trainND, "inputVectorFilename")
    #
    # --------------------------------  apply
    #
    applyND = pe.Node(
        name="applyND",
        interface=Function(
            input_names=[
                "configurationFilename",
                "probabilityMapDict",
                "normalization",
                "inputModelFilename",
                "methodParameter",
                "outputXmlFilename",
            ],
            output_names=["outputLabelDict"],
            function=ConfigurationParser.BRAINSCutApplyModel,
        ),
    )
    # methodParameter = { '--method': 'RandomForest',
    #                    '--numberOfTrees': 60,
    #                    '--randomTreeDepth ': 60 }
    applyND.inputs.outputXmlFilename = "applyConfiguration.xml"
    workflow.connect(
        probabilityMapGeneratorND,
        "outputConfigurationFilename",
        applyND,
        "configurationFilename",
    )
    workflow.connect(vectorCreatorND, "outputNormalization", applyND, "normalization")
    workflow.connect(
        probabilityMapGeneratorND, "probabilityMapDict", applyND, "probabilityMapDict"
    )
    workflow.connect(
        trainND, "outputTrainedModelFilename", applyND, "inputModelFilename"
    )
    workflow.connect(trainND, "outputMethodParameter", applyND, "methodParameter")

    #####################################################################################
    # Data Sink
    #
    import os

    LabelsDS = pe.Node(nio.DataSink(), name="LabelDS")
    LabelsDS.inputs.base_directory = os.path.join(baseDir, "Result")
    LabelsDS.inputs.regexp_substitutions = [
        ("/_", "/"),
        ("configurationFilename.*_Test", "Test"),
        ("_configuration.config/normalization_", "/"),
        ("methodParameter_--method", ""),
        ("RandomForest", "RF/"),
        (".--randomTreeDepth", "TreeDepth"),
        (".--numberOfTrees", "_TreeNumber"),
        (
            "ANNContinuousPrediction(?P<roi>.+)(?P<session>\d\d\d\d\d).nii.gz",
            r"\g<session>_\g<roi>_ANNContinuous.nii.gz",
        ),
    ]
    # ANNContinuousPredictionl_accumben77478

    workflow.connect(
        [(applyND, LabelsDS, [(("outputLabelDict", getDictionaryValues), "Labels")])]
    )

    #####################################################################################
    # analysis
    #

    #####################################################################################
    # Running
    #
    if runOption == "cluster":
        ############################################
        # Platform specific information
        #     Prepend the python search paths
        pythonPath = (
            BRAINSToolsSrcDir
            + "/BRAINSCut/BRAINSFeatureCreators/RobustStatisticComputations:"
            + BRAINSToolsSrcDir
            + "/AutoWorkup/:"
            + BRAINSToolsSrcDir
            + "/AutoWorkup/BRAINSTools/:"
            + BRAINSToolsBuildDir
            + "/SimpleITK-build/bin/"
            + BRAINSToolsBuildDir
            + "/SimpleITK-build/lib:"
            + PythonBinDir
        )
        binPath = BRAINSToolsBuildDir + "/bin:" + BRAINSToolsBuildDir + "/lib"

        import sys

        PYTHON_AUX_PATHS = pythonPath
        PYTHON_AUX_PATHS = PYTHON_AUX_PATHS.split(":")
        PYTHON_AUX_PATHS.extend(sys.path)
        sys.path = PYTHON_AUX_PATHS
        # print sys.path
        import SimpleITK as sitk

        #     Prepend the shell environment search paths
        PROGRAM_PATHS = binPath
        PROGRAM_PATHS = PROGRAM_PATHS.split(":")
        import os

        PROGRAM_PATHS.extend(os.environ["PATH"].split(":"))
        os.environ["PATH"] = ":".join(PROGRAM_PATHS)

        Cluster_Script = get_global_sge_script(PYTHON_AUX_PATHS, PROGRAM_PATHS, {})
        workflow.run(
            plugin="SGE",
            plugin_args=OrderedDict(
                template=Cluster_Script,
                qsub_args="-S /bin/bash -pe smp 4-8 -o /dev/null ",
            ),
        )
    else:
        print(
            """************************
              run
              """
        )
        try:
            workflow.write_graph(graph2use="flat")
        except Exception:
            pass
        workflow.run()
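Finally, a minimal local invocation sketch (placeholder paths; the three tool directories only matter for cluster runs):

crossValidationWorkUp("/full/path/to/crossValidation.config",
                      baseDir=".",
                      runOption="local",
                      PythonBinDir="NA",
                      BRAINSToolsSrcDir="NA",
                      BRAINSToolsBuildDir="NA")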