def multiOptimizeNSGA2(restart_=False):
    """
    # for multi objective optimization
    # clear; clear; time nice -4 python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet_Intersections/ --paramFile xml/config/sf-param-config-multi.xml --statsOnly -p 1 --benchmark compositePLE --optimizer="NSGA2" -c
    # 3d multi objective optimization
    # clear; clear; time nice -4 optirun python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --scenarioSetInitId=0 --scenarioDIR data/scenarios/customSet/bottleneck-hallway/ --paramFile xml/config/sf-param-config-multi.xml --statsOnly -p 1 --benchmark compositePLE --optimizer="NSGA2" --numFrames 2500 --RealDataDir=data/RealWorldData/b140_combined/ -v -c
    # to restart
    # clear; clear; time nice -4 optirun python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --scenarioSetInitId=0 --scenarioDIR data/scenarios/customSet/bottleneck-hallway/ --paramFile xml/config/sf-param-config-multi.xml --statsOnly -p 6 --benchmark compositePLE --optimizer="NSGA2" --numFrames 2500 --RealDataDir=data/RealWorldData/b140_combined/ -c --multiObjRestartFile=SteerStatsOpt_1.log
    # clear; clear; time nice -4 optirun python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --scenarioSetInitId=0 --scenarioDIR data/scenarios/customSet/bottleneck-hallway/ --paramFile xml/config/sf-param-config-multi.xml --statsOnly -p 6 --benchmark compositePLE --optimizer="NSGA2" --numFrames 2500 --RealDataDir=data/RealWorldData/b140_combined/ -c --multiObjRestartFile=SteerStatsOpt_2.log
    """
    
    options_ = getOptions()
    availableProcesses_=int(options_.processes)
    options_.processes=1
    
    [ai_bounds_, ai_params_list, ai_param_names_list,
     stddev_for_parameters, ai_step_scaling] = prepareOptimization(options_.paramFile, options_)
 
    steerStats_ = SteerStats(options_)
    steerStats_.setParamNameDict(ai_param_names_list)
    cmaFilePrefix=options_.cmaFilePrefix
    
    print "Algorithm param names: " + str(ai_param_names_list)
    print ai_bounds_
    print ai_params_list
    
    NDIM_ = len(ai_param_names_list)
    
    # f_ = (steerStats_.performanceMetric, steerStats_.pleMetricGlobal, steerStats_.entropyMetric)
    f_ = (steerStats_.performanceMetric, steerStats_.pleMetricGlobal)
    
    return multiOptimizeWithNSGA2(NDIM=NDIM_, NGEN=3, MU=8, f=f_, 
                                  options=options_, ai_bounds=ai_bounds_, 
                                  availableProcesses=availableProcesses_, 
                                  steerStats=steerStats_, restart=restart_)
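
# --- hedged sketch ---------------------------------------------------------
# multiOptimizeWithNSGA2 is defined elsewhere; the function below is a
# minimal, self-contained illustration (using DEAP) of the kind of NSGA-II
# loop it is assumed to wrap.  All names here (the toy evaluate function,
# the [0, 1] bounds, the eta values) are illustrative only, not the
# project's actual implementation.
def _nsga2_sketch(NDIM=2, NGEN=3, MU=8):
    import random
    from deap import algorithms, base, creator, tools

    # two minimized objectives, mirroring (performanceMetric, pleMetricGlobal)
    creator.create("FitnessMin2", base.Fitness, weights=(-1.0, -1.0))
    creator.create("Individual2", list, fitness=creator.FitnessMin2)

    toolbox = base.Toolbox()
    toolbox.register("attr", random.random)
    toolbox.register("individual", tools.initRepeat, creator.Individual2,
                     toolbox.attr, NDIM)
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    # toy stand-in for the two SteerStats metrics
    toolbox.register("evaluate", lambda ind: (sum(ind), max(ind)))
    toolbox.register("mate", tools.cxSimulatedBinaryBounded,
                     low=0.0, up=1.0, eta=20.0)
    toolbox.register("mutate", tools.mutPolynomialBounded,
                     low=0.0, up=1.0, eta=20.0, indpb=1.0 / NDIM)
    toolbox.register("select", tools.selNSGA2)

    pop = toolbox.population(n=MU)
    for ind in pop:
        ind.fitness.values = toolbox.evaluate(ind)
    for gen in range(NGEN):
        # variation, re-evaluation, then environmental selection
        offspring = algorithms.varAnd(pop, toolbox, cxpb=0.9, mutpb=0.1)
        for ind in offspring:
            ind.fitness.values = toolbox.evaluate(ind)
        pop = toolbox.select(pop + offspring, MU)
    return pop
# ---------------------------------------------------------------------------
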
    def OptimizeParameters2(self, options):

        # note: the options argument is ignored; options are re-read here
        options = getOptions()
        options.noReturn = True
        availableProcesses = int(options.processes)
        options.processes = 1
        steerStats = SteerStats(options)
        steerStats.set_insert_func(InsertStatistics.InsertOptimizationConfig)

        param_xml_file = open(options.paramFile)
        param_config_data = param_xml_file.read()

        OptimExper = OptimizationExperiment(param_config_data)
        optimizingParameters = OptimExper.parseExperimentXML()

        # print "Algorithm param names: " + str(ai_param_names_list)
        experiements = []
        aiParams = []
        for param in optimizingParameters:

            # this will create a lot of directories, but that avoids having to
            # worry about synchronization and reuse of directories
            for n in range(int(param.get_descetization())):
                aiParams_tmp = {}
                """
                for param_ in optimizingParameters:
                    if param_.get_type() == "float":
                        aiParams_tmp[param_.get_name()] = str(float(param_.get_original()))
                    elif param_.get_type() == "integer":
                        aiParams_tmp[param_.get_name()] = str(int(param_.get_original()))
                """
                # calculate param value for this iteration
                if param.get_type() == "float":
                    paramValue = param.get_min() + (
                        ((param.get_max() - param.get_min()) /
                         param.get_descetization()) * n)
                elif param.get_type() == "integer":
                    paramValue = int(param.get_min() +
                                     (((param.get_max() - param.get_min()) /
                                       param.get_descetization()) * n))
                aiParams_tmp[param.get_name()] = paramValue
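                # e.g. min=0.0, max=1.0, descetization=4 samples the grid
                # 0.0, 0.25, 0.5, 0.75 (the max value itself is never reached)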
                tmp_params = []
                tmp_param_names = []
                for key, value in aiParams_tmp.items():
                    tmp_params.append(value)
                    tmp_param_names.append(key)

                experiements.append(copy.deepcopy(tmp_params))
                aiParams.append(copy.deepcopy(tmp_param_names))

                #TODO stuff here.

        print "Number of experiments in-line: " + str(len(experiements))
        print "Size of process pool: " + str(availableProcesses)

        # sys.exit()
        processes_pool = Pool(availableProcesses)

        # Pool.map only takes one iterable (its third argument is chunksize),
        # so submit each (params, names) pair separately instead
        for params, names in zip(experiements, aiParams):
            processes_pool.apply_async(steerStats.RunStats, args=(params, names))
        processes_pool.close()
        processes_pool.join()
        print "Waited for subprocesses"
def runScenarioSpace():
	options = getOptions()
	steerstats = SteerStats(options)
	options.dumpTestcases=True
	random.seed(10)
	totalSimulations = 0
	samples=10
	step = 50
	dataLog = open("scenarioSpace.csv", "w+")
	for nScenarios in range(step, 501, step):
		for i in range(0, samples, 1):
			options.numScenarios = nScenarios
			steerstats._options.numScenarios = nScenarios
			_rand = random.randint(1, 5000)
			options.randomSeed=_rand
			results = steerstats.RunStats(None, None, options)
			
			cov = steerstats.coverageMetric(None, results, options)
			dataLog.write(str(nScenarios)+", " + str(cov) + "\n")
			print "Coverage is: " + str(cov)
			totalSimulations = totalSimulations + nScenarios
		dataLog.flush()
		# this should just save the mean and std after every group...
	
	print "totalSimulations: " + str(totalSimulations)
    def test_one_scenario(self):
        # _args="--ai=sfAI"
        _args = ['--ai', 'pprAI', '--skipInsert', '-c', '--numScenarios', '1']

        options = getOptions(_args)

        steerStats = SteerStats(options)
        results = steerStats.RunStats()
        # print "Length of result: " + (str(len(results)))
        self.assertEqual(1, len(results))
    def test_ten_random_scenarios(self):
        # _args="--ai=sfAI"
        _args = [
            '--ai', self._ai, '--skipInsert', '-c', '--numScenarios', '10'
        ]

        options = getOptions(_args)

        steerStats = SteerStats(options)
        results = steerStats.RunStats()
        # print "Length of result: " + (str(len(results)))
        self.assertEqual(10, len(results))
    def test_3_squeeze_scenarios(self):
        # _args="--ai=sfAI"
        _args = [
            '--ai', self._ai, '--skipInsert', '-c', '--numScenarios', '1',
            '--scenarioDIR', 'data/scenarios/testcases/'
        ]

        options = getOptions(_args)

        steerStats = SteerStats(options)
        results = steerStats.RunStats()
        # print "Length of result: " + (str(len(results)))
        self.assertEqual(1, len(results))
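
# Hedged note: the three tests above appear to come from a unittest.TestCase
# subclass (they use self._ai and assertEqual); a minimal harness for such a
# suite, with the class name SteerStatsTest assumed for illustration:
#
#   import unittest
#
#   class SteerStatsTest(unittest.TestCase):
#       _ai = 'sfAI'
#       # ... test methods above ...
#
#   if __name__ == '__main__':
#       unittest.main()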
def InsertOptimizationConfig(parameters, configFilePath, aiConfigParams):
    insertData = InsertStatistics(parameters, configFilePath)

    print aiConfigParams
    print insertData
    options = getOptions()
    param_xml_file = open(options.paramFile)
    param_config_data = param_xml_file.read()

    OptimExper = OptimizationExperiment(param_config_data)
    optimizingParameters = OptimExper.parseExperimentXML()
    for param in optimizingParameters:
        if not param.get_name() in aiConfigParams:
            if param.get_type() == "float":
                aiConfigParams[param.get_name()] = str(
                    float(param.get_original()))
            elif param.get_type() == "integer":
                aiConfigParams[param.get_name()] = str(
                    int(param.get_original()))
    aiConfigParams['ppr_ai_config_id'] = insertData['config_id']
    _args = parseArg(parameters)
    if 'dbName' in _args:
        conM = ConnectionManager(dbname=_args['dbName'])
        if options.verbose:
            print "DataBase used: " + _args['dbName']
    else:
        if options.verbose:
            print "DataBase used: " + "steersuitedb"
        conM = ConnectionManager()

    cur = conM.getCursor()

    if _args['scenarioAI'] == "pprAI":
        config_ = PPRConfig(aiConfigParams)
        aiConfigParams['ppr_ai_config_id'] = insertData['config_id']
    elif _args['scenarioAI'] == "rvo2dAI":
        config_ = RVO2DConfig(aiConfigParams)
        aiConfigParams['rvo2d_ai_config_id'] = insertData['config_id']
    elif _args['scenarioAI'] == "sfAI":
        config_ = SFConfig(aiConfigParams)
        aiConfigParams['sf_ai_config_id'] = insertData['config_id']
    else:
        print "Configuration not supported for " + str(_args['scenarioAI'])
        conM.dispose()
        return insertData

    config_.insertConfig2(cur)

    conM.dispose()
    # pprC2 = pprC.getConfigData(cur, 1)

    return insertData
def OptimizeWithCMA_ES_MixedInt():
    options = getOptions()

    cmaFilePrefix = options.cmaFilePrefix

    param_xml_file = open(options.paramFile)
    param_config_data = param_xml_file.read()
    
    OptimExper = OptimizationExperiment(param_config_data)
    optimizingParameters = OptimExper.parseExperimentXML()

    stddev_for_parameters=1
    
    scaling_factor = 0.13
        
    ai_params_list = []
    ai_param_names_list = []
    ai_lower_bounds = []
    ai_upper_bounds = []
    ai_step_scaling = []
    
    
    for ai_param in optimizingParameters:
        ai_params_list.append(ai_param.get_original())
        ai_param_names_list.append(ai_param.get_name())
        ai_lower_bounds.append(ai_param.get_min())
        ai_upper_bounds.append(ai_param.get_max())
        """ 
        This is magic stuff here
        Scaling stddev for all the parameters acording the min/max values for the search
        apparently this should be 0.3 of the search area if your search area is perfectly round 
        """
        ai_step_scaling.append( (ai_param.get_max()-ai_param.get_min())*scaling_factor )
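        # e.g. a parameter searched over [-2.0, 6.0] gets a step scaling of
        # (6.0 - (-2.0)) * 0.13 = 1.04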
   
    print "Algorithm param names: " + str(ai_param_names_list)
    ai_bounds = [ai_lower_bounds, ai_upper_bounds]
    
    steerStats = SteerStats(options)
    steerStats.setParamNameDict(ai_param_names_list)
    
    print ai_lower_bounds
    print ai_params_list
    print ai_upper_bounds

    steerStats.pleMetric(ai_params_list, None)
def runScenarioSpace2():
	options = getOptions()
	steerstats = SteerStats(options)
	options.dumpTestcases=True
	random.seed(10)
	totalSimulations = 0
	samples=25
	step = 500
	p = Pool(int(options.processes))
	dataLog = open("scenarioSpace.csv", "w+")
	dataLog_data = open("scenarioSpace_data.csv", "w+")
	for nScenarios in range(step, int(options.numScenarios), step):
	# for nScenarios in range(step, int(501), step):
		_rands = []
		_nScenarios = []
		for i in range(0, samples, 1):
			totalSimulations = totalSimulations + nScenarios
			
			_rands.append(random.randint(1, 5000))
			# args = " -module scenario,scenarioAI=sfAI,useBenchmark,benchmarkTechnique=compositePLE,checkAgentValid,reducedGoals,fixedSpeed,checkAgentRelevant,minAgents=3,maxFrames=1000,checkAgentInteraction,egocentric,scenarioSetInitId=0,numScenarios="+str(nScenarios)+",randomSeed="+str(_rand)+",skipInsert=True,logAIStats -config configs/scenarioSpace/scenarioSpace-config.xml -commandLine"
			# args = args.split(' ')
			_nScenarios.append(nScenarios)
		process_pairs = zip(_nScenarios, _rands, [options] * len(_rands))
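		# each entry of process_pairs is a (numScenarios, randomSeed, options)
		# triple; runSteerSuite is assumed to unpack it in that order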
		options.numScenarios = nScenarios
		# print process_pairs
		# print zip(_nScenarios, _rands)
		# sys.exit(0)
		results = p.map(runSteerSuite, process_pairs)
		metrics = []
		for result in results:
			if result is not None:
				cov = (coverage(result, options))
				dataLog_data.write(str(nScenarios)+", " + str(cov) + "\n")
				dataLog_data.flush()
				metrics.append(cov)
				print "Coverage is: " + str(cov)
		metrics = np.array(metrics)
		
		dataLog.write(str(nScenarios)+", " + str(np.mean(metrics)) + ", " + str(np.std(metrics)) + "\n")
		dataLog.flush()
		# this should just save the mean and std after every group...
	
	print "totalSimulations: " + str(totalSimulations)
def InsertStatistics(params, configFilePath, dataQueue=None):

    # pr = cProfile.Profile()
    # pr.enable()
    __dbSem.acquire()
    options = getOptions()
    args = parseArg(params)  # the same command passed to the module
    stats = []
    cur = None
    scenario_id = 0
    config_id = 0
    if 'recFileFolder' in args:
        recFileNameSuffix = args['recFile']  # "rec"
        recFileNamePath = args['recFileFolder']  # "./"
    # frameBasePath=args['framePath']# "scenarioFrames/"
    benchmarkLog = args['benchmarkLog']  # "test.log"
    if 'testCasePath' in args:  # check if key is in dict
        testFilePath = args['testCasePath']
        testFilePrefix = "testcase-"
    elif 'subspace' in args:
        testFilePath = args['subspace']
        testFilePrefix = ''
        subspaceFile = open(testFilePath, 'r')
    elif options.randScenarios:
        testFilePrefix = "testcase-"
        # dumped testcases
        testFilePath = args['testCasePath']
    else:
        testFilePath = args['scenarioSetPath']
        testFilePrefix = "testcase-"

    #if "scenarioSetInitId" in args:
    #    scenarioSetInitId = 0 # args['scenarioSetInitId']
    #else:
    #    scenarioSetInitId=0

    videoType = "mp4"
    videoBaseName = "scenario"  # Need to implement in C++ (scenarioModule)
    scenario_description = str(params)
    # items that may change (different ai algorithm or benchmark)
    benchmarkName = args['benchmarkTechnique']  # "Composite02" or "Composite01"
    algorithmName = args['scenarioAI']  # "pprAI" or "footstepAI"
    algorithmLogFileName = args['ailogFileName']
    config_id = 1

    configFile = open(configFilePath, "r")
    # print "config file path: " + configFilePath
    configFileData = configFile.read()
    config = Configuration(configFileData)
    configFile.close()

    test_comments = "AlgorithmName: " + algorithmName
    test_comments = test_comments + ", BenchmarkName: " + benchmarkName
    # The point of all of these variables is to make scripting easier.
    # The naming convention follows steersuite practice so that AI names
    # and benchmark names match as easily as possible.

    try:

        if not 'skipInsert' in args:
            if 'dbName' in args:
                conM = ConnectionManager(dbname=args['dbName'])
                if options.verbose:
                    print "DataBase used: " + args['dbName']
            else:
                if options.verbose:
                    print "DataBase used: " + "steersuitedb"
                conM = ConnectionManager()
            cur = conM.getCursor()

            # need the benchmark_id

            benchmark = Benchmark()
            benchmark_id = benchmark.getBenchmarkIndex(cur, benchmarkName)

            # also need a algorithm id

            algorithm = Algorithm()
            algorithm_id = algorithm.getAlgorithmIndex(cur, algorithmName)

            # We can load the configuration data for this
            config_data = {}

            config_data['name'] = "stats_config"
            config_data['fps'] = config.getFPS()
            config_data['grid_size_z'] = config.getGridSizeZ()
            config_data['grid_size_x'] = config.getGridSizeX()
            config_data['grid_cells_z'] = config.getGridCellsZ()
            config_data['grid_cells_x'] = config.getGridCellsX()
            config_data['config_xml'] = configFileData

            configuration = Config()
            configuration.setValuesFromDict(config_data)
            config_id = configuration.insertConfig2(cur)

            # then insert a scenario record for this run

            scenario = Scenario(algorithm_id, benchmark_id, config_id,
                                scenario_description)
            scenario_id = scenario.insertScenario2(cur)

            if options.verbose:
                print "scenario id: " + str(scenario_id)

            # add steering algorithms as needed
            if algorithmName == "pprAI":
                algorithmAI = PprAI()
            elif algorithmName == "footstepAI":
                algorithmAI = FootstepAI()
            elif algorithmName == "ccAI":
                algorithmAI = CcAI()
            elif algorithmName == "rvo2dAI":
                algorithmAI = RVO2DAI()
            elif algorithmName == "reactiveAI":
                algorithmAI = ReactiveAI()
            elif algorithmName == "egocentricAI":
                algorithmAI = EgocentricAI()
            elif algorithmName == "sfAI":
                algorithmAI = SocialForcesAI()
            else:
                print "Unknown AI algorithm"
                sys.exit(0)

        try:
            print '============' + benchmarkLog
            benchmarkLogFile = open(benchmarkLog, "r")
            rand_seed = benchmarkLogFile.readline()  #save / remove random seed
            lparser = LogParser()
            logData = lparser.parseLog(benchmarkLogFile)

            print '============' + algorithmLogFileName
            # pr = cProfile.Profile()
            # pr.enable()
            algorithmLogFile = open(algorithmLogFileName, "r")
            algorithmData = lparser.parseLog(algorithmLogFile)
        except IOError:
            print "Are you sure you have compiled SteerSuite?"
            raise IOError("Are you sure you have compiled SteerSuite?")
        # print logData

        # sys.exit(0)

        if options.verbose:
            print "logdata length: " + str(len(logData))
            print "algorithmData length: " + str(len(algorithmData))

        # sys.exit(0)
        if 'skipInsert' in args:
            # pr = cProfile.Profile()
            # pr.enable()
            for ldat, aData in zip(logData, algorithmData):
                n = ldat['scenario_id']

                if 'subspace' in args:
                    testcaseFile = open(testFilePath, 'r')
                else:
                    testcaseFile = open(
                        testFilePath + "/" + testFilePrefix + n + ".xml", "r")

                testcase = testcaseFile.read()
                testcase_ = TestCase(testcase)

                test_status = ldat['agent_success']
                ldat['test_comments'] = test_comments
                # ldat['benchmark_type']=benchmark_id # update benchmark_id because of naming issues
                # ldat['test_case']=testcase
                ldat['test_status'] = test_status
                # ldat['scenario_group']=scenario_id
                # __test_case = TestCase(testcase)
                # // TODO
                # ldat['num_agents']=__test_case.getNumAgents()
                # ldat['num_obstacles']=__test_case.getNumObstacles()

                ldat['num_agents'] = testcase_.getNumAgents()
                ldat['num_obstacles'] = testcase_.getNumObstacles()
                # needed for data but not used in calculations
                ldat['algorithm_data_id'] = 0
                ldat['benchmark_type'] = 0
                ldat['test_comments'] = test_comments
                ldat['test_case'] = ""
                ldat['test_status'] = ""
                ldat['scenario_group'] = scenario_id

                if benchmarkName == "compositeEntropy":
                    baseStats = BaseCompositeEntropy()
                elif benchmarkName == "composite02":
                    baseStats = BaseComposite02()
                else:
                    baseStats = BaseCompositePLE()
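                # dict(a.items() + b.items()) is the Python 2 idiom for merging
                # two dicts; values from aData overwrite ldat on duplicate keys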
                baseStats.setMetricValuesFromDict(
                    dict(ldat.items() + aData.items()))
                stats.append(baseStats)

        else:
            for ldat, aData in zip(logData, algorithmData):
                testcase = ""
                # n= str(int(ldat['scenario_id']) + int(scenarioSetInitId)) # the numbering for the file
                n = ldat['scenario_id']

                # print "Scenario Id: " + n

                # then insert test data
                # read dumped test xml
                if 'subspace' in args:
                    testcaseFile = open(testFilePath, 'r')
                else:
                    testcaseFile = open(
                        testFilePath + "/" + testFilePrefix + n + ".xml", "r")

                testcase = testcaseFile.read()
                testcase_ = TestCase(testcase)
                # print testcase

                # read the record data
                if 'recFileFolder' in args:
                    recordFile = open(
                        recFileNamePath + "rec" + n + "." + recFileNameSuffix,
                        "r+b")
                    recordbytes = recordFile.read()
                    recordFile.close()
                    # copy the record bytes so they can be inserted into the database
                    recordArray = bytearray(recordbytes)
                """pathToVideoFile=frameBasePath+videoBaseName+n+"."+videoType
                print "Video file: " + pathToVideoFile
                videoFile = open(pathToVideoFile, "r+b")
                videobytes = videoFile.read()
                videoFile.close()
                videoArray = bytearray()
                
                for vbite in videobytes:
                    videoArray.append(vbite)"""

                # now that all of the files have been read properly, insert a test
                # need a way to check status
                # test_status=0
                test_status = ldat['agent_success']

                algorithmAI.setValuesFromDict(aData)
                algorithm_data_id = algorithmAI.insertAlgorithmData2(cur)

                ldat['algorithm_data_id'] = algorithm_data_id
                ldat['test_comments'] = test_comments
                ldat['benchmark_type'] = benchmark_id  # update benchmark_id because of naming issues
                ldat['test_case'] = testcase
                ldat['test_status'] = test_status
                ldat['scenario_group'] = scenario_id
                __test_case = TestCase(testcase)
                # // TODO
                # ldat['num_agents']=__test_case.getNumAgents()
                # ldat['num_obstacles']=__test_case.getNumObstacles()

                ldat['num_agents'] = testcase_.getNumAgents()
                ldat['num_obstacles'] = testcase_.getNumObstacles()
                #print ldat
                # then other stuff for config.
                # check if the config already exists
                # save width and height stuff

                if benchmarkName == "composite01":
                    benchmark = Composite1Benchmark()
                elif benchmarkName == "composite02":
                    benchmark = Composite2Benchmark()
                elif benchmarkName == "compositeGraph":
                    benchmark = CompositeGraphBenchmark()
                elif benchmarkName == "compositePLE":
                    benchmark = CompositePLEBenchmark()
                else:
                    print "Invalid benchmark type: " + benchmarkName
                    sys.exit()
                # print ldat
                benchmark.setBenchmarkValuesFromDict(ldat)

                benchmark_data_id = benchmark.insertBenchmark2(cur)

                if benchmark_data_id % 50 == 0:
                    if options.verbose:
                        print "Test id: " + str(benchmark_data_id)
                """video = Video(benchmark_data_id, videoArray)
                status = video.insertVideo(cur)"""

                if 'recFileFolder' in args:
                    recording = Recording(benchmark_data_id, recordArray)
                    status = recording.insertRecording(cur)

                testcaseFile.close()
                if benchmarkName == "compositeEntropy":
                    baseStats = BaseCompositeEntropy()
                elif benchmarkName == "composite02":
                    baseStats = BaseComposite02()
                else:
                    baseStats = BaseCompositePLE()
                baseStats.setMetricValuesFromDict(
                    dict(ldat.items() + aData.items()))
                stats.append(baseStats)

        # last is benchmark
        # oid = psycopg2.extensions.lobject()

        # close logFile
        benchmarkLogFile.close()
        if options.verbose:
            print "scenario id: " + str(scenario_id)
        out = {}
        out['scenario_id'] = scenario_id
        out['config_id'] = config_id

        __dbSem.release()
        if dataQueue:
            dataQueue.put(stats)


#        pr.disable()
#        f = open('x.prof', 'a')
#        pstats.Stats(pr, stream=f).sort_stats('time').print_stats()
#        f.close()
        return out
        # videoFileOut.close()

    except psycopg2.DatabaseError, e:
        __dbSem.release()  # release the semaphore so other workers are not deadlocked
        print 'Error pprAI-test: %s' % e
def OptimizeWithBruteForce():
    # Only works for 2D for now
    # clear; clear; time nice -18 python OptimizeAlgorithm.py --ai sfAI --checkAgentInteraction --numScenarios 1 --benchmark compositePLE --statsOnly --scenarioSetInitId 0 --subspace=../subspaces/icra-subspaces/hallway-one-way-100-agents-funnel.xml --dataDir=data/ --numFrames=2000 --optimizeWith=bruteForce --paramFile=xml/config/subspaces/icra-subspaces/hallway-one-way-1pillar-smallOptimizationRegion.xml -c -p 4 --logMetrics
    import time
    # from multiprocessing import Pool as Pool
    from multiprocessing.pool import ThreadPool
        
    import itertools
    options = getOptions()
    # options.noReturn=True
    availableProcesses = int(options.processes)
    options.processes = 1
    steerStats = SteerStats(options)
    # steerStats.set_insert_func(InsertStatistics.InsertOptimizationConfig)
    
    [ai_bounds, ai_params_list, ai_param_names_list,
    stddev_for_parameters, ai_step_scaling]  = prepareOptimization(options.paramFile, options)
 
    # steerStats = SteerStats(options)
    steerStats.setParamNameDict(ai_param_names_list)
    cmaFilePrefix=options.cmaFilePrefix

    param_xml_file = open(options.paramFile)
    param_config_data = param_xml_file.read()
    
    default_params={}
    for param_name,t_param in zip(ai_param_names_list,ai_params_list):
        default_params[param_name]=t_param
        
    print default_params
    
    OptimExper = OptimizationExperiment(param_config_data)
    optimizingParameters = OptimExper.parseExperimentXML()

    op = OptimizeAlgorithm(options=options)
    ppe = PostProcessEvent_TarData(op)
    pped = PostProcessEvent_DumpOptResults(op)
    ppeh = PostProcessEvent_CreateHeatMap(op)
    
    # op._options=options
    # op.set_metric_func(steerStats.timeMetricGlobal)
    op.set_steerstats(steerStats)
    # op.set_metric_func(steerStats.simulationTimeMetricGlobal)
    if options.objectiveFunction != "":
        op.set_metric_func(steerStats.getBoundObjective(options.objectiveFunction))
    else:
        # op.set_metric_func(steerStats.agentFlowMetricGlobal)
        print "No objective function specified"
    
    op.set_penatly_funct(overlapPenalty)
    
    # result = OptimizeParamsCMA(op.eval,
    #                             ai_params_list, 
    #                       stddev_for_parameters, ai_bounds,
    #                       options.cmaProcesses,
    #                        cmaFilePrefix,
    #                        ai_step_scaling)
    
    #opt_log = open(cmaFilePrefix+"SteerStatsOptResult.txt", "w")
    #writeCMAResults(opt_log, result)
    #opt_log.close()
    
    # print "Algorithm param names: " + str(ai_param_names_list)
    # print optimizingParameters
    experiements = []
    experiements_param = []
    aiParams = []
    for param in optimizingParameters:
    
        # this will create a lot of directories, but that avoids having to
        # worry about synchronization and reuse of directories
        for n in range(int(param.get_descetization())):
            # aiParams_tmp = default_params
            aiParams_tmp = {}
            """
            for param_ in optimizingParameters:
                if param_.get_type() == "float":
                    aiParams_tmp[param_.get_name()] = str(float(param_.get_original()))
                elif param_.get_type() == "integer":
                    aiParams_tmp[param_.get_name()] = str(int(param_.get_original()))
            """
            # calculate param value for this iteration
            if param.get_type() == "float":
                paramValue = param.get_min() + (((param.get_max()-param.get_min())/param.get_descetization())*n)
            elif param.get_type() == "integer":
                paramValue = int(param.get_min() + (((param.get_max()-param.get_min())/param.get_descetization())*n))
            aiParams_tmp[param.get_name()] = paramValue
            tmp_params = []
            tmp_param_names = []        
            for key, value in aiParams_tmp.items() :
                tmp_params.append(value)
                tmp_param_names.append(key)
            # print tmp_params
            # print tmp_param_names
            experiements_param.append(copy.deepcopy(paramValue))
            aiParams.append(copy.deepcopy(tmp_param_names))
        
        experiements.append(experiements_param)
        experiements_param = []  
            #TODO stuff here.
    
    
    print "Number of experiments in-line: " + str(len(experiements))
    print "Size of process pool: " + str(availableProcesses)
    #print "experiements: " + str(experiements)
    # print ""
    experiements = list(itertools.product(experiements[0],experiements[1]))
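    # e.g. itertools.product([0, 1], [10, 20]) yields
    # (0, 10), (0, 20), (1, 10), (1, 20) -- every pairing of the two
    # parameter grids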
    # print "Cross product: " + str(experiements)
    
    # sys.exit()
    #try: 
    processes_pool = ThreadPool(availableProcesses)
    # results = processes_pool.apply(op.eval, experiements)
    for item in experiements:
        # this ensures the results come out in the same order that the experiments appear in this list
        processes_pool.apply_async(op.eval, args=(item,), callback=op.log_result)
    processes_pool.close()
    processes_pool.join()
    # print op._result_list 
    # Does not actually place an obstacle in the scenario because it comes from options.subspaceParams
    control_result = steerStats.RunStats((0, 0), options=options)
    control_result = op._metric_func((0, 0), results=control_result, options=options)
   
    # print "Control result: " + str(control_result)
    op._result_list = control_result - numpy.array(op._result_list)
    # print "Corrected results" + str( op._result_list) 
    # processes_pool = Pool(availableProcesses)
    # results = processes_pool.map(op.eval, experiements)
    # except :
    # print "Multi-processing failed: "
    op.set_experiements(experiements)
    # results = map(op.eval, experiements)
    print "Waited for subprocess"
    # print "Results: " + str(zip(results,experiements))
	# write all of the result to a file.
    op.add_post_process_event(pped)
    # create a heatmap from the results
    op.add_post_process_event(ppeh)
    # this post processing step should be added last
    op.add_post_process_event(ppe)
    
    op.post_process()
    print "Done"
def OptimizeWithCMA():
  
    # clear; clear; time python SteerStats.py --ai pprAI --checkAgentInteraction --numScenarios 5000 --benchmark compositeGraph -c --scenarioSetInitId=0 --scenarioDIR data/scenarios/representativeSet  --statsOnly -p 8
    
    options = getOptions()
    
    [ai_bounds, ai_params_defaults_list, ai_param_names_list,
     stddev_for_parameters, ai_step_scaling]  = prepareOptimization(options.paramFile, options)
 
    steerStats = SteerStats(options)
    steerStats.setParamNameDict(ai_param_names_list)
    cmaFilePrefix=options.cmaFilePrefix
    
    print "Algorithm param names: " + str(ai_param_names_list)
    print ai_bounds
    print ai_params_defaults_list
    
    # sys.exit()
    # print results
    
    # Just coverage metric
    #result = OptimizeParamsCMA(steerStats.coverageMetric, ai_params_list, 
    #                       stddev_for_parameters, ai_bounds, 
    #                       cmaFilePrefix)

    # Just performance metric
    #result = OptimizeParamsCMA(steerStats.performanceMetric, ai_params_list, 
    #                        stddev_for_parameters, ai_bounds, cmaFilePrefix)

    # Just distance metric
    # result = OptimizeParamsCMA(steerStats.distanceMetric, ai_params_list, 
    #                       stddev_for_parameters, ai_bounds, cmaFilePrefix)

    # Coverage plus frames metric
    #result = OptimizeParamsCMA(steerStats.coveragePlusFramesMetric, ai_params_list, 
    #                        stddev_for_parameters, ai_bounds, cmaFilePrefix)

    # coverage plus distance metric
    # result = OptimizeParamsCMA(steerStats.coveragePlusDistanceMetric, ai_params_list, 
    #                      stddev_for_parameters, ai_bounds, cmaFilePrefix)

    # coverage + computation time
    #result = OptimizeParamsCMA(steerStats.coveragePlusPerformanceMetric,
    #                            ai_params_list, 
    #                       stddev_for_parameters, ai_bounds, cmaFilePrefix)

    # computation time + coverage + quality_distance
    # result = OptimizeParamsCMA(steerStats.coveragePlusDistancePlusComputationMetric,
    #                            ai_params_list, 
    #                       stddev_for_parameters, ai_bounds,
    #                        cmaFilePrefix)

    op = OptimizeAlgorithm(options=options)
    op.set_steerstats(steerStats)
    ppe = PostProcessEvent_TarData(op)
    pped = PostProcessEvent_DumpOptResults(op)
    ppev = PostProcessEvent_MakeVideo(op)
    # op.set_metric_func(steerStats.timeMetricGlobal)
    if options.objectiveFunction != "":
        metric = steerStats.getBoundObjective(options.objectiveFunction)
        if metric is None:
            print '***** objective function ' + str(options.objectiveFunction) + ' not found *******'
            sys.exit(1)
        print "objective Function: " + str(options.objectiveFunction) + ", found: " + str(metric)
        op.set_metric_func(metric)
    else:
        op.set_metric_func(steerStats.agentFlowMetricGlobal)

    if options.penaltyFunction != "overlapPenalty":
        penalty = steerStats.getBoundPenaltyFunc(options.penaltyFunction)
        op.set_penatly_funct(penalty)
    else:
        op.set_penatly_funct(overlapPenalty)
    
    # Does not actually place an obstacle in the scenario because it comes from options.subspaceParams
    #control_result = steerStats.RunStats((0,0), options=options) # hard coded stuff uggh...
    #control_result = op._metric_func((0,0),results=control_result ,options=options)
    # control_result = steerStats.RunStats(ai_params_defaults_list, options=options)
    # print "control result: " + str(control_result)
    # print "op: " + str(op)
    # control_result = op._metric_func(ai_params_defaults_list,results=control_result ,options=options)
    # op._control_result = control_result
    
    result = OptimizeParamsCMA(op.eval,
                                ai_params_defaults_list, 
                           stddev_for_parameters, ai_bounds,
                           options.cmaProcesses,
                            cmaFilePrefix,
                            ai_step_scaling,
                            maxfevals_=options.cmaMaxEvals)
    
    
    print "Control result: " + str(op._control_result)
    opt_log = open(cmaFilePrefix+"SteerStatsOptResult.txt", "w")
    writeCMAResults(opt_log, result)
    opt_log.close()
    
    # write all of the result to a file.
    op.add_post_process_event(pped)
    # create a heatmap from the results
    # op.add_post_process_event(ppeh)
    # make video
    ppev.set_ai_params(result[0])
    # Record a video of the result
    #if options.config.getProcessingEvents()['PostProcessEvent_MakeVideo'] == 'true':
    #    op.add_post_process_event(ppev)
    # this post processing step should be added last (compressing everything)
    op.add_post_process_event(ppe)
    
    op.post_process()
    print "Done"
    
if __name__ == "__main__":
    
    options = getOptions()
    if options.optimizationAlgorithm == "CMA-ES":
        OptimizeWithCMA()
    elif options.optimizationAlgorithm == "midaco":
        OptimizeWithMIDACO()
    elif options.optimizationAlgorithm == "bruteForce":
        OptimizeWithBruteForce()
    elif options.optimizationAlgorithm == "CMA-ES-MixInt":
        OptimizeWithCMA_ES_MixedInt()
    elif options.optimizationAlgorithm == "NSGA2":
        if options.multiObjRestartFile == "":
            pop, stats = multiOptimizeNSGA2()
        else:
            pop, stats = multiOptimizeNSGA2(restart_=True)
        print(stats)
        pop.sort(key=lambda x: x.fitness.values)
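        # hedged sketch: with DEAP individuals the first Pareto front can be
        # recovered explicitly instead of sorting by fitness values:
        #   from deap import tools
        #   front = tools.sortNondominated(pop, len(pop), first_front_only=True)[0]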
def runScenarioSpace3():
	options = getOptions()
	steerstats = SteerStats(options)
	options.dumpTestcases=True
	random.seed(10)
	totalSimulations = 0
	samples=24
	step = 500
	timelimit=600
	dataLog = open("scenarioSpace.csv", "w+")
	dataLog_data = open("scenarioSpace_data.csv", "w+")
	terminate=False 
	for nScenarios in range(step, int(options.numScenarios), step):
	# for nScenarios in range(step, int(1501), step):
		processes_pool = Pool(int(options.processes))
		terminate=False
		results=[]
		for i in range(0, samples, 1):
			try: 
				totalSimulations = totalSimulations + nScenarios
				steerstats._options.numScenarios = nScenarios
				result = processes_pool.apply_async(runSteerSuite2, args = (nScenarios, random.randint(1, 5000), options, ), callback = handleSteerSuite)
				results.append(result)
			except Exception as inst:
				print "Exception processing result " + str(inst)
				# terminate=True;
				continue
			# result.get(timeout=120)
		# process_pairs = zip(_nScenarios, _rands, [options] * len(_rands))
		# processes_pool.close()
		# print process_pairs
		# print zip(_nScenarios, _rands)
		# sys.exit(0)
		# results = p.map(runSteerSuite, process_pairs)
		metrics = []
		for result in results:
			try: 
				# print "Getting Result"
				_result = result.get(timeout=timelimit)
				# print "RESULT: " + str(_result)
				cov = (coverage(_result, options))
				_time = (qualityTime(_result, options))
				_dist = (qualityPathLength(_result, options))
				_effort = (qualityEffort(_result, options))
				_result=None
				_line = str(nScenarios)+", " + str(cov) + ", " + str(_time) + ", " + str(_dist) + ", " + str(_effort) + "\n"
				print _line
				dataLog_data.write(_line)
				dataLog_data.flush()
				metrics.append([cov, _time, _dist, _effort] )
			except Exception as inst:
				print "Exception getting result " + str(inst)
				terminate = True
				continue
		if terminate is True:
			# reusing the pool seems to leave a lot of memory behind
			processes_pool.terminate() # Will this end processes early?
		else:
			processes_pool.close() # Will this end processes early?
			# processes_pool.terminate() # Will this end processes early?
			
		processes_pool.join()
		processes_pool=None
		metrics = np.array(metrics)
		print "Metrics: " + str(metrics)
		# dataLog.write(str(nScenarios)+", " + str(np.mean(metrics, axis=0)) + ", " + str(np.std(metrics, axis=0)) + "\n")
		dataLog.write(str(nScenarios))
		for _item in np.mean(metrics, axis=0):
			dataLog.write(", " + str(_item))
		for _item in np.std(metrics, axis=0):
			dataLog.write(", " + str(_item))
		dataLog.write("\n")
		dataLog.flush()
		# this should just save the mean and std after every group...
	
	print "totalSimulations: " + str(totalSimulations)
	dataLog.close()
	dataLog_data.close()