# Requires: inspyred. The helpers generate_, evaluate_, perform_fitness and
# solution_evaluation are assumed to be defined elsewhere in this script.
from random import Random
from time import time

from inspyred import ec
from inspyred.ec import terminators


def main():
    inicio = time()

    rand = Random()
    rand.seed(int(time()))

    ea = ec.GA(rand)
    ea.selector = ec.selectors.tournament_selection
    ea.variator = [ec.variators.uniform_crossover,
                   ec.variators.gaussian_mutation]
    ea.replacer = ec.replacers.steady_state_replacement
    ea.terminator = terminators.generation_termination
    #ea.observer = [ec.observers.stats_observer, ec.observers.file_observer]

    final_pop = ea.evolve(generator=generate_,
                          evaluator=evaluate_,
                          pop_size=5000,
                          maximize=True,
                          bounder=ec.Bounder(0, 53000),
                          max_generations=20000,
                          num_inputs=12,
                          crossover_rate=1.0,
                          num_crossover_points=1,
                          mutation_rate=0.25,
                          num_elites=1,
                          num_selected=5,
                          tournament_size=2,
                          statistics_file=open("aviao_stats.csv", 'w'),
                          individuals_file=open("aviao_individuals.csv", 'w'))

    final_pop.sort(reverse=True)
    best = final_pop[0]
    print(best)

    # report the best individual (12 decision variables)
    perform_fitness(*best.candidate[:12])
    solution_evaluation(*best.candidate[:12])

    fim = time()
    #solution_evaluation(final_pop[0].candidate[0], final_pop[0].candidate[1])
    print("\nProcessing time: ", fim - inicio)
# Requires: inspyred. generate_design_variable and evaluate_optimization are
# assumed to be defined elsewhere in this script.
import time
from random import Random

import inspyred
from inspyred import ec
from inspyred.ec import terminators


def optimize_core():
    rand = Random()
    rand.seed(int(time.time()))

    es = ec.GA(rand)
    es.terminator = terminators.evaluation_termination
    es.observer = inspyred.ec.observers.stats_observer

    final_pop = es.evolve(generator=generate_design_variable,
                          evaluator=evaluate_optimization,
                          maximize=False,
                          pop_size=100,
                          # 13 design variables, each bounded to [-100, 100]
                          bounder=inspyred.ec.Bounder([-100.0] * 13, [100.0] * 13),
                          max_evaluations=10000,
                          num_elites=1,
                          # note: num_inputs=1 looks inconsistent with the
                          # 13-dimensional bounder above; the generator decides
                          # how (or whether) this value is used
                          num_inputs=1)

    # return the evolved population so the caller can inspect the best individual
    return final_pop
# Same dependencies as the scripts above (Random, time, inspyred ec/terminators);
# generate_, evaluate_, perform_fitness and solution_evaluation are defined elsewhere.
def main():
    rand = Random()
    rand.seed(int(time()))

    ea = ec.GA(rand)
    ea.selector = ec.selectors.tournament_selection
    ea.variator = [ec.variators.uniform_crossover,
                   ec.variators.gaussian_mutation]
    ea.replacer = ec.replacers.steady_state_replacement
    ea.terminator = terminators.generation_termination
    ea.observer = [ec.observers.stats_observer, ec.observers.file_observer]

    final_pop = ea.evolve(generator=generate_,
                          evaluator=evaluate_,
                          pop_size=10000,
                          maximize=True,
                          bounder=ec.Bounder(0, 800),
                          max_generations=10000,
                          num_inputs=2,
                          crossover_rate=1.0,
                          num_crossover_points=1,
                          mutation_rate=0.15,
                          num_elites=1,
                          num_selected=2,
                          tournament_size=2,
                          statistics_file=open("garrafas_stats.csv", "w"),
                          individuals_file=open("garrafas_individuais.csv", "w"))

    final_pop.sort(reverse=True)
    print(final_pop[0])
    perform_fitness(final_pop[0].candidate[0], final_pop[0].candidate[1])
    solution_evaluation(final_pop[0].candidate[0], final_pop[0].candidate[1])
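# The scripts above pass user-defined generate_/evaluate_ callbacks to ea.evolve()
# without showing them. A minimal sketch of the signatures inspyred expects,
# assuming a two-variable problem bounded to [0, 800] as in main() above; the
# names and the objective below are illustrative placeholders, not the originals.
def example_generator(random, args):
    # one candidate = a list of num_inputs values drawn inside the bounds
    num_inputs = args.get('num_inputs', 2)
    return [random.uniform(0, 800) for _ in range(num_inputs)]


def example_evaluator(candidates, args):
    # one fitness value per candidate, in the same order
    return [sum(candidate) for candidate in candidates]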
# Requires: inspyred. evaluator, generator and decoder are assumed to be defined
# elsewhere in this script; mp_migrator is a migrator shared between islands
# (see the launcher sketch below).
import random

import inspyred
from inspyred import ec
from inspyred.ec import terminators


def create_island(rand_seed, island_number, mp_migrator):
    rand = random.Random()
    rand.seed(rand_seed)

    ga = ec.GA(rand)
    ga.selector = inspyred.ec.selectors.tournament_selection
    ga.terminator = terminators.evaluation_termination
    #ga.variator = [inspyred.ec.variators.blend_crossover, inspyred.ec.variators.gaussian_mutation]
    #ga.terminator = terminators.no_improvement_termination
    ga.observer = [inspyred.ec.observers.stats_observer,
                   inspyred.ec.observers.plot_observer,
                   inspyred.ec.observers.file_observer]
    ga.migrator = mp_migrator

    final_pop = ga.evolve(evaluator=evaluator,
                          generator=generator,
                          statistics_file=open("stats_%d.csv" % island_number, "w"),
                          individuals_file=open("inds_%d.csv" % island_number, "w"),
                          crossover_rate=0.9,
                          mutation_rate=0.01,
                          pop_size=500,
                          max_evaluations=50000,
                          #max_generations=15,
                          tournament_size=3,
                          num_selected=500,
                          evaluate_migrant=False)

    final_pop.sort(reverse=True)
    print(final_pop[0])

    A = decoder(final_pop[0].candidate)
    s = sum(A)
    print(A)
    print(s)
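# create_island() expects a migrator shared between islands. A sketch of how the
# islands could be launched, assuming inspyred's MultiprocessingMigrator and one
# OS process per island; the number of islands and the seeds are illustrative.
import multiprocessing


def launch_islands(num_islands=4):
    migrator = inspyred.ec.migrators.MultiprocessingMigrator(max_migrants=1)
    processes = []
    for i in range(num_islands):
        p = multiprocessing.Process(target=create_island,
                                    args=(1000 + i, i, migrator))
        p.start()
        processes.append(p)
    for p in processes:
        p.join()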
def evolOptim(self, pc):
    """
    Run evolutionary parameter optimization for a netpyne Batch using inspyred.

    Parameters
    ----------
    self : Batch
        The Batch instance holding cfg, params, evolCfg and runCfg.
        **Default:** *required*

    pc : ParallelContext
        NEURON parallel context, used for bulletin-board ('mpi_bulletin') job submission.
        **Default:** *required*
    """

    import sys
    import inspyred.ec as EC

    # -------------------------------------------------------------------------------
    # Evolutionary optimization: Parallel evaluation
    # -------------------------------------------------------------------------------
    def evaluator(candidates, args):
        import os
        import signal
        global ngen
        ngen += 1
        total_jobs = 0

        # options slurm, mpi
        type = args.get('type', 'mpi_direct')

        # paths to required scripts
        script = args.get('script', 'init.py')
        netParamsSavePath = args.get('netParamsSavePath')
        genFolderPath = self.saveFolder + '/gen_' + str(ngen)

        # mpi command setup
        nodes = args.get('nodes', 1)
        paramLabels = args.get('paramLabels', [])
        coresPerNode = args.get('coresPerNode', 1)
        mpiCommand = args.get('mpiCommand', 'mpirun')
        nrnCommand = args.get('nrnCommand', 'nrniv')
        numproc = nodes * coresPerNode

        # slurm setup
        custom = args.get('custom', '')
        folder = args.get('folder', '.')
        email = args.get('email', '[email protected]')
        walltime = args.get('walltime', '00:01:00')
        reservation = args.get('reservation', None)
        allocation = args.get('allocation', 'csd403')  # NSG account

        # fitness function
        fitnessFunc = args.get('fitnessFunc')
        fitnessFuncArgs = args.get('fitnessFuncArgs')
        defaultFitness = args.get('defaultFitness')

        # read params or set defaults
        sleepInterval = args.get('sleepInterval', 0.2)

        # create folder if it does not exist
        createFolder(genFolderPath)

        # remember pids and jobids in a list
        pids = []
        jobids = {}

        # create a job for each candidate
        for candidate_index, candidate in enumerate(candidates):
            # required for slurm
            sleep(sleepInterval)

            # name and path
            jobName = "gen_" + str(ngen) + "_cand_" + str(candidate_index)
            jobPath = genFolderPath + '/' + jobName

            # set initial cfg initCfg
            if len(self.initCfg) > 0:
                for paramLabel, paramVal in self.initCfg.items():
                    self.setCfgNestedParam(paramLabel, paramVal)

            # modify cfg instance with candidate values
            for label, value in zip(paramLabels, candidate):
                print('set %s=%s' % (label, value))
                self.setCfgNestedParam(label, value)

            #self.setCfgNestedParam("filename", jobPath)
            self.cfg.simLabel = jobName
            self.cfg.saveFolder = genFolderPath

            # save cfg instance to file
            cfgSavePath = jobPath + '_cfg.json'
            self.cfg.save(cfgSavePath)

            if type == 'mpi_bulletin':
                # ----------------------------------------------------------------------
                # MPI master-slaves
                # ----------------------------------------------------------------------
                pc.submit(runEvolJob, nrnCommand, script, cfgSavePath, netParamsSavePath, jobPath)
                print('-' * 80)

            else:
                # ----------------------------------------------------------------------
                # MPI job command
                # ----------------------------------------------------------------------
                if mpiCommand == '':
                    command = '%s %s simConfig=%s netParams=%s ' % (
                        nrnCommand, script, cfgSavePath, netParamsSavePath)
                else:
                    command = '%s -np %d %s -python -mpi %s simConfig=%s netParams=%s ' % (
                        mpiCommand, numproc, nrnCommand, script, cfgSavePath, netParamsSavePath)

                # ----------------------------------------------------------------------
                # run on local machine with <nodes*coresPerNode> cores
                # ----------------------------------------------------------------------
                if type == 'mpi_direct':
                    executer = '/bin/bash'
                    jobString = bashTemplate('mpi_direct') % (custom, folder, command)

                # ----------------------------------------------------------------------
                # run on HPC through slurm
                # ----------------------------------------------------------------------
                elif type == 'hpc_slurm':
                    executer = 'sbatch'
                    res = '#SBATCH --res=%s' % (reservation) if reservation else ''
                    jobString = bashTemplate('hpc_slurm') % (
                        jobName, allocation, walltime, nodes, coresPerNode,
                        jobPath, jobPath, email, res, custom, folder, command)

                # ----------------------------------------------------------------------
                # run on HPC through PBS
                # ----------------------------------------------------------------------
                elif type == 'hpc_torque':
                    executer = 'qsub'
                    queueName = args.get('queueName', 'default')
                    nodesppn = 'nodes=%d:ppn=%d' % (nodes, coresPerNode)
                    jobString = bashTemplate('hpc_torque') % (
                        jobName, walltime, queueName, nodesppn, jobPath, jobPath, custom, command)

                # ----------------------------------------------------------------------
                # save job and run
                # ----------------------------------------------------------------------
                print('Submitting job ', jobName)
                print(jobString)
                print('-' * 80)

                # save file
                batchfile = '%s.sbatch' % (jobPath)
                with open(batchfile, 'w') as text_file:
                    text_file.write("%s" % jobString)

                #with open(jobPath+'.run', 'a+') as outf, open(jobPath+'.err', 'w') as errf:
                with open(jobPath + '.jobid', 'w') as outf, open(jobPath + '.err', 'w') as errf:
                    pids.append(Popen([executer, batchfile],
                                      stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
                #proc = Popen(command.split([executer, batchfile]), stdout=PIPE, stderr=PIPE)
                sleep(0.1)
                #read = proc.stdout.read()
                with open(jobPath + '.jobid', 'r') as outf:
                    read = outf.readline()
                print(read)
                if len(read) > 0:
                    jobid = int(read.split()[-1])
                    jobids[candidate_index] = jobid
                print('jobids', jobids)

            total_jobs += 1
            sleep(0.1)

        # ----------------------------------------------------------------------
        # gather data and compute fitness
        # ----------------------------------------------------------------------
        if type == 'mpi_bulletin':
            # wait for pc bulletin board jobs to finish
            try:
                while pc.working():
                    sleep(1)
                #pc.done()
            except:
                pass

        num_iters = 0
        jobs_completed = 0
        fitness = [None for cand in candidates]
        # print outfilestem
        print("Waiting for jobs from generation %d/%d ..." % (ngen, args.get('max_generations')))
        # print "PID's: %r" %(pids)

        # start fitness calculation
        while jobs_completed < total_jobs:
            unfinished = [i for i, x in enumerate(fitness) if x is None]
            for candidate_index in unfinished:
                try:
                    # load simData and evaluate fitness
                    jobNamePath = genFolderPath + "/gen_" + str(ngen) + "_cand_" + str(candidate_index)
                    if os.path.isfile(jobNamePath + '.json'):
                        with open('%s.json' % (jobNamePath)) as file:
                            simData = json.load(file)['simData']
                        fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
                        jobs_completed += 1
                        print(' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index]))
                    elif os.path.isfile(jobNamePath + '.pkl'):
                        with open('%s.pkl' % (jobNamePath), 'rb') as file:
                            simData = pickle.load(file)['simData']
                        fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
                        jobs_completed += 1
                        print(' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index]))
                except Exception as e:
                    err = "There was an exception evaluating candidate %d:" % (candidate_index)
                    print("%s \n %s" % (err, e))
                    #pass
                    #print 'Error evaluating fitness of candidate %d'%(candidate_index)

            num_iters += 1
            print('completed: %d' % (jobs_completed))
            if num_iters >= args.get('maxiter_wait', 5000):
                print("Max iterations reached, the %d unfinished jobs will be canceled and set to default fitness" % (len(unfinished)))
                for candidate_index in unfinished:
                    fitness[candidate_index] = defaultFitness
                    jobs_completed += 1
                    try:
                        if 'scancelUser' in kwargs:
                            os.system('scancel -u %s' % (kwargs['scancelUser']))
                        else:
                            # terminate unfinished job (resubmitted jobs not terminated!)
                            os.system('scancel %d' % (jobids[candidate_index]))
                    except:
                        pass
            sleep(args.get('time_sleep', 1))

        # kill all processes
        if type == 'mpi_bulletin':
            try:
                with open("./pids.pid", 'r') as file:  # read pids for mpi_bulletin
                    pids = [int(i) for i in file.read().split(' ')[:-1]]
                with open("./pids.pid", 'w') as file:  # delete content
                    pass
                for pid in pids:
                    try:
                        os.killpg(os.getpgid(pid), signal.SIGTERM)
                    except:
                        pass
            except:
                pass
        # don't want to do this for hpcs since jobs are running on compute nodes not master
        # else:
        #     try:
        #         for pid in pids: os.killpg(os.getpgid(pid), signal.SIGTERM)
        #     except:
        #         pass

        # return
        print("-" * 80)
        print(" Completed a generation ")
        print("-" * 80)

        return fitness

    # -------------------------------------------------------------------------------
    # Evolutionary optimization: Generation of first population candidates
    # -------------------------------------------------------------------------------
    def generator(random, args):
        # generate initial values for candidates
        return [random.uniform(l, u)
                for l, u in zip(args.get('lower_bound'), args.get('upper_bound'))]

    # -------------------------------------------------------------------------------
    # Mutator
    # -------------------------------------------------------------------------------
    @EC.variators.mutator
    def nonuniform_bounds_mutation(random, candidate, args):
        """Return the mutants produced by nonuniform mutation on the candidates.

        Arguments:
        random -- the random number generator object
        candidate -- the candidate solution
        args -- a dictionary of keyword arguments

        Required keyword arguments in args:

        Optional keyword arguments in args:

        - *mutation_strength* -- the strength of the mutation, where higher
          values correspond to greater variation (default 1)
        """
        lower_bound = args.get('lower_bound')
        upper_bound = args.get('upper_bound')
        strength = args.setdefault('mutation_strength', 1)
        mutant = copy(candidate)
        for i, (c, lo, hi) in enumerate(zip(candidate, lower_bound, upper_bound)):
            if random.random() <= 0.5:
                new_value = c + (hi - c) * (1.0 - random.random()**strength)
            else:
                new_value = c - (c - lo) * (1.0 - random.random()**strength)
            mutant[i] = new_value
        return mutant

    # -------------------------------------------------------------------------------
    # Evolutionary optimization: Main code
    # -------------------------------------------------------------------------------
    import os

    # create main sim directory and save scripts
    self.saveScripts()

    global ngen
    ngen = -1

    # log for simulation
    logger = logging.getLogger('inspyred.ec')
    logger.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler(self.saveFolder + '/inspyred.log', mode='a')
    file_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)

    # create randomizer instance
    rand = Random()
    rand.seed(self.seed)

    # create file handlers for observers
    stats_file, ind_stats_file = self.openFiles2SaveStats()

    # gather **kwargs
    kwargs = {'cfg': self.cfg}
    kwargs['num_inputs'] = len(self.params)
    kwargs['paramLabels'] = [x['label'] for x in self.params]
    kwargs['lower_bound'] = [x['values'][0] for x in self.params]
    kwargs['upper_bound'] = [x['values'][1] for x in self.params]
    kwargs['statistics_file'] = stats_file
    kwargs['individuals_file'] = ind_stats_file
    kwargs['netParamsSavePath'] = self.saveFolder + '/' + self.batchLabel + '_netParams.py'

    for key, value in self.evolCfg.items():
        kwargs[key] = value
    if not 'maximize' in kwargs:
        kwargs['maximize'] = False

    for key, value in self.runCfg.items():
        kwargs[key] = value

    # if using pc bulletin board, initialize all workers
    if self.runCfg.get('type', None) == 'mpi_bulletin':
        for iworker in range(int(pc.nhost())):
            pc.runworker()

    # -------------------------------------------------------------------------------
    # Evolutionary algorithm method
    # -------------------------------------------------------------------------------
    # Custom algorithm based on Krichmar's params
    if self.evolCfg['evolAlgorithm'] == 'custom':
        ea = EC.EvolutionaryComputation(rand)
        ea.selector = EC.selectors.tournament_selection
        ea.variator = [EC.variators.uniform_crossover, nonuniform_bounds_mutation]
        ea.replacer = EC.replacers.generational_replacement
        if not 'tournament_size' in kwargs:
            kwargs['tournament_size'] = 2
        if not 'num_selected' in kwargs:
            kwargs['num_selected'] = kwargs['pop_size']

    # Genetic
    elif self.evolCfg['evolAlgorithm'] == 'genetic':
        ea = EC.GA(rand)

    # Evolution Strategy
    elif self.evolCfg['evolAlgorithm'] == 'evolutionStrategy':
        ea = EC.ES(rand)

    # Simulated Annealing
    elif self.evolCfg['evolAlgorithm'] == 'simulatedAnnealing':
        ea = EC.SA(rand)

    # Differential Evolution
    elif self.evolCfg['evolAlgorithm'] == 'diffEvolution':
        ea = EC.DEA(rand)

    # Estimation of Distribution
    elif self.evolCfg['evolAlgorithm'] == 'estimationDist':
        ea = EC.EDA(rand)

    # Particle Swarm optimization
    elif self.evolCfg['evolAlgorithm'] == 'particleSwarm':
        from inspyred import swarm
        ea = swarm.PSO(rand)
        ea.topology = swarm.topologies.ring_topology

    # Ant colony optimization (requires components)
    elif self.evolCfg['evolAlgorithm'] == 'antColony':
        from inspyred import swarm
        if not 'components' in kwargs:
            raise ValueError("%s requires components" % (self.evolCfg['evolAlgorithm']))
        ea = swarm.ACS(rand, self.evolCfg['components'])
        ea.topology = swarm.topologies.ring_topology

    else:
        raise ValueError("%s is not a valid strategy" % (self.evolCfg['evolAlgorithm']))

    ea.terminator = EC.terminators.generation_termination
    ea.observer = [EC.observers.stats_observer, EC.observers.file_observer]

    # -------------------------------------------------------------------------------
    # Run algorithm
    # -------------------------------------------------------------------------------
    final_pop = ea.evolve(generator=generator,
                          evaluator=evaluator,
                          bounder=EC.Bounder(kwargs['lower_bound'], kwargs['upper_bound']),
                          logger=logger,
                          **kwargs)

    # close files
    stats_file.close()
    ind_stats_file.close()

    # print best and finish
    print('Best Solution: \n{0}'.format(str(max(final_pop))))
    print("-" * 80)
    print(" Completed evolutionary algorithm parameter optimization ")
    print("-" * 80)

    sys.exit()
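# The evaluator above calls a user-supplied fitnessFunc(simData, **fitnessFuncArgs).
# A minimal sketch of one such function, assuming simData carries a 'popRates'
# dict of population firing rates as saved by netpyne; the target rates and the
# error metric below are illustrative only, not part of the original code.
def exampleFitnessFunc(simData, targetRates=None, maxFitness=1000):
    targetRates = targetRates or {'E': 10.0, 'I': 20.0}
    popRates = simData.get('popRates', {})
    if not popRates:
        return maxFitness  # no data: return the worst (default) fitness
    # sum of absolute deviations from each population's target rate
    return sum(abs(popRates.get(pop, 0.0) - rate) for pop, rate in targetRates.items())

# It would be wired in through evolCfg, e.g.:
# self.evolCfg = {'evolAlgorithm': 'custom',
#                 'fitnessFunc': exampleFitnessFunc,
#                 'fitnessFuncArgs': {'targetRates': {'E': 10.0, 'I': 20.0}},
#                 'defaultFitness': 1000, 'maximize': False,
#                 'pop_size': 20, 'max_generations': 10}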
def run(self):
    # Legacy Python 2 implementation of the batch runner (print statements, iteritems/izip, imp).

    # -------------------------------------------------------------------------------
    # Grid Search optimization
    # -------------------------------------------------------------------------------
    if self.method in ['grid', 'list']:
        # create saveFolder
        import os, glob
        try:
            os.mkdir(self.saveFolder)
        except OSError:
            if not os.path.exists(self.saveFolder):
                print ' Could not create', self.saveFolder

        # save Batch dict as json
        targetFile = self.saveFolder + '/' + self.batchLabel + '_batch.json'
        self.save(targetFile)

        # copy this batch script to folder
        targetFile = self.saveFolder + '/' + self.batchLabel + '_batchScript.py'
        os.system('cp ' + os.path.realpath(__file__) + ' ' + targetFile)

        # copy netParams source to folder
        netParamsSavePath = self.saveFolder + '/' + self.batchLabel + '_netParams.py'
        os.system('cp ' + self.netParamsFile + ' ' + netParamsSavePath)

        # import cfg
        cfgModuleName = os.path.basename(self.cfgFile).split('.')[0]
        cfgModule = imp.load_source(cfgModuleName, self.cfgFile)
        self.cfg = cfgModule.cfg
        self.cfg.checkErrors = False  # avoid error checking during batch

        # set initial cfg initCfg
        if len(self.initCfg) > 0:
            for paramLabel, paramVal in self.initCfg.iteritems():
                self.setCfgNestedParam(paramLabel, paramVal)

        # iterate over all param combinations
        if self.method == 'grid':
            groupedParams = False
            ungroupedParams = False
            for p in self.params:
                if 'group' not in p:
                    p['group'] = False
                    ungroupedParams = True
                elif p['group'] == True:
                    groupedParams = True

            if ungroupedParams:
                labelList, valuesList = zip(*[(p['label'], p['values'])
                                              for p in self.params if p['group'] == False])
            else:
                labelList = ()
                valuesList = ()

            valueCombinations = list(product(*(valuesList)))
            indexCombinations = list(product(*[range(len(x)) for x in valuesList]))

            if groupedParams:
                labelListGroup, valuesListGroup = zip(*[(p['label'], p['values'])
                                                        for p in self.params if p['group'] == True])
                valueCombGroups = izip(*(valuesListGroup))
                indexCombGroups = izip(*[range(len(x)) for x in valuesListGroup])
                labelList = labelListGroup + labelList
            else:
                valueCombGroups = [(0,)]  # this is a hack -- improve!
                indexCombGroups = [(0,)]

        # if using pc bulletin board, initialize all workers
        if self.runCfg.get('type', None) == 'mpi_bulletin':
            for iworker in range(int(pc.nhost())):
                pc.runworker()

        #if 1:
        #for iComb, pComb in zip(indexCombinations, valueCombinations):
        for iCombG, pCombG in zip(indexCombGroups, valueCombGroups):
            for iCombNG, pCombNG in zip(indexCombinations, valueCombinations):
                if groupedParams:  # temporary hack - improve
                    iComb = iCombG + iCombNG
                    pComb = pCombG + pCombNG
                else:
                    iComb = iCombNG
                    pComb = pCombNG

                print iComb, pComb

                for i, paramVal in enumerate(pComb):
                    paramLabel = labelList[i]
                    self.setCfgNestedParam(paramLabel, paramVal)
                    print str(paramLabel) + ' = ' + str(paramVal)

                # set simLabel and jobName
                simLabel = self.batchLabel + ''.join([''.join('_' + str(i)) for i in iComb])
                jobName = self.saveFolder + '/' + simLabel

                # skip if output file already exists
                if self.runCfg.get('skip', False) and glob.glob(jobName + '.json'):
                    print 'Skipping job %s since output file already exists...' % (jobName)
                elif self.runCfg.get('skipCfg', False) and glob.glob(jobName + '_cfg.json'):
                    print 'Skipping job %s since cfg file already exists...' % (jobName)
                elif self.runCfg.get('skipCustom', None) and glob.glob(jobName + self.runCfg['skipCustom']):
                    print 'Skipping job %s since %s file already exists...' % (jobName, self.runCfg['skipCustom'])
                else:
                    # save simConfig json to saveFolder
                    self.cfg.simLabel = simLabel
                    self.cfg.saveFolder = self.saveFolder
                    cfgSavePath = self.saveFolder + '/' + simLabel + '_cfg.json'
                    self.cfg.save(cfgSavePath)

                    # hpc torque job submission
                    if self.runCfg.get('type', None) == 'hpc_torque':
                        # read params or set defaults
                        sleepInterval = self.runCfg.get('sleepInterval', 1)
                        sleep(sleepInterval)

                        nodes = self.runCfg.get('nodes', 1)
                        ppn = self.runCfg.get('ppn', 1)
                        script = self.runCfg.get('script', 'init.py')
                        mpiCommand = self.runCfg.get('mpiCommand', 'mpiexec')
                        walltime = self.runCfg.get('walltime', '00:30:00')
                        queueName = self.runCfg.get('queueName', 'default')
                        nodesppn = 'nodes=%d:ppn=%d' % (nodes, ppn)
                        custom = self.runCfg.get('custom', '')
                        numproc = nodes * ppn

                        command = '%s -np %d nrniv -python -mpi %s simConfig=%s netParams=%s' % (
                            mpiCommand, numproc, script, cfgSavePath, netParamsSavePath)

                        jobString = """#!/bin/bash
#PBS -N %s
#PBS -l walltime=%s
#PBS -q %s
#PBS -l %s
#PBS -o %s.run
#PBS -e %s.err
%s
cd $PBS_O_WORKDIR
echo $PBS_O_WORKDIR
%s
""" % (jobName, walltime, queueName, nodesppn, jobName, jobName, custom, command)

                        # Send job_string to qsub
                        print 'Submitting job ', jobName
                        print jobString + '\n'

                        batchfile = '%s.pbs' % (jobName)
                        with open(batchfile, 'w') as text_file:
                            text_file.write("%s" % jobString)

                        proc = Popen(['qsub', batchfile], stderr=PIPE, stdout=PIPE)  # Open a pipe to the qsub command.
                        (output, input) = (proc.stdin, proc.stdout)

                    # hpc slurm job submission
                    elif self.runCfg.get('type', None) == 'hpc_slurm':
                        # read params or set defaults
                        sleepInterval = self.runCfg.get('sleepInterval', 1)
                        sleep(sleepInterval)

                        allocation = self.runCfg.get('allocation', 'csd403')  # NSG account
                        nodes = self.runCfg.get('nodes', 1)
                        coresPerNode = self.runCfg.get('coresPerNode', 1)
                        email = self.runCfg.get('email', '[email protected]')
                        folder = self.runCfg.get('folder', '.')
                        script = self.runCfg.get('script', 'init.py')
                        mpiCommand = self.runCfg.get('mpiCommand', 'ibrun')
                        walltime = self.runCfg.get('walltime', '00:30:00')
                        reservation = self.runCfg.get('reservation', None)
                        custom = self.runCfg.get('custom', '')
                        if reservation:
                            res = '#SBATCH --res=%s' % (reservation)
                        else:
                            res = ''

                        numproc = nodes * coresPerNode
                        command = '%s -np %d nrniv -python -mpi %s simConfig=%s netParams=%s' % (
                            mpiCommand, numproc, script, cfgSavePath, netParamsSavePath)

                        jobString = """#!/bin/bash
#SBATCH --job-name=%s
#SBATCH -A %s
#SBATCH -t %s
#SBATCH --nodes=%d
#SBATCH --ntasks-per-node=%d
#SBATCH -o %s.run
#SBATCH -e %s.err
#SBATCH --mail-user=%s
#SBATCH --mail-type=end
%s
%s
source ~/.bashrc
cd %s
%s
wait
""" % (simLabel, allocation, walltime, nodes, coresPerNode, jobName, jobName, email, res, custom, folder, command)

                        # Send job_string to sbatch
                        print 'Submitting job ', jobName
                        print jobString + '\n'

                        batchfile = '%s.sbatch' % (jobName)
                        with open(batchfile, 'w') as text_file:
                            text_file.write("%s" % jobString)

                        #subprocess.call
                        proc = Popen(['sbatch', batchfile], stdin=PIPE, stdout=PIPE)  # Open a pipe to the sbatch command.
                        (output, input) = (proc.stdin, proc.stdout)

                    # run mpi jobs directly e.g. if have 16 cores, can run 4 jobs * 4 cores in parallel
                    # eg. usage: python batch.py
                    elif self.runCfg.get('type', None) == 'mpi_direct':
                        jobName = self.saveFolder + '/' + simLabel
                        print 'Running job ', jobName
                        cores = self.runCfg.get('cores', 1)
                        folder = self.runCfg.get('folder', '.')
                        script = self.runCfg.get('script', 'init.py')
                        mpiCommand = self.runCfg.get('mpiCommand', 'ibrun')

                        command = '%s -np %d nrniv -python -mpi %s simConfig=%s netParams=%s' % (
                            mpiCommand, cores, script, cfgSavePath, netParamsSavePath)

                        print command + '\n'
                        proc = Popen(command.split(' '),
                                     stdout=open(jobName + '.run', 'w'),
                                     stderr=open(jobName + '.err', 'w'))
                        #print proc.stdout.read()

                    # pc bulletin board job submission (master/slave) via mpi
                    # eg. usage: mpiexec -n 4 nrniv -mpi batch.py
                    elif self.runCfg.get('type', None) == 'mpi_bulletin':
                        jobName = self.saveFolder + '/' + simLabel
                        print 'Submitting job ', jobName
                        # master/slave bulletin board scheduling of jobs
                        pc.submit(runJob, self.runCfg.get('script', 'init.py'), cfgSavePath, netParamsSavePath)
                        sleep(1)  # avoid saturating scheduler

        print "-" * 80
        print " Finished submitting jobs for grid parameter exploration "
        print "-" * 80

    # -------------------------------------------------------------------------------
    # Evolutionary optimization
    # -------------------------------------------------------------------------------
    elif self.method == 'evol':
        import sys
        import inspyred.ec as EC

        # -------------------------------------------------------------------------------
        # Evolutionary optimization: Parallel evaluation
        # -------------------------------------------------------------------------------
        def evaluator(candidates, args):
            import os
            import signal
            global ngen
            ngen += 1
            total_jobs = 0

            # options slurm, mpi
            type = args.get('type', 'mpi_direct')

            # paths to required scripts
            script = args.get('script', 'init.py')
            netParamsSavePath = args.get('netParamsSavePath')
            genFolderPath = self.saveFolder + '/gen_' + str(ngen)

            # mpi command setup
            nodes = args.get('nodes', 1)
            paramLabels = args.get('paramLabels', [])
            coresPerNode = args.get('coresPerNode', 1)
            mpiCommand = args.get('mpiCommand', 'ibrun')
            numproc = nodes * coresPerNode

            # slurm setup
            custom = args.get('custom', '')
            folder = args.get('folder', '.')
            email = args.get('email', '[email protected]')
            walltime = args.get('walltime', '00:01:00')
            reservation = args.get('reservation', None)
            allocation = args.get('allocation', 'csd403')  # NSG account

            # fitness function
            fitnessFunc = args.get('fitnessFunc')
            fitnessFuncArgs = args.get('fitnessFuncArgs')
            defaultFitness = args.get('defaultFitness')

            # read params or set defaults
            sleepInterval = args.get('sleepInterval', 0.2)

            # create folder if it does not exist
            createFolder(genFolderPath)

            # remember pids and jobids in a list
            pids = []
            jobids = {}

            # create a job for each candidate
            for candidate_index, candidate in enumerate(candidates):
                # required for slurm
                sleep(sleepInterval)

                # name and path
                jobName = "gen_" + str(ngen) + "_cand_" + str(candidate_index)
                jobPath = genFolderPath + '/' + jobName

                # modify cfg instance with candidate values
                for label, value in zip(paramLabels, candidate):
                    self.setCfgNestedParam(label, value)
                    print 'set %s=%s' % (label, value)

                #self.setCfgNestedParam("filename", jobPath)
                self.cfg.simLabel = jobName
                self.cfg.saveFolder = genFolderPath

                # save cfg instance to file
                cfgSavePath = jobPath + '_cfg.json'
                self.cfg.save(cfgSavePath)

                if type == 'mpi_bulletin':
                    # ----------------------------------------------------------------------
                    # MPI master-slaves
                    # ----------------------------------------------------------------------
                    pc.submit(runEvolJob, script, cfgSavePath, netParamsSavePath, jobPath)
                    print '-' * 80

                else:
                    # ----------------------------------------------------------------------
                    # MPI job command
                    # ----------------------------------------------------------------------
                    command = '%s -np %d nrniv -python -mpi %s simConfig=%s netParams=%s ' % (
                        mpiCommand, numproc, script, cfgSavePath, netParamsSavePath)

                    # ----------------------------------------------------------------------
                    # run on local machine with <nodes*coresPerNode> cores
                    # ----------------------------------------------------------------------
                    if type == 'mpi_direct':
                        executer = '/bin/bash'
                        jobString = bashTemplate('mpi_direct') % (custom, folder, command)

                    # ----------------------------------------------------------------------
                    # run on HPC through slurm
                    # ----------------------------------------------------------------------
                    elif type == 'hpc_slurm':
                        executer = 'sbatch'
                        res = '#SBATCH --res=%s' % (reservation) if reservation else ''
                        jobString = bashTemplate('hpc_slurm') % (
                            jobName, allocation, walltime, nodes, coresPerNode,
                            jobPath, jobPath, email, res, custom, folder, command)

                    # ----------------------------------------------------------------------
                    # run on HPC through PBS
                    # ----------------------------------------------------------------------
                    elif type == 'hpc_torque':
                        executer = 'qsub'
                        queueName = args.get('queueName', 'default')
                        nodesppn = 'nodes=%d:ppn=%d' % (nodes, coresPerNode)
                        jobString = bashTemplate('hpc_torque') % (
                            jobName, walltime, queueName, nodesppn, jobPath, jobPath, custom, command)

                    # ----------------------------------------------------------------------
                    # save job and run
                    # ----------------------------------------------------------------------
                    print 'Submitting job ', jobName
                    print jobString
                    print '-' * 80

                    # save file
                    batchfile = '%s.sbatch' % (jobPath)
                    with open(batchfile, 'w') as text_file:
                        text_file.write("%s" % jobString)

                    #with open(jobPath+'.run', 'a+') as outf, open(jobPath+'.err', 'w') as errf:
                    with open(jobPath + '.jobid', 'w') as outf, open(jobPath + '.err', 'w') as errf:
                        pids.append(Popen([executer, batchfile],
                                          stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
                    #proc = Popen(command.split([executer, batchfile]), stdout=PIPE, stderr=PIPE)
                    sleep(0.1)
                    #read = proc.stdout.read()
                    with open(jobPath + '.jobid', 'r') as outf:
                        read = outf.readline()
                    print read
                    if len(read) > 0:
                        jobid = int(read.split()[-1])
                        jobids[candidate_index] = jobid
                    print 'jobids', jobids

                total_jobs += 1
                sleep(0.1)

            # ----------------------------------------------------------------------
            # gather data and compute fitness
            # ----------------------------------------------------------------------
            if type == 'mpi_bulletin':
                # wait for pc bulletin board jobs to finish
                try:
                    while pc.working():
                        sleep(1)
                    #pc.done()
                except:
                    pass

            num_iters = 0
            jobs_completed = 0
            fitness = [None for cand in candidates]
            # print outfilestem
            print "Waiting for jobs from generation %d/%d ..." % (ngen, args.get('max_generations'))
            # print "PID's: %r" %(pids)

            # start fitness calculation
            while jobs_completed < total_jobs:
                unfinished = [i for i, x in enumerate(fitness) if x is None]
                for candidate_index in unfinished:
                    try:
                        # load simData and evaluate fitness
                        jobNamePath = genFolderPath + "/gen_" + str(ngen) + "_cand_" + str(candidate_index)
                        if os.path.isfile(jobNamePath + '.json'):
                            with open('%s.json' % (jobNamePath)) as file:
                                simData = json.load(file)['simData']
                            fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
                            jobs_completed += 1
                            print ' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index])
                    except Exception as e:
                        err = "There was an exception evaluating candidate %d:" % (candidate_index)
                        print("%s \n %s" % (err, e))
                        #pass
                        #print 'Error evaluating fitness of candidate %d'%(candidate_index)

                num_iters += 1
                print 'completed: %d' % (jobs_completed)
                if num_iters >= args.get('maxiter_wait', 5000):
                    print "Max iterations reached, the %d unfinished jobs will be canceled and set to default fitness" % (len(unfinished))
                    for candidate_index in unfinished:
                        fitness[candidate_index] = defaultFitness
                        jobs_completed += 1
                        if 'scancelUser' in kwargs:
                            os.system('scancel -u %s' % (kwargs['scancelUser']))
                        else:
                            # terminate unfinished job (resubmitted jobs not terminated!)
                            os.system('scancel %d' % (jobids[candidate_index]))
                sleep(args.get('time_sleep', 1))

            # kill all processes
            if type == 'mpi_bulletin':
                try:
                    with open("./pids.pid", 'r') as file:  # read pids for mpi_bulletin
                        pids = [int(i) for i in file.read().split(' ')[:-1]]
                    with open("./pids.pid", 'w') as file:  # delete content
                        pass
                    for pid in pids:
                        try:
                            os.killpg(os.getpgid(pid), signal.SIGTERM)
                        except:
                            pass
                except:
                    pass
            # don't want to do this for hpcs since jobs are running on compute nodes not master
            # else:
            #     try:
            #         for pid in pids: os.killpg(os.getpgid(pid), signal.SIGTERM)
            #     except:
            #         pass

            # return
            print "-" * 80
            print " Completed a generation "
            print "-" * 80

            return fitness

        # -------------------------------------------------------------------------------
        # Evolutionary optimization: Generation of first population candidates
        # -------------------------------------------------------------------------------
        def generator(random, args):
            # generate initial values for candidates
            return [random.uniform(l, u)
                    for l, u in zip(args.get('lower_bound'), args.get('upper_bound'))]

        # -------------------------------------------------------------------------------
        # Mutator
        # -------------------------------------------------------------------------------
        @EC.variators.mutator
        def nonuniform_bounds_mutation(random, candidate, args):
            """Return the mutants produced by nonuniform mutation on the candidates.

            Arguments:
            random -- the random number generator object
            candidate -- the candidate solution
            args -- a dictionary of keyword arguments

            Required keyword arguments in args:

            Optional keyword arguments in args:

            - *mutation_strength* -- the strength of the mutation, where higher
              values correspond to greater variation (default 1)
            """
            lower_bound = args.get('lower_bound')
            upper_bound = args.get('upper_bound')
            strength = args.setdefault('mutation_strength', 1)
            mutant = copy(candidate)
            for i, (c, lo, hi) in enumerate(zip(candidate, lower_bound, upper_bound)):
                if random.random() <= 0.5:
                    new_value = c + (hi - c) * (1.0 - random.random()**strength)
                else:
                    new_value = c - (c - lo) * (1.0 - random.random()**strength)
                mutant[i] = new_value
            return mutant

        # -------------------------------------------------------------------------------
        # Evolutionary optimization: Main code
        # -------------------------------------------------------------------------------
        import os

        # create main sim directory and save scripts
        self.saveScripts()

        global ngen
        ngen = -1

        # log for simulation
        logger = logging.getLogger('inspyred.ec')
        logger.setLevel(logging.DEBUG)
        file_handler = logging.FileHandler(self.saveFolder + '/inspyred.log', mode='a')
        file_handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)

        # create randomizer instance
        rand = Random()
        rand.seed(self.seed)

        # create file handlers for observers
        stats_file, ind_stats_file = self.openFiles2SaveStats()

        # gather **kwargs
        kwargs = {'cfg': self.cfg}
        kwargs['num_inputs'] = len(self.params)
        kwargs['paramLabels'] = [x['label'] for x in self.params]
        kwargs['lower_bound'] = [x['values'][0] for x in self.params]
        kwargs['upper_bound'] = [x['values'][1] for x in self.params]
        kwargs['statistics_file'] = stats_file
        kwargs['individuals_file'] = ind_stats_file
        kwargs['netParamsSavePath'] = self.saveFolder + '/' + self.batchLabel + '_netParams.py'

        for key, value in self.evolCfg.iteritems():
            kwargs[key] = value
        if not 'maximize' in kwargs:
            kwargs['maximize'] = False

        for key, value in self.runCfg.iteritems():
            kwargs[key] = value

        # if using pc bulletin board, initialize all workers
        if self.runCfg.get('type', None) == 'mpi_bulletin':
            for iworker in range(int(pc.nhost())):
                pc.runworker()

        ####################################################################
        # Evolution strategy
        ####################################################################
        # Custom algorithm based on Krichmar's params
        if self.evolCfg['evolAlgorithm'] == 'krichmarCustom':
            ea = EC.EvolutionaryComputation(rand)
            ea.selector = EC.selectors.tournament_selection
            ea.variator = [EC.variators.uniform_crossover, nonuniform_bounds_mutation]
            ea.replacer = EC.replacers.generational_replacement
            if not 'tournament_size' in kwargs:
                kwargs['tournament_size'] = 2
            if not 'num_selected' in kwargs:
                kwargs['num_selected'] = kwargs['pop_size']

        # Genetic
        elif self.evolCfg['evolAlgorithm'] == 'genetic':
            ea = EC.GA(rand)

        # Evolution Strategy
        elif self.evolCfg['evolAlgorithm'] == 'evolutionStrategy':
            ea = EC.ES(rand)

        # Simulated Annealing
        elif self.evolCfg['evolAlgorithm'] == 'simulatedAnnealing':
            ea = EC.SA(rand)

        # Differential Evolution
        elif self.evolCfg['evolAlgorithm'] == 'diffEvolution':
            ea = EC.DEA(rand)

        # Estimation of Distribution
        elif self.evolCfg['evolAlgorithm'] == 'estimationDist':
            ea = EC.EDA(rand)

        # Particle Swarm optimization
        elif self.evolCfg['evolAlgorithm'] == 'particleSwarm':
            from inspyred import swarm
            ea = swarm.PSO(rand)
            ea.topology = swarm.topologies.ring_topology

        # Ant colony optimization (requires components)
        elif self.evolCfg['evolAlgorithm'] == 'antColony':
            from inspyred import swarm
            # note: inspyred's ACS also expects a list of trail components as its second argument
            ea = swarm.ACS(rand)
            ea.topology = swarm.topologies.ring_topology

        else:
            raise ValueError("%s is not a valid strategy" % (self.evolCfg['evolAlgorithm']))
        ####################################################################

        ea.terminator = EC.terminators.generation_termination
        ea.observer = [EC.observers.stats_observer, EC.observers.file_observer]

        # -------------------------------------------------------------------------------
        # Run algorithm
        # -------------------------------------------------------------------------------
        final_pop = ea.evolve(generator=generator,
                              evaluator=evaluator,
                              bounder=EC.Bounder(kwargs['lower_bound'], kwargs['upper_bound']),
                              logger=logger,
                              **kwargs)

        # close files
        stats_file.close()
        ind_stats_file.close()

        # print best and finish
        print('Best Solution: \n{0}'.format(str(max(final_pop))))
        print "-" * 80
        print " Completed evolutionary algorithm parameter optimization "
        print "-" * 80

        sys.exit()
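# A sketch of how run() might be driven, assuming the netpyne Batch API as used
# in its batch tutorials; the parameter labels, ranges and file names below are
# illustrative only.
from netpyne import specs
from netpyne.batch import Batch

params = specs.ODict()
params['prob'] = [0.1, 0.2, 0.3]        # grid values for one cfg parameter
params['weight'] = [0.005, 0.01]        # grid values for another

b = Batch(params=params, cfgFile='cfg.py', netParamsFile='netParams.py')
b.batchLabel = 'example'
b.saveFolder = 'data/' + b.batchLabel
b.method = 'grid'                        # or 'evol' to use the evolutionary branch
b.runCfg = {'type': 'mpi_bulletin', 'script': 'init.py', 'skip': True}
b.run()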
# Requires: inspyred; time and random from the standard library. The module-level
# helpers and settings used below (cfg, fh, WriteLogMsg, generate_genom,
# generate_parameter, evaluate, individual_filter, transform_individual_ascii_map,
# close_window, max_range, file_HRU) are defined elsewhere in this project.
import random
import time

from inspyred import ec


def GA():
    """Starts the optimization with the GA algorithm."""

    begin = time.time()

    # initialize random generator with system time
    rand = random.Random()
    rand.seed()

    # original start individual of the input data
    global start_individual

    # generate the original start individual from the input data
    # and return it including the non static land use indices
    start_individual, nonstatic_elements = generate_genom(
        max_range, file_HRU, cfg.mapConfig.file_ASCII_map,
        cfg.mapConfig.file_transformation, cfg.mapConfig.file_ID_map,
        cfg.mapConfig.four_neighbours)

    if len(start_individual) == 0:
        msg = "Error: The generated start individual has no elements."
        WriteLogMsg(msg)
        raise SystemError("Error: The generated start individual has no elements.")
        close_window()

    # determine that 'Bounder' conditions of candidates are equal to
    # the integer values of the non static land use indices
    bounder_discrete = nonstatic_elements

    # initialize inspyred log files
    stats_file, individ_file = fh.init_inspyred_logfiles()

    # initialize and run GA
    ea = ec.GA(rand)

    # public attributes
    # GA is predefined with rank_selection
    if cfg.ea.selector != 'rank_selection':
        exec("%s%s" % ('ea.selector = ', fh.preparing_attribute('selector', cfg.ea.selector)))
        msg = 'Selector of the optimization algorithm changed to: %s' % cfg.ea.selector
        WriteLogMsg(msg)

    # GA is predefined with generational_replacement
    if cfg.ea.replacer != 'generational_replacement':
        exec("%s%s" % ('ea.replacer = ', fh.preparing_attribute('replacer', cfg.ea.replacer)))
        msg = 'Replacer of the optimization algorithm changed to: %s' % cfg.ea.replacer
        WriteLogMsg(msg)

    # specify how the new candidates should be varied
    # GA is predefined with n_point_crossover,bit_flip_mutation as variators
    if cfg.ea.variator not in ('n_point_crossover,bit_flip_mutation', 'bit_flip_mutation,n_point_crossover'):
        exec("%s%s" % ('ea.variator = ', fh.preparing_attribute('variator', cfg.ea.variator)))
        msg = 'Variator of the optimization algorithm changed to: %s' % cfg.ea.variator
        WriteLogMsg(msg)

    # GA is predefined with num_selected = pop_size
    if cfg.ea.num_selected != cfg.ea.pop_size:
        msg = 'Num_selected of the optimization algorithm changed to: %s' % cfg.ea.num_selected
        WriteLogMsg(msg)

    exec("%s%s" % ('ea.migrator = ', fh.preparing_attribute('migrator', cfg.ea.migrator)))

    exec("%s%s" % ('ea.archiver = ', fh.preparing_attribute('archiver', cfg.ea.archiver)))
    if cfg.ea.archiver != 'best_archiver':
        msg = 'Archiver of the optimization algorithm changed to: %s' % cfg.ea.archiver
        WriteLogMsg(msg)

    exec("%s%s" % ('ea.observer = ', fh.preparing_attribute('observer', cfg.ea.observer)))

    # specify when the optimization should terminate
    exec("%s%s" % ('ea.terminator = ', fh.preparing_attribute('terminator', cfg.ea.terminator)))

    # run the optimization; when finished, final_pop holds the results
    final_pop = ea.evolve(
        # generator creates the initial candidate parameters
        generator=generate_parameter,
        # evaluate is the function that starts the external models and
        # returns their results to the optimization algorithm
        evaluator=evaluate,
        # define population size
        pop_size=cfg.ea.pop_size,
        # maximize or minimize the problem
        maximize=cfg.ea.maximize,
        # bound the parameters to an interval;
        # in this case choose integer values between 1 and max_range
        bounder=ec.DiscreteBounder(bounder_discrete),
        # minimum population diversity allowed (when using diversity_termination, default 0.001)
        min_diversity=cfg.ea.min_diversity,
        # maximum number of evaluations (default pop_size)
        max_evaluations=cfg.ea.max_evaluations,
        # maximum number of generations
        max_generations=cfg.ea.max_generations,
        # number of elites to consider (default 0)
        num_elites=cfg.ea.num_elites,
        # number of individuals to be selected (default NSGA2 pop_size)
        num_selected=cfg.ea.num_selected,
        # tournament size (default NSGA2 2)
        tournament_size=cfg.ea.tournament_size,
        # the rate at which crossover is performed (default 1.0)
        crossover_rate=cfg.ea.crossover_rate,
        # mutation rate
        mutation_rate=cfg.ea.mutation_rate,
        # number of crossover points used (default 1)
        num_crossover_points=cfg.ea.num_crossover_points,
        # a positive integer representing the number of
        # closest solutions to consider as a "crowd" (default 2)
        crowding_distance=cfg.ea.crowding_distance,
        # statistics file
        statistics_file=stats_file,
        # individuals file
        individuals_file=individ_file)

    # read out the best individuals
    final_arc = ea.archive

    # for constrained_tournament_selection: create a copy of final_arc with only
    # feasible individuals (for the csv file with the best feasible solutions)
    if 'constrained_tournament_selection' in cfg.ea.selector:
        final_arc_feasible = []

    end = time.time()
    WriteLogMsg("The optimization process needed %d seconds." % (end - begin))

    msg = 'Best Solutions: \n'
    WriteLogMsg(msg)

    # save the maps as ascii files in the output folder
    f_count = 1
    for f in final_arc:
        # for constrained_tournament_selection: log whether the individual is infeasible
        # and copy feasible solutions into final_arc_feasible
        if 'constrained_tournament_selection' in cfg.ea.selector:
            if individual_filter(f.candidate) == False:
                WriteLogMsg("(infeasible) %s" % f)
                # save the map as ascii file in output folder
                if file_HRU == 'None' or (file_HRU != 'None' and cfg.mapConfig.file_ID_map != 'None'):
                    transform_individual_ascii_map(f.candidate, False, f_count, None, None, None, False)
            else:
                WriteLogMsg("%s" % f)
                # save the map as ascii file in output folder
                if file_HRU == 'None' or (file_HRU != 'None' and cfg.mapConfig.file_ID_map != 'None'):
                    transform_individual_ascii_map(f.candidate, False, f_count)
                final_arc_feasible.append(f)
        else:
            WriteLogMsg("%s" % f)
            # save the map as ascii file in output folder
            if file_HRU == 'None' or (file_HRU != 'None' and cfg.mapConfig.file_ID_map != 'None'):
                transform_individual_ascii_map(f.candidate, False, f_count)
        f_count += 1

    if cfg.ea.maximize == 'True':
        if 'constrained_tournament_selection' in cfg.ea.selector and individual_filter(f.candidate) == False:
            WriteLogMsg("\nFinal infeasible individual: %s, [%f]" % (max(final_pop).candidate, max(final_pop).fitness))
        else:
            WriteLogMsg("\nFinal individual: %s, [%f]" % (max(final_pop).candidate, max(final_pop).fitness))
    else:
        if 'constrained_tournament_selection' in cfg.ea.selector and individual_filter(f.candidate) == False:
            WriteLogMsg("\nFinal infeasible individual: %s, [%f]" % (min(final_pop).candidate, min(final_pop).fitness))
        else:
            WriteLogMsg("\nFinal individual: %s, [%f]" % (min(final_pop).candidate, min(final_pop).fitness))

    # log the best solutions in a csv file
    fh.save_best_solutions(final_arc, 1)

    # for constrained_tournament_selection: log the best feasible solutions in a csv file
    if 'constrained_tournament_selection' in cfg.ea.selector:
        fh.save_best_solutions(final_arc_feasible, 1)
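# GA() above constrains candidates with ec.DiscreteBounder(nonstatic_elements),
# which snaps every gene to the closest value in a list of allowed integers.
# A small self-contained illustration with made-up land-use indices:
from inspyred import ec

allowed_indices = [1, 2, 3, 4, 5]
bounder = ec.DiscreteBounder(allowed_indices)
print(bounder([0.4, 2.6, 7.3], args={}))   # -> [1, 3, 5]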