def train(self, optimizer='pso'):
    """Train the Kriging hyperparameters (theta and p) with a global search
    (PSO or GA) followed by a local SLSQP polish.

    :param optimizer: 'pso' for Particle Swarm Optimization, 'ga' for a GA.
    """
    self.updateData()
    # self.updateModel()
    # Bounds for the k theta values followed by the k p values.
    lowerBound = [self.thetamin] * self.k + [self.pmin] * self.k
    upperBound = [self.thetamax] * self.k + [self.pmax] * self.k
    rand = Random()
    rand.seed(int(time()))
    # BUG FIX: the original compared strings with `is`, which tests object
    # identity (an implementation detail of string interning), not equality.
    if optimizer == 'pso':
        ea = inspyred.swarm.PSO(Random())
        ea.terminator = self.no_improvement_termination
        ea.topology = inspyred.swarm.topologies.ring_topology
        # ea.observer = inspyred.ec.observers.stats_observer
        final_pop = ea.evolve(generator=self.generate_population,
                              evaluator=self.fittingObjective,
                              pop_size=300,
                              maximize=False,
                              bounder=ec.Bounder(lowerBound, upperBound),
                              max_evaluations=30000,
                              neighborhood_size=20,
                              num_inputs=self.k)
        # Sort so the best individual is at index 0.
        final_pop.sort(reverse=True)
    elif optimizer == 'ga':
        ea = inspyred.ec.GA(Random())
        ea.terminator = self.no_improvement_termination
        final_pop = ea.evolve(generator=self.generate_population,
                              evaluator=self.fittingObjective,
                              pop_size=300,
                              maximize=False,
                              bounder=ec.Bounder(lowerBound, upperBound),
                              max_evaluations=30000,
                              num_elites=10,
                              mutation_rate=.05)
    # Polish candidates locally with SLSQP; keep the first one whose
    # hyperparameters let the model update without numerical failure.
    for entry in final_pop:
        newValues = entry.candidate
        preLOP = copy.deepcopy(newValues)
        locOP_bounds = []
        for i in range(self.k):
            locOP_bounds.append([self.thetamin, self.thetamax])
        for i in range(self.k):
            locOP_bounds.append([self.pmin, self.pmax])
        lopResults = minimize(self.fittingObjective_local, newValues,
                              method='SLSQP', bounds=locOP_bounds,
                              options={'disp': False})
        newValues = lopResults['x']
        for i in range(self.k):
            self.theta[i] = newValues[i]
        for i in range(self.k):
            self.pl[i] = newValues[i + self.k]
        try:
            self.updateModel()
        except Exception:
            # Ill-conditioned hyperparameters; try the next candidate.
            pass
        else:
            break
def infill(self, points, method='error', optimizer='PSO'):
    """Locate `points` new sample coordinates by globally optimizing an
    infill criterion over the unit hypercube.

    :param points: number of infill points to compute.
    :param method: 'ei' for expected improvement, anything else for MSE.
    :param optimizer: 'PSO' for particle swarm, 'ga2' for NSGA-II.
    :return: array of shape (points, k) with the proposed coordinates.
    """
    proposals = np.zeros([points, self.k], dtype=float)
    for idx in range(points):
        seeded = Random()
        seeded.seed(int(time()))
        # Choose the infill objective once per iteration.
        if method == 'ei':
            objective = self.infill_objective_ei
        else:
            objective = self.infill_objective_mse
        if optimizer == 'PSO':
            # Particle swarm over [0, 1]^k with a ring topology.
            swarm = inspyred.swarm.PSO(Random())
            swarm.terminator = self.no_improvement_termination
            swarm.topology = inspyred.swarm.topologies.ring_topology
            final_pop = swarm.evolve(generator=self.generate_population,
                                     evaluator=objective,
                                     pop_size=200,
                                     maximize=False,
                                     bounder=ec.Bounder([0.] * self.k, [1.] * self.k),
                                     max_evaluations=20000,
                                     neighborhood_size=40,
                                     num_inputs=self.k)
            final_pop.sort(reverse=True)
        elif optimizer == 'ga2':
            # NSGA-II alternative with a generation cap instead of an
            # evaluation cap.
            nsga = inspyred.ec.emo.NSGA2(Random())
            nsga.terminator = self.no_improvement_termination
            final_pop = nsga.evolve(generator=self.generate_population,
                                    evaluator=objective,
                                    pop_size=100,
                                    maximize=False,
                                    bounder=ec.Bounder([0.] * self.k, [1.] * self.k),
                                    max_generations=50,
                                    num_elites=10,
                                    mutation_rate=0.1)
            final_pop.sort(reverse=True)
        # Best individual sits at index 0 after the descending sort.
        proposals[idx][:] = final_pop[0].candidate
    return proposals
def __init__(self, dimensions=2, objectives=2, alpha=100):
    """Set up the benchmark: `dimensions` decision variables in [0, 1],
    `objectives` objectives (minimization), with shape parameter `alpha`.

    :raises ValueError: if dimensions < objectives.
    """
    Benchmark.__init__(self, dimensions, objectives)
    if dimensions < objectives:
        raise ValueError('dimensions ({0}) must be greater than or equal to objectives ({1})'.format(dimensions, objectives))
    lows = [0.0] * self.dimensions
    highs = [1.0] * self.dimensions
    self.bounder = ec.Bounder(lows, highs)
    self.maximize = False
    self.alpha = alpha
def __init__(self, dimensions=2):
    """Two-objective benchmark over [-4, 4]^dimensions (minimization)."""
    self.dimensions = dimensions
    self.nmb_functions = 2
    Benchmark.__init__(self, self.dimensions, self.nmb_functions)
    # BUG FIX: was `self.bound5er` (typo), which left the `bounder`
    # attribute that the EA framework reads unset on this benchmark.
    self.bounder = ec.Bounder([-4] * self.dimensions, [4] * self.dimensions)
    self.maximize = False
def run(self, dataContainer, model='intext', genomeSize=20):
    """Run the configured EA on `dataContainer` and return the best candidate.

    :param dataContainer: supplies `.points` (data) and `.scale` to the evaluator.
    :param model: 'intext' uses the biased generator/log-probability pair;
        anything else uses the unbiased pair.
    :param genomeSize: genome dimension passed through as `dim`.
    :return: the candidate of the fittest individual.
    """
    # Select generator/evaluator pair according to the requested model.
    if model == 'intext':
        genFun = self.generate
        evalFun = evaluators.evaluator(self.logp)
    else:
        genFun = self.generateUnbias
        evalFun = evaluators.evaluator(self.logpUnbias)
    # Candidates are bounded below by 0 with no upper bound.
    final_pop = self.ea.evolve(generator=genFun,
                               evaluator=evalFun,
                               pop_size=20,
                               bounder=ec.Bounder(0, None),
                               maximize=False,
                               crossover_rate=0.9,
                               mutation_rate=0.2,
                               data=dataContainer.points,
                               scale=dataContainer.scale,
                               dim=genomeSize,
                               tolerance=0.00001,
                               max_evaluations=4000000)
    # max() picks the fittest individual; remembered on the instance.
    ind = max(final_pop)
    self.best = ind
    # Python 2 print statement.
    print ind.fitness
    return ind.candidate
def SetBoundaries(self, bounds):
    """
    Stores the bounds of the parameters and creates a ``bounder`` object
    which bounds every parameter into the range of 0-1 since the
    algorithms are using normalized values.

    :param bounds: ``list`` containing the minimum and maximum values.
    """
    self.min_max = bounds
    # Normalized search space: one [0, 1] slot per parameter.
    zeros = [0] * len(self.min_max[0])
    ones = [1] * len(self.min_max[1])
    self.bounder = ec.Bounder(zeros, ones)
def infill(self, points, method='error', addPoint=True): ''' The function identifies where new points are needed in the model. :param points: The number of points to add to the model. Multiple points are added via imputation. :param method: Two choices: EI (for expected improvement) or Error (for general error reduction) :return: An array of coordinates identified by the infill ''' # We'll be making non-permanent modifications to self.X and self.y here, so lets make a copy just in case initX = np.copy(self.X) inity = np.copy(self.y) ##得到初始x,y # This array will hold the new values we add returnValues = np.zeros([points, self.k], dtype=float) ##建立一个加点零阵 for i in range(points): rand = Random() #随机产生一个数 rand.seed(int(time())) ea = inspyred.swarm.PSO(Random()) ea.terminator = self.no_improvement_termination ea.topology = inspyred.swarm.topologies.ring_topology if method == 'ei': evaluator = self.infill_objective_ei else: evaluator = self.infill_objective_mse final_pop = ea.evolve(generator=self.generate_population, evaluator=evaluator, pop_size=155, maximize=False, bounder=ec.Bounder([0] * self.k, [1] * self.k), max_evaluations=20000, neighborhood_size=30, num_inputs=self.k) final_pop.sort(reverse=True) ##从大大小排列 #print final_pop newpoint = final_pop[0].candidate returnValues[i][:] = newpoint #print returnValues #if addPoint:##函数里的参数为真时 则执行下列代码 #self.addPoint(returnValues, self.predict(returnValues))##把加的点加到初始x中 self.Xc = np.copy(initX) self.y = np.copy(inity) self.n = len(self.Xc) self.updateData() while True: try: self.updateModel() except: self.train() else: break return returnValues
def train(self):
    '''
    The function trains the hyperparameters of the Kriging model.
    :param optimizer: Two optimizers are implemented, a Particle Swarm Optimizer or a GA
    '''
    self.updateData()
    # Search bounds: k theta values followed by k p values.
    theta_p_low = [self.thetamin] * self.k + [self.pmin] * self.k
    theta_p_high = [self.thetamax] * self.k + [self.pmax] * self.k
    # Create a random seed for our optimizer to use
    rand = Random()
    rand.seed(int(time()))
    swarm = inspyred.swarm.PSO(Random())
    swarm.terminator = self.no_improvement_termination
    swarm.topology = inspyred.swarm.topologies.ring_topology
    final_pop = swarm.evolve(generator=self.generate_population,
                             evaluator=self.fittingObjective,
                             pop_size=300,
                             maximize=False,
                             bounder=ec.Bounder(theta_p_low, theta_p_high),
                             max_evaluations=30000,
                             neighborhood_size=20,
                             num_inputs=self.k)
    final_pop.sort(reverse=True)
    # Polish every candidate with a local SLSQP run; each pass overwrites
    # theta/pl, so the last candidate's polished values remain in effect.
    for entry in final_pop:
        candidate_values = entry.candidate
        before_local_opt = copy.deepcopy(candidate_values)
        local_bounds = ([[self.thetamin, self.thetamax] for _ in range(self.k)] +
                        [[self.pmin, self.pmax] for _ in range(self.k)])
        local_result = minimize(self.fittingObjective_local,
                                candidate_values,
                                method='SLSQP',
                                bounds=local_bounds,
                                options={'disp': False})
        candidate_values = local_result['x']
        for j in range(self.k):
            self.theta[j] = candidate_values[j]
        for j in range(self.k):
            self.pl[j] = candidate_values[j + self.k]
def main():
    """Run a GA over 12 decision variables, report the best solution, and
    print the elapsed wall-clock time.
    """
    inicio = time()
    rand = Random()
    rand.seed(int(time()))
    ea = ec.GA(rand)
    ea.selector = ec.selectors.tournament_selection
    ea.variator = [
        ec.variators.uniform_crossover,
        ec.variators.gaussian_mutation
    ]
    ea.replacer = ec.replacers.steady_state_replacement
    ea.terminator = terminators.generation_termination
    # ea.observer = [ec.observers.stats_observer, ec.observers.file_observer]
    # FIX: the output files were opened inline and never closed (leaked
    # handles / possibly unflushed data); a with-block closes them reliably.
    with open("aviao_stats.csv", 'w') as stats_file, \
            open("aviao_individuals.csv", 'w') as individuals_file:
        final_pop = ea.evolve(generator=generate_,
                              evaluator=evaluate_,
                              pop_size=5000,
                              maximize=True,
                              bounder=ec.Bounder(0, 53000),
                              max_generations=20000,
                              num_inputs=12,
                              crossover_rate=1.0,
                              num_crossover_points=1,
                              mutation_rate=0.25,
                              num_elites=1,
                              num_selected=5,
                              tournament_size=2,
                              statistics_file=stats_file,
                              individuals_file=individuals_file)
    final_pop.sort(reverse=True)
    print(final_pop[0])
    # The best candidate holds the 12 decision variables; unpack instead of
    # writing out candidate[0] .. candidate[11] twice.
    best = final_pop[0].candidate
    perform_fitness(*best[:12])
    solution_evaluation(*best[:12])
    fim = time()
    # solution_evaluation(final_pop[0].candidate[0], final_pop[0].candidate[1])
    print("\nTempo de processamento: ", fim - inicio)
def wrapper(self, candidates, args):
    """
    Converts the ``ndarray`` object into a ``list`` and passes it to the fitness function.

    :param candidates: the ``ndarray`` object
    :param args: optional parameters to be passed to the fitness function
    :return: the return value of the fitness function
    """
    as_list = ndarray.tolist(candidates)
    # Bound each normalized parameter into [0, 1] before evaluation.
    unit_bounder = ec.Bounder([0] * len(self.min_max[0]), [1] * len(self.min_max[1]))
    bounded = unit_bounder(as_list, args)
    return self.ffun([bounded], args)[0]
def EA_location_selection(acquisition, X_sample, Y_sample, gpr, bounds, n_gens=10):
    '''
    Propose the next sampling position by optimizing the acquisition
    function with an evolutionary strategy.

    :param acquisition: acquisition function/objective function
    :param X_sample: sample location
    :param Y_sample: sample value
    :param gpr: regressor
    :param bounds: currently unused; candidates are bounded to [0, 1]
    :param n_gens: evaluation budget for the ES
    :return: the best individual found (minimizer of the negative acquisition)
    '''
    dim = X_sample.shape[1]
    # (removed dead locals `min_value` / `min_x` -- they were never used)

    # maximizing EI is equivalent to minimizing negative EI
    def min_obj(X, args):
        X = np.atleast_2d(X)
        fitness = []
        for x in X:
            fitness.append(
                -acquisition(x.reshape(-1, dim), X_sample, Y_sample, gpr))
        return fitness

    # Fixed seed keeps the search reproducible across calls.
    rand = Random()
    rand.seed(1)
    es = ec.ES(rand)
    es.terminator = terminators.evaluation_termination
    final_pop = es.evolve(generator=generate_EI,
                          evaluator=min_obj,
                          pop_size=10,
                          maximize=False,
                          bounder=ec.Bounder(0, 1),
                          max_evaluations=n_gens,
                          mutation_rate=0.25,
                          num_inputs=1)
    # Ascending sort: the minimizer ends up at index 0.
    final_pop.sort()
    print(final_pop)
    print(final_pop[0])
    return final_pop[0]
def infill(self, points, method='error'):
    """Identify `points` new sample locations, temporarily adding each to the
    model (imputation) so successive points spread out, then restore the
    original data and refit.

    :param points: number of infill points to compute.
    :param method: 'ei' for expected improvement, anything else for MSE.
    :return: array of shape (points, k) with the proposed coordinates.
    """
    # We'll be making non-permanent modifications to self.X and self.y here, so lets make a copy just in case
    initX = np.copy(self.X)
    inity = np.copy(self.y)
    # This array will hold the new values we add
    returnValues = np.zeros([points, self.k], dtype=float)
    for i in range(points):
        rand = Random()
        rand.seed(int(time()))
        # Particle swarm over the unit hypercube for the next point.
        ea = inspyred.swarm.PSO(Random())
        ea.terminator = self.no_improvement_termination
        ea.topology = inspyred.swarm.topologies.ring_topology
        if method == 'ei':
            evaluator = self.infill_objective_ei
        else:
            evaluator = self.infill_objective_mse
        final_pop = ea.evolve(generator=self.generate_population,
                              evaluator=evaluator,
                              pop_size=155,
                              maximize=False,
                              bounder=ec.Bounder([0] * self.k, [1] * self.k),
                              max_evaluations=20000,
                              neighborhood_size=30,
                              num_inputs=self.k)
        final_pop.sort(reverse=True)  # best individual first
        newpoint = final_pop[0].candidate
        returnValues[i][:] = newpoint
        # Temporarily impute the predicted value at the new point so the
        # next iteration avoids re-proposing the same location.
        self.addPoint(returnValues[i], self.predict(returnValues[i]), norm=True)
    # Restore the original training data and refit.
    self.X = np.copy(initX)
    self.y = np.copy(inity)
    self.n = len(self.X)
    self.updateData()
    # Retrain until the model update succeeds.
    while True:
        try:
            self.updateModel()
        except:
            self.train()
        else:
            break
    return returnValues
def __init__(self, available_ingredients, products_price, products_consumption):
    """Maximization problem: choose production quantities per product,
    each bounded above by the most-limiting available ingredient.
    """
    self.maximize = True
    self.available_ingredients = available_ingredients
    self.products_price = products_price
    self.products_consumption = products_consumption
    # Lower bound 0 per product; upper bound is how many units the
    # scarcest required ingredient allows.
    lower = [0 for _ in self.products_price.keys()]
    upper = [np.min(self.available_ingredients / self.products_consumption[product])
             for product in self.products_price.keys()]
    self.bounder = ec.Bounder(lower_bound=lower, upper_bound=upper)
def __init__(self, nexus, obj = 'krig'):
    """Wire up surrogate predict functions from `nexus.surrogate_data` as the
    two objective callables, selected by the `obj` keyword
    ('krig' family, 'cokrig' family, or 'mix').

    Note: Python 2 source (print statements).
    """
    optprob = nexus.optimization_problem
    self.inputs = optprob.inputs
    self.bounds = self.make_bounds()
    # NOTE(review): lower_bound is taken from bounds[1] and upper_bound from
    # bounds[0] -- verify make_bounds() really returns (upper, lower) in
    # that order, otherwise these two lines are swapped.
    self.lower_bound = self.bounds[1]
    self.upper_bound = self.bounds[0]
    self.bounder = ec.Bounder(self.lower_bound, self.upper_bound)
    surr = nexus.surrogate_data
    # if the models are not empty, make kriging_model.predict(inps) the funct
    if obj.lower() in ['k', 'krig', 'krige', 'kriging']:
        # Plain kriging: objective 1 from model0, objective 2 from model1.
        if not surr.model0 == None:
            self.function_1 = surr.model0.predict
        else:
            self.function_1 = None
        if not surr.model1 == None:
            self.function_2 = surr.model1.predict
        else:
            self.function_2 = None
    elif obj.lower() in ['ck', 'cokrig', 'co-krige', 'co-kriging', 'co kriging', 'cokriging', 'c*k']:
        # Co-kriging: objective 1 from the co-kriging model; objective 2
        # falls back from model0 to model1 to None.
        if not surr.modelck0 == None:
            self.function_1 = surr.modelck0.predict
            print 'cokrig!'
        else:
            self.function_1 = None
        if not surr.model0 == None:
            self.function_2 = surr.model0.predict
            print 'model0'
        elif not surr.model1 == None:
            self.function_2 = surr.model1.predict
        else:
            self.function_2 = None
    elif obj.lower() in ['mix', 'other', 'borked', '):']:
        # Mixed: objective 1 from co-kriging, objective 2 from plain kriging.
        if not surr.modelck0 == None:
            self.function_1 = surr.modelck0.predict
        else:
            self.function_1 = None
        if not surr.model1 == None:
            self.function_2 = surr.model1.predict
        else:
            self.function_2 = None
def __init__(self, model, input_params, silent=False):
    """Minimization problem over `model`'s hyperparameters.

    :param model: estimator whose parameters are tuned.
    :param input_params: mapping of parameter name -> (min, max) interval.
    :param silent: suppress progress output when True.
    """
    self.dimensions = len(input_params)
    self.model = model
    self.params = input_params
    self.param_names = list(input_params.keys())
    self.maximize = False
    self.silent = silent
    # Filled in later by the training pipeline.
    self.fit_func = None
    self.X_train = None
    self.X_test = None
    self.y_train = None
    self.y_test = None
    # One (min, max) pair per parameter, in dict order.
    mins = [interval[0] for interval in input_params.values()]
    maxs = [interval[1] for interval in input_params.values()]
    self.bounder = ec.Bounder(lower_bound=mins, upper_bound=maxs)
def main():
    """Run a GA over 2 decision variables and report the best solution."""
    rand = Random()
    rand.seed(int(time()))
    ea = ec.GA(rand)
    ea.selector = ec.selectors.tournament_selection
    ea.variator = [
        ec.variators.uniform_crossover,
        ec.variators.gaussian_mutation
    ]
    ea.replacer = ec.replacers.steady_state_replacement
    ea.terminator = terminators.generation_termination
    ea.observer = [ec.observers.stats_observer, ec.observers.file_observer]
    # BUG FIX: `num_imputs` and `crossover_rae` were typos. evolve() takes
    # **kwargs, so the misspelled names were silently ignored and the
    # intended values (num_inputs=2, crossover_rate=1.0) never applied.
    # FIX: output files are now closed reliably via a with-block.
    with open("garrafas_stats.csv", "w") as stats_file, \
            open("garrafas_individuais.csv", "w") as individuals_file:
        final_pop = ea.evolve(generator=generate_,
                              evaluator=evaluate_,
                              pop_size=10000,
                              maximize=True,
                              bounder=ec.Bounder(0, 800),
                              max_generations=10000,
                              num_inputs=2,
                              crossover_rate=1.0,
                              num_crossover_points=1,
                              mutation_rate=0.15,
                              num_elites=1,
                              num_selected=2,
                              tournament_size=2,
                              statistics_file=stats_file,
                              individuals_file=individuals_file)
    final_pop.sort(reverse=True)
    print(final_pop[0])
    perform_fitness(final_pop[0].candidate[0], final_pop[0].candidate[1])
    solution_evaluation(final_pop[0].candidate[0], final_pop[0].candidate[1])
def __init__(self, dimensions=2):
    """Minimization problem over [0, 1]^dimensions backed by a real-time
    contingencies sampler.
    """
    self.dimensions = dimensions
    self.maximize = False
    self.bounder = ec.Bounder([0] * dimensions, [1] * dimensions)
    # Real-time contingencies module used for sampling.
    self.sample = RTC()
def train_expensive(self, optimizer='pso'): ''' The function trains the hyperparameters of the expensive Kriging model. :param optimizer: Two optimizers are implemented, a Particle Swarm Optimizer or a GA ''' # here we get the value of yc_xe for enu, entry in enumerate(self.Xe): if entry in self.Xc: # print('find this value {} in Xc!'.format(entry)) index = np.argwhere(self.Xc == entry)[0,0] self.yc_xe.append(self.yc[index]) else: print('find the value {} with kriging'.format(entry)) y_predict = self.predict_cheap(entry) self.yc_xe.append(y_predict) self.yc_xe = np.atleast_2d(self.yc_xe).T # transfer from list to np.array # then make sure our data is up-to-date self.updateData() # Establish the bounds for optimization for theta and p values lowerBound = [self.thetamin] * self.k + [self.pmin] * self.k + [self.rhomin] upperBound = [self.thetamax] * self.k + [self.pmax] * self.k + [self.rhomax] # Create a random seed for our optimizer to use rand = Random() rand.seed(int(time())) # If the optimizer option is PSO, run the PSO algorithm if optimizer == 'pso': ea = inspyred.swarm.PSO(Random()) ea.terminator = self.no_improvement_termination ea.topology = inspyred.swarm.topologies.ring_topology # ea.observer = inspyred.ec.observers.stats_observer final_pop = ea.evolve(generator=self.generate_population, evaluator=self.fittingObjective, pop_size=300, maximize=False, bounder=ec.Bounder(lowerBound, upperBound), max_evaluations=50000, neighborhood_size=30, num_inputs=self.k) # Sort and print the best individual, who will be at index 0. 
final_pop.sort(reverse=True) # If not using a PSO search, run the GA elif optimizer == 'ga2': ea = inspyred.ec.emo.NSGA2(Random()) ea.terminator = self.no_improvement_termination final_pop = ea.evolve(generator=self.generate_population, evaluator=self.fittingObjective, pop_size=10, maximize=False, bounder=ec.Bounder(lowerBound, upperBound), max_generations=50, num_elites=10, mutation_rate=0.1) # This code updates the model with the hyperparameters found in the global search for entry in final_pop: newValues = entry.candidate # newValues = [0.1, 0.1, 2, 2, 1] preLOP = copy.deepcopy(newValues) locOP_bounds = [] for i in range(self.k): locOP_bounds.append([self.thetamin, self.thetamax]) for i in range(self.k): locOP_bounds.append([self.pmin, self.pmax]) locOP_bounds.append([self.rhomin,self.rhomax]) # Let's quickly double check that we're at the optimal value by running a quick local optimizaiton lopResults = minimize(self.fittingObjective_local, newValues, method='SLSQP', bounds=locOP_bounds, options={'disp': False}) # fun = lopResults['fun'] newValues = lopResults['x'] # Finally, set our new theta and pl values and update the model again for i in range(self.k): self.thetad[i] = newValues[i] for i in range(self.k): self.pd[i] = newValues[i + self.k] self.rho = newValues[self.k + self.k] try: self.updateModel() except: pass else: break print('succeed to train expensive kriging model') # set other paras # self.thetad = 0.1*np.ones(self.k) # self.rho = 1.25 self.updateModel() self.neglnlikelihood() self._getMatrixC() print("mu = {}".format(self.mu)) print("thetad, pd, rho is {}".format([self.thetad, self.pd, self.rho]))
def run_optimization(optimProbConf, resultFile=None, isMultiProc=False, population=None):
    """
    Function to perform the optimization using the integer set representation to the candidates solutions.

    Args:
        optimProbConf (OptimProblemConfiguration): This object contains all information to perform the strain optimization task.
        resultFile (str): The path file to store all the results obtained during the optimization (default results are not saved into a file)
        isMultiProc (bool): True, if the user wants parallelize the population evaluation. (default False)
        population: optional seed individuals for the initial population.

    Returns list: the individuals of the last population.
    """
    rand = Random()
    ea = ec.EvolutionaryComputation(rand)
    ea.selector = ec.selectors.tournament_selection
    ea.replacer = replacers.new_candidates_no_duplicates_replacement
    ea.terminator = ec.terminators.generation_termination
    if resultFile is not None:
        ea.observer = observers.save_all_results
    # Pick representation-specific bounds, generator and variators based on
    # the optimization problem type.
    if optimProbConf.type in [
            optimType.REACTION_KO, optimType.GENE_KO, optimType.MEDIUM,
            optimType.PROTEIN_KO
    ]:
        # int set representation
        bounds = [0, len(optimProbConf.get_decoder().ids) - 1]
        myGenerator = generators.generator_single_int_set
        ea.variator = [
            variators.uniform_crossover, variators.grow_mutation_intSetRep,
            variators.shrink_mutation, variators.single_mutation_intSetRep
        ]
    elif optimProbConf.type == optimType.MEDIUM_REACTION_KO:
        # tuple of int sets: (drains, reactions)
        bounds = [[0, 0], [
            len(optimProbConf.get_decoder().drains) - 1,
            len(optimProbConf.get_decoder().reactions) - 1
        ]]
        myGenerator = generators.generator_tuple_int_set
        ea.variator = [
            variators.uniform_crossover_tuple,
            variators.grow_mutation_tuple_intSetRep,
            variators.shrink_mutation_tuple,
            variators.single_mutation_tuple_intSetRep
        ]
    else:
        # tuple set representation: (id, expression level)
        bounds = [[0, 0], [
            len(optimProbConf.get_decoder().ids) - 1,
            len(optimProbConf.get_decoder().levels) - 1
        ]]
        myGenerator = generators.generator_single_int_tuple
        ea.variator = [
            variators.uniform_crossover_intTupleRep,
            variators.grow_mutation_intTupleRep, variators.shrink_mutation,
            variators.single_mutation_intTupleRep
        ]
    config = optimProbConf.get_ea_configurations()
    if isMultiProc:
        # Use half the available CPUs; fall back to the configured count when
        # cpu_count() is unavailable on this platform.
        try:
            nprocs = int(cpu_count() / 2)
        except NotImplementedError:
            nprocs = config.NUM_CPUS
        print("number of proc", nprocs)
        final_pop = ea.evolve(generator=myGenerator,
                              evaluator=evaluators.parallel_evaluation_mp,
                              mp_evaluator=evaluators.evaluator,
                              mp_nprocs=nprocs,
                              pop_size=config.POPULATION_SIZE,
                              bounder=ec.Bounder(bounds[0], bounds[1]),
                              max_generations=config.MAX_GENERATIONS,
                              candidate_max_size=config.MAX_CANDIDATE_SIZE,
                              num_elites=config.NUM_ELITES,
                              num_selected=config.POPULATION_SELECTED_SIZE,
                              crossover_rate=config.CROSSOVER_RATE,
                              mutation_rate=config.MUTATION_RATE,
                              new_candidates_rate=config.NEW_CANDIDATES_RATE,
                              configuration=optimProbConf,
                              results_file=resultFile,
                              tournament_size=config.TOURNAMENT_SIZE,
                              seeds=population)
    else:
        final_pop = ea.evolve(generator=myGenerator,
                              evaluator=evaluators.evaluator,
                              bounder=ec.Bounder(bounds[0], bounds[1]),
                              pop_size=config.POPULATION_SIZE,
                              max_generations=config.MAX_GENERATIONS,
                              candidate_max_size=config.MAX_CANDIDATE_SIZE,
                              num_elites=config.NUM_ELITES,
                              num_selected=config.POPULATION_SELECTED_SIZE,
                              crossover_rate=config.CROSSOVER_RATE,
                              mutation_rate=config.MUTATION_RATE,
                              new_candidates_rate=config.NEW_CANDIDATES_RATE,
                              configuration=optimProbConf,
                              results_file=resultFile,
                              tournament_size=config.TOURNAMENT_SIZE,
                              seeds=population)
    return final_pop
stats.write('#gen pop-size worst best median average std-deviation\n') individual.write('#gen #ind fitness [candidate]\n') return stats, individual ''' #call evolution iterator final_pop = my_ec.evolve( generator= generate_netparams, # assign design parameter generator to iterator parameter generator evaluator= evaluate_netparams, # assign fitness function to iterator evaluator pop_size= 100, #1000 # original 10 # each generation of parameter sets will consist of 10 individuals maximize=False, # best fitness corresponds to minimum value bounder=ec.Bounder( minParamValues, maxParamValues ), # boundaries for parameter set ([probability, weight, delay]) max_evaluations= 500, #5000 # original 50 # evolutionary algorithm termination at 50 evaluations num_selected= 50, #1000 # original 10 # number of generated parameter sets to be selected for next generation mutation_rate=0.2, # original 0.2 # rate of mutation num_inputs=5, # len([na11a, na12, na13a, na16]) num_elites=1, #10 #statistics_file = stat_file, #indiduals_file = ind_file ) # 1 existing individual will survive to next generation if it has better fitness than an individual selected by the tournament selection #stat_file.close() #ind_file.close()
else: selected.append(tournament[cons.index(min(cons))]) return selected r = random.Random() myec = ec.EvolutionaryComputation(r) myec.selector = constrained_tournament_selection myec.variator = variators.gaussian_mutation myec.replacer = replacers.generational_replacement myec.terminator = terminators.evaluation_termination myec.observer = observers.stats_observer pop = myec.evolve(my_generator, my_evaluator, pop_size=100, bounder=ec.Bounder(-2, 2), num_selected=100, constraint_func=my_constraint_function, mutation_rate=0.5, max_evaluations=2000) import matplotlib.pyplot as plt import numpy x = [] y = [] c = [] pop.sort() num_feasible = len([p for p in pop if p.fitness >= 0]) feasible_count = 0 for i, p in enumerate(pop): x.append(p.candidate[0])
def __init__(self, dimensions=2): Benchmark.__init__(self, dimensions, 2) self.bounder = ec.Bounder([-5.0] * self.dimensions, [5.0] * self.dimensions) self.maximize = False
def run(self):
    """Run the configured batch of simulations.

    Dispatches on ``self.method``:
      * ``'grid'`` / ``'list'``: exhaustively explore parameter combinations,
        submitting one simulation job per combination through the backend
        named in ``self.runCfg['type']`` (``hpc_torque``, ``hpc_slurm``,
        ``mpi_direct`` or ``mpi_bulletin``).
      * ``'evol'``: evolutionary parameter optimization driven by inspyred;
        each generation's candidates are evaluated as parallel jobs, and the
        process terminates with ``sys.exit()`` when evolution completes.

    Side effects: creates ``self.saveFolder``, writes batch/cfg/netParams
    files and job scripts there, and submits external jobs.
    """
    # -------------------------------------------------------------------------------
    # Grid Search optimization
    # -------------------------------------------------------------------------------
    if self.method in ['grid', 'list']:
        # create saveFolder
        import os, glob
        try:
            os.mkdir(self.saveFolder)
        except OSError:
            # mkdir may fail because the folder already exists; only warn
            # when it truly could not be created
            if not os.path.exists(self.saveFolder):
                print ' Could not create', self.saveFolder
        # save Batch dict as json
        targetFile = self.saveFolder + '/' + self.batchLabel + '_batch.json'
        self.save(targetFile)
        # copy this batch script to folder
        targetFile = self.saveFolder + '/' + self.batchLabel + '_batchScript.py'
        os.system('cp ' + os.path.realpath(__file__) + ' ' + targetFile)
        # copy netParams source to folder
        netParamsSavePath = self.saveFolder + '/' + self.batchLabel + '_netParams.py'
        os.system('cp ' + self.netParamsFile + ' ' + netParamsSavePath)
        # import cfg
        cfgModuleName = os.path.basename(self.cfgFile).split('.')[0]
        cfgModule = imp.load_source(cfgModuleName, self.cfgFile)
        self.cfg = cfgModule.cfg
        self.cfg.checkErrors = False  # avoid error checking during batch
        # set initial cfg initCfg
        if len(self.initCfg) > 0:
            for paramLabel, paramVal in self.initCfg.iteritems():
                self.setCfgNestedParam(paramLabel, paramVal)
        # iterate over all param combinations
        if self.method == 'grid':
            groupedParams = False
            ungroupedParams = False
            for p in self.params:
                if 'group' not in p:
                    p['group'] = False
                    ungroupedParams = True
                elif p['group'] == True:
                    groupedParams = True
            if ungroupedParams:
                labelList, valuesList = zip(*[(p['label'], p['values']) for p in self.params if p['group'] == False])
            else:
                labelList = ()
                valuesList = ()
            # NOTE(review): this unconditional re-assignment overrides the
            # if/else just above and would raise if every param is grouped
            # (zip of an empty list) -- confirm whether it should only run
            # in the ungrouped case.
            labelList, valuesList = zip(*[(p['label'], p['values']) for p in self.params if p['group'] == False])
            valueCombinations = list(product(*(valuesList)))
            indexCombinations = list(product(*[range(len(x)) for x in valuesList]))
            if groupedParams:
                labelListGroup, valuesListGroup = zip(*[(p['label'], p['values']) for p in self.params if p['group'] == True])
                valueCombGroups = izip(*(valuesListGroup))
                indexCombGroups = izip(*[range(len(x)) for x in valuesListGroup])
                labelList = labelListGroup + labelList
            else:
                valueCombGroups = [(0,)]  # this is a hack -- improve!
                indexCombGroups = [(0,)]
        # NOTE(review): for method == 'list' the *CombGroups/*Combinations
        # variables are never defined here -- presumably set elsewhere;
        # verify before relying on the 'list' path.
        # if using pc bulletin board, initialize all workers
        if self.runCfg.get('type', None) == 'mpi_bulletin':
            for iworker in range(int(pc.nhost())):
                pc.runworker()
        #if 1:
        #for iComb, pComb in zip(indexCombinations, valueCombinations):
        for iCombG, pCombG in zip(indexCombGroups, valueCombGroups):
            for iCombNG, pCombNG in zip(indexCombinations, valueCombinations):
                if groupedParams:  # temporary hack - improve
                    iComb = iCombG + iCombNG
                    pComb = pCombG + pCombNG
                else:
                    iComb = iCombNG
                    pComb = pCombNG
                print iComb, pComb
                # push this combination's values into the cfg instance
                for i, paramVal in enumerate(pComb):
                    paramLabel = labelList[i]
                    self.setCfgNestedParam(paramLabel, paramVal)
                    print str(paramLabel) + ' = ' + str(paramVal)
                # set simLabel and jobName
                simLabel = self.batchLabel + ''.join([''.join('_' + str(i)) for i in iComb])
                jobName = self.saveFolder + '/' + simLabel
                # skip if output file already exists
                if self.runCfg.get('skip', False) and glob.glob(jobName + '.json'):
                    print 'Skipping job %s since output file already exists...' % (jobName)
                elif self.runCfg.get('skipCfg', False) and glob.glob(jobName + '_cfg.json'):
                    print 'Skipping job %s since cfg file already exists...' % (jobName)
                elif self.runCfg.get('skipCustom', None) and glob.glob(jobName + self.runCfg['skipCustom']):
                    print 'Skipping job %s since %s file already exists...' % (jobName, self.runCfg['skipCustom'])
                else:
                    # save simConfig json to saveFolder
                    self.cfg.simLabel = simLabel
                    self.cfg.saveFolder = self.saveFolder
                    cfgSavePath = self.saveFolder + '/' + simLabel + '_cfg.json'
                    self.cfg.save(cfgSavePath)
                    # hpc torque job submission
                    if self.runCfg.get('type', None) == 'hpc_torque':
                        # read params or set defaults
                        sleepInterval = self.runCfg.get('sleepInterval', 1)
                        sleep(sleepInterval)
                        nodes = self.runCfg.get('nodes', 1)
                        ppn = self.runCfg.get('ppn', 1)
                        script = self.runCfg.get('script', 'init.py')
                        mpiCommand = self.runCfg.get('mpiCommand', 'mpiexec')
                        walltime = self.runCfg.get('walltime', '00:30:00')
                        queueName = self.runCfg.get('queueName', 'default')
                        nodesppn = 'nodes=%d:ppn=%d' % (nodes, ppn)
                        custom = self.runCfg.get('custom', '')
                        numproc = nodes * ppn
                        command = '%s -np %d nrniv -python -mpi %s simConfig=%s netParams=%s' % (mpiCommand, numproc, script, cfgSavePath, netParamsSavePath)
                        jobString = """#!/bin/bash
#PBS -N %s
#PBS -l walltime=%s
#PBS -q %s
#PBS -l %s
#PBS -o %s.run
#PBS -e %s.err
%s
cd $PBS_O_WORKDIR
echo $PBS_O_WORKDIR
%s
""" % (jobName, walltime, queueName, nodesppn, jobName, jobName, custom, command)
                        # Send job_string to qsub
                        print 'Submitting job ', jobName
                        print jobString + '\n'
                        batchfile = '%s.pbs' % (jobName)
                        with open(batchfile, 'w') as text_file:
                            text_file.write("%s" % jobString)
                        proc = Popen(['qsub', batchfile], stderr=PIPE, stdout=PIPE)  # Open a pipe to the qsub command.
                        # NOTE(review): names look swapped (stdin<->stdout)
                        # and `input` shadows the builtin; values are unused.
                        (output, input) = (proc.stdin, proc.stdout)
                    # hpc torque job submission
                    elif self.runCfg.get('type', None) == 'hpc_slurm':
                        # read params or set defaults
                        sleepInterval = self.runCfg.get('sleepInterval', 1)
                        sleep(sleepInterval)
                        allocation = self.runCfg.get('allocation', 'csd403')  # NSG account
                        nodes = self.runCfg.get('nodes', 1)
                        coresPerNode = self.runCfg.get('coresPerNode', 1)
                        email = self.runCfg.get('email', '[email protected]')
                        folder = self.runCfg.get('folder', '.')
                        script = self.runCfg.get('script', 'init.py')
                        mpiCommand = self.runCfg.get('mpiCommand', 'ibrun')
                        walltime = self.runCfg.get('walltime', '00:30:00')
                        reservation = self.runCfg.get('reservation', None)
                        custom = self.runCfg.get('custom', '')
                        if reservation:
                            res = '#SBATCH --res=%s' % (reservation)
                        else:
                            res = ''
                        numproc = nodes * coresPerNode
                        command = '%s -np %d nrniv -python -mpi %s simConfig=%s netParams=%s' % (mpiCommand, numproc, script, cfgSavePath, netParamsSavePath)
                        jobString = """#!/bin/bash
#SBATCH --job-name=%s
#SBATCH -A %s
#SBATCH -t %s
#SBATCH --nodes=%d
#SBATCH --ntasks-per-node=%d
#SBATCH -o %s.run
#SBATCH -e %s.err
#SBATCH --mail-user=%s
#SBATCH --mail-type=end
%s
%s
source ~/.bashrc
cd %s
%s
wait
""" % (simLabel, allocation, walltime, nodes, coresPerNode, jobName, jobName, email, res, custom, folder, command)
                        # Send job_string to qsub
                        print 'Submitting job ', jobName
                        print jobString + '\n'
                        batchfile = '%s.sbatch' % (jobName)
                        with open(batchfile, 'w') as text_file:
                            text_file.write("%s" % jobString)
                        #subprocess.call
                        proc = Popen(['sbatch', batchfile], stdin=PIPE, stdout=PIPE)  # Open a pipe to the qsub command.
                        (output, input) = (proc.stdin, proc.stdout)
                    # run mpi jobs directly e.g. if have 16 cores, can run 4 jobs * 4 cores in parallel
                    # eg. usage: python batch.py
                    elif self.runCfg.get('type', None) == 'mpi_direct':
                        jobName = self.saveFolder + '/' + simLabel
                        print 'Running job ', jobName
                        cores = self.runCfg.get('cores', 1)
                        folder = self.runCfg.get('folder', '.')
                        script = self.runCfg.get('script', 'init.py')
                        mpiCommand = self.runCfg.get('mpiCommand', 'ibrun')
                        command = '%s -np %d nrniv -python -mpi %s simConfig=%s netParams=%s' % (mpiCommand, cores, script, cfgSavePath, netParamsSavePath)
                        print command + '\n'
                        # NOTE(review): the .run/.err file handles opened here
                        # are never closed explicitly
                        proc = Popen(command.split(' '), stdout=open(jobName + '.run', 'w'), stderr=open(jobName + '.err', 'w'))
                        #print proc.stdout.read()
                    # pc bulletin board job submission (master/slave) via mpi
                    # eg. usage: mpiexec -n 4 nrniv -mpi batch.py
                    elif self.runCfg.get('type', None) == 'mpi_bulletin':
                        jobName = self.saveFolder + '/' + simLabel
                        print 'Submitting job ', jobName
                        # master/slave bulletin board schedulling of jobs
                        pc.submit(runJob, self.runCfg.get('script', 'init.py'), cfgSavePath, netParamsSavePath)
                sleep(1)  # avoid saturating scheduler
        print "-" * 80
        print " Finished submitting jobs for grid parameter exploration "
        print "-" * 80
    # -------------------------------------------------------------------------------
    # Evolutionary optimization
    # -------------------------------------------------------------------------------
    elif self.method == 'evol':
        import sys
        import inspyred.ec as EC

        # -------------------------------------------------------------------------------
        # Evolutionary optimization: Parallel evaluation
        # -------------------------------------------------------------------------------
        def evaluator(candidates, args):
            """Evaluate one generation: submit one simulation job per
            candidate, poll for result files, and return a fitness list
            (same order as `candidates`)."""
            import os
            import signal
            global ngen
            ngen += 1
            total_jobs = 0
            # options slurm, mpi
            # NOTE(review): `type` shadows the builtin
            type = args.get('type', 'mpi_direct')
            # paths to required scripts
            script = args.get('script', 'init.py')
            netParamsSavePath = args.get('netParamsSavePath')
            genFolderPath = self.saveFolder + '/gen_' + str(ngen)
            # mpi command setup
            nodes = args.get('nodes', 1)
            paramLabels = args.get('paramLabels', [])
            coresPerNode = args.get('coresPerNode', 1)
            mpiCommand = args.get('mpiCommand', 'ibrun')
            numproc = nodes * coresPerNode
            # slurm setup
            custom = args.get('custom', '')
            folder = args.get('folder', '.')
            email = args.get('email', '[email protected]')
            walltime = args.get('walltime', '00:01:00')
            reservation = args.get('reservation', None)
            allocation = args.get('allocation', 'csd403')  # NSG account
            # fitness function
            fitnessFunc = args.get('fitnessFunc')
            fitnessFuncArgs = args.get('fitnessFuncArgs')
            defaultFitness = args.get('defaultFitness')
            # read params or set defaults
            sleepInterval = args.get('sleepInterval', 0.2)
            # create folder if it does not exist
            createFolder(genFolderPath)
            # remember pids and jobids in a list
            pids = []
            jobids = {}
            # create a job for each candidate
            for candidate_index, candidate in enumerate(candidates):
                # required for slurm
                sleep(sleepInterval)
                # name and path
                jobName = "gen_" + str(ngen) + "_cand_" + str(candidate_index)
                jobPath = genFolderPath + '/' + jobName
                # modify cfg instance with candidate values
                for label, value in zip(paramLabels, candidate):
                    self.setCfgNestedParam(label, value)
                    print 'set %s=%s' % (label, value)
                #self.setCfgNestedParam("filename", jobPath)
                self.cfg.simLabel = jobName
                self.cfg.saveFolder = genFolderPath
                # save cfg instance to file
                cfgSavePath = jobPath + '_cfg.json'
                self.cfg.save(cfgSavePath)
                if type == 'mpi_bulletin':
                    # ----------------------------------------------------------------------
                    # MPI master-slaves
                    # ----------------------------------------------------------------------
                    pc.submit(runEvolJob, script, cfgSavePath, netParamsSavePath, jobPath)
                    print '-' * 80
                else:
                    # ----------------------------------------------------------------------
                    # MPI job commnand
                    # ----------------------------------------------------------------------
                    command = '%s -np %d nrniv -python -mpi %s simConfig=%s netParams=%s ' % (mpiCommand, numproc, script, cfgSavePath, netParamsSavePath)
                    # ----------------------------------------------------------------------
                    # run on local machine with <nodes*coresPerNode> cores
                    # ----------------------------------------------------------------------
                    if type == 'mpi_direct':
                        executer = '/bin/bash'
                        jobString = bashTemplate('mpi_direct') % (custom, folder, command)
                    # ----------------------------------------------------------------------
                    # run on HPC through slurm
                    # ----------------------------------------------------------------------
                    elif type == 'hpc_slurm':
                        executer = 'sbatch'
                        res = '#SBATCH --res=%s' % (reservation) if reservation else ''
                        jobString = bashTemplate('hpc_slurm') % (jobName, allocation, walltime, nodes, coresPerNode, jobPath, jobPath, email, res, custom, folder, command)
                    # ----------------------------------------------------------------------
                    # run on HPC through PBS
                    # ----------------------------------------------------------------------
                    elif type == 'hpc_torque':
                        executer = 'qsub'
                        queueName = args.get('queueName', 'default')
                        nodesppn = 'nodes=%d:ppn=%d' % (nodes, coresPerNode)
                        jobString = bashTemplate('hpc_torque') % (jobName, walltime, queueName, nodesppn, jobPath, jobPath, custom, command)
                    # ----------------------------------------------------------------------
                    # save job and run
                    # ----------------------------------------------------------------------
                    print 'Submitting job ', jobName
                    print jobString
                    print '-' * 80
                    # save file
                    batchfile = '%s.sbatch' % (jobPath)
                    with open(batchfile, 'w') as text_file:
                        text_file.write("%s" % jobString)
                    #with open(jobPath+'.run', 'a+') as outf, open(jobPath+'.err', 'w') as errf:
                    with open(jobPath + '.jobid', 'w') as outf, open(jobPath + '.err', 'w') as errf:
                        pids.append(Popen([executer, batchfile], stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
                    #proc = Popen(command.split([executer, batchfile]), stdout=PIPE, stderr=PIPE)
                    sleep(0.1)
                    #read = proc.stdout.read()
                    with open(jobPath + '.jobid', 'r') as outf:
                        read = outf.readline()
                    print read
                    if len(read) > 0:
                        jobid = int(read.split()[-1])
                        jobids[candidate_index] = jobid
                    print 'jobids', jobids
                total_jobs += 1
                sleep(0.1)
            # ----------------------------------------------------------------------
            # gather data and compute fitness
            # ----------------------------------------------------------------------
            if type == 'mpi_bulletin':
                # wait for pc bulletin board jobs to finish
                try:
                    while pc.working():
                        sleep(1)
                    #pc.done()
                except:
                    pass
            num_iters = 0
            jobs_completed = 0
            fitness = [None for cand in candidates]
            # print outfilestem
            print "Waiting for jobs from generation %d/%d ..." % (ngen, args.get('max_generations'))
            # print "PID's: %r" %(pids)
            # start fitness calculation
            while jobs_completed < total_jobs:
                unfinished = [i for i, x in enumerate(fitness) if x is None]
                for candidate_index in unfinished:
                    try:  # load simData and evaluate fitness
                        jobNamePath = genFolderPath + "/gen_" + str(ngen) + "_cand_" + str(candidate_index)
                        if os.path.isfile(jobNamePath + '.json'):
                            with open('%s.json' % (jobNamePath)) as file:
                                simData = json.load(file)['simData']
                            fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
                            jobs_completed += 1
                            print ' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index])
                    except Exception as e:
                        # print
                        err = "There was an exception evaluating candidate %d:" % (candidate_index)
                        print("%s \n %s" % (err, e))
                        #pass
                        #print 'Error evaluating fitness of candidate %d'%(candidate_index)
                num_iters += 1
                print 'completed: %d' % (jobs_completed)
                if num_iters >= args.get('maxiter_wait', 5000):
                    print "Max iterations reached, the %d unfinished jobs will be canceled and set to default fitness" % (len(unfinished))
                    # NOTE(review): loop variable is misspelled
                    # (`canditade_index`) and the scancel call below uses
                    # `candidate_index` from the earlier loop instead --
                    # verify which job is actually being canceled.
                    for canditade_index in unfinished:
                        fitness[canditade_index] = defaultFitness
                        jobs_completed += 1
                        if 'scancelUser' in kwargs:
                            os.system('scancel -u %s' % (kwargs['scancelUser']))
                        else:
                            os.system('scancel %d' % (jobids[candidate_index]))  # terminate unfinished job (resubmitted jobs not terminated!)
                sleep(args.get('time_sleep', 1))
            # kill all processes
            if type == 'mpi_bulletin':
                try:
                    with open("./pids.pid", 'r') as file:  # read pids for mpi_bulletin
                        pids = [int(i) for i in file.read().split(' ')[:-1]]
                    with open("./pids.pid", 'w') as file:  # delete content
                        pass
                    for pid in pids:
                        try:
                            os.killpg(os.getpgid(pid), signal.SIGTERM)
                        except:
                            pass
                except:
                    pass
            # don't want to to this for hpcs since jobs are running on compute nodes not master
            # else:
            #     try:
            #         for pid in pids: os.killpg(os.getpgid(pid), signal.SIGTERM)
            #     except:
            #         pass
            # return
            print "-" * 80
            print " Completed a generation "
            print "-" * 80
            return fitness

        # -------------------------------------------------------------------------------
        # Evolutionary optimization: Generation of first population candidates
        # -------------------------------------------------------------------------------
        def generator(random, args):
            # generate initial values for candidates
            return [random.uniform(l, u) for l, u in zip(args.get('lower_bound'), args.get('upper_bound'))]

        # -------------------------------------------------------------------------------
        # Mutator
        # -------------------------------------------------------------------------------
        @EC.variators.mutator
        def nonuniform_bounds_mutation(random, candidate, args):
            """Return the mutants produced by nonuniform mutation on the candidates.

            Arguments:
            random -- the random number generator object
            candidate -- the candidate solution
            args -- a dictionary of keyword arguments

            Required keyword arguments in args:

            Optional keyword arguments in args:
            - *mutation_strength* -- the strength of the mutation, where higher
              values correspond to greater variation (default 1)
            """
            lower_bound = args.get('lower_bound')
            upper_bound = args.get('upper_bound')
            strength = args.setdefault('mutation_strength', 1)
            mutant = copy(candidate)
            for i, (c, lo, hi) in enumerate(zip(candidate, lower_bound, upper_bound)):
                # pull each gene toward its upper or lower bound with equal
                # probability; `strength` biases how far it moves
                if random.random() <= 0.5:
                    new_value = c + (hi - c) * (1.0 - random.random()**strength)
                else:
                    new_value = c - (c - lo) * (1.0 - random.random()**strength)
                mutant[i] = new_value
            return mutant

        # -------------------------------------------------------------------------------
        # Evolutionary optimization: Main code
        # -------------------------------------------------------------------------------
        import os
        # create main sim directory and save scripts
        self.saveScripts()
        global ngen
        ngen = -1
        # log for simulation
        logger = logging.getLogger('inspyred.ec')
        logger.setLevel(logging.DEBUG)
        file_handler = logging.FileHandler(self.saveFolder + '/inspyred.log', mode='a')
        file_handler.setLevel(logging.DEBUG)
        formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        file_handler.setFormatter(formatter)
        logger.addHandler(file_handler)
        # create randomizer instance
        rand = Random()
        rand.seed(self.seed)
        # create file handlers for observers
        stats_file, ind_stats_file = self.openFiles2SaveStats()
        # gather **kwargs (also read by `evaluator` above via closure)
        kwargs = {'cfg': self.cfg}
        kwargs['num_inputs'] = len(self.params)
        kwargs['paramLabels'] = [x['label'] for x in self.params]
        kwargs['lower_bound'] = [x['values'][0] for x in self.params]
        kwargs['upper_bound'] = [x['values'][1] for x in self.params]
        kwargs['statistics_file'] = stats_file
        kwargs['individuals_file'] = ind_stats_file
        kwargs['netParamsSavePath'] = self.saveFolder + '/' + self.batchLabel + '_netParams.py'
        for key, value in self.evolCfg.iteritems():
            kwargs[key] = value
        if not 'maximize' in kwargs:
            kwargs['maximize'] = False
        for key, value in self.runCfg.iteritems():
            kwargs[key] = value
        # if using pc bulletin board, initialize all workers
        if self.runCfg.get('type', None) == 'mpi_bulletin':
            for iworker in range(int(pc.nhost())):
                pc.runworker()
        ####################################################################
        # Evolution strategy
        ####################################################################
        # Custom algorithm based on Krichmar's params
        if self.evolCfg['evolAlgorithm'] == 'krichmarCustom':
            ea = EC.EvolutionaryComputation(rand)
            ea.selector = EC.selectors.tournament_selection
            ea.variator = [EC.variators.uniform_crossover, nonuniform_bounds_mutation]
            ea.replacer = EC.replacers.generational_replacement
            if not 'tournament_size' in kwargs:
                kwargs['tournament_size'] = 2
            if not 'num_selected' in kwargs:
                kwargs['num_selected'] = kwargs['pop_size']
        # Genetic
        elif self.evolCfg['evolAlgorithm'] == 'genetic':
            ea = EC.GA(rand)
        # Evolution Strategy
        elif self.evolCfg['evolAlgorithm'] == 'evolutionStrategy':
            ea = EC.ES(rand)
        # Simulated Annealing
        elif self.evolCfg['evolAlgorithm'] == 'simulatedAnnealing':
            ea = EC.SA(rand)
        # Differential Evolution
        elif self.evolCfg['evolAlgorithm'] == 'diffEvolution':
            ea = EC.DEA(rand)
        # Estimation of Distribution
        elif self.evolCfg['evolAlgorithm'] == 'estimationDist':
            ea = EC.EDA(rand)
        # Particle Swarm optimization
        elif self.evolCfg['evolAlgorithm'] == 'particleSwarm':
            from inspyred import swarm
            ea = swarm.PSO(rand)
            ea.topology = swarm.topologies.ring_topology
        # Ant colony optimization (requires components)
        elif self.evolCfg['evolAlgorithm'] == 'antColony':
            from inspyred import swarm
            ea = swarm.ACS(rand)
            ea.topology = swarm.topologies.ring_topology
        else:
            raise ValueError("%s is not a valid strategy" % (self.evolCfg['evolAlgorithm']))
        ####################################################################
        ea.terminator = EC.terminators.generation_termination
        ea.observer = [EC.observers.stats_observer, EC.observers.file_observer]
        # -------------------------------------------------------------------------------
        # Run algorithm
        # -------------------------------------------------------------------------------
        final_pop = ea.evolve(generator=generator, evaluator=evaluator, bounder=EC.Bounder(kwargs['lower_bound'], kwargs['upper_bound']), logger=logger, **kwargs)
        # close file
        stats_file.close()
        ind_stats_file.close()
        # print best and finish
        print('Best Solution: \n{0}'.format(str(max(final_pop))))
        print "-" * 80
        print " Completed evolutionary algorithm parameter optimization "
        print "-" * 80
        sys.exit()
def evolOptim(self, pc):
    """Run evolutionary parameter optimization for this batch.

    Uses inspyred to evolve candidate parameter sets; each generation's
    candidates are evaluated by launching one simulation job per candidate
    (via mpi_direct, hpc_slurm, hpc_torque or the NEURON `pc` bulletin
    board) and computing fitness from the saved simulation output.

    Parameters
    ----------
    self : Batch
        The batch instance (provides saveFolder, params, evolCfg, runCfg,
        cfg, etc.).
    pc : h.ParallelContext
        NEURON parallel context used for bulletin-board job submission.

    Side effects: writes job scripts/logs under ``self.saveFolder`` and
    calls ``sys.exit()`` when the algorithm completes.
    """
    import sys
    import inspyred.ec as EC

    # -------------------------------------------------------------------------------
    # Evolutionary optimization: Parallel evaluation
    # -------------------------------------------------------------------------------
    def evaluator(candidates, args):
        """Evaluate one generation: submit one job per candidate, poll for
        result files (.json or .pkl), and return the fitness list."""
        import os
        import signal
        global ngen
        ngen += 1
        total_jobs = 0
        # options slurm, mpi
        # NOTE(review): `type` shadows the builtin
        type = args.get('type', 'mpi_direct')
        # paths to required scripts
        script = args.get('script', 'init.py')
        netParamsSavePath = args.get('netParamsSavePath')
        genFolderPath = self.saveFolder + '/gen_' + str(ngen)
        # mpi command setup
        nodes = args.get('nodes', 1)
        paramLabels = args.get('paramLabels', [])
        coresPerNode = args.get('coresPerNode', 1)
        mpiCommand = args.get('mpiCommand', 'mpirun')
        nrnCommand = args.get('nrnCommand', 'nrniv')
        numproc = nodes * coresPerNode
        # slurm setup
        custom = args.get('custom', '')
        folder = args.get('folder', '.')
        email = args.get('email', '[email protected]')
        walltime = args.get('walltime', '00:01:00')
        reservation = args.get('reservation', None)
        allocation = args.get('allocation', 'csd403')  # NSG account
        # fitness function
        fitnessFunc = args.get('fitnessFunc')
        fitnessFuncArgs = args.get('fitnessFuncArgs')
        defaultFitness = args.get('defaultFitness')
        # read params or set defaults
        sleepInterval = args.get('sleepInterval', 0.2)
        # create folder if it does not exist
        createFolder(genFolderPath)
        # remember pids and jobids in a list
        pids = []
        jobids = {}
        # create a job for each candidate
        for candidate_index, candidate in enumerate(candidates):
            # required for slurm
            sleep(sleepInterval)
            # name and path
            jobName = "gen_" + str(ngen) + "_cand_" + str(candidate_index)
            jobPath = genFolderPath + '/' + jobName
            # set initial cfg initCfg
            if len(self.initCfg) > 0:
                for paramLabel, paramVal in self.initCfg.items():
                    self.setCfgNestedParam(paramLabel, paramVal)
            # modify cfg instance with candidate values
            for label, value in zip(paramLabels, candidate):
                print('set %s=%s' % (label, value))
                self.setCfgNestedParam(label, value)
            #self.setCfgNestedParam("filename", jobPath)
            self.cfg.simLabel = jobName
            self.cfg.saveFolder = genFolderPath
            # save cfg instance to file
            cfgSavePath = jobPath + '_cfg.json'
            self.cfg.save(cfgSavePath)
            if type == 'mpi_bulletin':
                # ----------------------------------------------------------------------
                # MPI master-slaves
                # ----------------------------------------------------------------------
                pc.submit(runEvolJob, nrnCommand, script, cfgSavePath, netParamsSavePath, jobPath)
                print('-' * 80)
            else:
                # ----------------------------------------------------------------------
                # MPI job commnand
                # ----------------------------------------------------------------------
                # empty mpiCommand means run NEURON serially (no MPI wrapper)
                if mpiCommand == '':
                    command = '%s %s simConfig=%s netParams=%s ' % (nrnCommand, script, cfgSavePath, netParamsSavePath)
                else:
                    command = '%s -np %d %s -python -mpi %s simConfig=%s netParams=%s ' % (mpiCommand, numproc, nrnCommand, script, cfgSavePath, netParamsSavePath)
                # ----------------------------------------------------------------------
                # run on local machine with <nodes*coresPerNode> cores
                # ----------------------------------------------------------------------
                if type == 'mpi_direct':
                    executer = '/bin/bash'
                    jobString = bashTemplate('mpi_direct') % (custom, folder, command)
                # ----------------------------------------------------------------------
                # run on HPC through slurm
                # ----------------------------------------------------------------------
                elif type == 'hpc_slurm':
                    executer = 'sbatch'
                    res = '#SBATCH --res=%s' % (reservation) if reservation else ''
                    jobString = bashTemplate('hpc_slurm') % (jobName, allocation, walltime, nodes, coresPerNode, jobPath, jobPath, email, res, custom, folder, command)
                # ----------------------------------------------------------------------
                # run on HPC through PBS
                # ----------------------------------------------------------------------
                elif type == 'hpc_torque':
                    executer = 'qsub'
                    queueName = args.get('queueName', 'default')
                    nodesppn = 'nodes=%d:ppn=%d' % (nodes, coresPerNode)
                    jobString = bashTemplate('hpc_torque') % (jobName, walltime, queueName, nodesppn, jobPath, jobPath, custom, command)
                # ----------------------------------------------------------------------
                # save job and run
                # ----------------------------------------------------------------------
                print('Submitting job ', jobName)
                print(jobString)
                print('-' * 80)
                # save file
                batchfile = '%s.sbatch' % (jobPath)
                with open(batchfile, 'w') as text_file:
                    text_file.write("%s" % jobString)
                #with open(jobPath+'.run', 'a+') as outf, open(jobPath+'.err', 'w') as errf:
                with open(jobPath + '.jobid', 'w') as outf, open(jobPath + '.err', 'w') as errf:
                    pids.append(Popen([executer, batchfile], stdout=outf, stderr=errf, preexec_fn=os.setsid).pid)
                #proc = Popen(command.split([executer, batchfile]), stdout=PIPE, stderr=PIPE)
                sleep(0.1)
                #read = proc.stdout.read()
                with open(jobPath + '.jobid', 'r') as outf:
                    read = outf.readline()
                print(read)
                if len(read) > 0:
                    jobid = int(read.split()[-1])
                    jobids[candidate_index] = jobid
                print('jobids', jobids)
            total_jobs += 1
            sleep(0.1)
        # ----------------------------------------------------------------------
        # gather data and compute fitness
        # ----------------------------------------------------------------------
        if type == 'mpi_bulletin':
            # wait for pc bulletin board jobs to finish
            try:
                while pc.working():
                    sleep(1)
                #pc.done()
            except:
                pass
        num_iters = 0
        jobs_completed = 0
        fitness = [None for cand in candidates]
        # print outfilestem
        print("Waiting for jobs from generation %d/%d ..." % (ngen, args.get('max_generations')))
        # print "PID's: %r" %(pids)
        # start fitness calculation
        while jobs_completed < total_jobs:
            unfinished = [i for i, x in enumerate(fitness) if x is None]
            for candidate_index in unfinished:
                try:  # load simData and evaluate fitness
                    jobNamePath = genFolderPath + "/gen_" + str(ngen) + "_cand_" + str(candidate_index)
                    if os.path.isfile(jobNamePath + '.json'):
                        with open('%s.json' % (jobNamePath)) as file:
                            simData = json.load(file)['simData']
                        fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
                        jobs_completed += 1
                        print(' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index]))
                    elif os.path.isfile(jobNamePath + '.pkl'):
                        with open('%s.pkl' % (jobNamePath), 'rb') as file:
                            simData = pickle.load(file)['simData']
                        fitness[candidate_index] = fitnessFunc(simData, **fitnessFuncArgs)
                        jobs_completed += 1
                        print(' Candidate %d fitness = %.1f' % (candidate_index, fitness[candidate_index]))
                except Exception as e:
                    # print
                    err = "There was an exception evaluating candidate %d:" % (candidate_index)
                    print(("%s \n %s" % (err, e)))
                    #pass
                    #print 'Error evaluating fitness of candidate %d'%(candidate_index)
            num_iters += 1
            print('completed: %d' % (jobs_completed))
            if num_iters >= args.get('maxiter_wait', 5000):
                print("Max iterations reached, the %d unfinished jobs will be canceled and set to default fitness" % (len(unfinished)))
                # NOTE(review): loop variable is misspelled
                # (`canditade_index`) and the scancel call below reads
                # `candidate_index` from the earlier loop -- verify which
                # job is actually being canceled.
                for canditade_index in unfinished:
                    fitness[canditade_index] = defaultFitness
                    jobs_completed += 1
                    try:
                        if 'scancelUser' in kwargs:
                            os.system('scancel -u %s' % (kwargs['scancelUser']))
                        else:
                            os.system('scancel %d' % (jobids[candidate_index]))  # terminate unfinished job (resubmitted jobs not terminated!)
                    except:
                        pass
            sleep(args.get('time_sleep', 1))
        # kill all processes
        if type == 'mpi_bulletin':
            try:
                with open("./pids.pid", 'r') as file:  # read pids for mpi_bulletin
                    pids = [int(i) for i in file.read().split(' ')[:-1]]
                with open("./pids.pid", 'w') as file:  # delete content
                    pass
                for pid in pids:
                    try:
                        os.killpg(os.getpgid(pid), signal.SIGTERM)
                    except:
                        pass
            except:
                pass
        # don't want to to this for hpcs since jobs are running on compute nodes not master
        # else:
        #     try:
        #         for pid in pids: os.killpg(os.getpgid(pid), signal.SIGTERM)
        #     except:
        #         pass
        # return
        print("-" * 80)
        print(" Completed a generation ")
        print("-" * 80)
        return fitness

    # -------------------------------------------------------------------------------
    # Evolutionary optimization: Generation of first population candidates
    # -------------------------------------------------------------------------------
    def generator(random, args):
        # generate initial values for candidates
        return [random.uniform(l, u) for l, u in zip(args.get('lower_bound'), args.get('upper_bound'))]

    # -------------------------------------------------------------------------------
    # Mutator
    # -------------------------------------------------------------------------------
    @EC.variators.mutator
    def nonuniform_bounds_mutation(random, candidate, args):
        """Return the mutants produced by nonuniform mutation on the candidates.

        Arguments:
        random -- the random number generator object
        candidate -- the candidate solution
        args -- a dictionary of keyword arguments

        Required keyword arguments in args:

        Optional keyword arguments in args:
        - *mutation_strength* -- the strength of the mutation, where higher
          values correspond to greater variation (default 1)
        """
        lower_bound = args.get('lower_bound')
        upper_bound = args.get('upper_bound')
        strength = args.setdefault('mutation_strength', 1)
        mutant = copy(candidate)
        for i, (c, lo, hi) in enumerate(zip(candidate, lower_bound, upper_bound)):
            # pull each gene toward its upper or lower bound with equal
            # probability; `strength` biases how far it moves
            if random.random() <= 0.5:
                new_value = c + (hi - c) * (1.0 - random.random()**strength)
            else:
                new_value = c - (c - lo) * (1.0 - random.random()**strength)
            mutant[i] = new_value
        return mutant

    # -------------------------------------------------------------------------------
    # Evolutionary optimization: Main code
    # -------------------------------------------------------------------------------
    import os
    # create main sim directory and save scripts
    self.saveScripts()
    global ngen
    ngen = -1
    # log for simulation
    logger = logging.getLogger('inspyred.ec')
    logger.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler(self.saveFolder + '/inspyred.log', mode='a')
    file_handler.setLevel(logging.DEBUG)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    # create randomizer instance
    rand = Random()
    rand.seed(self.seed)
    # create file handlers for observers
    stats_file, ind_stats_file = self.openFiles2SaveStats()
    # gather **kwargs (also read by `evaluator` above via closure)
    kwargs = {'cfg': self.cfg}
    kwargs['num_inputs'] = len(self.params)
    kwargs['paramLabels'] = [x['label'] for x in self.params]
    kwargs['lower_bound'] = [x['values'][0] for x in self.params]
    kwargs['upper_bound'] = [x['values'][1] for x in self.params]
    kwargs['statistics_file'] = stats_file
    kwargs['individuals_file'] = ind_stats_file
    kwargs['netParamsSavePath'] = self.saveFolder + '/' + self.batchLabel + '_netParams.py'
    for key, value in self.evolCfg.items():
        kwargs[key] = value
    if not 'maximize' in kwargs:
        kwargs['maximize'] = False
    for key, value in self.runCfg.items():
        kwargs[key] = value
    # if using pc bulletin board, initialize all workers
    if self.runCfg.get('type', None) == 'mpi_bulletin':
        for iworker in range(int(pc.nhost())):
            pc.runworker()
    #------------------------------------------------------------------
    # Evolutionary algorithm method
    #-------------------------------------------------------------------
    # Custom algorithm based on Krichmar's params
    if self.evolCfg['evolAlgorithm'] == 'custom':
        ea = EC.EvolutionaryComputation(rand)
        ea.selector = EC.selectors.tournament_selection
        ea.variator = [EC.variators.uniform_crossover, nonuniform_bounds_mutation]
        ea.replacer = EC.replacers.generational_replacement
        if not 'tournament_size' in kwargs:
            kwargs['tournament_size'] = 2
        if not 'num_selected' in kwargs:
            kwargs['num_selected'] = kwargs['pop_size']
    # Genetic
    elif self.evolCfg['evolAlgorithm'] == 'genetic':
        ea = EC.GA(rand)
    # Evolution Strategy
    elif self.evolCfg['evolAlgorithm'] == 'evolutionStrategy':
        ea = EC.ES(rand)
    # Simulated Annealing
    elif self.evolCfg['evolAlgorithm'] == 'simulatedAnnealing':
        ea = EC.SA(rand)
    # Differential Evolution
    elif self.evolCfg['evolAlgorithm'] == 'diffEvolution':
        ea = EC.DEA(rand)
    # Estimation of Distribution
    elif self.evolCfg['evolAlgorithm'] == 'estimationDist':
        ea = EC.EDA(rand)
    # Particle Swarm optimization
    elif self.evolCfg['evolAlgorithm'] == 'particleSwarm':
        from inspyred import swarm
        ea = swarm.PSO(rand)
        ea.topology = swarm.topologies.ring_topology
    # Ant colony optimization (requires components)
    elif self.evolCfg['evolAlgorithm'] == 'antColony':
        from inspyred import swarm
        # NOTE(review): the guard checks `kwargs` but the value is read
        # from `self.evolCfg` -- confirm both are populated consistently
        # (kwargs is built from evolCfg above, so they usually agree).
        if not 'components' in kwargs:
            raise ValueError("%s requires components" % (self.evolCfg['evolAlgorithm']))
        ea = swarm.ACS(rand, self.evolCfg['components'])
        ea.topology = swarm.topologies.ring_topology
    else:
        raise ValueError("%s is not a valid strategy" % (self.evolCfg['evolAlgorithm']))
    ea.terminator = EC.terminators.generation_termination
    ea.observer = [EC.observers.stats_observer, EC.observers.file_observer]
    # -------------------------------------------------------------------------------
    # Run algorithm
    # -------------------------------------------------------------------------------
    final_pop = ea.evolve(generator=generator, evaluator=evaluator, bounder=EC.Bounder(kwargs['lower_bound'], kwargs['upper_bound']), logger=logger, **kwargs)
    # close file
    stats_file.close()
    ind_stats_file.close()
    # print best and finish
    print(('Best Solution: \n{0}'.format(str(max(final_pop)))))
    print("-" * 80)
    print(" Completed evolutionary algorithm parameter optimization ")
    print("-" * 80)
    sys.exit()
import math import multiprocessing import os import random import sys from typing import Dict, List import gym import numpy as np from inspyred import ec from inspyred.benchmarks import Benchmark import constants as cc from CGP_program import CGP_program bounder = ec.Bounder(0.0, 1.0) # Util functions for GC def generator(random: random.Random, args: Dict) -> List: """ Generate an individual of length `cc.N_EVOLVABLE_GENES` where every element in it is a random number x where 0 <= x <= 1 But only the outputs and C (number of inner nodes) will be evolved, so the inputs are not considered part of the genome Parameters ---------- random : random.Random The random generator passed to inspyred
def __init__(self, COST_FUNCTION, dimensions=2):
    """Set up the optimization problem.

    Parameters
    ----------
    COST_FUNCTION : callable
        Objective function evaluated by the optimizer (stored as-is).
    dimensions : int, optional
        Number of decision variables; each is constrained to [0, 1].
    """
    # Minimization over the unit hypercube [0, 1]^dimensions.
    self.dimensions = dimensions
    self.maximize = False
    lows = [0] * dimensions
    highs = [1] * dimensions
    self.bounder = ec.Bounder(lows, highs)
    # Keep a handle to the externally supplied cost function
    # (presumably the real-time contingencies module — TODO confirm).
    self.cost_function = COST_FUNCTION
def train(self, optimizer='pso'):
    '''
    Train the hyperparameters (theta and p) of the Kriging model.

    A global search (PSO or GA) is run first; its candidates are then
    polished best-first with a local SLSQP optimization until a set of
    hyperparameters yields a successful model update.

    :param optimizer: Global search strategy: 'pso' for Particle Swarm
        Optimization or 'ga' for a Genetic Algorithm.
    :raises ValueError: If ``optimizer`` is neither 'pso' nor 'ga'.
    '''
    # First make sure our data is up-to-date
    self.updateData()

    # Bounds for optimization: the k theta values, then the k p values
    lowerBound = [self.thetamin] * self.k + [self.pmin] * self.k
    upperBound = [self.thetamax] * self.k + [self.pmax] * self.k

    # Seeded random source, shared by whichever optimizer is chosen.
    # (Previously this was seeded but never passed to the optimizers.)
    rand = Random()
    rand.seed(int(time()))

    if optimizer == 'pso':
        ea = inspyred.swarm.PSO(rand)
        ea.terminator = self.no_improvement_termination
        ea.topology = inspyred.swarm.topologies.ring_topology
        final_pop = ea.evolve(generator=self.generate_population,
                              evaluator=self.fittingObjective,
                              pop_size=300,
                              maximize=False,
                              bounder=ec.Bounder(lowerBound, upperBound),
                              max_evaluations=30000,
                              neighborhood_size=20,
                              num_inputs=self.k)
    elif optimizer == 'ga':
        ea = inspyred.ec.GA(rand)
        ea.terminator = self.no_improvement_termination
        final_pop = ea.evolve(generator=self.generate_population,
                              evaluator=self.fittingObjective,
                              pop_size=300,
                              maximize=False,
                              bounder=ec.Bounder(lowerBound, upperBound),
                              max_evaluations=30000,
                              num_elites=10,
                              mutation_rate=.05)
    else:
        # Previously an unknown optimizer fell through to a NameError
        # on final_pop; fail fast with a clear message instead.
        raise ValueError("optimizer must be 'pso' or 'ga', got %r" % (optimizer,))

    # Sort so the best individual is at index 0 (now done for both
    # optimizers, not just PSO).
    final_pop.sort(reverse=True)

    # Bounds for the local polish: k theta ranges, then k p ranges
    locOP_bounds = ([(self.thetamin, self.thetamax)] * self.k
                    + [(self.pmin, self.pmax)] * self.k)

    # Try candidates best-first; keep the first hyperparameter set for
    # which the model update succeeds.
    for entry in final_pop:
        newValues = entry.candidate

        # Quickly double-check that we're at the optimal value by
        # running a local optimization from the global-search result.
        lopResults = minimize(self.fittingObjective_local, newValues,
                              method='SLSQP', bounds=locOP_bounds,
                              options={'disp': False})
        newValues = lopResults['x']

        # Install the polished theta and p values, then rebuild the model
        for i in range(self.k):
            self.theta[i] = newValues[i]
            self.pl[i] = newValues[i + self.k]
        try:
            self.updateModel()
        except Exception:
            # Model update failed for this candidate (best-effort);
            # fall through to the next-best individual.
            pass
        else:
            break
#schweful evaluation #f(x) = 418.9829n - [sum from i to n](-x[i]sin(sqrt(abs(x[i])))) fit = 418.9829 * len(cs) - sum([(-x * sin(sqrt(abs(x)))) for x in cs]) fitness.append(fit) return fitness #ec rand = Random() rand.seed(int(time())) es = ec.ES(rand) es.terminator = terminators.evaluation_termination final_pop = es.evolve(generator=generate_schwefel, evaluator=evaluate_schwefel, pop_size=100, maximize=False, bounder=ec.Bounder(-500, 500), max_evaluations=20000, mutation_rate=0.25, num_inputs=2) # Sort and print the best individual, who will be at index 0. final_pop.sort(reverse=True) print(final_pop[0])
from math import cos
from math import pi
# Fix: Random and time were used below but never imported, so the
# script died with a NameError before the optimizer could start.
from random import Random
from time import time

from inspyred import ec
from inspyred.ec import terminators


def generate_rastrigin(random, args):
    """Generate one candidate drawn uniformly from [-5.12, 5.12]^size.

    `size` is taken from args['num_inputs'] (default 10).
    """
    size = args.get('num_inputs', 10)
    return [random.uniform(-5.12, 5.12) for i in range(size)]


def evaluate_rastrigin(candidates, args):
    """Return the shifted Rastrigin fitness of every candidate.

    f(x) = 10*n + sum((x_i - 1)^2 - 10*cos(2*pi*(x_i - 1)));
    the global minimum is 0 at x_i = 1 for all i.
    """
    fitness = []
    for cs in candidates:
        fit = 10 * len(cs) + sum([((x - 1)**2 - 10 * cos(2 * pi * (x - 1)))
                                  for x in cs])
        fitness.append(fit)
    return fitness


# Evolution-strategy demo: minimize Rastrigin in 3 dimensions.
rand = Random()
rand.seed(int(time()))
es = ec.ES(rand)
es.terminator = terminators.evaluation_termination
final_pop = es.evolve(generator=generate_rastrigin,
                      evaluator=evaluate_rastrigin,
                      pop_size=100,
                      maximize=False,
                      bounder=ec.Bounder(-5.12, 5.12),
                      max_evaluations=20000,
                      mutation_rate=0.25,
                      num_inputs=3)
# Sort and print the best individual, who will be at index 0.
final_pop.sort(reverse=True)
print(final_pop[0])
def __init__(self, dimensions=2):
    """Initialize the benchmark over the box [-5.12, 5.12]^dimensions.

    Minimization problem; the known global optimum is the origin.
    """
    Benchmark.__init__(self, dimensions)
    # Symmetric box constraints of half-width 5.12 per coordinate.
    half_width = 5.12
    low = [-half_width] * self.dimensions
    high = [half_width] * self.dimensions
    self.bounder = ec.Bounder(low, high)
    self.maximize = False
    # Known optimum: the zero vector.
    self.global_optimum = [0] * self.dimensions