def test_mo_cma_es():

    def distance(feasible_ind, original_ind):
        """A distance function to the feasibility region."""
        return sum((f - o)**2 for f, o in zip(feasible_ind, original_ind))

    def closest_feasible(individual):
        """A function returning a valid individual from an invalid one."""
        feasible_ind = numpy.array(individual)
        feasible_ind = numpy.maximum(BOUND_LOW, feasible_ind)
        feasible_ind = numpy.minimum(BOUND_UP, feasible_ind)
        return feasible_ind

    def valid(individual):
        """Determines if the individual is valid or not."""
        if any(individual < BOUND_LOW) or any(individual > BOUND_UP):
            return False
        return True

    NDIM = 5
    BOUND_LOW, BOUND_UP = 0.0, 1.0
    MU, LAMBDA = 10, 10
    NGEN = 500

    numpy.random.seed(128)

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [
        creator.__dict__[INDCLSNAME](x)
        for x in numpy.random.uniform(BOUND_LOW, BOUND_UP, (MU, NDIM))
    ]

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.decorate(
        "evaluate",
        tools.ClosestValidPenalty(valid, closest_feasible, 1.0e+6, distance))

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(population,
                                          sigma=1.0,
                                          mu=MU,
                                          lambda_=LAMBDA)

    toolbox.register("generate", strategy.generate, creator.__dict__[INDCLSNAME])
    toolbox.register("update", strategy.update)

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()

        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # Update the strategy with the evaluated individuals
        toolbox.update(population)

    # Note that we use a penalty to guide the search to feasible solutions,
    # but there is no guarantee that individuals are valid.
    # We expect the best individuals will be within bounds or very close.
    num_valid = 0
    for ind in strategy.parents:
        dist = distance(closest_feasible(ind), ind)
        if numpy.isclose(dist, 0.0, rtol=1.e-5, atol=1.e-5):
            num_valid += 1
    assert num_valid >= len(strategy.parents)

    # Note that NGEN=500 is enough to get consistent hypervolume > 116,
    # but not 119. More generations would help but would slow down testing.
    hv = hypervolume(strategy.parents, [11.0, 11.0])
    assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (
        hv, HV_THRESHOLD)
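# The test above relies on several module-level names defined outside the
# snippet: the dynamically created creator classes looked up through
# INDCLSNAME, the HV_THRESHOLD constant, and the hypervolume function. A
# minimal sketch of the setup it assumes follows; the class names, the
# threshold value, and the import location of hypervolume are assumptions
# (the in-test comment only says the hypervolume is consistently above 116).
import numpy

from deap import base, benchmarks, cma, creator, tools
from deap.benchmarks.tools import hypervolume  # assumed import location

FITCLSNAME = "FIT_TYPE"   # hypothetical names for the dynamically created classes
INDCLSNAME = "IND_TYPE"
HV_THRESHOLD = 116.0      # illustrative value, consistent with the in-test comment

# ZDT1 is a two-objective minimization problem
creator.create(FITCLSNAME, base.Fitness, weights=(-1.0, -1.0))
creator.create(INDCLSNAME, list, fitness=creator.__dict__[FITCLSNAME])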
def closest_feasible(individual):
    """A function returning a valid individual from an invalid one."""
    feasible_ind = numpy.array(individual)
    feasible_ind = numpy.maximum(MIN_BOUND, feasible_ind)
    feasible_ind = numpy.minimum(MAX_BOUND, feasible_ind)
    return feasible_ind

def valid(individual):
    """Determines if the individual is valid or not."""
    if any(individual < MIN_BOUND) or any(individual > MAX_BOUND):
        return False
    return True

toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.zdt1)
toolbox.decorate("evaluate", tools.ClosestValidPenalty(valid, closest_feasible, 1.0e-6, distance))

def main():
    # The cma module uses the numpy random number generator
    # numpy.random.seed(128)

    MU, LAMBDA = 10, 10
    NGEN = 500
    verbose = True

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [creator.Individual(x) for x in (numpy.random.uniform(0, 1, (MU, N)))]

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)
def valid(individual):
    """Determines if the individual is valid or not."""
    if any(individual < MIN_BOUND) or any(individual > MAX_BOUND):
        return False
    return True

def close_valid(individual):
    """Determines if the individual is close to valid."""
    if any(individual < MIN_BOUND - EPS_BOUND) or any(
            individual > MAX_BOUND + EPS_BOUND):
        return False
    return True

toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.zdt1)
toolbox.decorate(
    "evaluate",
    tools.ClosestValidPenalty(valid, closest_feasible, 1.0e+6, distance))

def main():
    # The cma module uses the numpy random number generator
    # numpy.random.seed(128)

    MU, LAMBDA = 10, 10
    NGEN = 500
    verbose = True
    create_plot = False

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [
        creator.Individual(x) for x in (numpy.random.uniform(0, 1, (MU, N)))
    ]
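    # The snippet stops after the initial population is created. The rest of
    # main() would follow the same evaluate -> generate -> update cycle as the
    # test above; the statistics and logbook wiring below is a plausible
    # sketch, not the example file's exact content.
    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(population, sigma=1.0, mu=MU, lambda_=LAMBDA)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("min", numpy.min, axis=0)
    stats.register("max", numpy.max, axis=0)

    logbook = tools.Logbook()
    logbook.header = ["gen", "nevals"] + stats.fields

    for gen in range(NGEN):
        # Generate, evaluate, and feed back a new population
        population = toolbox.generate()
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit
        toolbox.update(population)

        record = stats.compile(population)
        logbook.record(gen=gen, nevals=len(population), **record)
        if verbose:
            print(logbook.stream)

    return strategy.parents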
def test_mo_cma_es():

    def distance(feasible_ind, original_ind):
        """A distance function to the feasibility region."""
        return sum((f - o)**2 for f, o in zip(feasible_ind, original_ind))

    def closest_feasible(individual):
        """A function returning a valid individual from an invalid one."""
        feasible_ind = numpy.array(individual)
        feasible_ind = numpy.maximum(BOUND_LOW, feasible_ind)
        feasible_ind = numpy.minimum(BOUND_UP, feasible_ind)
        return feasible_ind

    def valid(individual):
        """Determines if the individual is valid or not."""
        if any(individual < BOUND_LOW) or any(individual > BOUND_UP):
            return False
        return True

    NDIM = 5
    BOUND_LOW, BOUND_UP = 0.0, 1.0
    MU, LAMBDA = 10, 10
    NGEN = 500

    # The MO-CMA-ES algorithm takes a full population as argument
    population = [
        creator.__dict__[INDCLSNAME](x)
        for x in numpy.random.uniform(BOUND_LOW, BOUND_UP, (MU, NDIM))
    ]

    toolbox = base.Toolbox()
    toolbox.register("evaluate", benchmarks.zdt1)
    toolbox.decorate(
        "evaluate",
        tools.ClosestValidPenalty(valid, closest_feasible, 1.0e-6, distance))

    for ind in population:
        ind.fitness.values = toolbox.evaluate(ind)

    strategy = cma.StrategyMultiObjective(population,
                                          sigma=1.0,
                                          mu=MU,
                                          lambda_=LAMBDA)

    toolbox.register("generate", strategy.generate, creator.__dict__[INDCLSNAME])
    toolbox.register("update", strategy.update)

    for gen in range(NGEN):
        # Generate a new population
        population = toolbox.generate()

        # Evaluate the individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # Update the strategy with the evaluated individuals
        toolbox.update(population)

    hv = hypervolume(strategy.parents, [11.0, 11.0])
    assert hv > HV_THRESHOLD, "Hypervolume is lower than expected %f < %f" % (
        hv, HV_THRESHOLD)
def minimize(self):

    # for bounds constraints
    def IsFeasible(individual):
        """ True if individual is OK, False if out of bounds """
        violation = False
        for ii in range(len(individual)):
            if bound_constraints[ii][0] is not None:
                if individual[ii] < bound_constraints[ii][0]:
                    violation = True
            if bound_constraints[ii][1] is not None:
                if individual[ii] > bound_constraints[ii][1]:
                    violation = True
        return not violation

    def displacement(individual):
        """ return displacement of individual from the closest valid point """
        # displacement to closest valid individual
        displacement = np.zeros(len(individual))
        # increment individual from exact boundary to just within limits
        dx = 1e-10
        for ii in range(len(individual)):
            if bound_constraints[ii][0] is not None:
                if individual[ii] < bound_constraints[ii][0]:
                    displacement[ii] = individual[ii] - bound_constraints[ii][0] - dx
            if bound_constraints[ii][1] is not None:
                if individual[ii] > bound_constraints[ii][1]:
                    displacement[ii] = individual[ii] - bound_constraints[ii][1] - dx
        return displacement

    def distance(closest_feasible_ind, individual):
        """ return distance of individual to closest valid point """
        return np.linalg.norm(closest_feasible_ind - individual)

    def ClosestIndividual(individual):
        """ return closest feasible individual """
        new_individual = individual - displacement(individual)
        if not IsFeasible(new_individual):
            raise StochasticOptimizersError(
                "Implementation error generating closest valid Ind.")
        return new_individual

    def ConstraintsPresent(bounds):
        constraints_present = False
        if bounds is not None:
            for ii in range(len(bounds)):
                for jj in range(2):
                    if bounds[ii][jj] is not None:
                        constraints_present = True
        return constraints_present

    def RunTime(Ind):
        return time.time() - t0

    sigma = self.sigma
    lambda_ = int(4 + 3 * np.log(self.Nparam))

    creator.create("FitnessMin", base.Fitness, weights=(-1., ))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    toolbox.register("map", map)
    toolbox.register("evaluate", self._function)
    if ConstraintsPresent(self.bounds):
        toolbox.decorate(
            "evaluate",
            tools.ClosestValidPenalty(IsFeasible, ClosestIndividual, 1e3, distance))
        bound_constraints = self.bounds

    strategy = cma.Strategy(centroid=self.x0, sigma=sigma)
    toolbox.register("generate", strategy.generate, creator.Individual)
    toolbox.register("update", strategy.update)

    #-----------------#
    # log information #
    #-----------------#
    halloffame = tools.HallOfFame(maxsize=1)
    stats = tools.Statistics(lambda ind: ind.fitness.values)
    stats.register("avg", np.mean)
    stats.register("std", np.std)
    stats.register("min", np.min)
    stats.register("max", np.max)
    stats.register("time (s)", RunTime)
    logbook = tools.Logbook()
    logbook.header = "gen", "time (s)", "evals", "avg", "std", "min", "max"

    conditions = {
        "MaxIter": False,
        "TolHistFun": False,
        "EqualFunVals": False,
        "TolX": False,
        "TolUpSigma": False,
        "Stagnation": False,
        "ConditionCov": False,
        "NoEffectAxis": False,
        "NoEffectCoor": False,
        "small_std": False
    }
    MAXITER = self.max_iter

    # results are collected in a SciPy OptimizeResult object
    opt_res = OptimizeResult()
    opt_res["success"] = False
    opt_res["nfev"] = 0

    # initial runtime
    t0 = time.time()

    t = 0
    while not any(conditions.values()):
        # generate all individuals
        population = toolbox.generate()

        # evaluate fitnesses for all individuals in population
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        # update records of optimisation
        halloffame.update(population)
        record = stats.compile(population)
        logbook.record(gen=t, evals=lambda_, **record)

        # number of function evaluations
        opt_res["nfev"] += len(population)

        toolbox.update(population)

        if self.verbose:
            print(logbook.stream)

        # book keeping
        t += 1
        if t > MAXITER:
            conditions["MaxIter"] = True

    # "best" individual of all time
    opt_res["fun"] = np.min([_pop["min"] for _pop in logbook])
    opt_res["x"] = halloffame[0]
    opt_res["logbook"] = logbook

    # return SciPy OptimizeResult instance
    return opt_res
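# The while loop in minimize() is essentially a hand-rolled version of DEAP's
# generate -> evaluate -> update cycle with a dictionary of stopping
# conditions (of which only MaxIter is ever set above). For comparison, a
# minimal self-contained run of the same pattern using the library helper
# algorithms.eaGenerateUpdate; the problem, centroid, sigma, and class names
# below are illustrative choices, not values from the code above.
import numpy as np
from deap import algorithms, base, benchmarks, cma, creator, tools

# distinct class names so this sketch does not clash with the "FitnessMin" /
# "Individual" classes created inside minimize()
creator.create("FitnessMinDemo", base.Fitness, weights=(-1.0,))
creator.create("IndividualDemo", list, fitness=creator.FitnessMinDemo)

toolbox = base.Toolbox()
toolbox.register("evaluate", benchmarks.sphere)

strategy = cma.Strategy(centroid=[0.5] * 5, sigma=0.3)
toolbox.register("generate", strategy.generate, creator.IndividualDemo)
toolbox.register("update", strategy.update)

hof = tools.HallOfFame(1)
stats = tools.Statistics(lambda ind: ind.fitness.values)
stats.register("min", np.min)

# eaGenerateUpdate runs the same cycle for a fixed number of generations
# instead of checking a conditions dictionary.
pop, logbook = algorithms.eaGenerateUpdate(toolbox, ngen=50, stats=stats, halloffame=hof)
print(hof[0], hof[0].fitness.values)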