def _next(self):
    """Run one BRKGA generation: mate elites, inject random mutants, evaluate, select."""
    current = self.pop
    elite_idx = np.where(current.get("type") == "elite")[0]

    # offspring are produced by biased crossover between elite and non-elite parents
    offspring = self.mating.do(self.problem, current, n_offsprings=self.n_offsprings, algorithm=self)

    # mutants are freshly sampled random solutions that keep diversity in the population
    mutants = FloatRandomSampling().do(self.problem, self.n_mutants, algorithm=self)

    # evaluate offspring and mutants together in a single batch
    candidates = offspring.merge(mutants)
    self.evaluator.eval(self.problem, candidates, algorithm=self)

    # elites carry over unchanged; survival selection picks the elites for the next round
    merged = current[elite_idx].merge(candidates)
    self.pop = self.survival.do(self.problem, merged, len(merged), algorithm=self)
def test_against_orginal_implementation(self):
    """Compare PatternSearch against the reference implementation on a few problems."""
    for problem in [Ackley(n_var=2), Rosenbrock(n_var=2), Sphere(n_var=10)]:
        print(problem.__class__.__name__)

        # random starting point; bounds are removed so the search is unconstrained
        x0 = FloatRandomSampling().do(problem, 1)[0].X
        problem.xl = None
        problem.xu = None

        rho = 0.5
        pop = run(problem, x0, rho=rho)[1:]

        # initial exploration deltas: rho where x0 is zero, rho*|x0| otherwise
        delta = np.where(x0 == 0.0, rho, rho * np.abs(x0))

        algorithm = PatternSearch(x0=x0, explr_delta=delta)
        ret = minimize(problem, algorithm, verbose=True)

        # truncate both runs to a common length before comparing
        X, _X = pop.get("X"), ret.pop.get("X")
        F, _F = pop.get("F"), ret.pop.get("F")
        n = min(len(X), len(_X))
        X, _X, F, _F = X[:n], _X[:n], F[:n], _F[:n]
def __init__(self,
             pop_size=100,
             sampling=FloatRandomSampling(),
             selection=TournamentSelection(func_comp=comp_by_cv_and_fitness),
             crossover=SimulatedBinaryCrossover(prob=0.9, eta=3),
             mutation=PolynomialMutation(prob=None, eta=5),
             eliminate_duplicates=True,
             n_offsprings=None,
             **kwargs):
    """
    Single-objective genetic algorithm with fitness-based survival.

    Parameters
    ----------
    pop_size : {pop_size}
    sampling : {sampling}
    selection : {selection}
    crossover : {crossover}
    mutation : {mutation}
    eliminate_duplicates : {eliminate_duplicates}
    n_offsprings : {n_offsprings}
    """
    # survival is fixed to fitness-based selection for this algorithm
    super().__init__(pop_size=pop_size,
                     sampling=sampling,
                     selection=selection,
                     crossover=crossover,
                     mutation=mutation,
                     survival=FitnessSurvival(),
                     eliminate_duplicates=eliminate_duplicates,
                     n_offsprings=n_offsprings,
                     **kwargs)

    # single-objective attributes shown during verbose output
    self.func_display_attrs = disp_single_objective
def _get_sampling(self, X, Y, bound=None, mode=0):
    '''
    Initialize population from data.

    Parameters
    ----------
    X: np.array
        Design variables of the existing samples.
    Y: np.array
        Performance values of the existing samples.
    bound: tuple or None
        (lower, upper) bounds; when given, LHS fill-in samples are scaled
        to the bounds and rounded to integers.
    mode: int
        Unused; kept for backward compatibility with existing callers.

    Returns
    -------
    sampling: np.array or a pymoo Sampling object
        Initial population, or a sampling method for generating one.
    '''
    if self.pop_init_method == 'lhs':
        sampling = LatinHypercubeSampling()
    elif self.pop_init_method == 'nds':
        # seed the population with the best (non-dominated-sorted) samples
        sorted_indices = NonDominatedSorting().do(Y)
        pop_size = self.algo_kwargs['pop_size']
        sampling = X[np.concatenate(sorted_indices)][:pop_size]
        # NOTE: use lhs if current samples are not enough
        if len(sampling) < pop_size:
            # fix: the lhs() call was duplicated in both branches; hoist it once
            rest_sampling = lhs(X.shape[1], pop_size - len(sampling))
            if bound is not None:
                # scale unit-cube samples into the bounds and round to integers
                rest_sampling = bound[0] + rest_sampling * (bound[1] - bound[0])
                rest_sampling = np.round(rest_sampling)
            sampling = np.vstack([sampling, rest_sampling])
    elif self.pop_init_method == 'random':
        sampling = FloatRandomSampling()
    else:
        raise NotImplementedError
    return sampling
def _get_sampling(self, X, Y):
    '''
    Initialize population from data.

    Parameters
    ----------
    X: np.array
        Design variables.
    Y: np.array
        Performance values.

    Returns
    -------
    sampling: np.array or pymoo.model.sampling.Sampling
        Initial population or a sampling method for generating initial population.
    '''
    method = self.pop_init_method

    if method == 'random':
        return FloatRandomSampling()

    if method == 'lhs':
        return LatinHypercubeSampling()

    if method == 'nds':
        # take the best existing samples in non-dominated-sorting order
        order = np.concatenate(NonDominatedSorting().do(Y))
        pop_size = self.algo_kwargs['pop_size']
        sampling = X[order][:pop_size]
        # NOTE: use lhs if current samples are not enough
        missing = pop_size - len(sampling)
        if missing > 0:
            sampling = np.vstack([sampling, lhs(X.shape[1], missing)])
        return sampling

    raise NotImplementedError
def initialize(self, problem, seed=None, **kwargs):
    """Set up the CMA-ES run: translate problem bounds/termination into solver
    options, sample a starting point if none was given, and start the solver."""
    super().initialize(problem, **kwargs)
    self.n_gen = 0

    # translate array bounds to plain lists (or None when the problem is unbounded)
    xl = problem.xl.tolist() if problem.xl is not None else None
    xu = problem.xu.tolist() if problem.xu is not None else None
    self.options['bounds'] = [xl, xu]
    self.options['seed'] = seed

    # map the framework termination criterion onto the solver's own options
    if isinstance(self.termination, MaximumGenerationTermination):
        self.options['maxiter'] = self.termination.n_max_gen
    elif isinstance(self.termination, MaximumFunctionCallTermination):
        self.options['maxfevals'] = self.termination.n_max_evals

    # no starting point provided: seed numpy first so the sample is reproducible
    if self.x0 is None:
        np.random.seed(seed)
        self.x0 = FloatRandomSampling().do(problem, 1).get("X")[0]

    # hand everything over to the (generator-based) CMA-ES driver
    self.es = my_fmin(
        self.x0,
        self.sigma,
        options=self.options,
        parallelize=self.parallelize,
        restarts=self.restarts,
        restart_from_best=self.restart_from_best,
        incpopsize=self.incpopsize,
        eval_initial_x=self.eval_initial_x,
        noise_handler=self.noise_handler,
        noise_change_sigma_exponent=self.noise_change_sigma_exponent,
        noise_kappa_exponent=self.noise_kappa_exponent,
        bipop=self.bipop
    )
def __init__(self,
             ref_dirs,
             pop_size=None,
             sampling=FloatRandomSampling(),
             selection=TournamentSelection(func_comp=comp_by_cv_then_random),
             crossover=SimulatedBinaryCrossover(eta=30, prob=1.0),
             mutation=PolynomialMutation(eta=20, prob=None),
             eliminate_duplicates=True,
             n_offsprings=None,
             display=MultiObjectiveDisplay(),
             **kwargs):
    """
    Parameters
    ----------
    ref_dirs : {ref_dirs}
    pop_size : int (default = None)
        By default the population size is set to None which means that it will be equal to the number of
        reference line. However, if desired this can be overwritten by providing a positive number.
    sampling : {sampling}
    selection : {selection}
    crossover : {crossover}
    mutation : {mutation}
    eliminate_duplicates : {eliminate_duplicates}
    n_offsprings : {n_offsprings}
    """
    self.ref_dirs = ref_dirs

    # ref_dirs is None for R-NSGA-3; only validate pop_size when directions exist
    if self.ref_dirs is not None:
        n_dirs = len(self.ref_dirs)
        if pop_size is None:
            pop_size = n_dirs
        if pop_size < n_dirs:
            print(
                f"WARNING: pop_size={pop_size} is less than the number of reference directions ref_dirs={len(self.ref_dirs)}.\n"
                "This might cause unwanted behavior of the algorithm. \nPlease make sure pop_size is equal or larger "
                "than the number of reference directions. ")

    # a caller-provided survival (even an explicit one) overrides the default
    if 'survival' in kwargs:
        survival = kwargs.pop('survival')
    else:
        survival = ReferenceDirectionSurvival(ref_dirs)

    super().__init__(pop_size=pop_size,
                     sampling=sampling,
                     selection=selection,
                     crossover=crossover,
                     mutation=mutation,
                     survival=survival,
                     eliminate_duplicates=eliminate_duplicates,
                     n_offsprings=n_offsprings,
                     display=display,
                     **kwargs)
def __init__(self,
             ref_dirs,
             alpha=2.0,
             adapt_freq=0.1,
             pop_size=None,
             sampling=FloatRandomSampling(),
             selection=TournamentSelection(binary_tournament),
             crossover=SimulatedBinaryCrossover(eta=30, prob=1.0),
             mutation=PolynomialMutation(eta=20, prob=None),
             eliminate_duplicates=True,
             n_offsprings=None,
             display=MultiObjectiveDisplay(),
             **kwargs):
    """
    Parameters
    ----------
    ref_dirs : {ref_dirs}
    adapt_freq : float
        Defines the ratio of generation when the reference directions are updated.
    pop_size : int (default = None)
        By default the population size is set to None which means that it will be equal to the number of
        reference line. However, if desired this can be overwritten by providing a positive number.
    sampling : {sampling}
    selection : {selection}
    crossover : {crossover}
    mutation : {mutation}
    eliminate_duplicates : {eliminate_duplicates}
    n_offsprings : {n_offsprings}
    """
    self.ref_dirs = ref_dirs

    # default the population size to the number of reference directions
    if self.ref_dirs is not None and pop_size is None:
        pop_size = len(self.ref_dirs)

    # fraction of n_max_gen after which the reference directions are adapted
    self.adapt_freq = adapt_freq

    # allow a custom survival; fall back to APD-based survival otherwise
    survival = kwargs.pop("survival", None)
    if survival is None:
        survival = ModifiedAPDSurvival(ref_dirs, alpha=alpha)

    super().__init__(pop_size=pop_size,
                     sampling=sampling,
                     selection=selection,
                     crossover=crossover,
                     mutation=mutation,
                     survival=survival,
                     eliminate_duplicates=eliminate_duplicates,
                     n_offsprings=n_offsprings,
                     display=display,
                     **kwargs)
def __init__(self,
             n_elites=200,
             n_offsprings=700,
             n_mutants=100,
             bias=0.7,
             sampling=FloatRandomSampling(),
             survival=None,
             display=SingleObjectiveDisplay(),
             eliminate_duplicates=False,
             **kwargs):
    """
    Parameters
    ----------
    n_elites : int
        Number of elite individuals
    n_offsprings : int
        Number of offsprings to be generated through mating of an elite and a non-elite individual
    n_mutants : int
        Number of mutations to be introduced each generation
    bias : float
        Bias of an offspring inheriting the allele of its elite parent
    eliminate_duplicates : bool or class
        The duplicate elimination is more important if a decoding is used. The duplicate check has to be
        performed on the decoded variable and not on the real values. Therefore, we recommend passing
        a DuplicateElimination object.
        If eliminate_duplicates is simply set to `True`, then duplicates are filtered out whenever the
        objective values are equal.
    """
    # the elite survival owns the duplicate-elimination policy
    survival = survival if survival is not None else EliteSurvival(n_elites, eliminate_duplicates=eliminate_duplicates)

    # population = elites + offspring + mutants, every generation
    super().__init__(pop_size=n_elites + n_offsprings + n_mutants,
                     n_offsprings=n_offsprings,
                     sampling=sampling,
                     selection=EliteBiasedSelection(),
                     crossover=BiasedCrossover(bias, prob=1.0),
                     mutation=NoMutation(),
                     survival=survival,
                     display=display,
                     eliminate_duplicates=True,
                     **kwargs)

    self.n_elites = n_elites
    self.n_mutants = n_mutants
    self.bias = bias
    self.default_termination = SingleObjectiveDefaultTermination()
def __init__(self,
             ref_dirs,
             pop_size=None,
             sampling=FloatRandomSampling(),
             selection=TournamentSelection(func_comp=comp_by_cv_then_random),
             crossover=SimulatedBinaryCrossover(eta=30, prob=1.0),
             mutation=PolynomialMutation(eta=20, prob=None),
             eliminate_duplicates=True,
             n_offsprings=None,
             **kwargs):
    """
    Parameters
    ----------
    ref_dirs : {ref_dirs}
    pop_size : int (default = None)
        By default the population size is set to None which means that it will be equal to the number of
        reference line. However, if desired this can be overwritten by providing a positive number.
    sampling : {sampling}
    selection : {selection}
    crossover : {crossover}
    mutation : {mutation}
    eliminate_duplicates : {eliminate_duplicates}
    n_offsprings : {n_offsprings}
    """
    self.ref_dirs = ref_dirs

    # population size defaults to one individual per reference direction
    if pop_size is None:
        pop_size = len(ref_dirs)

    # individuals carry niching bookkeeping used by the survival operator
    kwargs['individual'] = Individual(rank=np.inf, niche=-1, dist_to_niche=np.inf)

    # a caller-provided survival (even an explicit one) takes precedence
    if 'survival' in kwargs:
        survival = kwargs.pop('survival')
    else:
        survival = ReferenceDirectionSurvival(ref_dirs)

    super().__init__(pop_size=pop_size,
                     sampling=sampling,
                     selection=selection,
                     crossover=crossover,
                     mutation=mutation,
                     survival=survival,
                     eliminate_duplicates=eliminate_duplicates,
                     n_offsprings=n_offsprings,
                     **kwargs)

    # multi-objective attributes shown during verbose output
    self.func_display_attrs = disp_multi_objective
def __init__(self,
             display=CSDisplay(),
             sampling=FloatRandomSampling(),
             survival=FitnessSurvival(),
             eliminate_duplicates=DefaultDuplicateElimination(),
             termination=SingleObjectiveDefaultTermination(),
             pop_size=100,
             beta=1.5,
             alfa=0.01,
             pa=0.35,
             **kwargs):
    """
    Cuckoo Search for single-objective optimization.

    Parameters
    ----------
    display : {display}
    sampling : {sampling}
    survival : {survival}
    eliminate_duplicates:
        This does not exists in the original paper/book.
        Without this the solutions might get too biased to current global best solution,
        because the global random walk use the global best solution as the reference.
    termination : {termination}
    pop_size : The number of nests (solutions)
    beta : The input parameter of the Mantegna's Algorithm to simulate
        sampling on Levy Distribution
    alfa : alfa is the step size scaling factor and is usually 0.01, so that the
        step size will be scaled down to O(L/100) with L is the scale (range of bounds)
        of the problem.
    pa : The switch probability, pa fraction of the nests will be abandoned on every iteration
    """
    super().__init__(**kwargs)

    self.initialization = Initialization(sampling)
    self.survival = survival
    self.display = display
    self.pop_size = pop_size
    self.default_termination = termination
    self.eliminate_duplicates = eliminate_duplicates

    # the scale will be multiplied by the problem scale once the problem is known in setup
    self.alfa = alfa
    self.scale = alfa
    self.pa = pa
    self.beta = beta

    # sigma of Mantegna's algorithm for sampling step lengths from a Levy distribution
    numerator = math.gamma(1. + beta) * math.sin(math.pi * beta / 2.)
    denominator = beta * math.gamma((1. + beta) / 2.) * 2 ** ((beta - 1.) / 2)
    self.sig = (numerator / denominator) ** (1. / (2 * beta))
def __init__(self, ref_dirs, n_neighbors=20, decomposition='auto', prob_neighbor_mating=0.9, **kwargs):
    """
    Parameters
    ----------
    ref_dirs : {ref_dirs}
    decomposition : {{ 'auto', 'tchebi', 'pbi' }}
        The decomposition approach that should be used. If set to `auto` for two objectives `tchebi` and for more than
        two `pbi` will be used.
    n_neighbors : int
        Number of neighboring reference lines to be used for selection.
    prob_neighbor_mating : float
        Probability of selecting the parents in the neighborhood.
    """
    self.n_neighbors = n_neighbors
    self.prob_neighbor_mating = prob_neighbor_mating
    self.decomposition = decomposition

    # fill in algorithm defaults only where the caller has not chosen a value
    defaults = dict(pop_size=len(ref_dirs),
                    sampling=FloatRandomSampling(),
                    crossover=SimulatedBinaryCrossover(prob=1.0, eta=20),
                    mutation=PolynomialMutation(prob=None, eta=20),
                    survival=None,
                    selection=None)
    for key, value in defaults.items():
        set_if_none(kwargs, key, value)

    super().__init__(**kwargs)

    # multi-objective attributes shown during verbose output
    self.func_display_attrs = disp_multi_objective

    # initialized when problem is known
    self.ref_dirs = ref_dirs

    if self.ref_dirs.shape[0] < self.n_neighbors:
        print("Setting number of neighbours to population size: %s" % self.ref_dirs.shape[0])
        self.n_neighbors = self.ref_dirs.shape[0]

    # neighbours includes the entry by itself intentionally for the survival method
    self.neighbors = np.argsort(cdist(self.ref_dirs, self.ref_dirs), axis=1, kind='quicksort')[:, :self.n_neighbors]
def __init__(self,
             ref_dirs,
             sampling=FloatRandomSampling(),
             selection=RestrictedMating(func_comp=comp_by_cv_dom_then_random),
             crossover=SimulatedBinaryCrossover(n_offsprings=1, eta=30, prob=1.0),
             mutation=PolynomialMutation(eta=20, prob=None),
             eliminate_duplicates=True,
             display=MultiObjectiveDisplay(),
             **kwargs):
    """
    Parameters
    ----------
    ref_dirs : {ref_dirs}
    sampling : {sampling}
    selection : {selection}
    crossover : {crossover}
    mutation : {mutation}
    eliminate_duplicates : {eliminate_duplicates}
    """
    self.ref_dirs = ref_dirs

    # one individual per reference direction; offspring count matches pop size
    pop_size = len(ref_dirs)

    # individuals carry rank/niche/fitness-value bookkeeping for the survival
    kwargs['individual'] = Individual(rank=np.inf, niche=-1, FV=-1)

    # a caller-provided survival (even an explicit one) takes precedence
    if 'survival' in kwargs:
        survival = kwargs.pop('survival')
    else:
        survival = CADASurvival(ref_dirs)

    # Initialize diversity archives
    self.da = None

    super().__init__(pop_size=pop_size,
                     sampling=sampling,
                     selection=selection,
                     crossover=crossover,
                     mutation=mutation,
                     survival=survival,
                     eliminate_duplicates=eliminate_duplicates,
                     n_offsprings=pop_size,
                     display=display,
                     **kwargs)
def __init__(self,
             n_elites=20,
             n_offsprings=70,
             n_mutants=10,
             bias=0.7,
             sampling=FloatRandomSampling(),
             survival=None,
             display=SingleObjectiveDisplay(),
             eliminate_duplicates=False,
             **kwargs):
    """
    Biased random-key genetic algorithm (BRKGA) variant with a
    tolerance-based default termination.

    Parameters
    ----------
    n_elites : int
        Number of elite individuals kept each generation (NOT the total
        population size; the population is n_elites + n_offsprings + n_mutants).
    n_offsprings : int
        Number of offspring generated by biased crossover each generation.
    n_mutants : int
        Number of randomly sampled mutants introduced each generation.
    bias : float
        Probability that an offspring inherits the allele of its elite parent.
    sampling :
        Sampling operator used for the initial population.
    survival :
        Survival operator; defaults to EliteSurvival(n_elites, ...) when None.
    display :
        Display used for verbose output.
    eliminate_duplicates : bool or class
        Duplicate-elimination policy forwarded to the default EliteSurvival.
    """

    if survival is None:
        survival = EliteSurvival(n_elites, eliminate_duplicates=eliminate_duplicates)

    # total population = elites + offspring + mutants, every generation
    super().__init__(pop_size=n_elites + n_offsprings + n_mutants,
                     n_offsprings=n_offsprings,
                     sampling=sampling,
                     selection=EliteBiasedSelection(),
                     crossover=BiasedCrossover(bias, prob=1.0),
                     mutation=NoMutation(),
                     survival=survival,
                     display=display,
                     eliminate_duplicates=True,
                     **kwargs)

    self.n_elites = n_elites
    self.n_mutants = n_mutants
    self.bias = bias
    self.default_termination = SingleObjectiveToleranceBasedTermination()
def __init__(self, ref_dirs, n_neighbors=20, decomposition='auto', prob_neighbor_mating=0.9,
             display=MultiObjectiveDisplay(), **kwargs):
    """
    MOEA/D with neighborhood-restricted mating.

    Parameters
    ----------
    ref_dirs
    n_neighbors
    decomposition
    prob_neighbor_mating
    display
    kwargs
    """
    self.n_neighbors = n_neighbors
    self.prob_neighbor_mating = prob_neighbor_mating
    self.decomposition = decomposition

    # fill in algorithm defaults only where the caller has not chosen a value
    defaults = dict(pop_size=len(ref_dirs),
                    sampling=FloatRandomSampling(),
                    crossover=SimulatedBinaryCrossover(prob=1.0, eta=20),
                    mutation=PolynomialMutation(prob=None, eta=20),
                    survival=None,
                    selection=None)
    for key, value in defaults.items():
        set_if_none(kwargs, key, value)

    super().__init__(display=display, **kwargs)

    # initialized when problem is known
    self.ref_dirs = ref_dirs

    if self.ref_dirs.shape[0] < self.n_neighbors:
        print("Setting number of neighbours to population size: %s" % self.ref_dirs.shape[0])
        self.n_neighbors = self.ref_dirs.shape[0]

    # neighbours includes the entry by itself intentionally for the survival method
    self.neighbors = np.argsort(cdist(self.ref_dirs, self.ref_dirs), axis=1, kind='quicksort')[:, :self.n_neighbors]
def __init__(self,
             display=MOCSDisplay(),
             sampling=FloatRandomSampling(),
             survival=RankAndCrowdingSurvival(),
             eliminate_duplicates=DefaultDuplicateElimination(),
             termination=None,
             pop_size=100,
             beta=1.5,
             alfa=0.1,
             pa=0.35,
             **kwargs):
    """
    Multi-objective cuckoo search; a thin configuration layer over the base class.

    Parameters
    ----------
    display : {display}
    sampling : {sampling}
    survival : {survival}
    eliminate_duplicates: {eliminate_duplicates}
    termination : {termination}
    pop_size : The number of nests (solutions)
    beta : The input parameter of the Mantegna's Algorithm to simulate
        sampling on Levy Distribution
    alfa : The scaling step size and is usually O(L/100) with L is the
        scale of the problem
    pa : The switch probability, pa fraction of the nests will be
        abandoned on every iteration
    """
    # everything is forwarded unchanged; this subclass only changes the defaults
    config = dict(display=display,
                  sampling=sampling,
                  survival=survival,
                  eliminate_duplicates=eliminate_duplicates,
                  termination=termination,
                  pop_size=pop_size,
                  beta=beta,
                  alfa=alfa,
                  pa=pa)
    super().__init__(**config, **kwargs)
def __init__(self,
             pop_size=100,
             sampling=FloatRandomSampling(),
             selection=RandomSelection(),
             crossover=SimulatedBinaryCrossover(prob=0.9, eta=3),
             mutation=PolynomialMutation(prob=None, eta=5),
             eliminate_duplicates=True,
             n_offsprings=None,
             display=SingleObjectiveDisplay(),
             **kwargs):
    """
    Genetic algorithm with niching-based survival selection.

    Parameters
    ----------
    pop_size : {pop_size}
    sampling : {sampling}
    selection : {selection}
    crossover : {crossover}
    mutation : {mutation}
    eliminate_duplicates : {eliminate_duplicates}
    n_offsprings : {n_offsprings}
    """
    # survival is fixed to niching-based selection for this algorithm
    super().__init__(pop_size=pop_size,
                     sampling=sampling,
                     selection=selection,
                     crossover=crossover,
                     mutation=mutation,
                     survival=NichingSurvival(),
                     eliminate_duplicates=eliminate_duplicates,
                     n_offsprings=n_offsprings,
                     display=display,
                     **kwargs)

    self.default_termination = SingleObjectiveDefaultTermination()
def _next(self):
    """Advance one of the parallel niche solvers by one step.

    A solver is picked by roulette-wheel selection biased towards better
    current optima, stepped once, and - if it has terminated - archived as a
    finished niche and replaced by a fresh solver started from a random point.
    """
    n_evals = np.array([solver.evaluator.n_eval for solver in self.solvers])

    # rank solvers by their current best objective value (1 = best)
    ranks = np.array([solver.opt[0].F[0] for solver in self.solvers]).argsort() + 1

    # better-ranked solvers get a higher chance of being stepped
    rws = RouletteWheelSelection(ranks, larger_is_better=False)
    S = rws.next()
    self.solvers[S].next()

    # NOTE(review): debug output - consider routing through a logger/display
    print(n_evals.sum(), n_evals)

    # archive the solver once its own termination criterion fires
    if self.solvers[S].termination.force_termination or self.solvers[S].termination.has_terminated(self.solvers[S]):
        self.niches.append(self.solvers[S])
        print(self.solvers[S].opt.get("F"), self.solvers[S].opt.get("X"))
        self.solvers[S] = None

    # restart every empty slot with a fresh solver from a random point
    for k in range(self.n_parallel):
        if self.solvers[k] is None:
            x = FloatRandomSampling().do(self.problem, 1)[0].get("X")
            # fix: previously assigned to self.solvers[S]; correct slot is k
            # (identical when only slot S is empty, but wrong if several are)
            self.solvers[k] = self.func_create_solver(self.problem, x)
def __init__(self,
             pop_size=100,
             sampling=FloatRandomSampling(),
             selection=TournamentSelection(func_comp=binary_tournament),
             crossover=SimulatedBinaryCrossover(eta=15, prob=0.9),
             mutation=PolynomialMutation(prob=None, eta=20),
             eliminate_duplicates=True,
             n_offsprings=None,
             display=MultiObjectiveDisplay(),
             **kwargs):
    """
    NSGA-II configuration: rank-and-crowding survival with binary tournament mating.

    Parameters
    ----------
    pop_size : {pop_size}
    sampling : {sampling}
    selection : {selection}
    crossover : {crossover}
    mutation : {mutation}
    eliminate_duplicates : {eliminate_duplicates}
    n_offsprings : {n_offsprings}
    """
    # individuals carry rank and crowding distance used by survival/tournament
    kwargs['individual'] = Individual(rank=np.inf, crowding=-1)

    super().__init__(pop_size=pop_size,
                     sampling=sampling,
                     selection=selection,
                     crossover=crossover,
                     mutation=mutation,
                     survival=RankAndCrowdingSurvival(),
                     eliminate_duplicates=eliminate_duplicates,
                     n_offsprings=n_offsprings,
                     display=display,
                     **kwargs)

    self.tournament_type = 'comp_by_dom_and_crowding'
def main():
    """Run the MOEA/D experiment on the CDN simulation problem and export results."""
    # get argument values
    args = get_args()

    # get reference point
    if args.ref_point is None:
        args.ref_point = get_ref_point(args.problem, args.n_var, args.n_obj, args.n_init_sample)

    t0 = time()

    # set seed
    np.random.seed(args.seed)

    # build problem, get initial samples
    # NOTE(review): hard-coded absolute paths - only runs on this specific machine
    jsonFile = "/home/picarib/Desktop/cdnet/config/json/sbd_custom.json"
    configDirPath = "/home/picarib/Desktop/cdnet/config/sbd_custom/"
    dataPath = "/home/picarib/Desktop/cdnet/data/"

    config = loadJSON(jsonFile)
    interval = 1 if "custom" not in config["RequestModels"] else config["RequestModels"]["custom"]["interval"]
    isLoadRTable = config["isLoadRTable"]
    isLoadSeparatorRank = config["isLoadSeparatorRank"]
    mode = config["RoutingMode"]  # [no-cache, no-color, tag-color, full-color]
    fileSize = config["FileSize"]
    # optional request counts default to -1 (presumably "unlimited" - TODO confirm)
    runReqNums = config["RunReqNums"] if "RunReqNums" in config else -1
    warmUpReqNums = config["WarmUpReqNums"] if "WarmUpReqNums" in config else -1
    colorNums = config["colorNums"]
    separatorRankIncrement = config["separatorRankIncrement"]

    # random 6-character alphanumeric identifiers, one per color
    colorList = [''.join(random.choices(string.ascii_uppercase + string.digits, k=6)) for i in range(colorNums)]

    # build the simulated network topology the problem evaluates against
    topo = NetTopology(config, configDirPath, mode, warmUpReqNums, fileSize, colorList)
    topo.build()

    extra_params = (topo, fileSize, mode, colorList, runReqNums, warmUpReqNums, separatorRankIncrement)
    problem, X_init, Y_init = build_problem(args.problem, args.n_var, args.n_obj, args.n_init_sample,
                                            args.n_process, extra_params=extra_params)
    args.n_var, args.n_obj, args.algo = problem.n_var, problem.n_obj, 'moeda'

    # save arguments and setup logger
    save_args(args)
    logger = setup_logger(args)
    print(problem)

    # initialize evolutionary algorithm
    ref_dir = get_reference_directions("das-dennis", 2, n_partitions=15)
    # batch size cannot exceed the number of reference directions
    args.batch_size = min(len(ref_dir), args.batch_size)

    # initialize population
    if args.pop_init_method == 'lhs':
        sampling = LatinHypercubeSampling()
    elif args.pop_init_method == 'nds':
        # seed with the best existing samples in non-dominated-sorting order
        sorted_indices = NonDominatedSorting().do(Y_init)
        sampling = X_init[np.concatenate(sorted_indices)][:args.batch_size]
        # top up with LHS samples when there are not enough existing points
        if len(sampling) < args.batch_size:
            rest_sampling = lhs(X_init.shape[1], args.batch_size - len(sampling))
            sampling = np.vstack([sampling, rest_sampling])
    elif args.pop_init_method == 'random':
        sampling = FloatRandomSampling()
    else:
        raise NotImplementedError

    # integer-coded MOEA/D with PBI decomposition
    ea_algorithm = MOEAD(ref_dir, n_neighbors=args.batch_size, sampling=sampling,
                         crossover=get_crossover("int_one_point"),
                         mutation=get_mutation("int_pm", prob=1.0 / 13),
                         decomposition="pbi", seed=1)

    # initialize data exporter
    exporter = DataExport(X_init, Y_init, args)

    # find Pareto front
    res = minimize(problem, ea_algorithm, ('n_gen', args.n_iter), save_history=True)
    X_history = np.array([algo.pop.get('X') for algo in res.history])
    Y_history = np.array([algo.pop.get('F') for algo in res.history])

    # update data exporter
    for X_next, Y_next in zip(X_history, Y_history):
        exporter.update(X_next, Y_next)

    # export all result to csv
    exporter.write_csvs()

    # statistics
    final_hv = calc_hypervolume(exporter.Y, exporter.ref_point)
    print('========== Result ==========')
    print('Total runtime: %.2fs' % (time() - t0))
    print('Total evaluations: %d, hypervolume: %.4f\n' % (args.batch_size * args.n_iter, final_hv))

    # close logger
    if logger is not None:
        logger.close()
def main():
    """Run the NSGA-II baseline experiment on a benchmark problem and export results."""
    # get argument values
    args = get_args()

    # get reference point
    if args.ref_point is None:
        args.ref_point = get_ref_point(args.problem, args.n_var, args.n_obj, args.n_init_sample)

    t0 = time()

    # set seed
    np.random.seed(args.seed)

    # build problem, get initial samples
    problem, true_pfront, X_init, Y_init = build_problem(args.problem, args.n_var, args.n_obj,
                                                         args.n_init_sample, args.n_process)
    args.n_var, args.n_obj, args.algo = problem.n_var, problem.n_obj, 'nsga2'

    # save arguments and setup logger
    save_args(args)
    logger = setup_logger(args)
    print(problem)

    # initialize data exporter
    exporter = DataExport(X_init, Y_init, args)

    # initialize population
    if args.pop_init_method == 'lhs':
        sampling = LatinHypercubeSampling()
    elif args.pop_init_method == 'nds':
        # seed with the best existing samples in non-dominated-sorting order
        sorted_indices = NonDominatedSorting().do(Y_init)
        sampling = X_init[np.concatenate(sorted_indices)][:args.batch_size]
        # top up with LHS samples when there are not enough existing points
        if len(sampling) < args.batch_size:
            rest_sampling = lhs(X_init.shape[1], args.batch_size - len(sampling))
            sampling = np.vstack([sampling, rest_sampling])
    elif args.pop_init_method == 'random':
        sampling = FloatRandomSampling()
    else:
        raise NotImplementedError

    # initialize evolutionary algorithm
    ea_algorithm = NSGA2(pop_size=args.batch_size, sampling=sampling)

    # find Pareto front
    res = minimize(problem, ea_algorithm, ('n_gen', args.n_iter), save_history=True)
    X_history = np.array([algo.pop.get('X') for algo in res.history])
    Y_history = np.array([algo.pop.get('F') for algo in res.history])

    # update data exporter
    for X_next, Y_next in zip(X_history, Y_history):
        exporter.update(X_next, Y_next)

    # export all result to csv
    exporter.write_csvs()
    if true_pfront is not None:
        exporter.write_truefront_csv(true_pfront)

    # statistics
    final_hv = calc_hypervolume(exporter.Y, exporter.ref_point)
    print('========== Result ==========')
    print('Total runtime: %.2fs' % (time() - t0))
    print('Total evaluations: %d, hypervolume: %.4f\n' % (args.batch_size * args.n_iter, final_hv))

    # close logger
    if logger is not None:
        logger.close()
# Demo script: exercise SimulatedBinaryCrossover on a pair of random individuals.
import numpy as np

from pymoo.operators.crossover.simulated_binary_crossover import SimulatedBinaryCrossover
from pymoo.operators.sampling.random_sampling import FloatRandomSampling
from pymoo.problems.single import Rastrigin

problem = Rastrigin(n_var=30)

crossover = SimulatedBinaryCrossover(eta=20)

# sample two parents and mate them via a (1, 2)-shaped parent-index matrix
pop = FloatRandomSampling().do(problem, 2)
parents = np.array([[0, 1]])

off = crossover.do(problem, pop, parents)
print(off)

ind_a = pop[0]
ind_b = pop[1]

# NOTE(review): this call passes two Individual objects instead of the
# (pop, parent-index) pair used above - verify this calling convention is
# actually supported by the installed pymoo version before relying on it
off = crossover.do(problem, ind_a, ind_b)
print(off)
def __init__( self, ref_points, pop_per_ref_point, mu=0.05, sampling=FloatRandomSampling(), selection=TournamentSelection(func_comp=comp_by_cv_then_random), crossover=SimulatedBinaryCrossover(eta=30, prob=1.0), mutation=PolynomialMutation(eta=20, prob=None), eliminate_duplicates=True, n_offsprings=None, **kwargs): """ Parameters ---------- ref_points : {ref_points} pop_per_ref_point : int Size of the population used for each reference point. mu : float Defines the scaling of the reference lines used during survival selection. Increasing mu will result having solutions with a larger spread. Other Parameters ------- n_offsprings : {n_offsprings} sampling : {sampling} selection : {selection} crossover : {crossover} mutation : {mutation} eliminate_duplicates : {eliminate_duplicates} """ # number of objectives the reference lines have n_obj = ref_points.shape[1] # add the aspiration point lines aspiration_ref_dirs = UniformReferenceDirectionFactory( n_dim=n_obj, n_points=pop_per_ref_point).do() survival = AspirationPointSurvival(ref_points, aspiration_ref_dirs, mu=mu) pop_size = ref_points.shape[0] * aspiration_ref_dirs.shape[ 0] + aspiration_ref_dirs.shape[1] ref_dirs = None super().__init__(ref_dirs, pop_size=pop_size, sampling=sampling, selection=selection, crossover=crossover, mutation=mutation, survival=survival, eliminate_duplicates=eliminate_duplicates, n_offsprings=n_offsprings, **kwargs)