def __init__(
    self,
    problem: MOProblem,
    scalarization_function: MOEADSF = None,
    n_neighbors: int = 20,
    population_params: Dict = None,
    initial_population: Population = None,
    lattice_resolution: int = None,
    use_repair: bool = True,
    n_parents: int = 2,
    a_priori: bool = False,
    interact: bool = False,
    use_surrogates: bool = False,
    n_iterations: int = 10,
    n_gen_per_iter: int = 100,
    total_function_evaluations: int = 0,
):
    """Set up a decomposition-based (MOEA/D-style) evolutionary optimizer.

    Parameters
    ----------
    problem : MOProblem
        The multiobjective optimization problem to be solved.
    scalarization_function : MOEADSF, optional
        Scalarizing function used by the selection operator to compare
        solutions within a neighborhood. Defaults to a fresh
        ``Tchebycheff()`` instance. A ``None`` sentinel is used instead
        of an instance default so that each optimizer gets its own
        scalarizer object rather than one shared at class-definition time.
    n_neighbors : int, optional
        Size of each reference vector's neighborhood (default 20).
    population_params : Dict, optional
        Extra parameters forwarded to population construction.
    initial_population : Population, optional
        A pre-built population to start from instead of a random one.
    lattice_resolution : int, optional
        Resolution of the reference-vector lattice.
    use_repair : bool, optional
        Whether offspring repair is applied (default True).
    n_parents : int, optional
        Number of parents used in recombination (default 2).
    a_priori : bool, optional
        Whether preferences are given a priori.
    interact : bool, optional
        Whether the method runs interactively.
    use_surrogates : bool, optional
        Whether surrogate models are used for evaluation.
    n_iterations : int, optional
        Number of (interactive) iterations (default 10).
    n_gen_per_iter : int, optional
        Generations per iteration (default 100).
    total_function_evaluations : int, optional
        Evaluation budget; 0 means no budget limit.
    """
    # Build a per-instance scalarizer; a def-time default instance would
    # be shared by every optimizer created from this class.
    if scalarization_function is None:
        scalarization_function = Tchebycheff()
    super().__init__(
        problem=problem,
        # Population size is derived from the reference-vector lattice,
        # not passed explicitly.
        population_size=None,
        population_params=population_params,
        initial_population=initial_population,
        lattice_resolution=lattice_resolution,
        a_priori=a_priori,
        interact=interact,
        use_surrogates=use_surrogates,
        n_iterations=n_iterations,
        n_gen_per_iter=n_gen_per_iter,
        total_function_evaluations=total_function_evaluations,
    )
    self.population_size = self.population.pop_size
    self.problem = problem
    self.scalarization_function = scalarization_function
    self.n_neighbors = n_neighbors
    self.use_repair = use_repair
    self.n_parents = n_parents
    # Bounded polynomial mutation (prob 0.5, distribution index 20) and
    # simulated binary crossover (prob 1.0, distribution index 20) — the
    # operator configuration used by this algorithm variant.
    self.population.mutation = BP_mutation(
        problem.get_variable_lower_bounds(),
        problem.get_variable_upper_bounds(),
        0.5,
        20,
    )
    self.population.recombination = SBX_xover(1.0, 20)
    self.selection_operator = MOEAD_select(
        self.population, SF_type=self.scalarization_function
    )
    # Pairwise distances between all reference vectors; each vector's
    # neighborhood is its n_neighbors closest vectors (including itself).
    distance_matrix_vectors = distance_matrix(
        self.reference_vectors.values, self.reference_vectors.values
    )
    self.neighborhoods = np.argsort(
        distance_matrix_vectors, axis=1, kind="quicksort"
    )[:, :n_neighbors]
    self.population.update_ideal()
    self._ideal_point = self.population.ideal_objective_vector
def __init__(self, problem: MOProblem, scalar_method: Optional[ScalarMethod] = None):
    """Initialize the interactive method and compute a Pareto optimal start point.

    Ensures ideal and nadir points are available (estimating them with the
    payoff-table method when the problem does not define them), then solves
    an achievement scalarizing problem to obtain an initial Pareto optimal
    solution before delegating to the parent initializer.

    Parameters
    ----------
    problem : MOProblem
        The multiobjective problem being solved.
    scalar_method : Optional[ScalarMethod]
        Single-objective solver used for the scalarized subproblems;
        ``None`` lets ScalarMinimizer pick its default.
    """
    # check if ideal and nadir are defined
    if problem.ideal is None or problem.nadir is None:
        # TODO: use same method as defined in scalar_method
        ideal, nadir = payoff_table_method(problem)
        self._ideal = ideal
        self._nadir = nadir
    else:
        self._ideal = problem.ideal
        self._nadir = problem.nadir
    self._scalar_method = scalar_method
    # generate Pareto optimal starting point by minimizing an ASF with the
    # ideal point as the reference point
    asf = SimpleASF(np.ones(self._ideal.shape))
    scalarizer = Scalarizer(
        lambda x: problem.evaluate(x).objectives,
        asf,
        scalarizer_args={"reference_point": np.atleast_2d(self._ideal)},
    )
    # Constraints are only wired in when the problem actually has some.
    if problem.n_of_constraints > 0:
        _con_eval = lambda x: problem.evaluate(x).constraints.squeeze()
    else:
        _con_eval = None
    solver = ScalarMinimizer(
        scalarizer,
        problem.get_variable_bounds(),
        constraint_evaluator=_con_eval,
        method=self._scalar_method,
    )
    # TODO: fix tools to check for scipy methods in general and delete me!
    solver._use_scipy = True
    # Start the search from the midpoint of the variable ranges' upper bounds.
    res = solver.minimize(problem.get_variable_upper_bounds() / 2)
    if res["success"]:
        self._current_solution = res["x"]
        self._current_objectives = problem.evaluate(
            self._current_solution).objectives.squeeze()
    # NOTE(review): when the solver does NOT succeed, _current_solution and
    # _current_objectives are never assigned — any later access would raise
    # AttributeError. Confirm whether a failure here should raise explicitly.
    self._archive_solutions = []
    self._archive_objectives = []
    # Interactive state machine starts in the classification phase.
    self._state = "classify"
    super().__init__(problem)
def __init__(self,
             problem: MOProblem,
             assign_type: str = "RandomDesign",
             pop_size=None,
             recombination_type=None,
             crossover_type="simulated_binary_crossover",
             mutation_type="bounded_polynomial_mutation",
             *args,
             plotting: bool = True):
    """Initialize the population.

    Parameters
    ----------
    problem : BaseProblem
        An object of the class Problem
    assign_type : str, optional
        Define the method of creation of population. If 'assign_type' is
        'RandomDesign' the population is generated randomly. If
        'assign_type' is 'LHSDesign', the population is generated via Latin
        Hypercube Sampling. If 'assign_type' is 'custom', the population is
        imported from file. If assign_type is 'empty', create blank
        population. 'EvoNN' and 'EvoDN2' will create neural networks or
        deep neural networks, respectively, for population.
    pop_size : int
        Population size
    recombination_type, crossover_type, mutation_type : str
        Recombination functions. If recombination_type is specified,
        crossover and mutation will be handled by the same function. If
        None, they are done separately.
    plotting : bool, optional, keyword-only
        Whether plots are created during evolution (the default is True,
        which creates the plots). Keyword-only so existing positional
        callers are unaffected.
    """
    self.assign_type = assign_type
    self.num_var = problem.n_of_variables
    self.lower_limits = np.asarray(problem.get_variable_lower_bounds())
    self.upper_limits = np.asarray(problem.get_variable_upper_bounds())
    # Hypervolume and non-dominated counters, updated during evolution.
    self.hyp = 0
    self.non_dom = 0
    self.pop_size = pop_size
    # Fix to remove the following assumptions: dispatch table mapping
    # operator names to their implementations.
    self.recombination_funcs = {
        "biogp_xover": biogp_xover,
        "biogp_mut": biogp_mutation,
        "evodn2_xover_mutation": evodn2_xover_mutation,
        "evonn_xover_mutation": evonn_xover_mutation,
        "bounded_polynomial_mutation": bounded_polynomial_mutation,
        "simulated_binary_crossover": simulated_binary_crossover,
    }
    self.crossover_type = crossover_type
    self.mutation_type = mutation_type
    self.recombination = self.recombination_funcs.get(
        recombination_type, None)
    # A combined recombination operator supersedes separate crossover and
    # mutation; only wire the latter when no combined operator was named.
    if recombination_type is None:
        self.crossover = self.recombination_funcs.get(crossover_type, None)
        self.mutation = self.recombination_funcs.get(mutation_type, None)
    self.problem = problem
    self.filename = (problem.name + "_" + str(problem.n_of_objectives)
                     )  # Used for plotting
    # BUGFIX: 'plotting' was referenced here but was not a parameter,
    # raising NameError on every construction; it is now a keyword-only
    # parameter defaulting to True as the docstring always described.
    self.plotting = plotting
    self.individuals = []
    self.objectives = np.empty((0, self.problem.n_of_objectives), float)
    if problem.minimize is not None:
        # Fitness covers only the objectives that are being minimized.
        self.fitness = self.objectives[:, self.problem.minimize]
        self.ideal_fitness = np.full((1, self.fitness.shape[1]), np.inf)
        self.worst_fitness = -1 * self.ideal_fitness
    else:
        # NOTE(review): this branch (and constraint_violation below) reads
        # 'num_of_objectives'/'num_of_constraints' while the code above uses
        # 'n_of_objectives' — confirm the problem class exposes both
        # attribute spellings.
        self.fitness = np.empty((0, self.problem.num_of_objectives), float)
        self.ideal_fitness = np.full((1, self.problem.num_of_objectives),
                                     np.inf)
        self.worst_fitness = -1 * self.ideal_fitness
    self.constraint_violation = np.empty(
        (0, self.problem.num_of_constraints), float)
    # Per-generation archive of decision variables and objective values.
    self.archive = pd.DataFrame(
        columns=["generation", "decision_variables", "objective_values"])
    if not assign_type == "empty":
        individuals = create_new_individuals(assign_type,
                                             problem,
                                             pop_size=self.pop_size)
        self.add(individuals)
    if self.plotting:
        self.figure = []
        self.plot_init_()