def main():
    # get config vars
    config = cli.init()
    population_size_nsgaii = config['POPULATION_SIZE_NSGAII']
    number_of_runs_nsgaii = config['NUMBER_OF_RUNS_NSGAII']
    number_of_runs_ga = config['NUMBER_OF_RUNS_GA']
    population_size_ga = config['POPULATION_SIZE_GA']
    config_path = config['TEST_DATA_PATH']
    ga_weights = config['GA_WEIGHTS']
    budget_constraint = config['BUDGET_CONSTRAINT']

    # parse and get specific data
    data = test_data.parse(config_path)
    requirements = data[0]
    clients = data[1]

    # run NSGA-II multi-objective algorithm
    print(datetime.datetime.now())
    print('Running NSGA-II...')
    NRP_multi = NRP_MOO(requirements, clients, budget_constraint)
    algorithm = NSGAII(NRP_multi.generate_problem(), population_size=population_size_nsgaii)
    algorithm.run(number_of_runs_nsgaii)
    NSGAII_solutions = unique(nondominated(algorithm.result))

    # run GA single-objective algorithm with different weights
    GA_solutions = []
    for ga_weight in ga_weights:
        print(datetime.datetime.now())
        print('Running GA for weights ' + str(ga_weight) + ' and ' + str(1 - ga_weight) + '...')
        NRP_single = NRP_SOO(requirements, clients, budget_constraint, ga_weight, 1 - ga_weight)
        algorithm = GeneticAlgorithm(NRP_single.generate_problem(), population_size=population_size_ga)
        algorithm.run(number_of_runs_ga)
        GA_solutions.extend(unique(nondominated(algorithm.result)))

    # run random algorithm
    print(datetime.datetime.now())
    print('Generating random solution...')
    NRP_random = NRP_Random(requirements, clients, budget_constraint)
    random_solutions = NRP_random.generate_solutions()
    print('done!')

    # draw graphs
    results.draw_graphs([
        results.get_graph_data_nsga_ii(NSGAII_solutions),
        results.get_graph_data_ga(GA_solutions, requirements, clients, budget_constraint),
        results.get_graph_data_ga(random_solutions, requirements, clients, budget_constraint)
    ])
def _plot(optimizer, title, experiment=None, filename=None, show=False):
    results = nondominated(optimizer.result)
    x = [s.objectives[0] for s in results]
    y = [s.objectives[1] for s in results]

    if experiment is not None:
        comet.log_metric("EnvironmentalBenefit", x, experiment=experiment)  # log the resulting values
        comet.log_metric("EconomicBenefit", y, experiment=experiment)

    log.debug("X: {}".format(x))
    log.debug("Y: {}".format(y))

    plt.scatter(x, y)
    plt.xlabel("Environmental Flow Benefit")
    plt.ylabel("Economic Benefit")
    plt.title(title)

    if experiment is not None:
        experiment.log_figure(title)

    if filename:
        plt.savefig(fname=filename, dpi=300)
    if show:
        plt.show()

    plt.close()
def to_dataframe(optimizer, dvnames, outcome_names):
    '''helper function to turn results of optimization into a pandas DataFrame

    Parameters
    ----------
    optimizer : platypus algorithm instance
    dvnames : list of str
    outcome_names : list of str

    Returns
    -------
    pandas DataFrame

    '''
    solutions = []
    for solution in platypus.unique(platypus.nondominated(optimizer.result)):
        vars = transform_variables(solution.problem,  # @ReservedAssignment
                                   solution.variables)
        decision_vars = dict(zip(dvnames, vars))
        decision_out = dict(zip(outcome_names, solution.objectives))

        result = decision_vars.copy()
        result.update(decision_out)

        solutions.append(result)

    results = pd.DataFrame(solutions, columns=dvnames + outcome_names)
    return results
def calibrate(run_counts):
    """Run the optimization model to calibrate the parameters.

    Parameters
    ----------
    run_counts: int
        number of runs (function evaluations)

    Returns
    -------
    optimal_params: list
        the set of non-dominated (Pareto) solutions
    """
    algorithm = NSGAII(XajCalibrate(), population_size=500,
                       variator=GAOperator(SBX(0.95, 20.0), PM(2, 25.0)))
    algorithm.run(run_counts)

    # We could also get only the non-dominated solutions; only those are shown here.
    nondominated_solutions = nondominated(algorithm.result)

    # plot the results using matplotlib
    # With more than three objectives the visualization would need dimensionality
    # reduction; two objectives are used here as an example.
    plt.scatter([s.objectives[0] for s in nondominated_solutions],
                [s.objectives[1] for s in nondominated_solutions])
    plt.xlim([0, 1])
    plt.ylim([0, 1])
    plt.xlabel("$mare$")
    plt.ylabel("$nse$")
    plt.show()

    # return the optimal parameters
    optimal_params = []
    for nondominated_solution in nondominated_solutions:
        optimal_params.append(nondominated_solution.variables)
    return optimal_params
def __call__(self, optimizer):
    n_solutions = 0

    if platypus is None:
        raise ModuleNotFoundError("platypus")

    for _ in platypus.unique(platypus.nondominated(optimizer.result)):
        n_solutions += 1

    self.results.append(n_solutions)
    super().__call__(optimizer)
def to_dataframe(optimizer, dvnames, outcome_names):
    '''helper function to turn results of optimization into a pandas DataFrame

    Parameters
    ----------
    optimizer : platypus algorithm instance
    dvnames : list of str
    outcome_names : list of str

    Returns
    -------
    pandas DataFrame

    '''
    solutions = []
    for solution in platypus.unique(platypus.nondominated(optimizer.result)):
        decision_vars = dict(zip(dvnames, solution.variables))
        decision_out = dict(zip(outcome_names, solution.objectives))

        result = decision_vars.copy()
        result.update(decision_out)

        solutions.append(result)

    results = pd.DataFrame(solutions, columns=dvnames + outcome_names)
    return results
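# Hedged usage sketch (not part of the original module): the helper above only
# needs a finished Platypus optimizer, so it could be driven like this. The DTLZ2
# test problem and the "x.." / "f.." column names are illustrative assumptions;
# the helper's own pandas/platypus imports are assumed to be available.
import platypus

problem = platypus.DTLZ2()            # small built-in 2-objective test problem
algorithm = platypus.NSGAII(problem)
algorithm.run(1000)

df = to_dataframe(algorithm,
                  dvnames=["x{}".format(i) for i in range(problem.nvars)],
                  outcome_names=["f{}".format(i) for i in range(problem.nobjs)])
print(df.head())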
def solutions_to_df(solutions: List[platypus.Solution], problem, parts='all',
                    flag_optimal=True) -> pd.DataFrame:
    """Converts a list of platypus solutions to a DataFrame, with one row corresponding to each solution

    :param solutions: list of solutions to convert
    :param problem: the problem whose parameter names provide the column names for the DataFrame
    :param parts: which parts of the solutions should be kept
    :param flag_optimal: whether to include a boolean column denoting whether each solution is pareto-optimal
    :return: a DataFrame
    """
    def to_col_vals(solution_list):
        return list(zip(*(solution_to_values(solution, parts) for solution in solution_list)))

    solutions = platypus.unique(solutions)
    non_dominated = platypus.nondominated(solutions)
    columns = problem.names(parts)
    values, non_dom_vals = to_col_vals(solutions), to_col_vals(non_dominated)
    assert len(columns) == len(values), f'{len(values)} values does not match {len(columns)} columns'
    # TODO: Intuit the DataFrame column types based on the types of the parameters of the problem
    #       or use the to_df method of the problem object
    solution_df = pd.DataFrame({column: data for column, data in zip(columns, values)})  # , dtype=float
    if flag_optimal:
        non_dom_df = pd.DataFrame({column: data for column, data in zip(columns, non_dom_vals)})  # , dtype=float
        df = pd.merge(solution_df, non_dom_df, how='outer', indicator='pareto-optimal')
        df['pareto-optimal'] = df['pareto-optimal'] == 'both'
        return df
    return solution_df
def autoIterate(model, river, reach, rs, flow, stage, nct, plot, outf, metrics, correctDatum, evals=None, si=False): """ Automatically iterate with NSGA-II """ keys = metrics # ensure same order evalf = evaluator(stage, useTests=keys, correctDatum=correctDatum) evals = int( input("How many evaluations to run? ")) if evals is None else evals plotpath = ".".join(outf.split(".")[:-1]) + ".png" count = 1 print("Running automatic calibration") def manningEval(vars): n = vars[0] metrics = minimized( nstageSingleRun(model, river, reach, rs, stage, n, keys, correctDatum)) values = [metrics[key] for key in keys] constraints = [-n, n - 1] nonlocal count print("Completed %d evaluations" % count) count += 1 return values, constraints c_type = "<0" problem = Problem( 1, len(keys), 2) # 1 decision variable, len(keys) objectives, and 2 constraints problem.types[:] = Real(0.001, 1) # range of decision variable problem.constraints[:] = c_type problem.function = manningEval algorithm = NSGAII(problem, population_size=nct) algorithm.run(evals) nondom = nondominated( algorithm.result ) # nondom: list of Solutions - wanted value is variables[0] nondomNs = [sol.variables[0] for sol in nondom] results = runSims(model, nondomNs, river, reach, len(stage), range=[rs]) resultPts = [(nondomNs[ix], [results[ix][rs][jx] for jx in range(1, len(stage) + 1)]) for ix in range(len(nondomNs))] metrics = [(res[0], evalf(res[1]), res[1]) for res in resultPts] nDisplay(metrics, flow, stage, plotpath, outf, plot, correctDatum, si) return metrics
def optimize(model, algorithm="NSGAII", NFE=10000, module="platypus",
             progress_bar=None, **kwargs):
    module = __import__(module, fromlist=[''])
    class_ref = getattr(module, algorithm)

    args = kwargs.copy()
    args["problem"], levers = _to_problem(model)

    instance = class_ref(**args)

    if progress_bar is not None:
        pbar = progress_bar(total=NFE)
        callback = lambda x: pbar.update(x.nfe - pbar.n)
    else:
        callback = None

    instance.run(NFE, callback=callback)

    result = DataSet()
    print("here")

    for solution in unique(nondominated(instance.result)):
        if not solution.feasible:
            continue

        env = OrderedDict()
        offset = 0

        # decode from Platypus' internal representation (this should be fixed in Platypus instead)
        vars = [
            solution.problem.types[i].decode(solution.variables[i])
            for i in range(solution.problem.nvars)
        ]

        for lever, length in levers:
            env[lever.name] = lever.from_variables(vars[offset:(offset + length)])
            offset += length

        if any([r.dir not in [Response.MINIMIZE, Response.MAXIMIZE] for r in model.responses]):
            # if there are any responses not included in the optimization, we must
            # re-evaluate the model to get all responses
            print("reeval")
            env = evaluate(model, env)
        else:
            for i, response in enumerate(model.responses):
                env[response.name] = solution.objectives[i]

        result.append(env)

    return result
def write_variables_as_shelf(model_run, output_folder):
    log.info("Writing out variables and objectives to shelf")
    results = nondominated(model_run.result)
    variables = [s.variables for s in results]
    objectives = [s.objectives for s in results]

    with shelve.open(os.path.join(output_folder, "variables.shelf")) as shelf:
        shelf["variables"] = variables
        shelf["objectives"] = objectives
        shelf["result"] = model_run.result
        shelf.sync()
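# Hedged companion sketch (not part of the original module): reading the shelved
# results back, assuming the same output_folder and "variables.shelf" filename
# used by write_variables_as_shelf above.
import os
import shelve

def read_variables_from_shelf(output_folder):
    with shelve.open(os.path.join(output_folder, "variables.shelf")) as shelf:
        return shelf["variables"], shelf["objectives"]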
def plot_all_solutions(solution, problem, simplified, segment_name, output_folder, show_plots):
    for i, solution in enumerate(nondominated(solution.result)):
        problem.stream_network.set_segment_allocations(solution.variables, simplified=simplified)
        for segment in problem.stream_network.stream_segments.values():
            output_segment_name = "{}_sol_{}".format(segment_name, i)
            segment.plot_results_with_components(
                screen=show_plots,
                output_folder=output_folder,
                name_prefix=output_segment_name)
def _prune(self): problem = Problem(len(self.ensemble_), 2) problem.types[:] = Integer(0, 1) problem.directions[0] = Problem.MAXIMIZE problem.directions[1] = Problem.MAXIMIZE problem.function = functools.partial(MCE._evaluate_imbalance, y_predicts=self._y_predict, y_true=self._y_valid) algorithm = NSGAII(problem) algorithm.run(10000) solutions = unique(nondominated(algorithm.result)) objectives = [sol.objectives for sol in solutions] def extract_variables(variables): extracted = [v[0] for v in variables] return extracted self._ensemble_quality = self.get_group( extract_variables(solutions[objectives.index( max(objectives, key=itemgetter(0)))].variables), self.ensemble_) self._ensemble_diversity = self.get_group( extract_variables(solutions[objectives.index( max(objectives, key=itemgetter(1)))].variables), self.ensemble_) self._ensemble_balanced = self.get_group( extract_variables(solutions[objectives.index( min(objectives, key=lambda i: abs(i[0] - i[1])))].variables), self.ensemble_) pareto_set, fitnesses = self._genetic_optimalisation( optimalisation_type='quality_single') self._ensemble_quality_single = self.get_group( pareto_set[fitnesses.index(max(fitnesses, key=itemgetter(0)))], self.ensemble_) # pareto_set, fitnesses = self._genetic_optimalisation(optimalisation_type='diversity_single') # self._ensemble_diversity_single = self.get_group(pareto_set[fitnesses.index(min(fitnesses, key=itemgetter(0)))], # self.ensemble_) pareto_set, fitnesses = self._genetic_optimalisation( optimalisation_type='precision_single') self._ensemble_precision_single = self.get_group( pareto_set[fitnesses.index(max(fitnesses, key=itemgetter(0)))], self.ensemble_) pareto_set, fitnesses = self._genetic_optimalisation( optimalisation_type='recall_single') self._ensemble_recall_single = self.get_group( pareto_set[fitnesses.index(max(fitnesses, key=itemgetter(0)))], self.ensemble_)
def determine(self, runs=10000):
    # Open clarification -
    # caused PicklingError: Can't pickle <type 'instancemethod'>: attribute lookup
    # with ProcessPoolEvaluator() as evaluator:
    #     algorithm = GeneticAlgorithm(self, evaluator=evaluator)
    #     algorithm.run(runs)
    algorithm = GeneticAlgorithm(self)
    logger.debug('trigger GEA optimization run')
    algorithm.run(runs)
    logger.debug('GEA done')
    return unique(nondominated(algorithm.result))
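# Hedged usage sketch: determine() returns unique, non-dominated platypus
# Solutions, so a caller might inspect them like this. "planner" is a
# hypothetical instance of the class this method belongs to.
best = planner.determine(runs=10000)
for solution in best:
    print(solution.variables, solution.objectives)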
def robust_optimize(model, SOWs, algorithm="NSGAII", NFE=10000, obj_aggregate=None, constr_aggregate=None, **kwargs): module = __import__("platypus", fromlist=[""]) class_ref = getattr(module, algorithm) if obj_aggregate is None: from .robustness import mean obj_aggregate = mean if constr_aggregate is None: constr_aggregate = max args = kwargs.copy() args["problem"] = _to_robust_problem(model, SOWs, obj_aggregate, constr_aggregate) instance = class_ref(**args) instance.run(NFE) result = DataSet() for solution in unique(nondominated(instance.result)): if not solution.feasible: continue env = OrderedDict() offset = 0 # decode from Platypus' internal representation (this should be fixed in Platypus instead) vars = [solution.problem.types[i].decode(solution.variables[i]) for i in range(solution.problem.nvars)] for lever in model.levers: env[lever.name] = lever.from_variables(vars[offset : (offset + lever.length)]) offset += lever.length if any([r.type not in [Response.MINIMIZE, Response.MAXIMIZE] for r in model.responses]): # if there are any responses not included in the optimization, we must # re-evaluate the model to get all responses env = evaluate(model, env) # here we copy over the objectives from the evaluated solution, which has been aggregated over all SOWs for i, response in enumerate([r for r in model.responses if r.type in [Response.MINIMIZE, Response.MAXIMIZE]]): env[response.name] = solution.objectives[i] result.append(env) return result
def fit(self, X, y): X, y, = copy.deepcopy(X), copy.deepcopy(y) self.y = y y_bin = y self.X, self.y_bin = X, y # # start evolving in MOEA num_variables = (self.X.shape[1] + 1) * self.n_hidden algorithm = NSGAII(Objectives(num_variables, 2, self.X, y_bin, self.n_hidden, sparse_degree=self.sparse_degree), population_size=self.n_pop) # MOEAD(Objectives(num_variables, 2, self.X, y_bin, self.n_hidden, sparse_degree=self.sparse_degree), # population_size=self.n_pop, neighborhood_size=int(self.n_pop/10)) # delta=0.5, eta=0.8 algorithm.run(self.max_iter) self.evo_result = algorithm.result print('total solution:', algorithm.result.__len__()) nondom_result = nondominated(algorithm.result) print('nondominated solution:', nondom_result.__len__()) self.nondom_solution = nondom_result self.W = [] self.B = [] for i in range(nondom_result.__len__()): s = nondom_result[i] W = np.asarray(s.variables).reshape(self.X.shape[1] + 1, self.n_hidden) X_ = np.append(self.X, np.ones((self.X.shape[0], 1)), axis=1) H = expit(np.dot(X_, W)) B = np.dot(linalg.pinv(H), y_bin) self.W.append(W) self.B.append(B) real_degree = H.mean(axis=0) # n_hidden dim avg_activation = real_degree.mean() print('NO.', i, ' obj:', s.objectives, 'AVG activation:', avg_activation) self.W = np.asarray(self.W) self.B = np.asarray(self.B) # # best W/B best_index = self.get_best_index() self.best_W = self.W[best_index] self.best_B = self.B[best_index] return self
def robust_optimize(model, SOWs, algorithm="NSGAII", NFE=10000, obj_aggregate=None, constr_aggregate=None, **kwargs): module = __import__("platypus", fromlist=['']) class_ref = getattr(module, algorithm) if obj_aggregate is None: from .robustness import mean obj_aggregate = mean if constr_aggregate is None: constr_aggregate = max args = kwargs.copy() args["problem"], levers = _to_robust_problem(model, SOWs, obj_aggregate, constr_aggregate) instance = class_ref(**args) instance.run(NFE) result = DataSet() for solution in unique(nondominated(instance.result)): if not solution.feasible: continue env = OrderedDict() offset = 0 # decode from Platypus' internal representation (this should be fixed in Platypus instead) vars = [solution.problem.types[i].decode(solution.variables[i]) for i in range(solution.problem.nvars)] for lever, length in levers: env[lever.name] = lever.from_variables(vars[offset:(offset+length)]) offset += length if any([r.dir not in [Response.MINIMIZE, Response.MAXIMIZE] for r in model.responses]): # if there are any responses not included in the optimization, we must # re-evaluate the model to get all responses env = evaluate(model, env) # here we copy over the objectives from the evaluated solution, which has been aggregated over all SOWs for i, response in enumerate([r for r in model.responses if r.dir in [Response.MINIMIZE, Response.MAXIMIZE]]): env[response.name] = solution.objectives[i] result.append(env) return result
def run(self, algorithm: AbstractGeneticAlgorithm, nruns: int) -> List[NRPSolution]:
    algorithm.run(nruns)
    print(len(algorithm.result))

    # Only unique non-dominated solutions
    solutions: List[Solution] = unique(nondominated(algorithm.result))
    solutions = [sol for sol in solutions if sol.feasible]
    print(len(solutions))

    result: List[NRPSolution] = make_solutions(self.nrp_instance, solutions)
    # Sorting for 2 objectives: first maximize score, then minimize cost
    result = sorted(result, key=lambda x: x.total_score, reverse=True)
    if self.is_last_single:
        result = sorted(result, key=lambda x: x.total_cost)
        # Taking only the solution with minimal cost
        result = [result[0]]
    return result
def fit(self, X, y): X, y, = copy.deepcopy(X), copy.deepcopy(y) self.y = y y_bin = self.one2array(y, np.unique(y).shape[0]) self.classes_ = np.arange(y_bin.shape[1]) self.n_classes_ = self.classes_.__len__() self.X, self.y_bin = X, y_bin # # start evolving in MOEA num_variables = (self.X.shape[1] + 1) * self.n_hidden algorithm = MOEAD(Objectives(num_variables, 2, self.X, y_bin, self.n_hidden, sparse_degree=self.sparse_degree), population_size=self.n_pop, neighborhood_size=5) algorithm.run(self.max_iter) self.evo_result = algorithm.result print('total solution:', algorithm.result.__len__()) result = nondominated(algorithm.result) print('nondominated solution:', result.__len__()) self.solution = result self.W = [] self.B = [] self.voting_weight = [] for s in result: W = np.asarray(s.variables).reshape(self.X.shape[1] + 1, self.n_hidden) X_ = np.append(self.X, np.ones((self.X.shape[0], 1)), axis=1) H = expit(np.dot(X_, W)) B = np.dot(linalg.pinv(H), y_bin) voting_w_ = 1. / (s.objectives[0] + 10e-5) * self.mu + 1. / ( s.objectives[1] + 10e-5) * (1 - self.mu) self.voting_weight.append(voting_w_) self.W.append(W) self.B.append(B) self.voting_weight = np.asarray(self.voting_weight) self.W = np.asarray(self.W) self.B = np.asarray(self.B) return self
    ax.set_yticks(y_pos)
    ax.set_yticklabels(var_list)
    ax.invert_yaxis()
    ax.set_xlabel('Relevancy')
    if objective_idx == 0:
        ax.set_title('Best Variables - Sensitivity')
    else:
        ax.set_title('Best Variables - Specificity')
    plt.show()


if __name__ == "__main__":
    algorithm = NSGAII(SVM(), population_size=30)
    algorithm.run(100)

    nondominated_results = nondominated(algorithm.result)

    # plot the results
    fig1 = plt.figure(figsize=[11, 11])
    plt.scatter([s.objectives[0] for s in nondominated_results],
                [s.objectives[1] for s in nondominated_results])
    plt.xlim([0, 1.1])
    plt.ylim([0, 1.1])
    plt.xlabel("Sensitivity")
    plt.ylabel("Specificity")
    plt.show()

    calculate_relevancy(nondominated_results, 0, 0.81, 15)
    calculate_relevancy(nondominated_results, 1, 0.83, 15)
def run(parent, generation, save_interval, save_dir="GA/result"):
    def objective(vars):
        # TODO: assert that in each row of the condition edges_indices the left index is smaller than the right one
        gene_nodes_pos, gene_edges_thickness, gene_adj_element = convert_var_to_arg(vars)
        return [calculate_efficiency(gene_nodes_pos, gene_edges_thickness, gene_adj_element)]

    def make_adj_triu_matrix(adj_element, node_num, condition_edges_indices):
        """Build edge_indices from the genes that encode adjacency information."""
        adj_matrix = np.zeros((node_num, node_num))
        adj_matrix[np.triu_indices(node_num, 1)] = adj_element
        adj_matrix[(condition_edges_indices[:, 0], condition_edges_indices[:, 1])] = 1
        edge_indices = np.stack(np.where(adj_matrix), axis=1)
        return edge_indices

    def make_edge_thick_triu_matrix(gene_edges_thickness, node_num, condition_edges_indices,
                                    condition_edges_thickness, edges_indices):
        """Build edges_thickness from the edge-thickness genes, using condition_edge_thickness for the fixed edges."""
        tri = np.zeros((node_num, node_num))
        tri[np.triu_indices(node_num, 1)] = gene_edges_thickness
        tri[(condition_edges_indices[:, 0], condition_edges_indices[:, 1])] = condition_edges_thickness
        edges_thickness = tri[(edges_indices[:, 0], edges_indices[:, 1])]
        return edges_thickness

    def convert_var_to_arg(vars):
        nodes_pos = np.array(vars[0:gene_node_pos_num])
        nodes_pos = nodes_pos.reshape([int(gene_node_pos_num / 2), 2])
        edges_thickness = vars[gene_node_pos_num:gene_node_pos_num + gene_edge_thickness_num]
        adj_element = vars[gene_node_pos_num + gene_edge_thickness_num:
                           gene_node_pos_num + gene_edge_thickness_num + gene_edge_indices_num]
        return nodes_pos, edges_thickness, adj_element

    def calculate_efficiency(gene_nodes_pos, gene_edges_thickness, gene_adj_element, np_save_path=False):
        condition_nodes_pos, input_nodes, input_vectors, output_nodes, \
            output_vectors, frozen_nodes, condition_edges_indices, condition_edges_thickness \
            = make_main_node_edge_info(*condition(), condition_edge_thickness=0.2)

        # make edge_indices
        edges_indices = make_adj_triu_matrix(gene_adj_element, node_num, condition_edges_indices)

        # make nodes_pos
        nodes_pos = np.concatenate([condition_nodes_pos, gene_nodes_pos])

        # extract the subgraph that contains the condition nodes
        G = nx.Graph()
        G.add_nodes_from(np.arange(len(nodes_pos)))
        G.add_edges_from(edges_indices)
        condition_node_list = input_nodes + output_nodes + frozen_nodes

        trigger = 0  # set the trigger when a connected subgraph containing all condition nodes exists
        for c in nx.connected_components(G):
            sg = G.subgraph(c)  # subgraph
            if set(condition_node_list) <= set(sg.nodes):  # are all condition nodes included?
                edges_indices = np.array(sg.edges)
                trigger = 1
                break
        if trigger == 0:  # apply a penalty
            return -10.0

        # make edges_thickness
        edges_thickness = make_edge_thick_triu_matrix(gene_edges_thickness, node_num,
                                                      condition_edges_indices,
                                                      condition_edges_thickness, edges_indices)

        env = BarFemGym(nodes_pos, input_nodes, input_vectors, output_nodes, output_vectors,
                        frozen_nodes, edges_indices, edges_thickness, frozen_nodes)
        env.reset()
        efficiency = env.calculate_simulation()
        if np_save_path:
            env.render(save_path=os.path.join(np_save_path, "image.png"))
            np.save(os.path.join(np_save_path, "nodes_pos.npy"), nodes_pos)
            np.save(os.path.join(np_save_path, "edges_indices.npy"), edges_indices)
            np.save(os.path.join(np_save_path, "edges_thickness.npy"), edges_thickness)

        return float(efficiency)

    node_num = 85
    parent = (node_num * 2 + int(node_num * (node_num - 1) / 2) * 2)  # ideally this should be 10 times larger
    PATH = os.path.join(save_dir, "parent_{}_gen_{}".format(parent, generation))
    os.makedirs(PATH, exist_ok=True)

    condition_node_num = 10
    gene_node_pos_num = (node_num - condition_node_num) * 2
    gene_edge_thickness_num = int(node_num * (node_num - 1) / 2)
    gene_edge_indices_num = gene_edge_thickness_num

    # define the optimization problem (single objective)
    problem = Problem(gene_node_pos_num + gene_edge_thickness_num + gene_edge_indices_num, 1)
    # set minimization or maximization
    problem.directions[:] = Problem.MAXIMIZE
    # set the ranges of the decision variables
    coord_const = Real(0, 1)
    edge_const = Real(0.1, 1)  # use 0.1 as the lower bound to avoid numerical problems
    adj_constraint = Integer(0, 1)
    problem.types[0:gene_node_pos_num] = coord_const
    problem.types[gene_node_pos_num:gene_node_pos_num + gene_edge_thickness_num] = edge_const
    problem.types[gene_node_pos_num + gene_edge_thickness_num:
                  gene_node_pos_num + gene_edge_thickness_num + gene_edge_indices_num] = adj_constraint
    problem.function = objective

    algorithm = NSGAII(problem, population_size=parent,
                       variator=CompoundOperator(SBX(), HUX(), PM(), BitFlip()))

    history = []
    for i in tqdm(range(generation)):
        algorithm.step()
        nondominated_solutions = nondominated(algorithm.result)
        efficiency_results = [s.objectives[0] for s in nondominated_solutions]
        max_efficiency = max(efficiency_results)
        history.append(max_efficiency)

        epochs = np.arange(i + 1) + 1
        result_efficiency = np.array(history)
        fig = plt.figure()
        ax = fig.add_subplot(1, 1, 1)
        ax.plot(epochs, result_efficiency, label='efficiency')
        ax.set_xlim(1, max(epochs))
        ax.set_xlabel('epoch')
        ax.legend()
        ax.set_title("efficiency curve")
        plt.savefig(os.path.join(PATH, "history.png"))
        plt.close()

        if i % save_interval == 0:
            save_dir = os.path.join(PATH, str(i))
            max_index = efficiency_results.index(max_efficiency)
            max_solution = nondominated_solutions[max_index]
            vars = []
            vars.extend([coord_const.decode(i) for i in max_solution.variables[0:gene_node_pos_num]])
            vars.extend([edge_const.decode(i) for i in
                         max_solution.variables[gene_node_pos_num:gene_node_pos_num + gene_edge_thickness_num]])
            vars.extend([adj_constraint.decode(i) for i in
                         max_solution.variables[gene_node_pos_num + gene_edge_thickness_num:
                                                gene_node_pos_num + gene_edge_thickness_num + gene_edge_indices_num]])
            gene_nodes_pos, gene_edges_thickness, gene_adj_element = convert_var_to_arg(vars)
            calculate_efficiency(gene_nodes_pos, gene_edges_thickness, gene_adj_element, np_save_path=save_dir)
            np.save(os.path.join(save_dir, "history.npy"), history)
def grid_multi_GA(nx=20, ny=20, volume_frac=0.5, parent=400, generation=100, path="data"):
    PATH = os.path.join(path, "grid_nx_{}_ny_{}".format(nx, ny),
                        "gen_{}_pa_{}".format(generation, parent))
    os.makedirs(PATH, exist_ok=True)
    start = time.time()

    def objective(vars):
        rho = np.array(vars)
        rho = rho.reshape(ny, nx - 1)
        rho = np.concatenate([rho, np.ones((ny, 1))], 1)
        volume = np.sum(rho) / (nx * ny)
        return [calc_E(rho), calc_G(rho)], [volume]

    # problem with ny * (nx - 1) variables, 2 objectives and 1 constraint
    problem = Problem(ny * (nx - 1), 2, 1)
    # set minimization or maximization
    problem.directions[:] = Problem.MAXIMIZE
    # set the range of the decision variables
    int1 = Integer(0, 1)
    problem.types[:] = int1
    problem.constraints[:] = "<=" + str(volume_frac)
    problem.function = objective
    problem.directions[:] = Problem.MAXIMIZE
    algorithm = NSGAII(problem, population_size=parent)
    algorithm.run(generation)

    # plot the results
    fig = plt.figure()
    plt.scatter([s.objectives[0] for s in algorithm.result],
                [s.objectives[1] for s in algorithm.result],
                c="blue", label="infeasible solution")
    plt.scatter([s.objectives[0] for s in algorithm.result if s.feasible],
                [s.objectives[1] for s in algorithm.result if s.feasible],
                c="red", label='feasible solution')

    # extract the non-dominated solutions
    nondominated_solutions = nondominated(algorithm.result)
    plt.scatter(
        [s.objectives[0] for s in nondominated_solutions if s.feasible],
        [s.objectives[1] for s in nondominated_solutions if s.feasible],
        c="green", label="pareto solution")
    plt.legend(loc='lower left')
    plt.xlabel("$E$")
    plt.ylabel("$G$")
    fig.savefig(os.path.join(PATH, "graph.png"))
    plt.close()

    for solution in [s for s in nondominated_solutions if s.feasible]:
        image_list = []
        for j in solution.variables:
            image_list.append(j)
        image = np.array(image_list).reshape(ny, nx - 1)
        image = np.concatenate([image, np.ones((ny, 1))], 1)
        np.save(
            os.path.join(
                PATH,
                'E_{}_G_{}.npy'.format(solution.objectives[0], solution.objectives[1])),
            image)
    convert_folder_npy_to_image(PATH)

    elapsed_time = time.time() - start
    with open("time.txt", mode='a') as f:
        f.writelines("grid_nx_{}_ny_{}_gen_{}_pa_{}:{}sec\n".format(
            nx, ny, generation, parent, elapsed_time))
def NSGAII_Experiment():
    # For each constrained test problem, run NSGA-II for several population-size
    # multipliers and keep the best (highest mean) hypervolume set.
    # Each entry: (nvars, nobjs, nconstrs, decision-variable types, objective function,
    #              hypervolume reference point)
    test_problems = {
        'SRD': (7, 2, 11, [Real(2.6, 3.6), Real(0.7, 0.8), Real(17, 28), Real(7.3, 8.3),
                           Real(7.3, 8.3), Real(2.9, 3.9), Real(5, 5.5)], SRD, [7000, 1700]),
        'TBTD': (3, 2, 3, [Real(1, 3), Real(0.0005, 0.05), Real(0.0005, 0.05)], TBTD, [0.1, 50000]),
        'WB': (4, 2, 5, [Real(0.125, 5), Real(0.1, 10), Real(0.1, 10), Real(0.125, 5)], WB, [350, 0.1]),
        'DBD': (4, 2, 5, [Real(55, 80), Real(75, 110), Real(1000, 3000), Real(2, 20)], DBD, [5, 50]),
        'NBP': (2, 2, 5, [Real(20, 250), Real(10, 50)], NBP, [11150, 12500]),
        'SPD': (6, 3, 9, [Real(150, 274.32), Real(25, 32.31), Real(12, 22), Real(8, 11.71),
                          Real(14, 18), Real(0.63, 0.75)], SPD, [16, 19000, -260000]),
        'CSI': (7, 3, 10, [Real(0.5, 1.5), Real(0.45, 1.35), Real(0.5, 1.5), Real(0.5, 1.5),
                           Real(0.875, 2.625), Real(0.4, 1.2), Real(0.4, 1.2)], CSI, [42, 4.5, 13]),
        'WP': (3, 5, 7, [Real(0.01, 0.45), Real(0.01, 0.1), Real(0.01, 0.1)], WP,
               [83000, 1350, 2.85, 15989825, 25000]),
        'BNH': (2, 2, 2, [Real(0, 5), Real(0, 3)], BNH, [140, 50]),
        'CEXP': (2, 2, 2, [Real(0.1, 1), Real(0, 5)], CEXP, [1, 9]),
        'C3DTLZ4': (6, 2, 2, [Real(0, 1) for _ in range(6)], C3DTLZ4, [3, 3]),
        'SRN': (2, 2, 2, [Real(-20, 20), Real(-20, 20)], SRN, [301, 72]),
        'TNK': (2, 2, 2, [Real(1e-5, np.pi), Real(1e-5, np.pi)], TNK, [3, 3]),
        'OSY': (6, 2, 6, [Real(0, 10), Real(0, 10), Real(1, 5), Real(0, 6), Real(1, 5), Real(0, 10)],
                OSY, [0, 386]),
        'CTP1': (2, 2, 2, [Real(0, 1), Real(0, 1)], CTP1, [1, 2]),
        'BICOP1': (10, 2, 1, [Real(0, 1) for _ in range(10)], BICOP1, [9, 9]),
        'BICOP2': (10, 2, 2, [Real(0, 1) for _ in range(10)], BICOP2, [70, 70]),
        'TRICOP': (2, 3, 3, [Real(-4, 4), Real(-4, 4)], TRICOP, [34, -4, 90]),
    }

    NSGAII_results = {}
    for funcname, (nvars, nobjs, nconstrs, types, func, ref_point) in test_problems.items():
        hypNS2 = [0]
        random.seed(0)
        for p in [5, 8, 10, 20]:
            hyp = []
            for i in range(100):
                problem = Problem(nvars, nobjs, nconstrs)
                problem.types[:] = types
                problem.constraints[:] = "<=0"
                problem.function = func
                algorithm = NSGAII(problem, p * problem.nvars)
                algorithm.run(40 * problem.nvars)

                nondominated_solutions = nondominated(algorithm.result)
                ref = np.array(ref_point)
                obj = []
                for s in nondominated_solutions:
                    lijst = str(s.objectives)
                    obj.append(ast.literal_eval(lijst))
                obj = np.array(obj)
                hyp.append(hypervolume(obj, ref))
            # keep the hypervolume set with the highest mean across population sizes
            if np.mean(hyp) > np.mean(hypNS2):
                hypNS2 = hyp
        print(funcname, np.mean(hypNS2), '(', np.std(hypNS2), ')')
        NSGAII_results[funcname] = hypNS2

    return NSGAII_results
def optimize(problem, algorithm, iterations=100, write_forcefields=None): """ The optimize function provides a uniform wrapper to solve the EZFF problem using the algorithm(s) provided. :param problem: EZFF Problem to be optimized :type problem: Problem :param algorithm: EZFF Algorithm(s) to use for optimization. Allowed options are ``NSGAII``, ``NSGAIII`` and ``IBEA``, or a list containing any sequence of these options. The algorithms will be used in the sequence provided :type algorithm: str or list (of strings) :param iterations: Number of epochs to perform the optimization for. If multiple algorithms are specified, one iteration value should be provided for each algorithm :type iterations: int or list (of ints) :param write_forcefields: All non-dominated forcefields are written out every ``write_forcefields`` epochs. If this is ``None``, the forcefields are written out for the first and last epoch :type write_forcefields: int or None """ # Convert algorithm and iterations into lists if not isinstance(algorithm, list): algorithm = [algorithm] if not isinstance(iterations, list): iterations = [iterations] if not len(algorithm) == len(iterations): raise ValueError( "Please provide a maximum number of epochs for each algorithm") total_epochs = 0 current_solutions = None for stage in range(0, len(algorithm)): # Construct an algorithm algorithm_for_this_stage = _generate_algorithm( algorithm[stage]["myproblem"], algorithm[stage]["algorithm_string"], algorithm[stage]["population"], algorithm[stage]["mutation_probability"], current_solutions, algorithm[stage]["pool"]) if not isinstance(write_forcefields, int): write_forcefields = np.sum( [iterations[stage_no] for stage_no in range(stage + 1)]) for i in range(0, iterations[stage]): total_epochs += 1 print('Epoch: ' + str(total_epochs)) algorithm_for_this_stage.step() # Make output files/directories outdir = 'results/' + str(total_epochs) if not os.path.isdir(outdir): os.makedirs(outdir) varfilename = outdir + '/variables' objfilename = outdir + '/errors' varfile = open(varfilename, 'w') objfile = open(objfilename, 'w') for solution in unique( nondominated(algorithm_for_this_stage.result)): varfile.write(' '.join( [str(variables) for variables in solution.variables])) varfile.write('\n') objfile.write(' '.join( [str(objective) for objective in solution.objectives])) objfile.write('\n') varfile.close() objfile.close() if total_epochs % write_forcefields == 0: if not os.path.isdir(outdir + '/forcefields'): os.makedirs(outdir + '/forcefields') for sol_index, solution in enumerate( unique(nondominated(algorithm_for_this_stage.result))): ff_name = outdir + '/forcefields/FF_' + str(sol_index + 1) parameters_dict = dict( zip(problem.variables, solution.variables)) generate_forcefield(problem.template, parameters_dict, outfile=ff_name) current_solutions = algorithm_for_this_stage.population
def bar_multi_GA(nx=20, ny=20, volume_frac=0.5, parent=400, generation=100, path="data"): PATH = os.path.join(path, "bar_nx_{}_ny_{}".format(nx, ny), "gen_{}_pa_{}".format(generation, parent)) os.makedirs(PATH, exist_ok=True) start = time.time() def objective(vars): y_1, y_2, y_3, x_4, nodes, widths = convert_var_to_arg(vars) edges = make_6_bar_edges(nx, ny, y_1, y_2, y_3, x_4, nodes, widths) rho = make_bar_structure(nx, ny, edges) volume = np.sum(rho) / (nx * ny) return [calc_E(rho), calc_G(rho)], [volume] def convert_var_to_arg(vars): y_1 = vars[0] y_2 = vars[1] y_3 = vars[2] x_4 = vars[3] node_y_indexes = vars[4:4 + 6 * 3] node_x_indexes = vars[4 + 6 * 3:4 + 6 * 3 * 2] nodes = np.stack([node_x_indexes, node_y_indexes], axis=1) widths = vars[4 + 6 * 3 * 2:] return y_1, y_2, y_3, x_4, nodes, widths # 2変数2目的の問題 problem = Problem(4 + 6 * 3 * 2 + 6 * 4, 2, 1) # 最小化or最大化を設定 problem.directions[:] = Problem.MAXIMIZE # 決定変数の範囲を設定 x_index_const = Integer(1, nx) # x座標に関する制約 y_index_const = Integer(1, ny) # y座標に関する制約 bar_constraint = Real(0, ny / 2) # バーの幅に関する制約 problem.types[0:3] = y_index_const problem.types[3] = x_index_const problem.types[4:4 + 6 * 3] = y_index_const problem.types[4 + 6 * 3:4 + 6 * 3 * 2] = x_index_const problem.types[4 + 6 * 3 * 2:] = bar_constraint problem.constraints[:] = "<=" + str(volume_frac) problem.function = objective problem.directions[:] = Problem.MAXIMIZE algorithm = NSGAII(problem, population_size=parent, variator=CompoundOperator(SBX(), HUX(), PM(), BitFlip())) algorithm.run(generation) # グラフを描画 fig = plt.figure() plt.scatter([s.objectives[0] for s in algorithm.result], [s.objectives[1] for s in algorithm.result], c="blue", label="infeasible solution") plt.scatter([s.objectives[0] for s in algorithm.result if s.feasible], [s.objectives[1] for s in algorithm.result if s.feasible], c="red", label='feasible solution') # 非劣解をとりだす nondominated_solutions = nondominated(algorithm.result) plt.scatter( [s.objectives[0] for s in nondominated_solutions if s.feasible], [s.objectives[1] for s in nondominated_solutions if s.feasible], c="green", label="pareto solution") plt.legend(loc='lower left') plt.xlabel("$E$") plt.ylabel("$G$") fig.savefig(os.path.join(PATH, "graph.png")) plt.close() for solution in [s for s in nondominated_solutions if s.feasible]: vars_list = [] for j in solution.variables[:3]: vars_list.append(y_index_const.decode(j)) vars_list.append(x_index_const.decode(solution.variables[3])) for j in solution.variables[4:4 + 6 * 3]: vars_list.append(y_index_const.decode(j)) for j in solution.variables[4 + 6 * 3:4 + 6 * 3 * 2]: vars_list.append(x_index_const.decode(j)) for j in solution.variables[4 + 6 * 3 * 2:]: vars_list.append(bar_constraint.decode(j)) y_1, y_2, y_3, x_4, nodes, widths = convert_var_to_arg(vars_list) edges = make_6_bar_edges(nx, ny, y_1, y_2, y_3, x_4, nodes, widths) image = make_bar_structure(nx, ny, edges) np.save( os.path.join( PATH, 'E_{}_G_{}.npy'.format(solution.objectives[0], solution.objectives[1])), image) convert_folder_npy_to_image(PATH) elapsed_time = time.time() - start with open("time.txt", mode='a') as f: f.writelines("bar_nx_{}_ny_{}_gen_{}_pa_{}:{}sec\n".format( nx, ny, generation, parent, elapsed_time))
def optimize(problem, algorithm, iterations=100, write_forcefields=None): """ Uniform wrapper function that steps through the optimization process. Also provides uniform handling of output files. :param problem: EZFF Problem to be optimized :type problem: Problem :param algorithm: EZFF Algorithm to use for optimization. Allowed options are ``NSGAII``, ``NSGAIII`` and ``IBEA`` :type algorithm: str :param iterations: Number of epochs to perform the optimization for :type iterations: int :param write_forcefields: All non-dominated forcefields are written out every ``write_forcefields`` epochs. If this is ``None``, the forcefields are written out for the first and last epoch :type write_forcefields: int or None """ # Convert algorithm and iterations into lists if not isinstance(algorithm, list): algorithm = [algorithm] if not isinstance(iterations, list): iterations = [iterations] if not len(algorithm) == len(iterations): raise ValueError( "Please provide a maximum number of epochs for each algorithm") total_epochs = 0 current_solutions = None for stage in range(0, len(algorithm)): # Construct an algorithm algorithm_for_this_stage = generate_algorithm( algorithm[stage]["myproblem"], algorithm[stage]["algorithm_string"], algorithm[stage]["population"], current_solutions, algorithm[stage]["pool"]) if not isinstance(write_forcefields, int): write_forcefields = iterations[stage] for i in range(0, iterations[stage]): total_epochs += 1 print('Epoch: ' + str(total_epochs)) algorithm_for_this_stage.step() # Make output files/directories outdir = 'results/' + str(total_epochs) if not os.path.isdir(outdir): os.makedirs(outdir) varfilename = outdir + '/variables' objfilename = outdir + '/errors' varfile = open(varfilename, 'w') objfile = open(objfilename, 'w') for solution in unique( nondominated(algorithm_for_this_stage.result)): varfile.write(' '.join( [str(variables) for variables in solution.variables])) varfile.write('\n') objfile.write(' '.join( [str(objective) for objective in solution.objectives])) objfile.write('\n') varfile.close() objfile.close() if total_epochs % (write_forcefields - 1) == 0: if not os.path.isdir(outdir + '/forcefields'): os.makedirs(outdir + '/forcefields') for sol_index, solution in enumerate( unique(nondominated(algorithm_for_this_stage.result))): ff_name = outdir + '/forcefields/FF_' + str(sol_index) parameters_dict = dict( zip(problem.variables, solution.variables)) write_forcefield_file(ff_name, problem.template, parameters_dict, verbose=False) current_solutions = algorithm_for_this_stage.population
from platypus import GeneticAlgorithm, Problem, Constraint, Binary, nondominated, unique

# This simple example has an optimal value of 15 when picking items 1 and 4.
items = 7
capacity = 9
weights = [2, 3, 6, 7, 5, 9, 4]
profits = [6, 5, 8, 9, 6, 7, 3]

def knapsack(x):
    selection = x[0]
    total_weight = sum([weights[i] if selection[i] else 0 for i in range(items)])
    total_profit = sum([profits[i] if selection[i] else 0 for i in range(items)])
    return total_profit, total_weight

problem = Problem(1, 1, 1)
problem.types[0] = Binary(items)
problem.directions[0] = Problem.MAXIMIZE
problem.constraints[0] = Constraint("<=", capacity)
problem.function = knapsack

algorithm = GeneticAlgorithm(problem)
algorithm.run(10000)

for solution in unique(nondominated(algorithm.result)):
    print(solution.variables, solution.objectives)
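# Hedged follow-up sketch (not part of the original example): for a Binary(items)
# type, solution.variables[0] is a list of booleans, so the chosen item numbers
# (matching the "items 1 and 4" optimum mentioned above) can be printed more
# readably like this.
for solution in unique(nondominated(algorithm.result)):
    selection = solution.variables[0]
    chosen = [i + 1 for i in range(items) if selection[i]]   # 1-based item numbers
    print("picked items:", chosen, "profit:", solution.objectives[0])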
for g in range(1, 10):
    hyp = []
    nfes = []
    for i in range(10):
        problem = Problem(2, 2, 2)
        problem.types[:] = [Real(0, 5), Real(0, 3)]
        problem.constraints[:] = "<=0"
        problem.function = BNH
        algorithm = NSGAIII(problem, d * problem.nvars)
        algorithm.run(d * g * problem.nvars)
        funcname = 'BNH'
        # if not os.path.exists(funcname):
        #     os.makedirs(funcname)
        nondominated_solutions = nondominated(algorithm.result)
        ref = np.array([140, 50])
        obj = []
        for s in nondominated_solutions:
            lijst = str(s.objectives)
            obj.append(ast.literal_eval(lijst))
        obj = np.array(obj)
        # np.savetxt(str(funcname)+'/'+str(funcname)+'_pf_run_'+str(i)+'.csv', obj, delimiter=',')
        hyp.append(hypervolume(obj, ref))
        nfes.append(algorithm.nfe)
    print(np.mean(hyp))
    if np.mean(hyp) > 5005:
        print('BNH', np.mean(hyp), '(', np.std(hyp), ')', g, d, np.mean(nfes))

print('BNH', np.mean(hyp), '(', np.std(hyp), ')')
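# Hedged alternative sketch: Platypus also ships a Hypervolume indicator that
# could replace the external hypervolume(obj, ref) helper used above. Note that
# it normalizes by the given bounds, so it reports values on a [0, 1] scale
# rather than raw hypervolume; the [0, 0]-[140, 50] bounds below mirror the BNH
# reference point and are an assumption about the objective ranges.
from platypus import Hypervolume

hv_indicator = Hypervolume(minimum=[0, 0], maximum=[140, 50])
print(hv_indicator.calculate(nondominated_solutions))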
def ga(variables, outpu):  # genetic algorithm function
    if gv.vector == 0:
        gv.algo = int(input("Enter the no: of iterations\nuser input: "))
    print(" \n***** Optimization procedures have started. *****\n")

    if gv.constraint == "n":
        problem = Problem(variables, outpu)
    if gv.constraint == "y":
        problem = Problem(variables, outpu, len(gv.bigconst))  # specify the no of objectives and inputs

    for i in range(0, len(gv.bigres)):
        problem.types[i:i + 1] = [Real(gv.bigres[i][3], gv.bigres[i][2])]  # loop to initialise the limits

    for i in range(0, len(gv.bigconst)):
        for j in range(len(gv.bigconst[i])):
            if gv.bigconst[i][j] == 1:
                problem.constraints[i:i + 1] = "<=0"  # constraint assigning
            elif gv.bigconst[i][j] == 2:
                problem.constraints[i:i + 1] = ">=0"

    problem.function = evaluator  # call the simulator

    v_population_size = 10
    init_pop = [Solution(problem) for i in range(v_population_size)]
    pop_indiv = [[x.rand() for x in problem.types] for i in range(v_population_size)]
    for i in range(v_population_size):
        init_pop[i].variables = pop_indiv[i]

    if gv.algoindex == 1:
        algorithm = NSGAII(problem, population_size=v_population_size, generator=InjectedPopulation(init_pop))
    elif gv.algoindex == 2:
        algorithm = NSGAIII(problem, 12, population_size=v_population_size, generator=InjectedPopulation(init_pop))
    elif gv.algoindex == 3:
        algorithm = CMAES(problem, epsilons=0.05, population_size=v_population_size, generator=InjectedPopulation(init_pop))
    elif gv.algoindex == 4:
        algorithm = GDE3(problem, population_size=v_population_size, generator=InjectedPopulation(init_pop))
    elif gv.algoindex == 5:
        algorithm = IBEA(problem, population_size=v_population_size, generator=InjectedPopulation(init_pop))
    elif gv.algoindex == 6:
        algorithm = MOEAD(problem, divisions_outer=12, population_size=v_population_size, generator=InjectedPopulation(init_pop))
    elif gv.algoindex == 7:
        algorithm = OMOPSO(problem, epsilons=0.05, population_size=v_population_size, generator=InjectedPopulation(init_pop))
    elif gv.algoindex == 8:
        algorithm = SMPSO(problem, population_size=v_population_size, generator=InjectedPopulation(init_pop))
    elif gv.algoindex == 9:
        algorithm = SPEA2(problem, population_size=v_population_size, generator=InjectedPopulation(init_pop))
    elif gv.algoindex == 10:
        algorithm = EpsMOEA(problem, epsilons=0.05, population_size=v_population_size, generator=InjectedPopulation(init_pop))

    algorithm.run(gv.algo)

    feasible_solutions = [s for s in algorithm.result if s.feasible]
    nondominanted_solutions = nondominated(algorithm.result)

    def write_report(filename, label, solutions):
        # Writes the variables and back-transformed target values of each solution
        # to a report file (restructured from repeated open/write/close calls).
        with open(filename, "a") as f:
            f.write("\nThis is a set of {} values\n".format(label))
            for ki in range(len(solutions)):
                f.write("\n This is solution " + str(ki + 1) + "\n")
                for i in range(len(solutions[ki].variables)):
                    f.write(" The value of element " + str(i + 1) + " is " +
                            str(solutions[ki].variables[i]) + "\n")
                for i in range(len(solutions[ki].objectives)):
                    if gv.bigout[i][3] >= 0:
                        tru = gv.bigout[i][3] - solutions[ki].objectives[i]**0.5
                    else:
                        tru = gv.bigout[i][3] + solutions[ki].objectives[i]**0.5
                    f.write(" The value of target " + str(i + 1) + " is " + str(tru) +
                            " and the corresponding error value is " +
                            str(solutions[ki].objectives[i]) + "\n")

    write_report("feasible.txt", "feasible_solutions", feasible_solutions)
    write_report("nondominanted_solutions.txt", "nondominanted_solutions", nondominanted_solutions)
    return
          (4493, 7102), (3600, 6950), (3100, 7250), (4700, 8450), (5400, 8450),
          (5610, 10053), (4492, 10052), (3600, 10800), (3100, 10950), (4700, 11650),
          (5400, 11650), (6650, 10800), (7300, 10950), (7300, 7250), (6650, 6950),
          (7300, 3300), (6650, 2300), (5400, 1600), (8350, 2300), (7850, 3300),
          (9450, 5750), (10150, 5750), (10358, 7103), (9243, 7102), (8350, 6950),
          (7850, 7250), (9450, 8450), (10150, 8450), (10360, 10053), (9242, 10052),
          (8350, 10800), (7850, 10950), (9450, 11650), (10150, 11650), (11400, 10800),
          (12050, 10950), (12050, 7250), (11400, 6950), (12050, 3300), (11400, 2300),
          (10150, 1600), (13100, 2300), (12600, 3300), (14200, 5750), (14900, 5750),
          (15108, 7103), (13993, 7102), (13100, 6950), (12600, 7250), (14200, 8450),
          (14900, 8450), (15110, 10053), (13992, 10052), (13100, 10800), (12600, 10950),
          (14200, 11650), (14900, 11650), (16150, 10800), (16800, 10950), (16800, 7250),
          (16150, 6950), (16800, 3300), (16150, 2300), (14900, 1600), (19800, 800),
          (19800, 10000), (19800, 11900), (19800, 12200), (200, 12200), (200, 1100),
          (200, 800)]

def dist(x, y):
    return round(math.sqrt((x[0] - y[0])**2 + (x[1] - y[1])**2))

def tsp(x):
    tour = x[0]
    return sum([dist(cities[tour[i]], cities[tour[(i + 1) % len(cities)]])
                for i in range(len(tour))])

problem = Problem(1, 1)
problem.types[0] = Permutation(range(len(cities)))
problem.directions[0] = Problem.MINIMIZE
problem.function = tsp

algorithm = GeneticAlgorithm(problem)
algorithm.run(100000,
              callback=lambda a: print(a.nfe, unique(nondominated(algorithm.result))[0].objectives[0]))
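# Hedged follow-up sketch (not part of the original example): the best tour can
# be read off the single non-dominated solution; for a Permutation type,
# variables[0] is the permutation of city indices.
best = unique(nondominated(algorithm.result))[0]
print("tour length:", best.objectives[0])
print("visit order:", best.variables[0])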
def optimize(model, scenario, nfe, epsilons, sc_name, algorithm=EpsNSGAII,
             searchover='levers'):
    '''optimize the model

    Parameters
    ----------
    model : a Model instance
    scenario : the reference scenario under which to optimize
    nfe : int
    epsilons : list of float, epsilon values for the epsilon-dominance archive
    sc_name : str, scenario name used to label the hypervolume output
    algorithm : a valid Platypus optimization algorithm
    searchover : {'uncertainties', 'levers'}

    Returns
    -------
    tuple of two pandas DataFrames: the nondominated solutions (levers and
    outcomes) and the hypervolume record over the run

    Raises
    ------
    EMAError if searchover is not one of 'uncertainties' or 'levers'

    TODO:: constraints are not yet supported

    '''
    if searchover not in ('levers', 'uncertainties'):
        raise EMAError("searchover should be one of 'levers' or "
                       "'uncertainties' not {}".format(searchover))

    # extract the levers and the outcomes
    decision_variables = [dv for dv in getattr(model, searchover)]
    outcomes = [outcome for outcome in model.outcomes
                if outcome.kind != AbstractOutcome.INFO]

    evalfunc = functools.partial(evaluate_function, model=model,
                                 scenario=scenario,
                                 decision_vars=decision_variables,
                                 searchover=searchover)

    # set up the optimization problem
    # TODO:: add constraints
    problem = Problem(len(decision_variables), len(outcomes))
    problem.types[:] = [Real(dv.lower_bound, dv.upper_bound)
                        for dv in decision_variables]
    problem.function = evalfunc
    problem.directions = [outcome.kind for outcome in outcomes]

    # solve the optimization problem
    optimizer = algorithm(problem, epsilons=epsilons)
    optimizer.run(nfe)

    # extract the names for the levers and the outcomes
    lever_names = [dv.name for dv in decision_variables]
    outcome_names = [outcome.name for outcome in outcomes]

    solutions = []
    for solution in unique(nondominated(optimizer.result)):
        decision_vars = dict(zip(lever_names, solution.variables))
        decision_out = dict(zip(outcome_names, solution.objectives))
        result = {**decision_vars, **decision_out}
        solutions.append(result)

    results = pd.DataFrame(solutions, columns=lever_names + outcome_names)

    # hv is a 2d array where hv[0] is the record of nfe's and hv[1] is the
    # corresponding record of hypervolume values
    hv = np.swapaxes(np.array(optimizer.algorithm.hv_results), 0, 1)
    df = pd.DataFrame(hv).transpose()
    # optionally persist the hypervolume record, labeled with the scenario name
    # df.to_csv("Hypervolume_scenario_{}_v6.csv".format(sc_name))

    return results, df
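# Minimal self-contained sketch of the Platypus pattern that optimize() wraps:
# Real-typed decision variables, an EpsNSGAII run driven by epsilon values, and
# the unique nondominated solutions collected into a pandas DataFrame. The toy
# two-variable objective function and its bounds are assumptions made purely
# for illustration.
import pandas as pd
from platypus import EpsNSGAII, Problem, Real, nondominated, unique

problem = Problem(2, 2)
problem.types[:] = [Real(0.0, 1.0), Real(0.0, 1.0)]
problem.directions[:] = [Problem.MINIMIZE, Problem.MINIMIZE]
problem.function = lambda x: [x[0]**2 + x[1]**2, (x[0] - 1)**2 + x[1]**2]

optimizer = EpsNSGAII(problem, epsilons=[0.01, 0.01])
optimizer.run(5000)

names = ["x1", "x2", "f1", "f2"]
rows = [dict(zip(names, list(s.variables) + list(s.objectives)))
        for s in unique(nondominated(optimizer.result))]
print(pd.DataFrame(rows, columns=names))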
def fit(self, X, y): opt_start_time = time.time() kfold = None if isinstance(self.cv, int) and self.cv == 1: X_train, X_val, y_train, y_val = train_test_split( X, y, test_size=0.2, random_state=self.random_seed, stratify=y) logger.info("Not using Cross-Validation. " "Performing single train/test split") else: is_clf = self.model.is_classifier() kfold = check_cv(self.cv, y=y, classifier=is_clf) # kfold = StratifiedKFold( # n_splits=self.cv, random_state=self.random_seed, shuffle=True # ) logger.info(f"Using Cross-Validation - {kfold}") self.ind = 0 def train_test_model(parameter): # First check if we exceeded allocated time budget current_time = time.time() elapsed_time = current_time - opt_start_time if (self.max_opt_time is not None) and (elapsed_time > self.max_opt_time): msg = ( f"Max optimization time exceeded. " f"Max Opt time = {self.max_opt_time}, Elapsed Time = {elapsed_time}, " f"NFE Completed - {self.ind}") raise MaxBudgetExceededException(msg) self.ind = self.ind + 1 logger.info(f"Training population {self.ind}") parameter = self.param_to_dict( parameter, self.model_helper.param_choices, self.model_helper.param_categories, self.model_helper.param_type, ) scorers = [get_scorer(scorer) for scorer in self.scoring] nscorers = len(scorers) try: if kfold is None: clf = self.model_helper.create_instance(parameter) clf_trained = clf.fit(X_train, y_train) obj_val = [ scorer(clf_trained, X_val, y_val) for scorer in scorers ] else: obj_scores = [[] for _ in range(nscorers)] # Perform k-fold cross-validation for train_index, test_index in kfold.split(X, y): if isinstance(X, pd.DataFrame): X_train_split, X_val_split = ( X.iloc[train_index], X.iloc[test_index], ) y_train_split, y_val_split = ( y.iloc[train_index], y.iloc[test_index], ) else: X_train_split, X_val_split = X[train_index], X[ test_index] y_train_split, y_val_split = y[train_index], y[ test_index] clf = self.model_helper.create_instance(parameter) clf_trained = clf.fit(X_train_split, y_train_split) obj_score = [ scorer(clf_trained, X_val_split, y_val_split) for scorer in scorers ] for i in range(nscorers): obj_scores[i].append(obj_score[i]) # Aggregate CV score obj_val = [np.mean(obj_scores[i]) for i in range(nscorers)] logger.debug(f"Obj k-fold scores - {obj_scores}") # By default we are solving a minimization MOO problem fitnessValue = [ self.best_score[i] - obj_val[i] for i in range(nscorers) ] logger.info(f"Train fitnessValue - {fitnessValue}") except jsonschema.ValidationError as e: logger.error(f"Caught JSON schema validation error.\n{e}") logger.error("Setting fitness (loss) values to infinity") fitnessValue = [np.inf for i in range(nscorers)] logger.info(f"Train fitnessValue - {fitnessValue}") return fitnessValue def time_check_callback(alg): current_time = time.time() elapsed_time = current_time - opt_start_time logger.info( f"NFE Complete - {alg.nfe}, Elapsed Time - {elapsed_time}") parameter_num = len(self.model_helper.param_choices) target_num = len(self.scoring) # Adjust max_evals if not a multiple of population size. This is # required as Platypus performs evaluations in multiples of # population_size. 
    adjusted_max_evals = (self.max_evals //
                          self.population_size) * self.population_size
    if adjusted_max_evals != self.max_evals:
        logger.info(
            f"Adjusting max_evals to {adjusted_max_evals} from specified {self.max_evals}"
        )

    problem = Problem(parameter_num, target_num)
    problem.types[:] = self.model_helper.types
    problem.function = train_test_model

    # Set the variator based on the types of the decision variables:
    # a compound operator is needed when the types are mixed.
    varg = {}
    first_type = problem.types[0].__class__
    all_type_same = all([isinstance(t, first_type) for t in problem.types])
    if not all_type_same:
        varg["variator"] = CompoundOperator(SBX(), HUX(), PM(), BitFlip())

    algorithm = NSGAII(
        problem,
        population_size=self.population_size,
        **varg,
    )

    try:
        algorithm.run(adjusted_max_evals, callback=time_check_callback)
    except MaxBudgetExceededException as e:
        logger.warning(
            f"Max optimization time budget exceeded. Optimization exited prematurely.\n{e}"
        )

    # keep only the nondominated (Pareto-approximate) solutions
    solutions = nondominated(algorithm.result)

    moo_solutions = []
    for solution in solutions:
        vars = []
        for pnum in range(parameter_num):
            vars.append(problem.types[pnum].decode(solution.variables[pnum]))

        vars_dict = self.param_to_dict(
            vars,
            self.model_helper.param_choices,
            self.model_helper.param_categories,
            self.model_helper.param_type,
        )
        moo_solutions.append(self.Soln(vars_dict, solution.objectives))
        logger.info(f"{vars}, {solution.objectives}")

    self.moo_solutions = moo_solutions

    # refit an estimator on the full data for every Pareto solution
    pareto_models = []
    for solution in self.moo_solutions:
        est = self.model_helper.create_instance(solution.variables)
        est_trained = est.fit(X, y)
        pareto_models.append((solution.variables, est_trained))

    self.pareto_models = pareto_models
    return self
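# Standalone sketch of the mixed-type variator pattern used in fit() above:
# when the decision variables mix Real and Binary types, NSGAII is given a
# CompoundOperator combining SBX/PM (real-valued) with HUX/BitFlip (binary).
# The toy problem and its objective are assumptions for illustration only.
from platypus import (NSGAII, Problem, Real, Binary, CompoundOperator,
                      SBX, HUX, PM, BitFlip, nondominated)

problem = Problem(2, 1)                      # two decision variables, one objective
problem.types[:] = [Real(0, 10), Binary(4)]  # mixed types -> compound operator needed
problem.function = lambda x: [x[0] + sum(x[1])]

algorithm = NSGAII(problem, population_size=20,
                   variator=CompoundOperator(SBX(), HUX(), PM(), BitFlip()))
algorithm.run(1000)
print(nondominated(algorithm.result)[0].objectives)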
def __call__(self, optimizer):
    # record how many unique nondominated solutions the optimizer has found so far
    n_solutions = len(platypus.unique(platypus.nondominated(optimizer.result)))
    self.results.append(n_solutions)
    super().__call__(optimizer)
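# Sketch (assumption): the same nondominated-count tracking written as a plain
# run() callback, independent of whatever base class the snippet above extends.
# DTLZ2 is used as a stand-in test problem.
import platypus

counts = []

def count_nondominated(alg):
    # record the size of the unique nondominated set after each iteration
    counts.append(len(platypus.unique(platypus.nondominated(alg.result))))

algorithm = platypus.NSGAII(platypus.DTLZ2())
algorithm.run(2000, callback=count_nondominated)
print(counts[-1], "nondominated solutions after", algorithm.nfe, "evaluations")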