def draw_hv_and_gd(path_list):
    from pymoo.factory import get_performance_indicator

    def is_pareto_efficient_dumb(costs):
        """
        Find the pareto-efficient points
        :param costs: An (n_points, n_costs) array
        :return: A (n_points, ) boolean array, indicating whether each point is Pareto efficient
        """
        is_efficient = np.ones(costs.shape[0], dtype=bool)
        for i, c in enumerate(costs):
            is_efficient[i] = np.all(np.any(costs[:i] > c, axis=1)) and np.all(np.any(costs[i + 1:] > c, axis=1))
        return is_efficient

    for i, (label, pth) in enumerate(path_list):
        d = np.load(pth, allow_pickle=True)
        X = d['X']
        y = d['y']
        objectives = d['objectives'][:, :5] * np.array([-1, 1, 1, 1, -1])
        pareto_set = objectives[is_pareto_efficient_dumb(objectives)]
        # print(label, np.sum(is_pareto_efficient_dumb(objectives)))
        gd = get_performance_indicator("gd", pareto_set)
        hv = get_performance_indicator("hv", ref_point=np.array([0.01, 7.01, 7.01, 7.01, 0.01]))
        print(label)
        for j in range(16):
            cur_objectives = objectives[:(j + 1) * 100]
            print(j)
            print("GD", gd.calc(cur_objectives))
            print("hv", hv.calc(cur_objectives))
def Main(algorithm, problem, pop_size, crossover_probability, mutation_probability, n_partitions, n_gen, seed):
    # Instantiate the problem
    problem = Problems.get(problem)
    reference_directions = get_reference_directions("das-dennis", problem.n_obj, n_partitions=n_partitions)

    # Instantiate the algorithm
    algorithm = NSGA_II.Get_Algorithm_Instance(
        pop_size, crossover_probability, mutation_probability
    ) if (algorithm == Algorithms.NSGAII) else NSGA_III.Get_Algorithm_Instance(
        reference_directions, pop_size, crossover_probability, mutation_probability
    ) if (algorithm == Algorithms.NSGAIII) else None

    # Instantiate the optimizer
    optimizer = Optimizer(problem, algorithm)
    optimization_result = optimizer.Minimize(n_gen, seed)
    objective_spaces_values = optimization_result.F

    pareto_front = problem.pareto_front(reference_directions) if type(
        problem).__name__ == "DTLZ1" else problem.pareto_front()

    # Instantiate the performance indicators (Inverted Generational Distance (IGD) / Inverted Generational Distance Plus (IGD+))
    IGD = get_performance_indicator("igd", pareto_front)
    # IGD_plus = get_performance_indicator("igd+", pareto_front)

    # Print the metrics obtained by the solution set resulting from the multimodal/multi-objective optimization
    print("IGD:", IGD.calc(objective_spaces_values))
def _select(self, X_candidate, Y_candidate, X, Y, batch_size):
    pred_pset, pred_pfront = X_candidate, Y_candidate
    curr_pfront = find_pareto_front(Y)
    ref_point = np.max(np.vstack([Y_candidate, Y]), axis=0)
    hv = get_performance_indicator('hv', ref_point=ref_point)
    idx_choices = np.ma.array(np.arange(len(pred_pset)), mask=False)  # mask array for index choices
    next_batch_indices = []

    # greedily select indices that maximize hypervolume contribution
    for _ in range(batch_size):
        curr_hv = hv.calc(curr_pfront)
        max_hv_contrib = 0.
        max_hv_idx = -1
        for idx in idx_choices.compressed():
            # calculate hypervolume contribution
            new_hv = hv.calc(np.vstack([curr_pfront, pred_pfront[idx]]))
            hv_contrib = new_hv - curr_hv
            if hv_contrib > max_hv_contrib:
                max_hv_contrib = hv_contrib
                max_hv_idx = idx
        if max_hv_idx == -1:
            # if all candidates have no hypervolume contribution, just randomly select one
            max_hv_idx = np.random.choice(idx_choices.compressed())

        idx_choices.mask[max_hv_idx] = True  # mask as selected
        curr_pfront = np.vstack([curr_pfront, pred_pfront[max_hv_idx]])  # add to current pareto front
        next_batch_indices.append(max_hv_idx)
    next_batch_indices = np.array(next_batch_indices)

    X_next = pred_pset[next_batch_indices]
    return X_next
def select(self, solution, surrogate_model, status, transformation):
    pred_pset = solution['x']
    val = surrogate_model.evaluate(pred_pset)
    pred_pfront = val['F']
    pred_pset, pred_pfront = transformation.undo(pred_pset, pred_pfront)

    curr_pfront = status['pfront'].copy()
    hv = get_performance_indicator('hv', ref_point=self.ref_point)
    idx_choices = np.ma.array(np.arange(len(pred_pset)), mask=False)  # mask array for index choices
    next_batch_indices = []

    # greedily select indices that maximize hypervolume contribution
    for _ in range(self.batch_size):
        curr_hv = hv.calc(curr_pfront)
        max_hv_contrib = 0.
        max_hv_idx = -1
        for idx in idx_choices.compressed():
            # calculate hypervolume contribution
            new_hv = hv.calc(np.vstack([curr_pfront, pred_pfront[idx]]))
            hv_contrib = new_hv - curr_hv
            if hv_contrib > max_hv_contrib:
                max_hv_contrib = hv_contrib
                max_hv_idx = idx
        if max_hv_idx == -1:
            # if all candidates have no hypervolume contribution, just randomly select one
            max_hv_idx = np.random.choice(idx_choices.compressed())

        idx_choices.mask[max_hv_idx] = True  # mask as selected
        curr_pfront = np.vstack([curr_pfront, pred_pfront[max_hv_idx]])  # add to current pareto front
        next_batch_indices.append(max_hv_idx)
    next_batch_indices = np.array(next_batch_indices)

    return pred_pset[next_batch_indices], None
def plot_performance_metric(Y, obj_type):
    '''
    Plot the running optimum (single objective) or running hypervolume (multiple objectives) over evaluations.
    '''
    if Y.shape[1] == 1:
        opt_list = []
        if obj_type == ['min']:
            opt_func = np.min
        elif obj_type == ['max']:
            opt_func = np.max
        else:
            raise Exception(f'Invalid objective type {obj_type}')
        for i in range(1, len(Y)):
            opt_list.append(opt_func(Y[:i]))
        plt.plot(np.arange(1, len(Y)), opt_list)
        plt.title('Optimum')
    elif Y.shape[1] > 1:
        Y = convert_minimization(Y, obj_type)
        ref_point = np.max(Y, axis=0)
        indicator = get_performance_indicator('hv', ref_point=ref_point)
        hv_list = []
        for i in range(1, len(Y)):
            hv = indicator.calc(Y[:i])
            hv_list.append(hv)
        plt.plot(np.arange(1, len(Y)), hv_list)
        plt.title('Hypervolume')
    else:
        raise Exception(f'Invalid objective dimension {Y.shape[1]}')
    plt.show()
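# --- Added sketch (not from the original source) ---
# plot_performance_metric above relies on a convert_minimization helper that is not shown.
# A minimal assumed implementation would negate every 'max' objective so that all columns are
# treated as minimization before the reference point is taken as the column-wise maximum.
# This is only an illustrative guess at that helper's behavior.
import numpy as np

def convert_minimization(Y, obj_type):
    # Y: (n_samples, n_obj) objective values; obj_type: list of 'min'/'max' per objective
    Y = np.array(Y, dtype=float, copy=True)
    for j, t in enumerate(obj_type):
        if t == 'max':
            Y[:, j] = -Y[:, j]  # flip maximization objectives into minimization form
        elif t != 'min':
            raise ValueError(f'Invalid objective type {t}')
    return Y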
def compute_mean_HV(algorithm, problems, iterations):
    # fixed problems, can pass later
    hypervolume_scores = np.zeros([len(problems), iterations])
    for i in range(iterations):
        results = []
        ind = 0
        for j in problems:
            problem = get_problem(j[0])
            res = minimize(problem, algorithm, ('n_gen', 5), seed=i, verbose=False)
            if res.F is not None:
                hv = get_performance_indicator("hv", ref_point=j[1]).calc(res.F)
            else:
                hv = 0
            results.append((j[0], res, res.F, hv))
            hypervolume_scores[ind, i] = hv
            ind = ind + 1

    mean_hv = np.mean(hypervolume_scores, axis=1)
    std = np.std(hypervolume_scores, axis=1)
    output = []
    for i in range(len(problems)):
        output.append([problems[i][0], mean_hv[i], std[i]])

    path = 'results/' + algorithm.__module__ + '.csv'
    pd.DataFrame(np.array(output)).to_csv(path, header=('function', 'mean HV', 'std'), index=False)
    print("all runs:", hypervolume_scores)
    print("Output written to " + path + '\n')
    return hypervolume_scores
def E_SMS_EGO_mean_HV(problems, iterations):
    hypervolume_scores = np.zeros([len(problems), iterations])
    for i in range(4, iterations):
        global results
        results = []
        ind = 0
        for j in problems:
            problem = get_problem(j[0])
            res = E_SMS_EGO(problem, eval_budget=25, time_budget=2500)
            if res[1] is not None:
                hv = get_performance_indicator("hv", ref_point=j[1]).calc(np.array(res[1]))
            else:
                hv = 0
            results.append((j[0], res[0].tolist(), res[1].tolist(), hv, j[1].tolist()))
            respath = 'results/E_SMS_EGO_run_' + str(i) + '.csv'
            pd.DataFrame(results).to_csv(respath, header=('problem', 'x', 'y', 'hv', 'ref'), index=False)
            hypervolume_scores[ind, i] = hv
            ind = ind + 1

    mean_hv = np.mean(hypervolume_scores, axis=1)
    std = np.std(hypervolume_scores, axis=1)
    output = []
    for i in range(len(problems)):
        output.append([problems[i][0], mean_hv[i], std[i]])

    path = 'results/E_SMS_EGO_res.csv'
    pd.DataFrame(np.array(output)).to_csv(path, header=('function', 'mean HV', 'std'), index=False)
    print("all runs:", hypervolume_scores)
    print("Output written to " + path + '\n')
def _calc_hv(ref_pt, F, normalized=True):
    # calculate hypervolume on the non-dominated set of F
    front = NonDominatedSorting().do(F, only_non_dominated_front=True)
    nd_F = F[front, :]
    ref_point = 1.01 * ref_pt
    hv = get_performance_indicator("hv", ref_point=ref_point).calc(nd_F)
    if normalized:
        hv = hv / np.prod(ref_point)
    return hv
def propose_next_batch_without_label(curr_pfront, ref_point, pred_pfront, pred_pset, batch_size):
    '''
    Propose next batch of design variables to evaluate by maximizing hypervolume contribution.

    Parameters
    ----------
    curr_pfront: np.array
        Current pareto front of evaluated design samples.
    ref_point: np.array
        Reference point for the hypervolume calculation.
    pred_pfront: np.array
        Predicted pareto front from sampled objective functions.
    pred_pset: np.array
        Predicted pareto set from sampled objective functions.
    batch_size: int
        Batch size of design samples to be proposed.

    Returns
    -------
    X_next: np.array
        Next batch of design variables to evaluate.
    Y_next: np.array
        Predicted objective values of the next batch.
    '''
    # assert len(pred_pset) >= batch_size, "predicted pareto set is smaller than proposed batch size!"
    curr_pfront = curr_pfront.copy()
    hv = get_performance_indicator('hv', ref_point=ref_point)
    idx_choices = np.ma.array(np.arange(len(pred_pset)), mask=False)  # mask array for index choices
    next_batch_indices = []

    if len(pred_pset) < batch_size:
        # print('Predicted pareto set is smaller than proposed batch size and has ' + str(len(pred_pset)) + ' points.')
        next_batch_indices = [0] * (batch_size - len(pred_pset))
        batch_size = len(pred_pset)

    # greedily select indices that maximize hypervolume contribution
    for _ in range(batch_size):
        curr_hv = hv.calc(curr_pfront)
        max_hv_contrib = 0.
        max_hv_idx = -1
        for idx in idx_choices.compressed():
            # calculate hypervolume contribution
            new_hv = hv.calc(np.vstack([curr_pfront, pred_pfront[idx]]))
            hv_contrib = new_hv - curr_hv
            if hv_contrib > max_hv_contrib:
                max_hv_contrib = hv_contrib
                max_hv_idx = idx
        if max_hv_idx == -1:
            # if all candidates have no hypervolume contribution, just randomly select one
            max_hv_idx = np.random.choice(idx_choices.compressed())

        idx_choices.mask[max_hv_idx] = True  # mask as selected
        curr_pfront = np.vstack([curr_pfront, pred_pfront[max_hv_idx]])  # add to current pareto front
        next_batch_indices.append(max_hv_idx)

    X_next = pred_pset[next_batch_indices].copy()
    Y_next = pred_pfront[next_batch_indices].copy()
    return X_next, Y_next
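# --- Added usage sketch (not part of the original code) ---
# Illustrates how propose_next_batch_without_label might be called with toy 2-objective data,
# assuming a pymoo version that still exposes pymoo.factory.get_performance_indicator.
# All arrays below are made up for demonstration only.
import numpy as np

curr_pfront = np.array([[0.2, 0.8], [0.5, 0.5], [0.8, 0.2]])   # objectives of already-evaluated samples
pred_pfront = np.array([[0.1, 0.9], [0.4, 0.4], [0.9, 0.1]])   # predicted Pareto front
pred_pset = np.random.rand(3, 5)                               # corresponding design variables
ref_point = np.max(np.vstack([curr_pfront, pred_pfront]), axis=0) + 0.1

X_next, Y_next = propose_next_batch_without_label(curr_pfront, ref_point,
                                                   pred_pfront, pred_pset, batch_size=2)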
def _do(self, problem, evaluator, algorithm):
    super(MyDisplay, self)._do(problem, evaluator, algorithm)
    self.output.append("Obj1_avg", np.mean(algorithm.pop.get("F")[:, 0]))
    self.output.append("Obj2_avg", np.mean(algorithm.pop.get("F")[:, 1]))
    hv = get_performance_indicator("hv", ref_point=np.array([1, 1]))
    hv_value = hv.calc(algorithm.pop.get("F"))
    self.output.append("hv", hv_value)
    hvs.append(hv_value)
    precisions.append(algorithm.pop.get("F")[:, 0])
    recalls.append(algorithm.pop.get("F")[:, 1])
def calculate_hypervolume(data, ref_point):
    """
    Calculate the hypervolume dominated by a Pareto front with respect to a reference point.

    Args:
        data (np.array): Pareto front.
        ref_point (np.array): Reference point.

    Returns:
        hv (float): Hypervolume size.
    """
    hv = get_performance_indicator("hv", ref_point=ref_point)
    hv = hv.calc(data)
    return hv
def get_indicators(pf):
    gd = get_performance_indicator("gd", pf)
    igd = get_performance_indicator("igd", pf)
    gd_plus = get_performance_indicator("gd+", pf)
    igd_plus = get_performance_indicator("igd+", pf)
    return gd, igd, gd_plus, igd_plus
        M[i] = p_dist
        M[:, i] = p_dist.T
        M[i, i] = np.nan
        in_pool[i] = p
    return in_pool


# load all results from directory tree
result_dirs = sorted(os.listdir("./log_data/"))
results = []
for path in result_dirs:
    result = load_result(os.path.join('./log_data/', path))
    results.append(result)

# set up hypervolume measure
hv_measure = get_performance_indicator("hv", ref_point=np.ones(n_obj) * 1.2)

# set up igd+ measure
initial_samples = 10000
final_n = 200
y = generate_wfg_pareto_samples(initial_samples)
y = down_sample(y, final_n)
igdp_measure = get_performance_indicator("igd+", y)

import matplotlib.pyplot as plt

fig = plt.figure()
ax = fig.gca(projection='3d')
ax.scatter(*y.T)
plt.show()
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
# from pygmo import hypervolume
from pymoo.factory import get_performance_indicator
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting

reference_points = np.ones((1, 2)) * 1.2
hv = get_performance_indicator("hv", ref_point=reference_points[0])

root = '../Data/'
outDir = '../Output/'
columns = ['Method', 'Dataset', 'Motif Length', 'Similarity', 'Entropy']  # ['Entropy', 'TC', 'Gap', 'SimG', 'SimNG', 'GapCon']
algoList = ['NSGAII', 'NSGAII-PM', 'NSGAII-PC', 'NSGAII-PMC']
algoDirDic = {
    'ABC-E': 'ABC',
    'ABC-S': 'ABC',
    'NSGAII': 'NSGA-DefaultMutation-DefaultCrossover',
    'NSGAII-PC': 'NSGA-DefaultMutation-MyCrossover',
    'NSGAII-PM': 'NSGA-MyMutation-DefaultCrossover',
    'NSGAII-PMC': 'NSGA-MyMutation-MyCrossover'
}  # 'NSGA-II',
markerDic = {
    'ABC-S': '+',
    'ABC-E': 'x',
    'NSGAII': "s",
    'NSGAII-PC': '*',
    'NSGAII-PM': 'o',
    'NSGAII-PMC': '^'
            facecolor=None, edgecolor=None,
            orientation='portrait', pad_inches=0.12)


# In[11]:

hv_ref = results['Mpoi']['hv_ref']
p = results['Mpoi']['igd_ref']
p.shape


# In[12]:

from pymoo.factory import get_performance_indicator

hv_measure = get_performance_indicator("hv", ref_point=hv_ref)
baseline_hv = hv_measure.calc(p[::10])


# In[13]:

for key, value in results.items():
    value['hypervolume'] /= baseline_hv

fig_hv = plt.figure(figsize=[10, 5])
ax_hv = fig_hv.gca()

i = 0
for opt, color, marker in zip(chosen_optimisers, colors, markers):
    result = results[opt]
    plot_measure(result, measure="hypervolume", axis=ax_hv,
    result = load_result(os.path.join(problem_path, 'log_data/', path))
    results.append(result)

n_obj = np.shape(results[0]['y'])[-1]
print("n_obj", n_obj)
print("loading ref points from ", ref_path)
print("saving processed results to ", os.path.join(problem_path, "log_data/"))

# get ref points
p = np.load(sys.argv[1])
y_maxs = np.concatenate([r['y'] for r in results if r['name'] != "lhs"], axis=0).reshape(-1, n_obj)
ref_point = y_maxs.max(axis=0)

# set up measurement systems
hv_measure = get_performance_indicator("hv", ref_point=ref_point)
igdp_measure = get_performance_indicator("igd+", p)

# process results, storing in D
D = {}
for result in tqdm(results):
    print(result['name'])
    y = np.array(result['y'])
    if result['name'] == 'lhs':
        hvs = np.zeros((y.shape[0], y.shape[1] + 10))
        igdps = np.zeros((y.shape[0], y.shape[1] + 10))
        for i, yi in tqdm(enumerate(y)):
            for j, yii in enumerate(yi):
                hvs[i, j + 10] = hv_measure.calc(yii)
                igdps[i, j + 10] = igdp_measure.calc(yii)
# The pareto front of a scaled zdt1 problem
pf = get_problem("zdt1").pareto_front()

# The result found by an algorithm
A = pf[::10] * 1.1

# plot the result
Scatter(legend=True).add(pf, label="Pareto-front").add(A, label="Result").show()
# END load_data

# START gd
from pymoo.factory import get_performance_indicator
gd = get_performance_indicator("gd", pf)
print("GD", gd.calc(A))
# END gd

# START gd_plus
from pymoo.factory import get_performance_indicator
gd_plus = get_performance_indicator("gd+", pf)
print("GD+", gd_plus.calc(A))
# END gd_plus

# START igd
from pymoo.factory import get_performance_indicator
igd = get_performance_indicator("igd", pf)
print("IGD", igd.calc(A))
def propose_next_batch(curr_pfront, ref_point, pred_pfront, pred_pset, batch_size, labels):
    '''
    Propose next batch of design variables to evaluate by maximizing hypervolume contribution.
    Greedily add samples with maximum hypervolume from each family.

    Parameters
    ----------
    curr_pfront: np.array
        Current pareto front of evaluated design samples.
    ref_point: np.array
        Reference point for the hypervolume calculation.
    pred_pfront: np.array
        Predicted pareto front from sampled objective functions.
    pred_pset: np.array
        Predicted pareto set from sampled objective functions.
    batch_size: int
        Batch size of design samples to be proposed.
    labels: np.array
        Family labels for pred_pset.

    Returns
    -------
    X_next: np.array
        Next batch of design variables to evaluate.
    Y_next: np.array
        Expected output of next batch of design variables to evaluate.
    family_lbls_next: np.array
        Family labels of proposed batch samples.
    '''
    # assert len(pred_pset) >= batch_size, "predicted pareto set is smaller than proposed batch size!"
    curr_pfront = curr_pfront.copy()
    hv = get_performance_indicator('hv', ref_point=ref_point)
    idx_choices = np.ma.array(np.arange(len(pred_pset)), mask=False)  # mask array for index choices
    iter_idx_choices = np.ma.array(np.arange(len(pred_pset)), mask=False)  # mask array for index choices of unvisited family samples
    next_batch_indices = []
    family_lbls_next = []
    num_families = len(np.unique(labels))
    # print('Number of families is: ' + str(num_families))

    if len(pred_pset) < batch_size:
        # print('Predicted pareto set is smaller than proposed batch size and has ' + str(len(pred_pset)) + ' points.')
        next_batch_indices = [0] * (batch_size - len(pred_pset))
        batch_size = len(pred_pset)

    # greedily select indices that maximize hypervolume contribution
    for _ in range(batch_size):
        # if all families were visited, start a new cycle
        if len(iter_idx_choices.compressed()) == 0:
            iter_idx_choices = idx_choices.copy()
        curr_hv = hv.calc(curr_pfront)
        max_hv_contrib = 0.
        max_hv_idx = -1
        for idx in iter_idx_choices.compressed():
            # calculate hypervolume contribution
            new_hv = hv.calc(np.vstack([curr_pfront, pred_pfront[idx]]))
            hv_contrib = new_hv - curr_hv
            if hv_contrib > max_hv_contrib:
                max_hv_contrib = hv_contrib
                max_hv_idx = idx
        if max_hv_idx == -1:
            # if all candidates have no hypervolume contribution, just randomly select one
            max_hv_idx = np.random.choice(iter_idx_choices.compressed())

        idx_choices.mask[max_hv_idx] = True  # mask as selected
        curr_pfront = np.vstack([curr_pfront, pred_pfront[max_hv_idx]])  # add to current pareto front
        next_batch_indices.append(max_hv_idx)
        family_lbls_next.append(labels[max_hv_idx])

        # find which family to mask all family members as visited in this cycle
        family_ids = np.where(labels == labels[max_hv_idx])[0]
        for fid in family_ids:
            iter_idx_choices.mask[fid] = True

    X_next = pred_pset[next_batch_indices].copy()
    Y_next = pred_pfront[next_batch_indices].copy()
    return X_next, Y_next, family_lbls_next
def calc_hypervolume(pfront, ref_point):
    '''
    Calculate hypervolume of pfront based on ref_point
    '''
    hv = get_performance_indicator('hv', ref_point=ref_point)
    return hv.calc(pfront)
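# --- Added example (for illustration only, assuming the legacy pymoo factory API) ---
# Points that do not dominate the reference point contribute nothing, so the value printed
# below is the area dominated by the front inside the box bounded by ref_point = [1, 1].
import numpy as np

front = np.array([[0.2, 0.8], [0.5, 0.4], [0.9, 0.1]])
print(calc_hypervolume(front, ref_point=np.array([1.0, 1.0])))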
def find_hyper(code):
    # Find the nadir point across all cleaned result files
    hyper = [10000, 111110]
    for x in os.listdir('../data/result_cleaned/'):
        df = pd.read_csv('../data/result_cleaned/' + x)
        # Convert to float
        df['Early Objective'] = df['Early Objective'].astype(float)
        df['Late Objective'] = df['Late Objective'].astype(float)
        # Normalise
        df['Early Objective'] = (df['Early Objective'] - df['Early Objective'].min()) / (df['Early Objective'].max() - df['Early Objective'].min())
        df['Late Objective'] = (df['Late Objective'] - df['Late Objective'].min()) / (df['Late Objective'].max() - df['Late Objective'].min())
        # Find nadir point
        for i, row in df.iterrows():
            if float(row["Early Objective"]) < hyper[0]:
                hyper[0] = float(row["Early Objective"])
            if float(row["Late Objective"]) < hyper[1]:
                hyper[1] = float(row["Late Objective"])

    # Get dict of name -> hypervolume, normalised
    di = {}
    for x in os.listdir('../data/result_cleaned/'):
        df = pd.read_csv('../data/result_cleaned/' + x)
        # Convert to float
        df['Early Objective'] = df['Early Objective'].astype(float)
        df['Late Objective'] = df['Late Objective'].astype(float)
        # Normalise
        df['Early Objective'] = (df['Early Objective'] - df['Early Objective'].min()) / (df['Early Objective'].max() - df['Early Objective'].min())
        df['Late Objective'] = (df['Late Objective'] - df['Late Objective'].min()) / (df['Late Objective'].max() - df['Late Objective'].min())

        data = []
        for i, row in df.iterrows():
            data.append([-float(row["Early Objective"]), -float(row["Late Objective"])])
        A = np.array(data)
        # print(A)
        hv = get_performance_indicator("hv", ref_point=np.array([-1, -1]))
        # print("hv", hv.calc(A))
        di[x] = hv.calc(A)

    l = []
    for key in di:
        l.append(di[key])
    l.sort(reverse=True)
    l = l[:5]

    best_solutions = []
    for x in l:
        for key in di:
            if di[key] == x:
                best_solutions.append(key)
    return best_solutions
def get_hv_indicator(self):
    ref = np.diag(self.model.payoff)
    hv = get_performance_indicator("hv", ref_point=ref)
    self.hv_indicator = hv.do(self.unique_pareto_sols)
variants = [
    "rand1", "rand2", "best1", "best2", "currtobest1",
    "pbest/P-0.05", "pbest/P-0.1", "pbest/P-0.15", "pbest/P-0.2"
]

# file path
base_path = "/home/nick/.gode/mode/paretoFront/" + problem + "/"

# IGD data
IGD_DATA = []

ref_dirs = get_reference_directions("das-dennis", 3, n_partitions=12)
# pf = get_problem(problem)
# metric = IGDPlus(pf=ref_point)
pf = get_problem("wfg1").pareto_front()
metric = get_performance_indicator("igd+", pf)

for varIndex in range(len(variants)):
    variantFiles = []
    for i in range(NUM_EXECS):
        filePath = base_path + variants[varIndex] + "/exec-" + str(i + 1) + '.csv'
        variantFiles.append(pd.read_csv(filePath, sep='\t|\n', engine='python'))

    # file related constants
    GEN = int(len(variantFiles[0]) / 3)  # GEN = QTD_LINES / QTD_OBJS
    NP = len(variantFiles[0].iloc[0])  # NP = QTD_COLS
    # NP = 100
## Normalize the data
def normalize_data(DF, column, minValue, maxValue):
    DF[column] = (DF[column] - minValue) / (maxValue - minValue)


normalize_data(DF, LCC_Var, minLCC, maxLCC)
normalize_data(DF, CO2_Var, minCO2, maxCO2)
normalize_data(DF, WalkScore_Var, minWalkScore, maxWalkScore)

normalize_data(resultsTotal, LCC_Var_Gen, minLCC, maxLCC)
normalize_data(resultsTotal, CO2_Var_Gen, minCO2, maxCO2)
normalize_data(resultsTotal, WalkScore_Var_Gen, minWalkScore, maxWalkScore)

## Calculate the hv
from pymoo.factory import get_performance_indicator

hv = get_performance_indicator(
    "hv", ref_point=np.array(ref_point)
)  # [0.5, 0.5, 0.5]  # ref_point=np.array([maxLCC, maxCO2, maxWalkScore]))

## GIVES MEMORY ERROR IF USED DIRECTLY ##
array1 = np.array(DF[[LCC_Var, CO2_Var, WalkScore_Var]])
# print("hv for the original solutions", hv.calc(array1))

array2 = np.array(resultsTotal[[LCC_Var_Gen, CO2_Var_Gen, WalkScore_Var_Gen]])
generatedArea = hv.calc(array2)
# print("hv for the generated solutions", generatedArea)

originalAreas = []
# prevArr = None
Num_Samplings = int(len(array1) / len(array2)) + 1
for i in range(Num_Samplings):
    'spea2': SPEA2(problem, population_size, variator=variator),
}[algorithm_name]

algorithm.run(args.get_n_generations())

pareto = get_problem(problem_name).pareto_front()
pareto_x = pareto[:, 0]
pareto_y = pareto[:, 1]
pareto_function = interp1d(pareto_x, pareto_y, kind='cubic')

result_nondominated = nondominated(algorithm.result)
result_x = [s.objectives[0] for s in result_nondominated]
result_y = [s.objectives[1] for s in result_nondominated]
result_y_true = [pareto_function(x) for x in result_x]

igd = get_performance_indicator('igd', pareto)
print("MSE: \t", mean_squared_error(result_y_true, result_y), '\n' +
      "IGD: \t", igd.calc(numpy.array(list(zip(result_x, result_y)))))

# region plot
ax = pyplot.figure().add_subplot()
ax.scatter(result_x, result_y, alpha=0.6, marker='x', label='results', color='red')
ax.plot(pareto_x, pareto_y, alpha=0.7, label='pareto front')
ax.legend()
pyplot.grid(alpha=0.3)