def configure_experiment(problems: dict, n_run: int):
    jobs = []
    max_evaluations = 25000

    for run in range(n_run):
        for problem_tag, problem in problems.items():
            jobs.append(
                Job(
                    algorithm=NSGAII(
                        problem=problem,
                        population_size=100,
                        offspring_population_size=100,
                        mutation=PolynomialMutation(
                            probability=1.0 / problem.number_of_variables,
                            distribution_index=20),
                        crossover=SBXCrossover(probability=1.0, distribution_index=20),
                        termination_criterion=StoppingByEvaluations(
                            max_evaluations=max_evaluations),
                    ),
                    algorithm_tag="NSGAII",
                    problem_tag=problem_tag,
                    run=run,
                ))
            jobs.append(
                Job(
                    algorithm=GDE3(
                        problem=problem,
                        population_size=100,
                        cr=0.5,
                        f=0.5,
                        termination_criterion=StoppingByEvaluations(
                            max_evaluations=max_evaluations),
                    ),
                    algorithm_tag="GDE3",
                    problem_tag=problem_tag,
                    run=run,
                ))
            jobs.append(
                Job(
                    algorithm=SMPSO(
                        problem=problem,
                        swarm_size=100,
                        mutation=PolynomialMutation(
                            probability=1.0 / problem.number_of_variables,
                            distribution_index=20),
                        leaders=CrowdingDistanceArchive(100),
                        termination_criterion=StoppingByEvaluations(
                            max_evaluations=max_evaluations),
                    ),
                    algorithm_tag="SMPSO",
                    problem_tag=problem_tag,
                    run=run,
                ))

    return jobs
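# A minimal usage sketch (not part of the original source) showing how the job list
# returned by configure_experiment could be consumed with jMetalPy's experiment runner.
# The chosen problems, output directory, indicator and reference-front path are assumptions,
# and the surrounding module is assumed to already import the algorithm classes used above.
from jmetal.core.quality_indicator import HyperVolume
from jmetal.lab.experiment import Experiment, generate_summary_from_experiment
from jmetal.problem import ZDT1, ZDT2

if __name__ == "__main__":
    output_directory = "data"  # assumed location for the per-run FUN/VAR files
    jobs = configure_experiment(problems={"ZDT1": ZDT1(), "ZDT2": ZDT2()}, n_run=25)

    experiment = Experiment(output_dir=output_directory, jobs=jobs)
    experiment.run()

    # Summarize quality indicators over all runs; the reference fronts are
    # assumed to follow the usual jMetalPy resources layout.
    generate_summary_from_experiment(
        input_dir=output_directory,
        quality_indicators=[HyperVolume([1.0, 1.0])],
        reference_fronts="resources/reference_front",
    )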
def run_optimization(max_eval, num_nodes, pop_size, offspring, trial_name, year):
    os_fold = os_sep()
    if not os.path.exists('results' + os_fold + year):
        os.makedirs('results' + os_fold + year)

    multiprocessing.freeze_support()

    problem = cequal()
    max_evaluations = max_eval

    algorithm = NSGAII(
        population_evaluator=MultiprocessEvaluator(num_nodes),
        problem=problem,
        population_size=pop_size,
        offspring_population_size=offspring,
        mutation=PolynomialMutation(probability=0.2, distribution_index=20),
        crossover=SBXCrossover(probability=0.8, distribution_index=20),
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
        dominance_comparator=DominanceComparator()
    )

    algorithm.observable.register(ProgressBarObserver(max=max_evaluations))
    algorithm.observable.register(observer=BasicObserver())

    algorithm.run()
    print(os.getcwd())

    front = algorithm.get_result()

    print('Algorithm (continuous problem): ' + algorithm.get_name())
    print('Problem: ' + problem.get_name())
    print('Computing time: ' + str(algorithm.total_computing_time / 3600))

    print_function_values_to_file(
        front,
        'results' + os_fold + year + os_fold + 'OBJ_' + algorithm.get_name() + "_" + trial_name + '.txt')
    print_variables_to_file(
        front,
        'results' + os_fold + year + os_fold + 'VAR_' + algorithm.get_name() + "_" + trial_name + '.txt')
def optimal_panels_battery_multi(Ce, Cv, Cp, Cb, Vp, Vb, demand, irradiance, xp_min):
    # N and qinit are expected to be defined at module level.
    energy_building = EnergyBuildingMulti(irradiance, demand, N, Ce, Cv, qinit, Cb, Vb, Cp, Vp, xp_min)

    algorithm = NSGAII(
        problem=energy_building,
        population_size=100,
        offspring_population_size=100,
        mutation=PolynomialMutation(probability=1.0 / energy_building.number_of_variables,
                                    distribution_index=20),
        crossover=SBXCrossover(probability=1.0, distribution_index=20),
        termination_criterion=StoppingByEvaluations(max_evaluations=25000))

    algorithm.run()
    solutions = algorithm.get_result()

    for solution in solutions:
        # print('Solution:', solution.variables, 'Maximum value:', solution.objectives[0])
        Xp = solution.variables[0]
        Xb = solution.variables[1]
        C = solution.objectives[0]
        print(f'Xp {Xp}, Xb {Xb}, Cost {C}')

    # Returns the values taken from the last solution in the result set.
    return Xb, Xp, C, algorithm
def setUp(self):
    self.problem = ZDT1()
    self.population_size = 100
    self.offspring_size = 100
    self.mating_pool_size = 100
    self.max_evaluations = 100
    self.mutation = PolynomialMutation(probability=1.0 / self.problem.number_of_variables,
                                       distribution_index=20)
    self.crossover = SBXCrossover(probability=1.0, distribution_index=20)
def __init__(self):
    super(_Store, self).__init__()

    self.default_observable = DefaultObservable()
    self.default_evaluator = SequentialEvaluator()
    self.default_generator = RandomGenerator()
    self.default_termination_criteria = StoppingByEvaluations(max_evaluations=25000)
    self.default_mutation = {
        'real': PolynomialMutation(probability=0.15, distribution_index=20),
        'binary': BitFlipMutation(0.15)
    }
def test_should_SMPSO_work_when_solving_problem_ZDT1_with_standard_settings(self):
    problem = ZDT1()

    algorithm = SMPSO(
        problem=problem,
        swarm_size=100,
        mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
        leaders=CrowdingDistanceArchive(100),
        termination_criterion=StoppingByEvaluations(max_evaluations=25000),
    )

    algorithm.run()
    front = algorithm.get_result()

    hv = HyperVolume(reference_point=[1, 1])
    value = hv.compute([front[i].objectives for i in range(len(front))])

    self.assertTrue(value >= 0.655)
def nsgaii_train(particoes, regras, instancias, classes):
    problem = MixedIntegerFloatProblem(particoes, regras, instancias, classes)

    max_evaluations = 10
    algorithm = NSGAII(
        problem=problem,
        population_size=10,
        offspring_population_size=10,
        mutation=CompositeMutation([IntegerPolynomialMutation(0.05, 20),
                                    IntegerPolynomialMutation(0.05, 20),
                                    PolynomialMutation(0.05, 20.0)]),
        crossover=CompositeCrossover([IntegerSBXCrossover(probability=0.95, distribution_index=20),
                                      IntegerSBXCrossover(probability=0.95, distribution_index=20),
                                      SBXCrossover(probability=0.95, distribution_index=20)]),
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)
    )

    algorithm.run()
    front = get_non_dominated_solutions(algorithm.get_result())

    # Save results to file
    print_function_values_to_file(front, 'FUN.' + algorithm.label)
    print_variables_to_file(front, 'VAR.' + algorithm.label)

    print('Algorithm (mixed-integer problem): ' + algorithm.get_name())
    print('Problem: ' + problem.get_name())
    print('Computing time: ' + str(algorithm.total_computing_time))

    # Pick the solution with the lowest first objective
    minAcuracia = 0
    index = -1
    for i, f in enumerate(front):
        if minAcuracia > f.objectives[0]:
            minAcuracia = f.objectives[0]
            index = i

    # for variable in front[index].variables:
    #     print(variable.variables)

    particoes = problem.alterar_centroids(front[index].variables[2].variables)
    new_regras = problem.cromossomo_para_regras(front[index].variables[0].variables,
                                                front[index].variables[1].variables,
                                                problem.semente.qtdAntecedenteRegra,
                                                particoes)
    return particoes, new_regras
def test_should_NSGAII_work_when_solving_problem_ZDT1_with_standard_settings(self):
    problem = ZDT1()
    max_evaluations = 25000

    algorithm = NSGAII(
        problem=problem,
        population_size=100,
        offspring_population_size=100,
        mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
        crossover=SBXCrossover(probability=1.0, distribution_index=20),
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
    )

    algorithm.run()
    front = algorithm.get_result()

    hv = HyperVolume(reference_point=[1, 1])
    value = hv.compute([front[i].objectives for i in range(len(front))])

    self.assertTrue(value >= 0.65)
def evaluate(crossover_algo, problem):
    alldata = []
    series = []

    for x in range(10):
        algorithm = GeneticAlgorithm(
            problem=problem,
            population_size=100,
            offspring_population_size=100,
            mutation=PolynomialMutation(1.0 / problem.number_of_variables, 20.0),
            crossover=crossover_algo,
            selection=BinaryTournamentSelection(),
            termination_criterion=StoppingByEvaluations(max_evaluations=500000)
        )

        data = []
        dataobserver = DataObserver(1.0, data)
        algorithm.observable.register(observer=dataobserver)
        algorithm.run()

        result = algorithm.get_result().objectives[0]
        series.append(result)
        alldata.append(data)

    numpy_array = np.array(alldata)
    transpose = numpy_array.T
    transpose_list = transpose.tolist()

    fig = plt.figure(figsize=(60, 42))
    ax = fig.add_axes([0, 0, 1, 1])
    bp = ax.boxplot(transpose_list)
    plt.show()

    print(stats.kruskal(transpose_list[0], transpose_list[1], transpose_list[-1]))

    series = [series]
    print(np.average(series))

    sp.posthoc_dunn([transpose_list[0], transpose_list[1], transpose_list[-1]], p_adjust='holm')
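# Hypothetical call site for evaluate() above (not from the original source).
# The problem and crossover choices are illustrative, and the statistical/plotting
# modules used inside evaluate() (numpy as np, matplotlib.pyplot as plt,
# scipy.stats as stats, scikit_posthocs as sp) are assumed to be imported at module level.
from jmetal.operator import SBXCrossover
from jmetal.problem.singleobjective.unconstrained import Rastrigin

if __name__ == "__main__":
    problem = Rastrigin(number_of_variables=30)
    evaluate(SBXCrossover(probability=0.9, distribution_index=20), problem)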
def configure_experiment(problems: dict, n_run: int):
    jobs = []
    max_evaluations = 25000

    for run in range(n_run):
        for problem_tag, problem in problems.items():
            jobs.append(
                Job(
                    algorithm=DynamicNSGAII(
                        problem=problem,
                        population_size=100,
                        offspring_population_size=100,
                        mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables,
                                                    distribution_index=20),
                        crossover=SBXCrossover(probability=1.0, distribution_index=20),
                        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)
                    ),
                    algorithm_tag='DynamicNSGAII',
                    problem_tag=problem_tag,
                    run=run,
                )
            )

    return jobs
def run_DynamicNSGAII(problems):
    for problem in problems:
        time_counter = TimeCounter(delay=1)
        time_counter.observable.register(problem[1])
        time_counter.start()

        max_evaluations = 25000
        algorithm = DynamicNSGAII(
            problem=problem[1],
            population_size=100,
            offspring_population_size=100,
            mutation=PolynomialMutation(probability=1.0 / problem[1].number_of_variables,
                                        distribution_index=20),
            crossover=SBXCrossover(probability=1.0, distribution_index=20),
            termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
        )

        algorithm.observable.register(observer=ProgressBarObserver(max=max_evaluations))
        algorithm.observable.register(observer=VisualizerObserver())
        algorithm.observable.register(observer=PlotFrontToFileObserver(problem[0] + "_dynamic_front_vis"))
        algorithm.observable.register(observer=WriteFrontToFileObserver(problem[0] + "_dynamic_front"))
        # algorithm.observable.register(observer=BasicObserver())

        algorithm.run()
        front = algorithm.get_result()
        non_dominated_solutions = get_non_dominated_solutions(front)

        # Save to files
        print_function_values_to_file(front, 'FUN.DYNAMICNSGAII.' + problem[0])
        print_variables_to_file(front, 'VAR.DYNAMICNSGAII.' + problem[0])

        # Plot
        plot_front = Plot(title='Pareto front approximation', axis_labels=['x', 'y'])
        plot_front.plot(front, label='DynamicNSGAII-FDA2',
                        filename='DYNAMICNSGAII-' + problem[0], format='png')
def default_mutation(self):
    return {
        'real': PolynomialMutation(probability=0.15, distribution_index=20),
        'binary': BitFlipMutation(0.15)
    }
# Solving the FON problem using NSGA-II
from FON_Problem import FON_Problem

from jmetal.algorithm.multiobjective.nsgaii import NSGAII
from jmetal.operator import SBXCrossover, PolynomialMutation
from jmetal.util.termination_criterion import StoppingByEvaluations
from jmetal.util.observer import ProgressBarObserver, VisualizerObserver
from jmetal.lab.visualization import Plot, InteractivePlot

if __name__ == '__main__':
    problem = FON_Problem()

    max_evaluations = 20000
    algorithm = NSGAII(
        problem=problem,
        population_size=100,
        offspring_population_size=100,
        mutation=PolynomialMutation(probability=0.05, distribution_index=20),
        crossover=SBXCrossover(probability=1.0, distribution_index=20),
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)
    )

    algorithm.observable.register(observer=ProgressBarObserver(max=max_evaluations))
    algorithm.observable.register(observer=VisualizerObserver(reference_front=problem.reference_front))

    algorithm.run()
    front = algorithm.get_result()

    # Plot front
    plot_front = Plot(plot_title='Pareto front approximation',
                      reference_front=problem.reference_front,
                      axis_labels=problem.obj_labels)
    plot_front.plot(front, label=algorithm.label, filename=algorithm.get_name())

    # Plot interactive front
    plot_front = InteractivePlot(plot_title='Pareto front approximation',
                                 reference_front=problem.reference_front,
                                 axis_labels=problem.obj_labels)
    plot_front.plot(front, label=algorithm.label, filename=algorithm.get_name())
from jmetal.util.solution import (
    get_non_dominated_solutions,
    print_function_values_to_file,
    print_variables_to_file,
)
from jmetal.util.termination_criterion import StoppingByEvaluations

if __name__ == "__main__":
    problem = MixedIntegerFloatProblem(10, 10, 100, -100, -1000, 1000)

    max_evaluations = 25000
    algorithm = NSGAII(
        problem=problem,
        population_size=100,
        offspring_population_size=100,
        mutation=CompositeMutation([
            IntegerPolynomialMutation(0.01, 20),
            PolynomialMutation(0.01, 20.0)
        ]),
        crossover=CompositeCrossover([
            IntegerSBXCrossover(probability=1.0, distribution_index=20),
            SBXCrossover(probability=1.0, distribution_index=20),
        ]),
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
    )

    algorithm.run()
    front = get_non_dominated_solutions(algorithm.get_result())

    # Save results to file
    print_function_values_to_file(front, "FUN." + algorithm.label)
    print_variables_to_file(front, "VAR." + algorithm.label)
def configure_experiment(problems: dict, n_run: int):
    jobs = []

    for run in range(n_run):
        for problem_tag, problem in problems.items():
            jobs.append(
                Job(
                    algorithm=NSGAII(
                        problem=problem,
                        population_size=POPULATION_SIZE,
                        offspring_population_size=POPULATION_SIZE,
                        mutation=IntegerPolynomialMutation(probability=0.05, distribution_index=20),
                        crossover=IntegerSBXCrossover(probability=0.3, distribution_index=20),
                        termination_criterion=StoppingByEvaluationsCustom(
                            max_evaluations=max_evaluations,
                            reference_point=REFERENCE_POINT,
                            AlgorithmName='NSGAII')
                        # termination_criterion=stopCriterion
                    ),
                    algorithm_tag='NSGAII',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=NSGAIII(
                        problem=problem,
                        population_size=POPULATION_SIZE,
                        mutation=IntegerPolynomialMutation(probability=0.05, distribution_index=20),
                        crossover=IntegerSBXCrossover(probability=0.3, distribution_index=20),
                        reference_directions=UniformReferenceDirectionFactory(2, n_points=91),
                        termination_criterion=StoppingByEvaluationsCustom(
                            max_evaluations=max_evaluations,
                            reference_point=REFERENCE_POINT,
                            AlgorithmName='NSGAIII')
                        # termination_criterion=stopCriterion
                    ),
                    algorithm_tag='NSGAIII',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=SPEA2(
                        problem=problem,
                        population_size=POPULATION_SIZE,
                        offspring_population_size=POPULATION_SIZE,
                        mutation=IntegerPolynomialMutation(probability=0.05, distribution_index=20),
                        crossover=IntegerSBXCrossover(probability=0.3, distribution_index=20),
                        termination_criterion=StoppingByEvaluationsCustom(
                            max_evaluations=max_evaluations,
                            reference_point=REFERENCE_POINT,
                            AlgorithmName='SPEA2')
                    ),
                    algorithm_tag='SPEA2',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=HYPE(
                        problem=problem,
                        reference_point=reference_point,
                        population_size=POPULATION_SIZE,
                        offspring_population_size=POPULATION_SIZE,
                        mutation=IntegerPolynomialMutation(probability=0.05, distribution_index=20),
                        crossover=IntegerSBXCrossover(probability=0.3, distribution_index=20),
                        termination_criterion=StoppingByEvaluationsCustom(
                            max_evaluations=max_evaluations,
                            reference_point=REFERENCE_POINT,
                            AlgorithmName='HYPE')
                    ),
                    algorithm_tag='HYPE',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=MOCell(
                        problem=problem,
                        population_size=POPULATION_SIZE,
                        neighborhood=C9(4, 4),
                        archive=CrowdingDistanceArchive(100),
                        mutation=IntegerPolynomialMutation(probability=0.05, distribution_index=20),
                        crossover=IntegerSBXCrossover(probability=0.3, distribution_index=20),
                        termination_criterion=StoppingByEvaluationsCustom(
                            max_evaluations=max_evaluations,
                            reference_point=REFERENCE_POINT,
                            AlgorithmName='MOCell')
                    ),
                    algorithm_tag='MOCELL',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=OMOPSO(
                        problem=problem,
                        swarm_size=swarm_size,
                        epsilon=0.0075,
                        uniform_mutation=UniformMutation(probability=0.05, perturbation=0.5),
                        non_uniform_mutation=NonUniformMutation(
                            mutation_probability,
                            perturbation=0.5,
                            max_iterations=int(max_evaluations / swarm_size)),
                        leaders=CrowdingDistanceArchive(10),
                        termination_criterion=StoppingByEvaluationsCustom(
                            max_evaluations=max_evaluations,
                            reference_point=REFERENCE_POINT,
                            AlgorithmName='OMOPSO')
                    ),
                    algorithm_tag='OMOPSO',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=SMPSO(
                        problem=problem,
                        swarm_size=POPULATION_SIZE,
                        mutation=PolynomialMutation(probability=0.05, distribution_index=20),
                        leaders=CrowdingDistanceArchive(20),
                        termination_criterion=StoppingByEvaluationsCustom(
                            max_evaluations=max_evaluations,
                            reference_point=REFERENCE_POINT,
                            AlgorithmName='SMPSO')
                    ),
                    algorithm_tag='SMPSO',
                    problem_tag=problem_tag,
                    run=run,
                )
            )

    return jobs
def run() -> None:
    target_path = os.path.join(RESULTS_DIR, f"test_{datetime.now().isoformat()}.json")

    first_execution_unit = ExecutionUnit(
        algorithm_cls=ClonalSelection,
        problem_name="DeJong1"
    ).register_run(
        parameters={
            "problem": DeJong1(-5.12, 5.12, number_of_variables=50),
            "population_size": 200,
            "selection_size": 30,
            "random_cells_number": 50,
            "clone_rate": 20,
            "mutation": PolynomialMutation(probability=1 / 50, distribution_index=20),
            "termination_criterion": StoppingByEvaluations(max_evaluations=500)
        })

    problem_1 = DeJong1(-5.12, 5.12, number_of_variables=50)
    problem_2 = DeJong1(-5.12, 5.12, number_of_variables=50)
    second_execution_unit = ExecutionUnit(
        algorithm_cls=ClonalSelectionCognitive,
        problem_name="DeJong1"
    ).register_run(
        parameters={
            "clonal_selections": [
                ClonalSelection(
                    problem=problem_1,
                    population_size=200,
                    selection_size=30,
                    random_cells_number=50,
                    clone_rate=20,
                    mutation=PolynomialMutation(probability=1 / problem_1.number_of_variables,
                                                distribution_index=20),
                ),
                ClonalSelection(
                    problem=problem_2,
                    population_size=200,
                    selection_size=30,
                    random_cells_number=50,
                    clone_rate=20,
                    mutation=PolynomialMutation(probability=2 / problem_2.number_of_variables,
                                                distribution_index=20),
                )
            ],
            "mix_rate": 0.4,
            "mixes_number": 2,
            "termination_criterion": StoppingByEvaluations(max_evaluations=500)
        })

    runner = MultiAlgorithmRunner(
        execution_units=[first_execution_unit, second_execution_unit])

    print("Runner starts evaluation.")
    results = runner.run_all()

    print("Results")
    for run_result in results.run_results:
        print(run_result)

    save_execution_history(execution_history=results, path=target_path)
from jmetal.algorithm.singleobjective.evolution_strategy import EvolutionStrategy
from jmetal.operator import PolynomialMutation
from jmetal.problem import Sphere
from jmetal.util.termination_criterion import StoppingByEvaluations

if __name__ == "__main__":
    problem = Sphere(number_of_variables=10)

    algorithm = EvolutionStrategy(
        problem=problem,
        mu=10,
        lambda_=10,
        elitist=True,
        mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables),
        termination_criterion=StoppingByEvaluations(max_evaluations=25000),
    )

    algorithm.run()
    result = algorithm.get_result()

    print("Algorithm: " + algorithm.get_name())
    print("Problem: " + problem.get_name())
    print("Solution: " + str(result.variables[0]))
    print("Fitness: " + str(result.objectives[0]))
    print("Computing time: " + str(algorithm.total_computing_time))
def train(self):
    problem = SVM_Problem(X=self.Xtrain, Y=self.Ytrain)
    # problem.reference_front = read_solutions(filename='resources/reference_front/ZDT1.pf')

    max_evaluations = self.maxEvaluations
    algorithm = NSGAII(
        problem=problem,
        population_size=self.popsize,
        offspring_population_size=self.popsize,
        mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
        crossover=SBXCrossover(probability=1.0, distribution_index=20),
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations))

    algorithm.observable.register(observer=ProgressBarObserver(max=max_evaluations))
    # algorithm.observable.register(observer=VisualizerObserver(reference_front=problem.reference_front))

    algorithm.run()
    front = algorithm.get_result()

    # Plot front
    plot_front = Plot(plot_title='Pareto front approximation',
                      reference_front=None,
                      axis_labels=problem.obj_labels)
    plot_front.plot(front, label=algorithm.label, filename=algorithm.get_name())

    # Plot interactive front
    plot_front = InteractivePlot(plot_title='Pareto front approximation', axis_labels=problem.obj_labels)
    plot_front.plot(front, label=algorithm.label, filename=algorithm.get_name())

    # Save results to file
    print_function_values_to_file(front, 'FUN.' + algorithm.label)
    print_variables_to_file(front, 'VAR.' + algorithm.label)

    print('Algorithm (continuous problem): ' + algorithm.get_name())
    print("-----------------------------------------------------------------------------")
    print('Problem: ' + problem.get_name())
    print('Computing time: ' + str(algorithm.total_computing_time))

    # Normalize the objective values of the front
    normed_matrix = normalize(list(map(lambda result: result.objectives, front)))

    # Sum the normalized objectives of each solution and pick the best (minimum)
    scores = list(map(lambda item: sum(item), normed_matrix))
    solution = front[scores.index(min(scores))]

    # Extract the decision variables of the selected solution
    self.gamma = solution.variables[0]
    self.C = solution.variables[1]
    self.coef0 = solution.variables[2]
    self.degree = solution.variables[3]
    self.kernel = solution.variables[4]
    self.instances = solution.masks[0]
    self.attributes = solution.masks[1]

    # Select the chosen instances and attributes from the training data
    X = self.Xtrain[self.instances, :]
    X = X[:, self.attributes]
    Y = self.Ytrain[self.instances]

    print(*front, sep=", ")

    # Construct the model
    self.model = SVM(Xtrain=X, Ytrain=Y, kernel=self.kernel, C=self.C, degree=self.degree,
                     coef0=self.coef0, gamma=self.gamma, seed=self.seed).train()

    print('Objectives: ', *solution.objectives, sep=", ")

    return self.model
def run() -> None:
    problem = Ackley(number_of_variables=150)
    # problem = Griewank(number_of_variables=150)
    # problem = Schwefel(number_of_variables=150)
    # problem = SchafferF7(number_of_variables=150)

    mutation = PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20)
    local_search = MemeticLocalSearch(problem, mutation, StoppingByEvaluations(250))
    local_search2 = MemeticLocalSearch(problem, mutation, StoppingByEvaluations(500))

    max_evaluations = 1000000
    drawing_class = DrawingClass(registered_runs=6)
    target_path = os.path.join(RESULTS_DIR, f"test_{datetime.now().isoformat()}.json")

    first_execution_unit = ExecutionUnit(
        algorithm_cls=MemeticCognitiveAlgorithm,
        problem_name="Ackley",
        drawing_fun=drawing_class.draw_avg_function,
        drawing_series_labels=["RUN1", "RUN1"]
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 1000,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "species1": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "species2": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "local_search": local_search,
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 1000,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "species1": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "species2": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "local_search": local_search,
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    )

    second_execution_unit = ExecutionUnit(
        algorithm_cls=MemeticCognitiveAlgorithm,
        problem_name="Ackley",
        drawing_fun=drawing_class.draw_avg_function,
        drawing_series_labels=["RUN2", "RUN2"]
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 2500,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "species1": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "species2": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search2,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "local_search": local_search,
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 2500,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "species1": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "species2": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search2,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "local_search": local_search,
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    )

    # modify genetic to get history (see MemeticCognitiveAlgorithm)
    third_execution_unit = ExecutionUnit(
        algorithm_cls=GeneticAlgorithm,
        problem_name="Ackley",
        drawing_fun=drawing_class.draw_avg_function,
        drawing_series_labels=["GENETIC", "GENETIC"]
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 1000,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 1000,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    )

    runner = MultiAlgorithmRunner(
        execution_units=[first_execution_unit, second_execution_unit, third_execution_unit],
        drawing_properties=DrawingProperties(title='Memetic1',
                                             target_location=os.path.join(RESULTS_DIR, "photo.png"))
    )

    print("Runner starts evaluation.")
    results = runner.run_all()

    print("Results")
    for run_result in results.run_results:
        print(run_result)

    save_execution_history(execution_history=results, path=target_path)
def run(self) -> List[S]:
    pool_1_size = self.population_size
    pool_2_size = self.population_size

    selection_operator_1 = BinaryTournamentSelection()
    crossover_operator_1 = SBXCrossover(1.0, 20.0)
    mutation_operator_1 = PolynomialMutation(1.0 / self.problem.number_of_variables, 20.0)

    selection_operator_2 = DifferentialEvolutionSelection()
    crossover_operator_2 = DifferentialEvolutionCrossover(0.2, 0.5, 0.5)

    dominance = DominanceComparator()

    max_iterations = self.max_iterations
    iterations = 0

    parent_1: List[FloatSolution] = [None, None]
    generational_hv: List[float] = []
    current_gen = 0

    """Create the initial subpopulation pools and evaluate them"""
    pool_1: List[FloatSolution] = []
    for i in range(pool_1_size):
        pool_1.append(self.problem.create_solution())
        pool_1[i] = self.problem.evaluate(pool_1[i])

    pool_2: List[FloatSolution] = []
    for i in range(pool_2_size):
        pool_2.append(self.problem.create_solution())
        pool_2[i] = self.problem.evaluate(pool_2[i])

    evaluations = pool_1_size + pool_2_size
    mix = self.mix_interval

    problem = self.problem
    # problem.reference_front = read_solutions(
    #     filename="./resources/" + problem.get_name() + ".3D.pf"
    # )
    # h = HyperVolume(reference_point=[1, 1, 1])
    h = HyperVolume(reference_point=[1] * self.problem.number_of_objectives)

    initial_population = True

    """The main evolutionary cycle"""
    while iterations < max_iterations:
        combi: List[FloatSolution] = []

        if not initial_population:
            offspring_pop_1: List[FloatSolution] = []
            offspring_pop_2: List[FloatSolution] = []

            """Evolve pool 1"""
            for i in range(pool_1_size):
                parent_1[0] = selection_operator_1.execute(pool_1)
                parent_1[1] = selection_operator_1.execute(pool_1)
                child_1: FloatSolution = crossover_operator_1.execute(parent_1)[0]
                child_1 = mutation_operator_1.execute(child_1)
                child_1 = problem.evaluate(child_1)
                evaluations += 1
                offspring_pop_1.append(child_1)

            """Evolve pool 2"""
            for i in range(pool_2_size):
                parent_2: List[FloatSolution] = selection_operator_2.execute(pool_2)
                crossover_operator_2.current_individual = pool_2[i]
                child_2 = crossover_operator_2.execute(parent_2)
                child_2 = problem.evaluate(child_2[0])
                evaluations += 1
                result = dominance.compare(pool_2[i], child_2)
                if result == -1:
                    offspring_pop_2.append(pool_2[i])
                elif result == 1:
                    offspring_pop_2.append(child_2)
                else:
                    offspring_pop_2.append(child_2)
                    offspring_pop_2.append(pool_2[i])

            ind_1 = pool_1[random.randint(0, pool_1_size - 1)]
            ind_2 = pool_2[random.randint(0, pool_2_size - 1)]
            offspring_pop_1.append(ind_1)
            offspring_pop_2.append(ind_2)

            offspring_pop_1.extend(pool_1)
            pool_1 = self.r.replace(offspring_pop_1[:pool_1_size], offspring_pop_1[pool_1_size:])
            pool_2 = self.r.replace(offspring_pop_2[:pool_2_size], offspring_pop_2[pool_2_size:])

            mix -= 1
            if mix == 0:
                """Time to perform fitness sharing"""
                mix = self.mix_interval
                combi = combi + pool_1 + pool_2
                print("Combi size: ", len(combi))

                """pool1size/10"""
                combi = self.r.replace(
                    combi[:int(pool_1_size / 10)],
                    combi[int(pool_1_size / 10):len(combi)],
                )
                print(
                    "Sizes: ",
                    len(pool_1) + len(combi),
                    len(pool_2) + len(combi),
                    "\n",
                )
                pool_1 = self.r.replace(pool_1, combi)
                pool_2 = self.r.replace(pool_2, combi)

        if initial_population:
            initial_population = False

        iterations += 1
        hval_1 = h.compute([s.objectives for s in pool_1])
        hval_2 = h.compute([s.objectives for s in pool_2])
        print("Iterations: ", str(iterations))
        print("hval_1: ", str(hval_1))
        print("hval_2: ", str(hval_2), "\n")

        new_gen = int(evaluations / self.report_interval)
        if new_gen > current_gen:
            combi = combi + pool_1 + pool_2
            combi = self.r.replace(combi[:(2 * pool_1_size)], combi[(2 * pool_1_size):])
            hval = h.compute([s.objectives for s in combi])
            for i in range(current_gen, new_gen, 1):
                generational_hv.append(hval)
            current_gen = new_gen
            """#Write runtime generational HV to file"""

    """Return the first non dominated front"""
    combi_ini: List[FloatSolution] = []
    combi_ini.extend(pool_1)
    combi_ini.extend(pool_2)
    combi_ini = self.r.replace(
        combi_ini[:pool_1_size + pool_2_size],
        combi_ini[pool_1_size + pool_2_size:],
    )
    return combi_ini
"""#### Running the Algorithms <a id='re_run'> </a> [Back to top](#top) """ swarm_size = 30 max_evaluations = 9000 mutation_probability = 1.0 / objective_function.number_of_variables """**SMPSO**""" smpso_enhanced = SMPSO(problem=objective_function, swarm_size=swarm_size, mutation=PolynomialMutation( probability=mutation_probability, distribution_index=20), leaders=CrowdingDistanceArchive(100), termination_criterion=StoppingByEvaluations( max_evaluations=max_evaluations)) smpso_enhanced.run() print('Total computing time for SMPSO: {:.4f} s'.format( smpso_enhanced.total_computing_time)) """**OMOPSO**""" ompso_enhanced = OMOPSO( problem=objective_function, swarm_size=swarm_size, epsilon=0.0075, uniform_mutation=UniformMutation(probability=mutation_probability,
from jmetal.algorithm.singleobjective.local_search import LocalSearch
from jmetal.operator import PolynomialMutation
from jmetal.problem.singleobjective.unconstrained import Rastrigin
from jmetal.util.solution import print_function_values_to_file, print_variables_to_file
from jmetal.util.termination_criterion import StoppingByEvaluations

if __name__ == "__main__":
    problem = Rastrigin(10)
    max_evaluations = 100000

    algorithm = LocalSearch(
        problem=problem,
        mutation=PolynomialMutation(1.0 / problem.number_of_variables, 20.0),
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
    )

    algorithm.run()
    result = algorithm.get_result()

    # Save results to file
    print_function_values_to_file(result, "FUN." + algorithm.get_name() + "." + problem.get_name())
    print_variables_to_file(result, "VAR." + algorithm.get_name() + "." + problem.get_name())

    print("Algorithm: " + algorithm.get_name())
    print("Problem: " + problem.get_name())
    print("Solution: " + str(result.variables))
    print("Fitness: " + str(result.objectives[0]))
    print("Computing time: " + str(algorithm.total_computing_time))
def configure_experiment(problems: dict, n_run: int):
    jobs = []
    num_processes = config.num_cpu
    population = 10
    generations = 10
    max_evaluations = population * generations

    for run in range(n_run):
        for problem_tag, problem in problems.items():
            reference_point = FloatSolution([0, 0], [1, 1], problem.number_of_objectives)
            reference_point.objectives = np.repeat(1, problem.number_of_objectives).tolist()

            jobs.append(
                Job(
                    algorithm=NSGAIII(
                        problem=problem,
                        population_size=population,
                        mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables,
                                                    distribution_index=20),
                        crossover=SBXCrossover(probability=1.0, distribution_index=20),
                        population_evaluator=MultiprocessEvaluator(processes=num_processes),
                        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
                        reference_directions=UniformReferenceDirectionFactory(4, n_points=100),
                    ),
                    algorithm_tag='NSGAIII',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=GDE3(
                        problem=problem,
                        population_size=population,
                        population_evaluator=MultiprocessEvaluator(processes=num_processes),
                        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
                        cr=0.5,
                        f=0.5,
                    ),
                    algorithm_tag='GDE3',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=SPEA2(
                        problem=problem,
                        population_size=population,
                        mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables,
                                                    distribution_index=20),
                        crossover=SBXCrossover(probability=1.0, distribution_index=20),
                        population_evaluator=MultiprocessEvaluator(processes=num_processes),
                        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
                        offspring_population_size=population,
                    ),
                    algorithm_tag='SPEA2',
                    problem_tag=problem_tag,
                    run=run,
                )
            )

    return jobs
def run(self) -> List[S]:
    # selection operators
    selection_operator_1 = BinaryTournamentSelection()
    selection_operator_2 = DifferentialEvolutionSelection()
    # crossover operators
    crossover_operator_1 = SBXCrossover(1.0, 20.0)
    crossover_operator_2 = DifferentialEvolutionCrossover(0.2, 0.5, 0.5)
    crossover_operator_3 = DifferentialEvolutionCrossover(1.0, 0.5, 0.5)
    # mutation operator
    mutation_operator_1 = PolynomialMutation(1.0 / self.problem.number_of_variables, 20.0)
    # dominance comparator
    dominance = DominanceComparator()

    # array that stores the "generational" HV quality
    generational_hv: List[float] = []

    parent_1: List[FloatSolution] = [None, None]
    parent_2: List[FloatSolution] = []
    parent_3: List[FloatSolution] = []

    # initialize some local and global variables
    pool_1: List[FloatSolution] = []
    pool_2: List[FloatSolution] = []

    # size of elite subset used for fitness sharing between subpopulations
    nrOfDirectionalSolutionsToEvolve = int(self.population_size / 5)
    # subpopulation 1
    pool_1_size = int(self.population_size - (nrOfDirectionalSolutionsToEvolve / 2))
    # subpopulation 2
    pool_2_size = int(self.population_size - (nrOfDirectionalSolutionsToEvolve / 2))

    print(str(pool_1_size) + " - " + str(nrOfDirectionalSolutionsToEvolve) + " - " + str(self.mix_interval))

    evaluations = 0
    current_gen = 0

    directionalArchiveSize = 2 * self.population_size
    weights = self.__create_uniform_weights(directionalArchiveSize, self.problem.number_of_objectives)
    directionalArchive = self.__create_directional_archive(weights)
    neighbourhoods = self.__create_neighbourhoods(directionalArchive, self.population_size)
    nrOfReplacements = 1

    iniID = 0

    # Create the initial pools
    # pool1
    pool_1: List[FloatSolution] = []
    for _ in range(pool_1_size):
        new_solution = self.problem.create_solution()
        new_solution = self.problem.evaluate(new_solution)
        evaluations += 1
        pool_1.append(new_solution)
        self.__update_extreme_values(new_solution)
        dr = directionalArchive[iniID]
        dr.curr_sol = new_solution
        iniID += 1

    # pool2
    pool_2: List[FloatSolution] = []
    for _ in range(pool_2_size):
        new_solution = self.problem.create_solution()
        new_solution = self.problem.evaluate(new_solution)
        evaluations += 1
        pool_2.append(new_solution)
        self.__update_extreme_values(new_solution)
        dr = directionalArchive[iniID]
        dr.curr_sol = new_solution
        iniID += 1

    # directional archive initialization
    pool_A: List[FloatSolution] = []
    while iniID < directionalArchiveSize:
        new_solution = self.problem.create_solution()
        new_solution = self.problem.evaluate(new_solution)
        evaluations += 1
        pool_A.append(new_solution)
        self.__update_extreme_values(new_solution)
        dr = directionalArchive[iniID]
        dr.curr_sol = new_solution
        iniID += 1

    mix = self.mix_interval
    h = HyperVolume(reference_point=[1] * self.problem.number_of_objectives)

    insertionRate: List[float] = [0, 0, 0]
    bonusEvals: List[int] = [0, 0, nrOfDirectionalSolutionsToEvolve]
    testRun = True

    # record the generational HV of the initial population
    combiAll: List[FloatSolution] = []
    cGen = int(evaluations / self.report_interval)
    if cGen > 0:
        combiAll = pool_1 + pool_2 + pool_A
        combiAll = self.r.replace(
            combiAll[:pool_1_size + pool_2_size],
            combiAll[pool_1_size + pool_2_size:],
        )
        hval = h.compute([s.objectives for s in combiAll])
        for _ in range(cGen):
            generational_hv.append(hval)
        current_gen = cGen

    # the main loop of the algorithm
    while evaluations < self.max_evaluations:
        offspringPop1: List[FloatSolution] = []
        offspringPop2: List[FloatSolution] = []
        offspringPop3: List[FloatSolution] = []
        dirInsertPool1: List[FloatSolution] = []
        dirInsertPool2: List[FloatSolution] = []
        dirInsertPool3: List[FloatSolution] = []

        # evolve pool1 - using SPEA2 evolutionary model
        nfe: int = 0
        while nfe < (pool_1_size + bonusEvals[0]):
            parent_1[0] = selection_operator_1.execute(pool_1)
            parent_1[1] = selection_operator_1.execute(pool_1)
            child1a: FloatSolution = crossover_operator_1.execute(parent_1)[0]
            child1a = mutation_operator_1.execute(child1a)
            child1a = self.problem.evaluate(child1a)
            evaluations += 1
            nfe += 1
            offspringPop1.append(child1a)
            dirInsertPool1.append(child1a)

        # evolve pool2 - using DEMO SP evolutionary model
        i: int = 0
        unselectedIDs: List[int] = []
        for ID in range(len(pool_2)):
            unselectedIDs.append(ID)

        nfe = 0
        while nfe < (pool_2_size + bonusEvals[1]):
            index = random.randint(0, len(unselectedIDs) - 1)
            i = unselectedIDs[index]
            unselectedIDs.pop(index)

            parent_2 = selection_operator_2.execute(pool_2)
            crossover_operator_2.current_individual = pool_2[i]
            child2 = crossover_operator_2.execute(parent_2)
            child2 = self.problem.evaluate(child2[0])
            evaluations += 1
            nfe += 1

            result = dominance.compare(pool_2[i], child2)
            if result == -1:
                # solution i dominates child
                offspringPop2.append(pool_2[i])
            elif result == 1:
                # child dominates
                offspringPop2.append(child2)
            else:
                # the two solutions are non-dominated
                offspringPop2.append(child2)
                offspringPop2.append(pool_2[i])

            dirInsertPool2.append(child2)

            if len(unselectedIDs) == 0:
                for ID in range(len(pool_2)):
                    unselectedIDs.append(random.randint(0, len(pool_2) - 1))

        # evolve pool3 - Directional Decomposition DE/rand/1/bin
        IDs = self.__compute_neighbourhood_Nfe_since_last_update(
            neighbourhoods, directionalArchive, nrOfDirectionalSolutionsToEvolve)

        nfe = 0
        for j in range(len(IDs)):
            if nfe < bonusEvals[2]:
                nfe += 1
            else:
                break

            cID = IDs[j]
            chosenSol: FloatSolution = None
            if directionalArchive[cID].curr_sol is not None:
                chosenSol = directionalArchive[cID].curr_sol
            else:
                chosenSol = pool_1[0]
                print("error!")

            parent_3: List[FloatSolution] = [None, None, None]
            r1 = random.randint(0, len(neighbourhoods[cID]) - 1)
            r2 = random.randint(0, len(neighbourhoods[cID]) - 1)
            r3 = random.randint(0, len(neighbourhoods[cID]) - 1)
            while r2 == r1:
                r2 = random.randint(0, len(neighbourhoods[cID]) - 1)
            while r3 == r1 or r3 == r2:
                r3 = random.randint(0, len(neighbourhoods[cID]) - 1)

            parent_3[0] = directionalArchive[r1].curr_sol
            parent_3[1] = directionalArchive[r2].curr_sol
            parent_3[2] = directionalArchive[r3].curr_sol

            crossover_operator_3.current_individual = chosenSol
            child3 = crossover_operator_3.execute(parent_3)[0]
            child3 = mutation_operator_1.execute(child3)
            child3 = self.problem.evaluate(child3)
            evaluations += 1
            dirInsertPool3.append(child3)

        # compute directional improvements
        # pool1
        improvements = 0
        for j in range(len(dirInsertPool1)):
            testSol = dirInsertPool1[j]
            self.__update_extreme_values(testSol)
            improvements += self.__update_neighbourhoods(directionalArchive, testSol, nrOfReplacements)
        insertionRate[0] += (1.0 * improvements) / len(dirInsertPool1)

        # pool2
        improvements = 0
        for j in range(len(dirInsertPool2)):
            testSol = dirInsertPool2[j]
            self.__update_extreme_values(testSol)
            improvements += self.__update_neighbourhoods(directionalArchive, testSol, nrOfReplacements)
        insertionRate[1] += (1.0 * improvements) / len(dirInsertPool2)

        # pool3
        improvements = 0
        for j in range(len(dirInsertPool3)):
            testSol = dirInsertPool3[j]
            self.__update_extreme_values(testSol)
            improvements += self.__update_neighbourhoods(directionalArchive, testSol, nrOfReplacements)
        # in Java, dividing a floating-point number by 0 returns NaN;
        # in Python it raises an exception, hence the explicit check
        if len(dirInsertPool3) == 0:
            insertionRate[2] = None
        else:
            insertionRate[2] += (1.0 * improvements) / len(dirInsertPool3)

        for dr in directionalArchive:
            offspringPop3.append(dr.curr_sol)

        offspringPop1 = offspringPop1 + pool_1
        pool_1 = self.r.replace(offspringPop1[:pool_1_size], offspringPop1[pool_1_size:])
        pool_2 = self.r.replace(offspringPop2[:pool_2_size], offspringPop2[pool_2_size:])

        combi: List[FloatSolution] = []

        mix -= 1
        if mix == 0:
            mix = self.mix_interval
            combi = combi + pool_1 + pool_2 + offspringPop3
            print("Combi size: " + str(len(combi)))

            combi = self.r.replace(
                combi[:nrOfDirectionalSolutionsToEvolve],
                combi[nrOfDirectionalSolutionsToEvolve:],
            )

            insertionRate[0] /= self.mix_interval
            insertionRate[1] /= self.mix_interval
            if insertionRate[2] is not None:
                insertionRate[2] /= self.mix_interval

            """
            print(
                "Insertion rates: " + str(insertionRate[0]) + " - " + str(insertionRate[1]) + " - "
                + str(insertionRate[2]) + " - Test run:" + str(testRun)
            )
            """

            if testRun:
                if (insertionRate[0] > insertionRate[1]) and (insertionRate[0] > insertionRate[2]):
                    print("SPEA2 win - bonus run!")
                    bonusEvals[0] = nrOfDirectionalSolutionsToEvolve
                    bonusEvals[1] = 0
                    bonusEvals[2] = 0
                if (insertionRate[1] > insertionRate[0]) and (insertionRate[1] > insertionRate[2]):
                    print("DE win - bonus run!")
                    bonusEvals[0] = 0
                    bonusEvals[1] = nrOfDirectionalSolutionsToEvolve
                    bonusEvals[2] = 0
                if (insertionRate[2] > insertionRate[0]) and (insertionRate[2] > insertionRate[1]):
                    print("Directional win - no bonus!")
                    bonusEvals[0] = 0
                    bonusEvals[1] = 0
                    bonusEvals[2] = nrOfDirectionalSolutionsToEvolve
            else:
                print("Test run - no bonus!")
                bonusEvals[0] = 0
                bonusEvals[1] = 0
                bonusEvals[2] = nrOfDirectionalSolutionsToEvolve

            testRun = not testRun
            insertionRate[0] = 0.0
            insertionRate[1] = 0.0
            insertionRate[2] = 0.0

            pool_1 = pool_1 + combi
            pool_2 = pool_2 + combi
            print("Sizes: " + str(len(pool_1)) + " " + str(len(pool_2)))
            pool_1 = self.r.replace(pool_1[:pool_1_size], pool_1[pool_1_size:])
            pool_2 = self.r.replace(pool_2[:pool_2_size], pool_2[pool_2_size:])

        self.__clear_Nfe_history(directionalArchive)

        hVal1 = h.compute([s.objectives for s in pool_1])
        hVal2 = h.compute([s.objectives for s in pool_2])
        hVal3 = h.compute([s.objectives for s in offspringPop3])

        newGen = int(evaluations / self.report_interval)
        if newGen > current_gen:
            print("Hypervolume: " + str(newGen) + " - " + str(hVal1) + " - " + str(hVal2) + " - " + str(hVal3))
            combi = combi + pool_1 + pool_2 + offspringPop3
            combi = self.r.replace(combi[:self.population_size * 2], combi[self.population_size * 2:])
            hval = h.compute([s.objectives for s in combi])
            for j in range(current_gen, newGen):
                generational_hv.append(hval)
            current_gen = newGen

    # return the final combined non-dominated set of maximum size = (populationSize * 2)
    combiAll: List[FloatSolution] = []
    combiAll = combiAll + pool_1 + pool_2 + pool_A
    combiAll = self.r.replace(combiAll[:self.population_size * 2], combiAll[self.population_size * 2:])
    return combiAll
def get_algorithm_instance(algo_name):
    algos = {
        'smpso': SMPSO(problem=objective_function,
                       swarm_size=swarm_size,
                       mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
                       leaders=CrowdingDistanceArchive(100),
                       termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)),
        'omopso': OMOPSO(problem=objective_function,
                         swarm_size=swarm_size,
                         epsilon=0.0075,
                         uniform_mutation=UniformMutation(probability=mutation_probability, perturbation=0.5),
                         non_uniform_mutation=NonUniformMutation(mutation_probability, perturbation=0.5,
                                                                 max_iterations=int(max_evaluations / swarm_size)),
                         leaders=CrowdingDistanceArchive(100),
                         termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)),
        'nsgaii': NSGAII(problem=objective_function,
                         population_size=30,
                         offspring_population_size=30,
                         mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
                         crossover=SBXCrossover(probability=1.0, distribution_index=20),
                         termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)),
        'spea2': SPEA2(problem=objective_function,
                       population_size=30,
                       offspring_population_size=30,
                       mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
                       crossover=SBXCrossover(probability=1.0, distribution_index=20),
                       termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)),
        'moead': MOEAD(problem=objective_function,
                       population_size=30,
                       crossover=DifferentialEvolutionCrossover(CR=1.0, F=0.5, K=0.5),
                       mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
                       aggregative_function=Tschebycheff(dimension=objective_function.number_of_objectives),
                       neighbor_size=5,
                       neighbourhood_selection_probability=0.9,
                       max_number_of_replaced_solutions=2,
                       weight_files_path='resources/MOEAD_weights',
                       termination_criterion=StoppingByEvaluations(max_evaluations=700)),
        'ibea': IBEA(problem=objective_function,
                     kappa=1.0,
                     population_size=30,
                     offspring_population_size=30,
                     mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
                     crossover=SBXCrossover(probability=1.0, distribution_index=20),
                     termination_criterion=StoppingByEvaluations(max_evaluations))
    }
    return algos[algo_name]
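# Hypothetical driver for get_algorithm_instance() above (not from the original source).
# It assumes the module-level globals used inside the factory (objective_function,
# swarm_size, mutation_probability, max_evaluations) are already defined.
if __name__ == "__main__":
    for name in ("smpso", "nsgaii", "spea2"):
        algorithm = get_algorithm_instance(name)
        algorithm.run()
        front = algorithm.get_result()
        print(f"{name}: {len(front)} solutions, "
              f"{algorithm.total_computing_time:.2f} s")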
from jmetal.util.solution import (
    print_function_values_to_file,
    print_variables_to_file,
    read_solutions,
)
from jmetal.util.termination_criterion import StoppingByEvaluations

if __name__ == "__main__":
    problem = Srinivas()
    problem.reference_front = read_solutions(filename="resources/reference_front/Srinivas.pf")

    max_evaluations = 25000
    algorithm = NSGAII(
        problem=problem,
        population_size=100,
        offspring_population_size=100,
        mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
        crossover=SBXCrossover(probability=1.0, distribution_index=20),
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
        dominance_comparator=DominanceComparator(),
    )

    algorithm.run()
    front = algorithm.get_result()

    # Save results to file
    print_function_values_to_file(front, "FUN." + algorithm.label)
    print_variables_to_file(front, "VAR." + algorithm.label)

    print(f"Algorithm: {algorithm.get_name()}")
def dtlz_test(p: FloatProblemGD, label: str = '', experiment: int = 50):
    # p.reference_front = read_solutions(filename='resources/reference_front/DTLZ2.3D.pf')
    max_evaluations = 25000
    # references = ReferenceDirectionFromSolution(p)
    algorithm = NSGA3C(
        problem=p,
        population_size=100,
        reference_directions=UniformReferenceDirectionFactory(p.instance_.n_obj, n_points=92),
        mutation=PolynomialMutation(probability=1.0 / p.number_of_variables, distribution_index=20),
        crossover=SBXCrossover(probability=1.0, distribution_index=30),
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)
    )

    bag = []
    total_time = 0
    for i in range(experiment):
        algorithm = NSGA3C(
            problem=p,
            population_size=92,
            reference_directions=UniformReferenceDirectionFactory(p.instance_.n_obj, n_points=91),
            mutation=PolynomialMutation(probability=1.0 / p.number_of_variables, distribution_index=20),
            crossover=SBXCrossover(probability=1.0, distribution_index=30),
            termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)
        )
        progress_bar = ProgressBarObserver(max=max_evaluations)
        algorithm.observable.register(progress_bar)
        algorithm.run()
        total_time += algorithm.total_computing_time
        bag = bag + algorithm.get_result()

    print(len(bag))
    print('Total computing time:', total_time)
    print('Average time: ', str(total_time / experiment))
    print_solutions_to_file(bag, DIRECTORY_RESULTS + 'Solutions.bag._class_' + label + algorithm.label)

    ranking = FastNonDominatedRanking()
    ranking.compute_ranking(bag)
    front_ = ranking.get_subfront(0)
    print('Front 0 size : ', len(front_))

    alabels = []
    for obj in range(p.number_of_objectives):
        alabels.append('Obj-' + str(obj))

    plot_front = Plot(title='Pareto front approximation' + ' ' + label, axis_labels=alabels)
    plot_front.plot(front_,
                    label=label + 'F0 ' + algorithm.label,
                    filename=DIRECTORY_RESULTS + 'F0_class_' + 'original_' + label + algorithm.label,
                    format='png')

    class_fronts = [[], [], [], []]
    for s in front_:
        _class = p.classifier.classify(s)
        if _class[0] > 0:
            class_fronts[0].append(s)
        elif _class[1] > 0:
            class_fronts[1].append(s)
        elif _class[2] > 0:
            class_fronts[2].append(s)
        else:
            class_fronts[3].append(s)

    print(len(class_fronts[0]), len(class_fronts[1]), len(class_fronts[2]), len(class_fronts[3]))

    _front = class_fronts[0] + class_fronts[1]
    if len(_front) == 0:
        _front = class_fronts[2] + class_fronts[3]
    print('Class : ', len(_front))

    # Save results to file
    print_solutions_to_file(_front, DIRECTORY_RESULTS + 'Class_F0' + label + algorithm.label)

    print(f'Algorithm: {algorithm.get_name()}')
    print(f'Problem: {p.get_name()}')

    plot_front = Plot(title=label + 'F_' + p.get_name(), axis_labels=alabels)
    plot_front.plot(_front,
                    label=label + 'F_' + p.get_name(),
                    filename=DIRECTORY_RESULTS + 'Class_F0' + label + p.get_name(),
                    format='png')