def configurar_RandomSearch(self):
    """Build a RandomSearch algorithm for this instance's problem.

    The search stops after ``self.evaluaciones`` fitness evaluations.

    :return: a configured RandomSearch instance (not yet run).
    """
    stop_criterion = StoppingByEvaluations(max_evaluations=self.evaluaciones)
    return RandomSearch(problem=self.problema, termination_criterion=stop_criterion)
def configure_experiment(problems: dict, n_run: int):
    """Create the full list of experiment jobs.

    For every run and every problem, one Job is scheduled for each of the
    three algorithms (NSGAIII, GDE3, SPEA2), all sharing the same evaluation
    budget and multiprocess evaluator settings.

    :param problems: mapping from problem tag (str) to problem instance.
    :param n_run: number of independent repetitions per algorithm/problem pair.
    :return: list of Job objects ready to be executed.
    """
    jobs = []
    num_processes = config.num_cpu
    population = 10
    generations = 10
    # Shared stopping budget for all three algorithms.
    max_evaluations = population * generations

    for run in range(n_run):
        for problem_tag, problem in problems.items():
            # NOTE(review): the original code also built a FloatSolution
            # "reference_point" here, but it was never passed to any algorithm
            # (dead code) and has been removed.
            jobs.append(
                Job(
                    algorithm=NSGAIII(
                        problem=problem,
                        population_size=population,
                        mutation=PolynomialMutation(
                            probability=1.0 / problem.number_of_variables,
                            distribution_index=20),
                        crossover=SBXCrossover(probability=1.0, distribution_index=20),
                        population_evaluator=MultiprocessEvaluator(processes=num_processes),
                        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
                        reference_directions=UniformReferenceDirectionFactory(4, n_points=100),
                    ),
                    algorithm_tag='NSGAIII',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=GDE3(
                        problem=problem,
                        population_size=population,
                        population_evaluator=MultiprocessEvaluator(processes=num_processes),
                        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
                        cr=0.5,
                        f=0.5,
                    ),
                    algorithm_tag='GDE3',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
            jobs.append(
                Job(
                    algorithm=SPEA2(
                        problem=problem,
                        population_size=population,
                        mutation=PolynomialMutation(
                            probability=1.0 / problem.number_of_variables,
                            distribution_index=20),
                        crossover=SBXCrossover(probability=1.0, distribution_index=20),
                        population_evaluator=MultiprocessEvaluator(processes=num_processes),
                        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations),
                        offspring_population_size=population,
                    ),
                    algorithm_tag='SPEA2',
                    problem_tag=problem_tag,
                    run=run,
                )
            )
    return jobs
import time from framework.problems.singleobjective.ackley import Ackley from evolutionary_memetic.memetic_cognitive import MemeticCognitiveAlgorithm, Species import matplotlib.pyplot as plt from jmetal.algorithm.singleobjective import GeneticAlgorithm from jmetal.operator import PolynomialMutation, SBXCrossover, BinaryTournamentSelection from jmetal.util.termination_criterion import StoppingByEvaluations from evolutionary_memetic.memetic import MemeticAlgorithm, MemeticLocalSearch if __name__ == '__main__': problem = Ackley(number_of_variables=150) max_evaluations = 500000 mutation = PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20) local_search = MemeticLocalSearch(problem, mutation, StoppingByEvaluations(500)) memetic_algo = MemeticCognitiveAlgorithm( problem=problem, population_size=5000, offspring_population_size=1000, mutation=mutation, crossover=SBXCrossover(probability=1.0, distribution_index=20), selection=BinaryTournamentSelection(), species1=Species( mutation=mutation, crossover=SBXCrossover(probability=1.0, distribution_index=20), selection=BinaryTournamentSelection(), local_search=local_search, termination_criterion=StoppingByEvaluations(max_evaluations=1000) ), species2=Species( mutation=mutation,
def get_algorithm_instance(algo_name):
    """Build and return the metaheuristic identified by *algo_name*.

    Algorithms are registered as zero-argument factories so only the requested
    one is actually constructed. (The original built every algorithm eagerly,
    which was wasteful — e.g. MOEAD loads its weight files even when another
    algorithm was requested.)

    Relies on module-level settings: objective_function, swarm_size,
    mutation_probability and max_evaluations.

    :param algo_name: one of 'smpso', 'omopso', 'nsgaii', 'spea2', 'moead', 'ibea'.
    :return: a configured (not yet run) algorithm instance.
    :raises KeyError: if *algo_name* is unknown (same as the original dict lookup).
    """
    algos = {
        'smpso': lambda: SMPSO(
            problem=objective_function,
            swarm_size=swarm_size,
            mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
            leaders=CrowdingDistanceArchive(100),
            termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)),
        'omopso': lambda: OMOPSO(
            problem=objective_function,
            swarm_size=swarm_size,
            epsilon=0.0075,
            uniform_mutation=UniformMutation(probability=mutation_probability, perturbation=0.5),
            non_uniform_mutation=NonUniformMutation(
                mutation_probability, perturbation=0.5,
                max_iterations=int(max_evaluations / swarm_size)),
            leaders=CrowdingDistanceArchive(100),
            termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)),
        'nsgaii': lambda: NSGAII(
            problem=objective_function,
            population_size=30,
            offspring_population_size=30,
            mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
            crossover=SBXCrossover(probability=1.0, distribution_index=20),
            termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)),
        'spea2': lambda: SPEA2(
            problem=objective_function,
            population_size=30,
            offspring_population_size=30,
            mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
            crossover=SBXCrossover(probability=1.0, distribution_index=20),
            termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations)),
        'moead': lambda: MOEAD(
            problem=objective_function,
            population_size=30,
            crossover=DifferentialEvolutionCrossover(CR=1.0, F=0.5, K=0.5),
            mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
            aggregative_function=Tschebycheff(dimension=objective_function.number_of_objectives),
            neighbor_size=5,
            neighbourhood_selection_probability=0.9,
            max_number_of_replaced_solutions=2,
            weight_files_path='resources/MOEAD_weights',
            # NOTE(review): MOEAD uses a hard-coded budget of 700 evaluations
            # instead of max_evaluations — kept as-is; confirm it is intentional.
            termination_criterion=StoppingByEvaluations(max_evaluations=700)),
        'ibea': lambda: IBEA(
            problem=objective_function,
            kappa=1.0,
            population_size=30,
            offspring_population_size=30,
            mutation=PolynomialMutation(probability=mutation_probability, distribution_index=20),
            crossover=SBXCrossover(probability=1.0, distribution_index=20),
            termination_criterion=StoppingByEvaluations(max_evaluations)),
    }
    return algos[algo_name]()
def run() -> None:
    """Run the memetic/genetic comparison experiment on Ackley and save results.

    Three execution units are registered (two MemeticCognitiveAlgorithm
    configurations and a plain GeneticAlgorithm baseline), each with two runs,
    then executed by a MultiAlgorithmRunner; results are printed, drawn and
    persisted as JSON.
    """
    problem = Ackley(number_of_variables=150)
    # problem = Griewank(number_of_variables=150)
    # problem = Schwefel(number_of_variables=150)
    # problem = SchafferF7(number_of_variables=150)
    mutation = PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20)
    local_search = MemeticLocalSearch(problem, mutation, StoppingByEvaluations(250))
    local_search2 = MemeticLocalSearch(problem, mutation, StoppingByEvaluations(500))
    max_evaluations = 1000000
    drawing_class = DrawingClass(registered_runs=6)
    target_path = os.path.join(RESULTS_DIR, f"test_{datetime.now().isoformat()}.json")

    def _species(search):
        # Build a FRESH Species per registration: termination criteria are
        # stateful, so instances must not be shared between runs.
        return Species(
            mutation=mutation,
            crossover=SBXCrossover(probability=1.0, distribution_index=20),
            selection=BinaryTournamentSelection(),
            local_search=search,
            termination_criterion=StoppingByEvaluations(max_evaluations=1000)
        )

    def _memetic_params(offspring_size, species2_search):
        # One complete parameter set for MemeticCognitiveAlgorithm; the two
        # configurations differ only in offspring size and species2's local search.
        return {
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": offspring_size,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "species1": _species(local_search),
            "species2": _species(species2_search),
            "local_search": local_search,
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }

    def _genetic_params():
        # Baseline GeneticAlgorithm parameter set (fresh objects per run).
        return {
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 1000,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }

    first_execution_unit = ExecutionUnit(
        algorithm_cls=MemeticCognitiveAlgorithm,
        problem_name="Ackley",
        drawing_fun=drawing_class.draw_avg_function,
        drawing_series_labels=["RUN1", "RUN1"]
    ).register_run(
        parameters=_memetic_params(1000, local_search)
    ).register_run(
        parameters=_memetic_params(1000, local_search)
    )

    second_execution_unit = ExecutionUnit(
        algorithm_cls=MemeticCognitiveAlgorithm,
        problem_name="Ackley",
        drawing_fun=drawing_class.draw_avg_function,
        drawing_series_labels=["RUN2", "RUN2"]
    ).register_run(
        parameters=_memetic_params(2500, local_search2)
    ).register_run(
        parameters=_memetic_params(2500, local_search2)
    )

    # modify genetic to get history (see MemeticCognitiveAlgorithm)
    third_execution_unit = ExecutionUnit(
        algorithm_cls=GeneticAlgorithm,
        problem_name="Ackley",
        drawing_fun=drawing_class.draw_avg_function,
        drawing_series_labels=["GENETIC", "GENETIC"]
    ).register_run(
        parameters=_genetic_params()
    ).register_run(
        parameters=_genetic_params()
    )

    runner = MultiAlgorithmRunner(
        execution_units=[
            first_execution_unit, second_execution_unit, third_execution_unit
        ],
        drawing_properties=DrawingProperties(
            title='Memetic1',
            target_location=os.path.join(RESULTS_DIR, "photo.png"))
    )
    print("Runner starts evaluation.")
    results = runner.run_all()
    print("Results")
    for run_result in results.run_results:
        print(run_result)
    save_execution_history(execution_history=results, path=target_path)
def train(self):
    """Tune SVM hyper-parameters with NSGA-II and train the final model.

    Builds an SVM_Problem from self.Xtrain/self.Ytrain, runs NSGA-II for
    self.maxEvaluations evaluations, plots and saves the resulting front,
    then picks the solution with the lowest normalized objective sum, stores
    its hyper-parameters/masks on self and trains the final SVM.

    :return: the trained SVM model (also stored on self.model).
    """
    problem = SVM_Problem(X=self.Xtrain, Y=self.Ytrain)
    #problem.reference_front = read_solutions(filename='resources/reference_front/ZDT1.pf')
    max_evaluations = self.maxEvaluations
    algorithm = NSGAII(
        problem=problem,
        population_size=self.popsize,
        offspring_population_size=self.popsize,
        mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables,
                                    distribution_index=20),
        crossover=SBXCrossover(probability=1.0, distribution_index=20),
        # Fixed: StoppingByEvaluations takes 'max_evaluations', not 'max'
        # (consistent with every other configuration in this file).
        termination_criterion=StoppingByEvaluations(max_evaluations=max_evaluations))
    algorithm.observable.register(observer=ProgressBarObserver(max=max_evaluations))
    #algorithm.observable.register(observer=VisualizerObserver(reference_front=problem.reference_front))
    algorithm.run()
    front = algorithm.get_result()

    # Plot front
    plot_front = Plot(plot_title='Pareto front approximation',
                      reference_front=None,
                      axis_labels=problem.obj_labels)
    plot_front.plot(front, label=algorithm.label, filename=algorithm.get_name())

    # Plot interactive front
    plot_front = InteractivePlot(plot_title='Pareto front approximation',
                                 axis_labels=problem.obj_labels)
    plot_front.plot(front, label=algorithm.label, filename=algorithm.get_name())

    # Save results to file
    print_function_values_to_file(front, 'FUN.' + algorithm.label)
    print_variables_to_file(front, 'VAR.' + algorithm.label)
    print('Algorithm (continuous problem): ' + algorithm.get_name())
    print(
        "-----------------------------------------------------------------------------"
    )
    print('Problem: ' + problem.get_name())
    print('Computing time: ' + str(algorithm.total_computing_time))

    # Get normalized matrix of results
    normed_matrix = normalize(
        list(map(lambda result: result.objectives, front)))
    # Get the sum of each objective results and select the best (min)
    scores = list(map(lambda item: sum(item), normed_matrix))
    solution = front[scores.index(min(scores))]

    # Get our variables
    self.gamma = solution.variables[0]
    self.C = solution.variables[1]
    self.coef0 = solution.variables[2]
    self.degree = solution.variables[3]
    self.kernel = solution.variables[4]
    self.instances = solution.masks[0]
    self.attributes = solution.masks[1]

    # Select pick a random array with length of the variable
    X = self.Xtrain[self.instances, :]
    X = X[:, self.attributes]
    Y = self.Ytrain[self.instances]
    print(*front, sep=", ")

    # Construct model
    self.model = SVM(Xtrain=X,
                     Ytrain=Y,
                     kernel=self.kernel,
                     C=self.C,
                     degree=self.degree,
                     coef0=self.coef0,
                     gamma=self.gamma,
                     seed=self.seed).train()
    print('Objectives: ', *solution.objectives, sep=", ")
    return self.model
def test_SMPSO(self):
    """Smoke-test: SMPSO should run to completion on the configured problem."""
    SMPSO(problem=self.problem,
          swarm_size=self.population_size,
          mutation=self.mutation,
          leaders=CrowdingDistanceArchive(100),
          # Fixed keyword: StoppingByEvaluations expects 'max_evaluations',
          # matching the other configurations in this file.
          termination_criterion=StoppingByEvaluations(max_evaluations=1000)).run()
# phi_n: float - the weight for speed change influenced by neighborhood. Default is 0.3. # # n_neighbors: int - neighborhood size. Default is 30. # # hops: int - How is neighborhood determined (if neighbors' neighobrs should be also taken into account etc.) # I would not change the number of hops from the default value 1, because it becomes computationally to expensive # due to inefficient implementation. # # use_global: bool - whether to use global best while determining speed change. Default is False and it should not be # changed because it gives poor results, but is left to be compliant with the specification given during lab exercises. # algorithm = WithNeighborsPSO( problem=Rastrigin(100), swarm_size=100, leaders=CrowdingDistanceArchive(100), termination_criterion=StoppingByEvaluations( 100 ) # To get better results increase the number of iterations, I tested for 10 000-50 000. ) algorithm.run() solutions = algorithm.get_result() objectives = solutions[0].objectives variables = solutions[0].variables print("PSO with neighbors") print("Fitness: {}".format(objectives)) # print("Variables: {}".format(variables))