def test_should_hypervolume_return_the_correct_value_when_applied_to_the_ZDT1_reference_front(self):
        problem = ZDT1(rf_path='resources/reference_front/ZDT1.pf')
        reference_point = [1, 1]

        hv = HyperVolume(reference_point)
        value = hv.compute(problem.reference_front)

        self.assertAlmostEqual(0.666, value, delta=0.001)
    def test_should_hypervolume_return_the_correct_value_when_applied_to_the_ZDT1_reference_front(
            self):
        problem = ZDT1()
        reference_point = [1, 1]

        hv = HyperVolume(reference_point)
        value = hv.compute(problem.get_reference_front())

        self.assertAlmostEqual(0.666, value, delta=0.001)
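
The expected value follows from the analytical ZDT1 front, f2 = 1 - sqrt(f1) for f1 in [0, 1]: with reference point (1, 1) the dominated area is 1 - integral_0^1 (1 - sqrt(x)) dx = 2/3, approximately 0.6667, which lies within the 0.001 tolerance around 0.666. A minimal numerical check of that integral (a standalone sketch, not part of either test class):

# Midpoint-rule check of the hypervolume of the analytical ZDT1 front
# f2(x) = 1 - sqrt(x) with reference point (1, 1): HV = integral of sqrt(x) over [0, 1].
n = 100_000
dx = 1.0 / n
hv = sum(((i + 0.5) * dx) ** 0.5 * dx for i in range(n))
print(round(hv, 4))  # ~0.6667
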
Example #3
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None,
                 levy: int = 0,
                 levy_decay: int = 0):
        """ This class implements the Multi-Objective variant of chaotic Quantum Behaved Particle Swarm Optimization
        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        :param levy: set to 1 (or any nonzero value) to enable the Lévy walk step.
        :param levy_decay: set to 1 (or any nonzero value) to decay the Lévy step over the run.
        """
        super(MOQPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator
        self.levy = levy
        self.levy_decay = levy_decay
        self.prev_gbest = None
        self.hypervolume_calculator = HyperVolume(reference_point)

        self.evaluations = 0
        self.beta_swarm = 1.2
        self.g = 0.95
        self.particle_history = {}
        self.objective_history = {0: [], 1: []}
        self.dominance_comparator = DominanceComparator()
        self.constrictors = [
            (problem.upper_bound[i] - problem.lower_bound[i]) / 5000.0
            for i in range(problem.number_of_variables)
        ]

        self.prev_hypervolume = 0
        self.current_hv = 0

        self.hv_changes = []

        self.beta = 3 / 2
        self.sigma = (gamma(1 + self.beta) * sin(pi * self.beta / 2) / (gamma(
            (1 + self.beta) / 2) * self.beta * 2**((self.beta - 1) / 2)))**(
                1 / self.beta)
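
The sigma computed at the end of this constructor is the Mantegna scale factor for drawing Lévy-distributed steps with exponent beta = 3/2; a step is then obtained by scaling a normal sample with sigma and dividing by |v|^(1/beta). A minimal sketch of that sampling step (variable names here are illustrative, not from the snippet):

import numpy as np
from math import gamma, sin, pi

beta = 3 / 2
sigma = (gamma(1 + beta) * sin(pi * beta / 2) /
         (gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta)
u = np.random.normal(0.0, sigma)   # numerator sample, scaled by the Mantegna factor
v = np.random.normal(0.0, 1.0)     # denominator sample
step = u / abs(v) ** (1 / beta)    # heavy-tailed Levy step
print(step)
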
    def test_should_hypervolume_return_5_0(self):
        reference_point = [2, 2, 2]

        solution1 = Solution(1, 3)
        solution1.objectives = [1, 0, 1]

        solution2 = Solution(1, 3)
        solution2.objectives = [0, 1, 0]

        front = [solution1, solution2]

        hv = HyperVolume(reference_point)
        value = hv.compute(front)

        self.assertEqual(5.0, value)
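
The expected value can be verified by inclusion-exclusion: relative to the reference point (2, 2, 2), the point (1, 0, 1) dominates a box of volume (2-1)(2-0)(2-1) = 2, the point (0, 1, 0) a box of volume (2-0)(2-1)(2-0) = 4, and the two boxes overlap in [1, 2] x [1, 2] x [1, 2] with volume 1, so the union is 2 + 4 - 1 = 5. The same arithmetic as a one-off check:

v1 = (2 - 1) * (2 - 0) * (2 - 1)        # box dominated by (1, 0, 1)
v2 = (2 - 0) * (2 - 1) * (2 - 0)        # box dominated by (0, 1, 0)
overlap = (2 - 1) * (2 - 1) * (2 - 1)   # intersection of the two boxes
print(v1 + v2 - overlap)                # 5
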
Example #6
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None):
        """ This class implements the Multi-Objective variant of Quantum Behaved PSO algorithm  as described in
        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        """
        super(MOQPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator

        self.hypervolume_calculator = HyperVolume(reference_point)

        self.evaluations = 0

        self.g = 0.95

        self.dominance_comparator = DominanceComparator()
        self.constrictors = [
            (problem.upper_bound[i] - problem.lower_bound[i]) / 5000.0
            for i in range(problem.number_of_variables)
        ]

        self.prev_hypervolume = 0
        self.current_hv = 0

        self.hv_changes = []
        progress_bar = ProgressBarObserver(step=10, maximum=250)
        #algorithm.observable.register(observer=observer)
        algorithm.observable.register(observer=progress_bar)

        algorithm.run()
        front = algorithm.get_result()

        # Plot frontier to file
        #pareto_front = ScatterMatplotlib(plot_title='NSGAII for IoT-Min', number_of_objectives=problem.number_of_objectives)
        #pareto_front.plot(front, reference=problem.get_reference_front(), output='NSGAII-IoT-Min', show=False)

        # Save variables to file
        SolutionList.print_function_values_to_file(
            front, 'FUN.NSGAII.' + problem.run_id + '.' + problem.get_name())
        SolutionList.print_variables_to_file(
            front, 'VAR.NSGAII.' + problem.run_id + '.' + problem.get_name())

        reference_point = [1, 1, 1, 1]
        hv = HyperVolume(reference_point)
        value = hv.compute(front)
        with open("HV." + problem.run_id + '.' + problem.get_name(),
                  "w") as text_file:
            print(f"{value}", file=text_file)
        print('Algorithm (binary problem): ' + algorithm.get_name())
        print('Problem: ' + problem.get_name())
        print('HyperVolume: %f' % value)
        print('Computing time: ' + str(algorithm.total_computing_time))
        problem.sf.plog.logError(
            'Repeated solutions:' +
            str(len(problem.sf.js_engine_helper.tested_solutions)) + '\n')
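
The hypervolume written to the HV file can be cross-checked by reloading the saved objective values. The sketch below assumes the FUN file holds one solution per line with space-separated objectives (the layout produced by print_function_values_to_file) and uses a placeholder path, since the real name depends on run_id and the problem name:

from jmetal.core.solution import Solution

fun_path = 'FUN.NSGAII.run0.MyProblem'  # placeholder: use the file produced by the run
front = []
with open(fun_path) as fun_file:
    for line in fun_file:
        objectives = [float(value) for value in line.split()]
        solution = Solution(1, len(objectives))
        solution.objectives = objectives
        front.append(solution)

print(HyperVolume([1, 1, 1, 1]).compute(front))
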
Example #8
class MOQPSO(ParticleSwarmOptimization):
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None):
        """ This class implements the Multi-Objective variant of Quantum Behaved PSO algorithm  as described in
        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        """
        super(MOQPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator

        self.hypervolume_calculator = HyperVolume(reference_point)

        self.evaluations = 0

        self.g = 0.95

        self.dominance_comparator = DominanceComparator()
        self.constrictors = [
            (problem.upper_bound[i] - problem.lower_bound[i]) / 5000.0
            for i in range(problem.number_of_variables)
        ]

        self.prev_hypervolume = 0
        self.current_hv = 0

        self.hv_changes = []

    def init_progress(self) -> None:
        self.evaluations = 0
        self.leaders.compute_density_estimator()

    def update_progress(self) -> None:
        self.evaluations += 1
        self.leaders.compute_density_estimator()

        observable_data = {
            'evaluations': self.evaluations,
            'computing time': self.get_current_computing_time(),
            'population': self.leaders.solution_list,
            'reference_front': self.problem.reference_front
        }

        self.observable.notify_all(**observable_data)

    def is_stopping_condition_reached(self) -> bool:
        completion = self.evaluations / float(self.max_evaluations)
        condition1 = self.evaluations >= self.max_evaluations
        condition2 = completion > 0.01 and (self.current_hv -
                                            self.prev_hypervolume) < 10e-10
        self.prev_hypervolume = self.current_hv
        return condition1 or condition2

    def create_initial_swarm(self) -> List[FloatSolution]:
        swarm = []
        for _ in range(self.swarm_size):
            swarm.append(self.problem.create_solution())

        return swarm

    def evaluate_swarm(self,
                       swarm: List[FloatSolution]) -> List[FloatSolution]:
        return self.evaluator.evaluate(swarm, self.problem)

    def initialize_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(particle)

    def initialize_particle_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            particle.attributes['local_best'] = copy(particle)

    def initialize_velocity(self, swarm: List[FloatSolution]) -> None:
        pass  # Velocity initialized in the constructor

    def update_velocity(self, swarm: List[FloatSolution]) -> None:
        pass

    def update_position(self, swarm: List[FloatSolution]) -> None:
        best_global = self.select_global_best()
        self.current_hv = self.hypervolume_calculator.compute(
            self.leaders.solution_list)
        self.hv_changes.append(self.current_hv)
        # print("Iteration : {} HV: {}".format(self.evaluations, self.current_hv))
        for i in range(self.swarm_size):
            particle = swarm[i]
            best_particle = copy(swarm[i].attributes['local_best'])
            best_global = self.select_global_best()

            for j in range(particle.number_of_variables):
                psi_1 = random_uniform(0, 1)
                psi_2 = random_uniform(0, 1)
                P = (psi_1 * best_particle.variables[j] +
                     psi_2 * best_global.variables[j]) / (psi_1 + psi_2)
                u = random_uniform(0, 1)
                L = 1 / self.g * np.abs(particle.variables[j] - P)
                if random_uniform(0, 1) > 0.5:
                    particle.variables[
                        j] = P - self.constrictors[j] * L * np.log(1 / u)
                else:
                    particle.variables[
                        j] = P + self.constrictors[j] * L * np.log(1 / u)
                particle.variables[j] = max(self.problem.lower_bound[j],
                                            particle.variables[j])
                particle.variables[j] = min(self.problem.upper_bound[j],
                                            particle.variables[j])

    def perturbation(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            if (i % 6) == 0:
                self.mutation.execute(swarm[i])

    def update_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(copy(particle))

    def update_particle_best(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            flag = self.dominance_comparator.compare(
                swarm[i], swarm[i].attributes['local_best'])
            if flag != 1:
                swarm[i].attributes['local_best'] = copy(swarm[i])

    def get_result(self) -> List[FloatSolution]:
        return self.leaders.solution_list

    def select_global_best(self) -> FloatSolution:
        leaders = self.leaders.solution_list

        if len(leaders) > 2:
            particles = random.sample(leaders, 2)

            if self.leaders.comparator.compare(particles[0], particles[1]) < 1:
                best_global = copy(particles[0])
            else:
                best_global = copy(particles[1])
        else:
            best_global = copy(self.leaders.solution_list[0])

        return best_global

    def get_hypervolume_history(self):
        return self.hv_changes
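
In update_position above, each variable is resampled around a local attractor P, a random convex combination of the particle's personal best and a leader chosen by binary tournament from the archive, with spread L = (1/g)|x - P| scaled by the per-variable constrictor and an exponential factor ln(1/u). A single-variable restatement of that rule (a sketch with illustrative names, not part of the class):

import numpy as np
from random import uniform

def qpso_position_sample(x, personal_best, global_best, g=0.95, constrictor=1e-4):
    """One QPSO-style draw for a single decision variable, mirroring update_position."""
    psi_1, psi_2 = uniform(0, 1), uniform(0, 1)
    P = (psi_1 * personal_best + psi_2 * global_best) / (psi_1 + psi_2)  # local attractor
    u = uniform(0, 1)
    L = (1 / g) * abs(x - P)                       # spread around the attractor
    sign = -1.0 if uniform(0, 1) > 0.5 else 1.0    # same branch structure as the snippet
    return P + sign * constrictor * L * np.log(1 / u)
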
Example #9
from jmetal.problem import Kursawe, Fonseca, Schaffer, ZDT1, ZDT2, ZDT3, ZDT4, Viennet2
from jmetal.operator import Polynomial, SBX, BinaryTournamentSelection, Uniform
from objective_functions import get_crs_objective, get_drs_objective, interior_score, surface_score
from jmetal.component.quality_indicator import HyperVolume
from matplotlib import pyplot as plt
import pandas as pd
import numpy as np

problem_names = ["kursawe", "fonseca", "zdt1", "zdt2", "zdt3", "zdt4"]
problems = [Viennet2()]
sm_hv = []
q_hv = []

for i,problem in enumerate(problems):
    print("solving for {}".format(problem_names[i]))
    hv_comp = HyperVolume([5]*problem.number_of_objectives)
    smpso = SMPSO(
            problem=problem,
            swarm_size=100,
            max_evaluations=100000,
            mutation=Polynomial(probability=0.3, distribution_index=10),
            leaders=CrowdingDistanceArchive(100),
            reference_point=[5] * problem.number_of_objectives
        )
    smpso.run()
    print("SMPSO HV {}".format(hv_comp.compute(smpso.get_result())))
    print("SMPSO ITERATIONS {}".format(smpso.evaluations))
    moqpso = MOQPSO(
        problem=problem,
        swarm_size=100,
        max_evaluations=100000,
Example #10
algorithm_list = []

for problem in problem_list:
    algorithm_list.append(
        ('NSGAII_A',
         NSGAII(problem=problem,
                population_size=100,
                max_evaluations=25000,
                mutation=NullMutation(),
                crossover=SBX(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(
                    comparator=RankingAndCrowdingDistanceComparator()))))
    algorithm_list.append(
        ('NSGAII_B',
         NSGAII(problem=problem,
                population_size=100,
                max_evaluations=25000,
                mutation=Polynomial(probability=1.0 /
                                    problem.number_of_variables,
                                    distribution_index=20),
                crossover=SBX(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(
                    comparator=RankingAndCrowdingDistanceComparator()))))

study = Experiment(algorithm_list, n_runs=2)
study.run()

# Compute quality indicators
metric_list = [HyperVolume(reference_point=[1, 1])]

print(study.compute_metrics(metric_list))
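
For the hypervolume numbers produced by compute_metrics to be comparable, the reference point must be at least as bad as every objective value in the fronts being measured (for minimization, no smaller in any objective); solutions outside that box contribute nothing to the indicator. A small guard one could run on a front before computing the metric (a sketch, not part of the Experiment API):

def reference_point_covers_front(front, reference_point):
    """True if every solution is at least as good as the reference point in all
    objectives (minimization convention), so the whole front contributes to the HV."""
    return all(
        all(objective <= ref for objective, ref in zip(solution.objectives, reference_point))
        for solution in front
    )
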
Example #11
class MOQPSO(ParticleSwarmOptimization):
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None,
                 levy: int = 0,
                 levy_decay: int = 0):
        """ This class implements the Multi-Objective variant of chaotic Quantum Behaved Particle Swarm Optimization
        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        :param levy: set to 1 (or any nonzero value) to enable the Lévy walk step.
        :param levy_decay: set to 1 (or any nonzero value) to decay the Lévy step over the run.
        """
        super(MOQPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator
        self.levy = levy
        self.levy_decay = levy_decay
        self.prev_gbest = None
        self.hypervolume_calculator = HyperVolume(reference_point)

        self.evaluations = 0
        self.beta_swarm = 1.2
        self.g = 0.95
        self.particle_history = {}
        self.objective_history = {0: [], 1: []}
        self.dominance_comparator = DominanceComparator()
        self.constrictors = [
            (problem.upper_bound[i] - problem.lower_bound[i]) / 5000.0
            for i in range(problem.number_of_variables)
        ]

        self.prev_hypervolume = 0
        self.current_hv = 0

        self.hv_changes = []

        self.beta = 3 / 2
        self.sigma = (gamma(1 + self.beta) * sin(pi * self.beta / 2) / (gamma(
            (1 + self.beta) / 2) * self.beta * 2**((self.beta - 1) / 2)))**(
                1 / self.beta)

    def init_progress(self) -> None:
        self.evaluations = 0
        self.leaders.compute_density_estimator()

    def update_progress(self) -> None:
        self.evaluations += 1
        self.leaders.compute_density_estimator()

        observable_data = {
            'evaluations': self.evaluations,
            'computing time': self.get_current_computing_time(),
            'population': self.leaders.solution_list,
            'reference_front': self.problem.reference_front
        }

        self.observable.notify_all(**observable_data)

    def is_stopping_condition_reached(self) -> bool:
        completion = self.evaluations / float(self.max_evaluations)
        condition1 = self.evaluations >= self.max_evaluations
        tolerance_cond = False
        '''fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x,y,z = [],[],[]
        for particle in self.swarm:
            x.append(particle.variables[0])
            y.append(particle.variables[1])
            z.append(particle.objectives[0])
        ax.scatter(x, y, z, c='r', marker='o')
        plt.show()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x,y,z = [],[],[]
        for particle in self.swarm:
            x.append(particle.variables[2])
            y.append(particle.variables[3])
            z.append(particle.objectives[1])
        ax.scatter(x, y, z, c='r', marker='o')
        plt.show()
        '''
        for variableno in range(4):
            if variableno not in list(self.particle_history.keys()):
                self.particle_history[variableno] = []
            temp = []
            for particle in self.swarm:
                temp.append(particle.variables[variableno])
            self.particle_history[variableno].append(temp)
        temp1 = []
        for particle in self.swarm:
            temp1.append(particle.objectives[0])
        self.objective_history[0].append(temp1)
        self.beta_swarm = 1.2 - (self.evaluations /
                                 self.max_evaluations) * (0.7)
        if (self.prev_gbest is not None):
            gbest = self.evaluator.evaluate([self.prev_gbest], self.problem)
            gbest = gbest[0]
            alpha = 1 / gbest.number_of_objectives
            old_score = 0
            for i in range(gbest.number_of_objectives):
                old_score += alpha * gbest.objectives[i]
            gbest = self.select_global_best()
            gbest = self.evaluator.evaluate([gbest], self.problem)
            gbest = gbest[0]
            new_score = 0
            for i in range(gbest.number_of_objectives):
                new_score += alpha * gbest.objectives[i]
            if np.abs(new_score - old_score) < 10e-6:
                tolerance_cond = True
        condition2 = completion > 0.1 and tolerance_cond
        self.prev_hypervolume = self.current_hv
        if condition1 or condition2:
            xs = np.array(self.particle_history[0])
            ys = np.array(self.particle_history[1])
            fig = plt.figure()
            ax = fig.add_subplot(111)
            sct, = ax.plot([], [], "o", markersize=2)

            def update(ifrm, xa, ya):
                sct.set_data(xa[ifrm], ya[ifrm])

            ax.set_xlim(-0.2, 1.2)
            ax.set_ylim(-0.2, 1.2)
            ani = animation.FuncAnimation(fig,
                                          update,
                                          self.evaluations,
                                          fargs=(xs, ys),
                                          interval=1000 / 1,
                                          repeat=False)
            plt.show()
            xs = np.array(self.particle_history[2])
            ys = np.array(self.particle_history[3])
            fig = plt.figure()
            ax = fig.add_subplot(111)
            sct, = ax.plot([], [], "o", markersize=2)
            ax.set_xlim(-0.2, 1.2)
            ax.set_ylim(-0.2, 1.2)
            ani = animation.FuncAnimation(fig,
                                          update,
                                          self.evaluations,
                                          fargs=(xs, ys),
                                          interval=1000 / 1,
                                          repeat=False)
            plt.show()
        return condition1 or condition2

    def create_initial_swarm(self) -> List[FloatSolution]:
        #swarm = lorenz_map(self.swarm_size)
        '''
        new_solution = FloatSolution(number_of_variables, number_of_objectives, number_of_constraints,
                                     lower_bound, upper_bound)
        
        
        '''
        swarm = self.problem.create_solution(self.swarm_size)
        return swarm

    def evaluate_swarm(self,
                       swarm: List[FloatSolution]) -> List[FloatSolution]:
        return self.evaluator.evaluate(swarm, self.problem)

    def initialize_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(particle)

    def initialize_particle_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            particle.attributes['local_best'] = copy(particle)

    def initialize_velocity(self, swarm: List[FloatSolution]) -> None:
        pass  # Velocity initialized in the constructor

    def update_velocity(self, swarm: List[FloatSolution]) -> None:
        pass

    def update_position(self, swarm: List[FloatSolution]) -> None:
        self.best_global = self.select_global_best()
        #print(self.leaders.solution_list)
        #input()
        self.prev_gbest = self.best_global
        self.current_hv = self.hypervolume_calculator.compute(
            self.leaders.solution_list)
        self.hv_changes.append(self.current_hv)
        # print("Iteration : {} HV: {}".format(self.evaluations, self.current_hv))
        mbest = []
        for i in range(swarm[0].number_of_variables):
            mbest.append([])
        for i in range(self.swarm_size):
            particle = swarm[i]
            for vari in range(swarm[i].number_of_variables):
                mbest[vari].append(
                    copy(swarm[i].attributes['local_best'].variables[vari]))
        for i in range(len(mbest)):
            mbest[i] = sum(mbest[i]) / self.swarm_size

        for i in range(self.swarm_size):
            particle = swarm[i]
            #eu_dist = np.linalg.norm(np.array(particle.variables)-np.array(self.prev_gbest.variables))
            best_particle = copy(swarm[i].attributes['local_best'])
            #best_global = self.select_global_best()
            #rint(best_global)

            for j in range(particle.number_of_variables):
                psi_1 = random_uniform(0, 1)
                psi_2 = random_uniform(0, 1)
                P = (psi_1 * best_particle.variables[j] +
                     psi_2 * self.best_global.variables[j]) / (psi_1 + psi_2)
                u = random_uniform(0, 1)

                #levy part here
                levy_decayed = 1
                if self.levy:
                    l_u = np.random.normal(0, 1) * self.sigma
                    l_v = np.random.normal(0, 1)
                    step = l_u / abs(l_v)**(1 / self.beta)
                    stepsize = 0.01 * step * (1 /
                                              (0.000001 + particle.variables[j]
                                               - self.prev_gbest.variables[j]))
                    levy_decayed = stepsize
                    if self.levy_decay:
                        levy_decayed *= 5 * (
                            0.001)**(self.evaluations /
                                     (self.max_evaluations * 0.5)) + 1
                        #if self.evaluations == int(self.max_evaluations*0.5):
                        #    levy_decayed = 1

                if random_uniform(0, 1) > 0.5:
                    particle.variables[
                        j] = P - self.beta_swarm * self.constrictors[
                            j] * mbest[j] * np.log(1 / u) * levy_decayed
                else:
                    particle.variables[
                        j] = P + self.beta_swarm * self.constrictors[
                            j] * mbest[j] * np.log(1 / u) * levy_decayed

                particle.variables[j] = max(self.problem.lower_bound[j],
                                            particle.variables[j])
                particle.variables[j] = min(self.problem.upper_bound[j],
                                            particle.variables[j])

    def perturbation(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            if (i % 6) == 0:
                self.mutation.execute(swarm[i])

    def update_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(copy(particle))

    def update_particle_best(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            flag = self.dominance_comparator.compare(
                swarm[i], swarm[i].attributes['local_best'])
            if flag != 1:
                swarm[i].attributes['local_best'] = copy(swarm[i])

    def get_result(self) -> List[FloatSolution]:
        return self.leaders.solution_list

    def select_global_best(self) -> FloatSolution:
        leaders = self.leaders.solution_list

        if len(leaders) > 2:
            particles = random.sample(leaders, 2)

            if self.leaders.comparator.compare(particles[0], particles[1]) < 1:
                best_global = copy(particles[0])
            else:
                best_global = copy(particles[1])
        else:
            best_global = copy(self.leaders.solution_list[0])

        return best_global

    def get_hypervolume_history(self):
        return self.hv_changes
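
Compared to Example #8, this variant centres the spread on mbest, the per-variable mean of all personal bests, shrinks it with the linearly decaying beta_swarm, and can further modulate it with the Lévy step built from sigma. The mbest loop in update_position is a per-column average; a compact standalone version (a sketch assuming local bests are stored exactly as in the snippet):

import numpy as np

def mean_best(swarm):
    """Per-variable mean of the particles' personal bests ('local_best' attribute)."""
    local_bests = np.array([particle.attributes['local_best'].variables for particle in swarm])
    return local_bests.mean(axis=0)   # one value per decision variable
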
Example #12
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None):
        """ This class implements the SMPSO algorithm as described in

        * SMPSO: A new PSO-based metaheuristic for multi-objective optimization
        * MCDM 2009. DOI: `<http://dx.doi.org/10.1109/MCDM.2009.4938830/>`_.

        The implementation of SMPSO provided in jMetalPy follows the algorithm template described in the algorithm
        templates section of the documentation.

        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        """
        super(SMPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator

        self.hypervolume_calculator = HyperVolume(reference_point)
        self.evaluations = 0

        self.c1_min = 0.1
        self.c1_max = 0.5
        self.c2_min = 0.8
        self.c2_max = 1.5
        self.r1_min = 0.0
        self.r1_max = 1.0
        self.r2_min = 0.0
        self.r2_max = 1.0
        self.min_weight = 0.1
        self.max_weight = 0.1

        self.change_velocity1 = -1
        self.change_velocity2 = -1

        self.dominance_comparator = DominanceComparator()

        self.speed = numpy.zeros(
            (self.swarm_size, self.problem.number_of_variables), dtype=float)
        self.delta_max, self.delta_min = numpy.empty(problem.number_of_variables),\
                                         numpy.empty(problem.number_of_variables)
        for i in range(problem.number_of_variables):
            self.delta_max[i] = (self.problem.upper_bound[i] -
                                 self.problem.lower_bound[i]) / 1000.0

        self.delta_min = -1.0 * self.delta_max

        self.hv_history = []

        self.current_hv = 0

        self.prev_hypervolume = 0
Example #13
class SMPSO(ParticleSwarmOptimization):
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None):
        """ This class implements the SMPSO algorithm as described in

        * SMPSO: A new PSO-based metaheuristic for multi-objective optimization
        * MCDM 2009. DOI: `<http://dx.doi.org/10.1109/MCDM.2009.4938830/>`_.

        The implementation of SMPSO provided in jMetalPy follows the algorithm template described in the algorithm
        templates section of the documentation.

        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        """
        super(SMPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator

        self.hypervolume_calculator = HyperVolume(reference_point)
        self.evaluations = 0

        self.c1_min = 0.1
        self.c1_max = 0.5
        self.c2_min = 0.8
        self.c2_max = 1.5
        self.r1_min = 0.0
        self.r1_max = 1.0
        self.r2_min = 0.0
        self.r2_max = 1.0
        self.min_weight = 0.1
        self.max_weight = 0.1

        self.change_velocity1 = -1
        self.change_velocity2 = -1

        self.dominance_comparator = DominanceComparator()

        self.speed = numpy.zeros(
            (self.swarm_size, self.problem.number_of_variables), dtype=float)
        self.delta_max, self.delta_min = numpy.empty(problem.number_of_variables),\
                                         numpy.empty(problem.number_of_variables)
        for i in range(problem.number_of_variables):
            self.delta_max[i] = (self.problem.upper_bound[i] -
                                 self.problem.lower_bound[i]) / 1000.0

        self.delta_min = -1.0 * self.delta_max

        self.hv_history = []

        self.current_hv = 0

        self.prev_hypervolume = 0

    def init_progress(self) -> None:
        self.evaluations = 0
        self.leaders.compute_density_estimator()

    def update_progress(self) -> None:
        self.evaluations += 1
        self.leaders.compute_density_estimator()

        observable_data = {
            'evaluations': self.evaluations,
            'computing time': self.get_current_computing_time(),
            'population': self.leaders.solution_list,
            'reference_front': self.problem.reference_front
        }

        self.observable.notify_all(**observable_data)

    def is_stopping_condition_reached(self) -> bool:
        completion = self.evaluations / float(self.max_evaluations)
        condition1 = self.evaluations >= self.max_evaluations
        condition2 = completion > 0.05 and (self.current_hv -
                                            self.prev_hypervolume) < 10e-10
        self.prev_hypervolume = self.current_hv
        return condition1 or condition2

    def create_initial_swarm(self) -> List[FloatSolution]:
        swarm = []
        for _ in range(self.swarm_size):
            swarm.append(self.problem.create_solution())
        return swarm

    def evaluate_swarm(self,
                       swarm: List[FloatSolution]) -> List[FloatSolution]:
        return self.evaluator.evaluate(swarm, self.problem)

    def initialize_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(particle)

    def initialize_particle_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            particle.attributes['local_best'] = copy(particle)

    def initialize_velocity(self, swarm: List[FloatSolution]) -> None:
        pass  # Velocity initialized in the constructor

    def update_velocity(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            best_particle = copy(swarm[i].attributes['local_best'])
            best_global = self.select_global_best()
            r1 = round(random.uniform(self.r1_min, self.r1_max), 1)
            r2 = round(random.uniform(self.r2_min, self.r2_max), 1)
            c1 = round(random.uniform(self.c1_min, self.c1_max), 1)
            c2 = round(random.uniform(self.c2_min, self.c2_max), 1)
            wmax = self.max_weight
            wmin = self.min_weight

            for var in range(swarm[i].number_of_variables):
                self.speed[i][var] = \
                    self.__velocity_constriction(
                        self.__constriction_coefficient(c1, c2) *
                        ((self.__inertia_weight(self.evaluations, self.max_evaluations, wmax, wmin)
                          * self.speed[i][var])
                         + (c1 * r1 * (best_particle.variables[var] - swarm[i].variables[var]))
                         + (c2 * r2 * (best_global.variables[var] - swarm[i].variables[var]))
                         ),
                        self.delta_max, self.delta_min, var)

    def update_position(self, swarm: List[FloatSolution]) -> None:
        self.current_hv = self.hypervolume_calculator.compute(
            self.leaders.solution_list)
        self.hv_history.append(self.current_hv)
        #print("Iteration : {} HV: {}".format(self.evaluations, self.current_hv))
        for i in range(self.swarm_size):
            particle = swarm[i]

            for j in range(particle.number_of_variables):
                particle.variables[j] += self.speed[i][j]

                if particle.variables[j] < self.problem.lower_bound[j]:
                    particle.variables[j] = self.problem.lower_bound[j]
                    self.speed[i][j] *= self.change_velocity1

                if particle.variables[j] > self.problem.upper_bound[j]:
                    particle.variables[j] = self.problem.upper_bound[j]
                    self.speed[i][j] *= self.change_velocity2

    def perturbation(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            if (i % 6) == 0:
                self.mutation.execute(swarm[i])

    def update_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(copy(particle))

    def update_particle_best(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            flag = self.dominance_comparator.compare(
                swarm[i], swarm[i].attributes['local_best'])
            if flag != 1:
                swarm[i].attributes['local_best'] = copy(swarm[i])

    def get_result(self) -> List[FloatSolution]:
        return self.leaders.solution_list

    def select_global_best(self) -> FloatSolution:
        leaders = self.leaders.solution_list

        if len(leaders) > 2:
            particles = random.sample(leaders, 2)

            if self.leaders.comparator.compare(particles[0], particles[1]) < 1:
                best_global = copy(particles[0])
            else:
                best_global = copy(particles[1])
        else:
            best_global = copy(self.leaders.solution_list[0])

        return best_global

    def __velocity_constriction(self, value: float, delta_max: [],
                                delta_min: [], variable_index: int) -> float:
        result = value
        if value > delta_max[variable_index]:
            result = delta_max[variable_index]
        if value < delta_min[variable_index]:
            result = delta_min[variable_index]

        return result

    def __inertia_weight(self, evaluations: int, max_evaluations: int,
                         wmax: float, wmin: float):
        # todo ?
        return wmax

    def __constriction_coefficient(self, c1: float, c2: float) -> float:
        rho = c1 + c2
        if rho <= 4:
            result = 1.0
        else:
            result = 2.0 / (2.0 - rho - sqrt(pow(rho, 2.0) - 4.0 * rho))

        return result

    def get_hypervolume_history(self):
        return self.hv_history
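
The private __constriction_coefficient above returns 1.0 whenever phi = c1 + c2 <= 4 and otherwise 2 / (2 - phi - sqrt(phi^2 - 4*phi)); the classical Clerc-Kennedy constriction takes the absolute value of that denominator. With the c1/c2 ranges used in this class (at most 0.5 + 1.5 = 2.0), phi never exceeds 4, so the coefficient is effectively always 1.0. A standalone restatement (a sketch mirroring the method, not a replacement for it):

from math import sqrt

def constriction_coefficient(c1: float, c2: float) -> float:
    phi = c1 + c2
    if phi <= 4:
        return 1.0
    # Same branch as the snippet; Clerc-Kennedy would wrap the denominator in abs().
    return 2.0 / (2.0 - phi - sqrt(phi * phi - 4.0 * phi))
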