Example #1
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution]()):
        """ This class implements the SMPSO algorithm as described in

        * SMPSO: A new PSO-based metaheuristic for multi-objective optimization
        * MCDM 2009. DOI: `<http://dx.doi.org/10.1109/MCDM.2009.4938830/>`_.

        The implementation of SMPSO provided in jMetalPy follows the algorithm template described in the algorithm
        templates section of the documentation.

        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        """
        super(SMPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator

        self.evaluations = 0

        self.c1_min = 1.5
        self.c1_max = 2.5
        self.c2_min = 1.5
        self.c2_max = 2.5
        self.r1_min = 0.0
        self.r1_max = 1.0
        self.r2_min = 0.0
        self.r2_max = 1.0
        self.min_weight = 0.1
        self.max_weight = 0.1

        self.change_velocity1 = -1
        self.change_velocity2 = -1

        self.dominance_comparator = DominanceComparator()

        self.speed = numpy.zeros(
            (self.swarm_size, self.problem.number_of_variables), dtype=float)
        self.delta_max = numpy.empty(problem.number_of_variables)
        self.delta_min = numpy.empty(problem.number_of_variables)
        for i in range(problem.number_of_variables):
            self.delta_max[i] = (self.problem.upper_bound[i] -
                                 self.problem.lower_bound[i]) / 2.0

        self.delta_min = -1.0 * self.delta_max
Example #2
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None,
                 levy: int = 0,
                 levy_decay: int = 0):
        """ This class implements the Multi-Objective variant of chaotic Quantum Behaved Particle Swarm Optimization
        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        :param levy: enable the Lévy walk (non-zero turns it on).
        :param levy_decay: enable decay of the Lévy step size over evaluations (non-zero turns it on).
        """
        super(MOQPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator
        self.levy = levy
        self.levy_decay = levy_decay
        self.prev_gbest = None
        self.hypervolume_calculator = HyperVolume(reference_point)

        self.evaluations = 0
        self.beta_swarm = 1.2
        self.g = 0.95
        self.particle_history = {}
        self.objective_history = {0: [], 1: []}
        self.dominance_comparator = DominanceComparator()
        self.constrictors = [
            (problem.upper_bound[i] - problem.lower_bound[i]) / 5000.0
            for i in range(problem.number_of_variables)
        ]

        self.prev_hypervolume = 0
        self.current_hv = 0

        self.hv_changes = []

        self.beta = 3 / 2
        self.sigma = (gamma(1 + self.beta) * sin(pi * self.beta / 2) / (gamma(
            (1 + self.beta) / 2) * self.beta * 2**((self.beta - 1) / 2)))**(
                1 / self.beta)
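The `sigma` computed above is the scale factor from Mantegna's algorithm, which the Lévy walk later uses to draw heavy-tailed step lengths. A minimal standalone sketch of that sampling step (the function name `levy_step` is illustrative, not part of the class above):

from math import gamma, sin, pi

import numpy as np


def levy_step(beta: float = 1.5) -> float:
    """Draw one Lévy-distributed step length with Mantegna's algorithm."""
    sigma = (gamma(1 + beta) * sin(pi * beta / 2) /
             (gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta)
    u = np.random.normal(0.0, sigma)  # numerator sample, scaled by sigma
    v = np.random.normal(0.0, 1.0)    # denominator sample
    return u / abs(v) ** (1 / beta)   # heavy-tailed step; occasional large jumps are expected


# A few example draws.
print([round(levy_step(), 3) for _ in range(5)])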
Example #3
class NonDominatedSolutionListArchive(Archive[S]):

    def __init__(self):
        super(NonDominatedSolutionListArchive, self).__init__()
        self.comparator = DominanceComparator()

    def add(self, solution: S) -> bool:
        is_dominated = False
        is_contained = False

        if len(self.solution_list) == 0:
            self.solution_list.append(solution)
            return True
        else:
            number_of_deleted_solutions = 0

            # New copy of list and enumerate
            for index, current_solution in enumerate(list(self.solution_list)):
                is_dominated_flag = self.comparator.compare(solution, current_solution)
                if is_dominated_flag == -1:
                    del self.solution_list[index-number_of_deleted_solutions]
                    number_of_deleted_solutions += 1
                elif is_dominated_flag == 1:
                    is_dominated = True
                    break
                elif is_dominated_flag == 0:
                    if EqualSolutionsComparator().compare(solution, current_solution) == 0:
                        is_contained = True
                        break

        if not is_dominated and not is_contained:
            self.solution_list.append(solution)
            return True

        return False
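A brief usage sketch of the archive semantics above. It assumes the class is available together with jMetalPy's FloatSolution (constructed with the same signature used in the test cases of Example #11); the objective values are made up for illustration:

archive = NonDominatedSolutionListArchive()

a = FloatSolution(3, 2, 0, [], [])
a.objectives = [1.0, 4.0]
b = FloatSolution(3, 2, 0, [], [])
b.objectives = [2.0, 3.0]   # non-dominated with respect to a
c = FloatSolution(3, 2, 0, [], [])
c.objectives = [0.5, 2.0]   # dominates both a and b

print(archive.add(a))              # True: the first solution is always kept
print(archive.add(b))              # True: mutually non-dominated with a
print(archive.add(c))              # True: a and b are removed from the archive
print(len(archive.solution_list))  # 1: only c survives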
Example #4
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None):
        """ This class implements the Multi-Objective variant of Quantum Behaved PSO algorithm  as described in
        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        """
        super(MOQPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator

        self.hypervolume_calculator = HyperVolume(reference_point)

        self.evaluations = 0

        self.g = 0.95

        self.dominance_comparator = DominanceComparator()
        self.constrictors = [
            (problem.upper_bound[i] - problem.lower_bound[i]) / 5000.0
            for i in range(problem.number_of_variables)
        ]

        self.prev_hypervolume = 0
        self.current_hv = 0

        self.hv_changes = []
Example #5
    def execute(self, front: List[S]) -> S:
        if front is None:
            raise Exception('The front is null')
        elif len(front) == 0:
            raise Exception('The front is empty')

        result = front[0]
        for solution in front[1:]:
            if DominanceComparator().compare(solution, result) < 0:
                result = solution

        return result
Example #6
    def compute_ranking(self, solution_list: List[S]):
        # number of solutions dominating solution ith
        dominating_ith = [0 for _ in range(len(solution_list))]

        # list of solutions dominated by solution ith
        ith_dominated = [[] for _ in range(len(solution_list))]

        # front[i] contains the list of solutions belonging to front i
        front = [[] for _ in range(len(solution_list) + 1)]

        for p in range(len(solution_list) - 1):
            for q in range(p + 1, len(solution_list)):
                dominance_test_result = DominanceComparator().compare(
                    solution_list[p], solution_list[q])
                self.number_of_comparisons += 1

                if dominance_test_result == -1:
                    ith_dominated[p].append(q)
                    dominating_ith[q] += 1
                elif dominance_test_result == 1:
                    ith_dominated[q].append(p)
                    dominating_ith[p] += 1

        for i in range(len(solution_list)):
            if dominating_ith[i] == 0:
                front[0].append(i)
                solution_list[i].attributes['dominance_ranking'] = 0

        i = 0
        while len(front[i]) != 0:
            i += 1
            for p in front[i - 1]:
                if p < len(ith_dominated):
                    for q in ith_dominated[p]:
                        dominating_ith[q] -= 1
                        if dominating_ith[q] == 0:
                            front[i].append(q)
                            solution_list[q].attributes[
                                'dominance_ranking'] = i

        self.ranked_sublists = [[]] * i
        for j in range(i):
            q = [0] * len(front[j])
            for k in range(len(front[j])):
                q[k] = solution_list[front[j][k]]
            self.ranked_sublists[j] = q

        return self.ranked_sublists
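compute_ranking is the front-construction step of fast non-dominated sorting: front 0 holds the solutions nobody dominates, front 1 the solutions dominated only by members of front 0, and so on, with each solution also receiving a 'dominance_ranking' attribute. A hedged sketch of the returned structure for a tiny population, assuming `ranking` is an instance of the class defining the method above and FloatSolution is constructed as in Example #11:

s1 = FloatSolution(2, 2, 0, [], [])
s1.objectives = [1.0, 1.0]    # non-dominated
s2 = FloatSolution(2, 2, 0, [], [])
s2.objectives = [0.5, 2.0]    # non-dominated
s3 = FloatSolution(2, 2, 0, [], [])
s3.objectives = [2.0, 2.0]    # dominated by s1 (and weakly by s2)

fronts = ranking.compute_ranking([s1, s2, s3])

print(len(fronts))                          # 2
print([s.objectives for s in fronts[0]])    # [[1.0, 1.0], [0.5, 2.0]]
print([s.objectives for s in fronts[1]])    # [[2.0, 2.0]]
print(s3.attributes['dominance_ranking'])   # 1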
Example #7
class NonDominatedSolutionListArchive(Archive[S]):
    def __init__(self):
        super(NonDominatedSolutionListArchive, self).__init__()
        self.comparator = DominanceComparator()

    def add(self, solution: S) -> bool:
        is_dominated = False
        is_contained = False

        if len(self.solution_list) == 0:
            self.solution_list.append(solution)
            return True
        else:
            number_of_deleted_solutions = 0

            # New copy of list and enumerate
            for index, current_solution in enumerate(list(self.solution_list)):
                is_dominated_flag = self.comparator.compare(
                    solution, current_solution)
                if is_dominated_flag == -1:
                    del self.solution_list[index - number_of_deleted_solutions]
                    number_of_deleted_solutions += 1
                elif is_dominated_flag == 1:
                    is_dominated = True
                    break
                elif is_dominated_flag == 0:
                    if EqualSolutionsComparator().compare(
                            solution, current_solution) == 0:
                        is_contained = True
                        break

        if not is_dominated and not is_contained:
            self.solution_list.append(solution)
            return True

        return False
Example #8
class SMPSO(ParticleSwarmOptimization):
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution]()):
        """ This class implements the SMPSO algorithm as described in

        * SMPSO: A new PSO-based metaheuristic for multi-objective optimization
        * MCDM 2009. DOI: `<http://dx.doi.org/10.1109/MCDM.2009.4938830/>`_.

        The implementation of SMPSO provided in jMetalPy follows the algorithm template described in the algorithm
        templates section of the documentation.

        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        """
        super(SMPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator

        self.evaluations = 0

        self.c1_min = 1.5
        self.c1_max = 2.5
        self.c2_min = 1.5
        self.c2_max = 2.5
        self.r1_min = 0.0
        self.r1_max = 1.0
        self.r2_min = 0.0
        self.r2_max = 1.0
        self.min_weight = 0.1
        self.max_weight = 0.1

        self.change_velocity1 = -1
        self.change_velocity2 = -1

        self.dominance_comparator = DominanceComparator()

        self.speed = numpy.zeros(
            (self.swarm_size, self.problem.number_of_variables), dtype=float)
        self.delta_max = numpy.empty(problem.number_of_variables)
        self.delta_min = numpy.empty(problem.number_of_variables)
        for i in range(problem.number_of_variables):
            self.delta_max[i] = (self.problem.upper_bound[i] -
                                 self.problem.lower_bound[i]) / 2.0

        self.delta_min = -1.0 * self.delta_max

    def init_progress(self) -> None:
        self.evaluations = self.swarm_size
        self.leaders.compute_density_estimator()

    def update_progress(self) -> None:
        self.evaluations += self.swarm_size
        self.leaders.compute_density_estimator()

        observable_data = {
            'evaluations': self.evaluations,
            'computing time': self.get_current_computing_time(),
            'population': self.leaders.solution_list,
            'reference_front': self.problem.reference_front
        }

        self.observable.notify_all(**observable_data)

    def is_stopping_condition_reached(self) -> bool:
        return self.evaluations >= self.max_evaluations

    def create_initial_swarm(self) -> List[FloatSolution]:
        swarm = []
        for _ in range(self.swarm_size):
            swarm.append(self.problem.create_solution())
        return swarm

    def evaluate_swarm(self,
                       swarm: List[FloatSolution]) -> List[FloatSolution]:
        return self.evaluator.evaluate(swarm, self.problem)

    def initialize_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(particle)

    def initialize_particle_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            particle.attributes['local_best'] = copy(particle)

    def initialize_velocity(self, swarm: List[FloatSolution]) -> None:
        pass  # Velocity initialized in the constructor

    def update_velocity(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            best_particle = copy(swarm[i].attributes['local_best'])
            best_global = self.select_global_best()

            r1 = round(random.uniform(self.r1_min, self.r1_max), 1)
            r2 = round(random.uniform(self.r2_min, self.r2_max), 1)
            c1 = round(random.uniform(self.c1_min, self.c1_max), 1)
            c2 = round(random.uniform(self.c2_min, self.c2_max), 1)
            wmax = self.max_weight
            wmin = self.min_weight

            for var in range(swarm[i].number_of_variables):
                self.speed[i][var] = \
                    self.__velocity_constriction(
                        self.__constriction_coefficient(c1, c2) *
                        ((self.__inertia_weight(self.evaluations, self.max_evaluations, wmax, wmin)
                          * self.speed[i][var])
                         + (c1 * r1 * (best_particle.variables[var] - swarm[i].variables[var]))
                         + (c2 * r2 * (best_global.variables[var] - swarm[i].variables[var]))
                         ),
                        self.delta_max, self.delta_min, var)

    def update_position(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            particle = swarm[i]

            for j in range(particle.number_of_variables):
                particle.variables[j] += self.speed[i][j]

                if particle.variables[j] < self.problem.lower_bound[j]:
                    particle.variables[j] = self.problem.lower_bound[j]
                    self.speed[i][j] *= self.change_velocity1

                if particle.variables[j] > self.problem.upper_bound[j]:
                    particle.variables[j] = self.problem.upper_bound[j]
                    self.speed[i][j] *= self.change_velocity2

    def perturbation(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            if (i % 6) == 0:
                self.mutation.execute(swarm[i])

    def update_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(copy(particle))

    def update_particle_best(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            flag = self.dominance_comparator.compare(
                swarm[i], swarm[i].attributes['local_best'])
            if flag != 1:
                swarm[i].attributes['local_best'] = copy(swarm[i])

    def get_result(self) -> List[FloatSolution]:
        return self.leaders.solution_list

    def select_global_best(self) -> FloatSolution:
        leaders = self.leaders.solution_list

        if len(leaders) > 2:
            particles = random.sample(leaders, 2)

            if self.leaders.comparator.compare(particles[0], particles[1]) < 1:
                best_global = copy(particles[0])
            else:
                best_global = copy(particles[1])
        else:
            best_global = copy(self.leaders.solution_list[0])

        return best_global

    def __velocity_constriction(self, value: float, delta_max: numpy.ndarray,
                                delta_min: numpy.ndarray, variable_index: int) -> float:
        result = value
        if value > delta_max[variable_index]:
            result = delta_max[variable_index]
        if value < delta_min[variable_index]:
            result = delta_min[variable_index]

        return result

    def __inertia_weight(self, evaluations: int, max_evaluations: int,
                         wmax: float, wmin: float):
        # The inertia weight is kept constant at wmax in this implementation.
        return wmax

    def __constriction_coefficient(self, c1: float, c2: float) -> float:
        rho = c1 + c2
        if rho <= 4:
            result = 1.0
        else:
            result = 2.0 / (2.0 - rho - sqrt(pow(rho, 2.0) - 4.0 * rho))

        return result
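A hedged usage sketch of the constructor shown above. The import paths are assumptions that differ between jMetalPy releases, so treat them as placeholders; the operator and archive parameters are typical choices rather than prescribed values:

# Assumed imports -- module paths vary across jMetalPy versions.
from jmetal.problem import ZDT1
from jmetal.operator import PolynomialMutation
from jmetal.util.archive import CrowdingDistanceArchive

problem = ZDT1()

algorithm = SMPSO(
    problem=problem,
    swarm_size=100,
    max_evaluations=25000,
    mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables,
                                distribution_index=20),
    leaders=CrowdingDistanceArchive(100))

algorithm.run()
front = algorithm.get_result()   # contents of the leaders archive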
Example #9
class MOQPSO(ParticleSwarmOptimization):
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None):
        """ This class implements the Multi-Objective variant of Quantum Behaved PSO algorithm  as described in
        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        """
        super(MOQPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator

        self.hypervolume_calculator = HyperVolume(reference_point)

        self.evaluations = 0

        self.g = 0.95

        self.dominance_comparator = DominanceComparator()
        self.constrictors = [
            (problem.upper_bound[i] - problem.lower_bound[i]) / 5000.0
            for i in range(problem.number_of_variables)
        ]

        self.prev_hypervolume = 0
        self.current_hv = 0

        self.hv_changes = []

    def init_progress(self) -> None:
        self.evaluations = 0
        self.leaders.compute_density_estimator()

    def update_progress(self) -> None:
        self.evaluations += 1
        self.leaders.compute_density_estimator()

        observable_data = {
            'evaluations': self.evaluations,
            'computing time': self.get_current_computing_time(),
            'population': self.leaders.solution_list,
            'reference_front': self.problem.reference_front
        }

        self.observable.notify_all(**observable_data)

    def is_stopping_condition_reached(self) -> bool:
        completion = self.evaluations / float(self.max_evaluations)
        condition1 = self.evaluations >= self.max_evaluations
        condition2 = completion > 0.01 and (self.current_hv -
                                            self.prev_hypervolume) < 10e-10
        self.prev_hypervolume = self.current_hv
        return condition1 or condition2

    def create_initial_swarm(self) -> List[FloatSolution]:
        swarm = []
        for _ in range(self.swarm_size):
            swarm.append(self.problem.create_solution())

        return swarm

    def evaluate_swarm(self,
                       swarm: List[FloatSolution]) -> List[FloatSolution]:
        return self.evaluator.evaluate(swarm, self.problem)

    def initialize_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(particle)

    def initialize_particle_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            particle.attributes['local_best'] = copy(particle)

    def initialize_velocity(self, swarm: List[FloatSolution]) -> None:
        pass  # Velocity initialized in the constructor

    def update_velocity(self, swarm: List[FloatSolution]) -> None:
        pass

    def update_position(self, swarm: List[FloatSolution]) -> None:
        best_global = self.select_global_best()
        self.current_hv = self.hypervolume_calculator.compute(
            self.leaders.solution_list)
        self.hv_changes.append(self.current_hv)
        # print("Iteration : {} HV: {}".format(self.evaluations, self.current_hv))
        for i in range(self.swarm_size):
            particle = swarm[i]
            best_particle = copy(swarm[i].attributes['local_best'])
            best_global = self.select_global_best()

            for j in range(particle.number_of_variables):
                psi_1 = random_uniform(0, 1)
                psi_2 = random_uniform(0, 1)
                P = (psi_1 * best_particle.variables[j] +
                     psi_2 * best_global.variables[j]) / (psi_1 + psi_2)
                u = random_uniform(0, 1)
                L = 1 / self.g * np.abs(particle.variables[j] - P)
                if random_uniform(0, 1) > 0.5:
                    particle.variables[
                        j] = P - self.constrictors[j] * L * np.log(1 / u)
                else:
                    particle.variables[
                        j] = P + self.constrictors[j] * L * np.log(1 / u)
                particle.variables[j] = max(self.problem.lower_bound[j],
                                            particle.variables[j])
                particle.variables[j] = min(self.problem.upper_bound[j],
                                            particle.variables[j])

    def perturbation(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            if (i % 6) == 0:
                self.mutation.execute(swarm[i])

    def update_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(copy(particle))

    def update_particle_best(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            flag = self.dominance_comparator.compare(
                swarm[i], swarm[i].attributes['local_best'])
            if flag != 1:
                swarm[i].attributes['local_best'] = copy(swarm[i])

    def get_result(self) -> List[FloatSolution]:
        return self.leaders.solution_list

    def select_global_best(self) -> FloatSolution:
        leaders = self.leaders.solution_list

        if len(leaders) > 2:
            particles = random.sample(leaders, 2)

            if self.leaders.comparator.compare(particles[0], particles[1]) < 1:
                best_global = copy(particles[0])
            else:
                best_global = copy(particles[1])
        else:
            best_global = copy(self.leaders.solution_list[0])

        return best_global

    def get_hypervolume_history(self):
        return self.hv_changes
Example #10
    def setUp(self):
        self.comparator = DominanceComparator()
Example #11
class DominanceComparatorTestCases(unittest.TestCase):

    def setUp(self):
        self.comparator = DominanceComparator()

    def test_should_dominance_comparator_raise_an_exception_if_the_first_solution_is_null(self):
        solution = None
        solution2 = FloatSolution(3, 2, 0, [], [])
        with self.assertRaises(Exception):
            self.comparator.compare(solution, solution2)

    def test_should_dominance_comparator_raise_an_exception_if_the_second_solution_is_null(self):
        solution = FloatSolution(3, 2, 0, [], [])
        solution2 = None
        with self.assertRaises(Exception):
            self.comparator.compare(solution, solution2)

    def test_should_dominance_comparator_return_zero_if_the_two_solutions_have_one_objective_with_the_same_value(self):
        solution = FloatSolution(3, 1, 0, [], [])
        solution2 = FloatSolution(3, 1, 0, [], [])

        solution.objectives = [1.0]
        solution2.objectives = [1.0]

        self.assertEqual(0, self.comparator.compare(solution, solution2))

    def test_should_dominance_comparator_return_one_if_the_two_solutions_have_one_objective_and_the_second_one_is_lower(self):
        solution = FloatSolution(3, 1, 0, [], [])
        solution2 = FloatSolution(3, 1, 0, [], [])

        solution.objectives = [2.0]
        solution2.objectives = [1.0]

        self.assertEqual(1, self.comparator.compare(solution, solution2))

    def test_should_dominance_comparator_return_minus_one_if_the_two_solutions_have_one_objective_and_the_first_one_is_lower(self):
        solution = FloatSolution(3, 1, 0, [], [])
        solution2 = FloatSolution(3, 1, 0, [], [])

        solution.objectives = [1.0]
        solution2.objectives = [2.0]

        self.assertEqual(-1, self.comparator.compare(solution, solution2))

    def test_should_dominance_comparator_work_properly_case_a(self):
        """ Case A: solution1 has objectives [-1.0, 5.0, 9.0] and solution2 has [2.0, 6.0, 15.0]
        """
        solution = FloatSolution(3, 3, 0, [], [])
        solution2 = FloatSolution(3, 3, 0, [], [])

        solution.objectives = [-1.0, 5.0, 9.0]
        solution2.objectives = [2.0, 6.0, 15.0]

        self.assertEqual(-1, self.comparator.compare(solution, solution2))

    def test_should_dominance_comparator_work_properly_case_b(self):
        """ Case b: solution1 has objectives [-1.0, 5.0, 9.0] and solution2 has [-1.0, 5.0, 10.0]
        """
        solution = FloatSolution(3, 3, 0, [], [])
        solution2 = FloatSolution(3, 3, 0, [], [])

        solution.objectives = [-1.0, 5.0, 9.0]
        solution2.objectives = [-1.0, 5.0, 10.0]

        self.assertEqual(-1, self.comparator.compare(solution, solution2))

    def test_should_dominance_comparator_work_properly_case_c(self):
        """ Case c: solution1 has objectives [-1.0, 5.0, 9.0] and solution2 has [-2.0, 5.0, 9.0]
        """
        solution = FloatSolution(3, 3, 0, [], [])
        solution2 = FloatSolution(3, 3, 0, [], [])

        solution.objectives = [-1.0, 5.0, 9.0]
        solution2.objectives = [-2.0, 5.0, 9.0]

        self.assertEqual(1, self.comparator.compare(solution, solution2))

    def test_should_dominance_comparator_work_properly_case_d(self):
        """ Case d: solution1 has objectives [-1.0, 5.0, 9.0] and solution2 has [-1.0, 5.0, 8.0]
        """
        solution = FloatSolution(3, 3, 0, [], [])
        solution2 = FloatSolution(3, 3, 0, [], [])

        solution.objectives = [-1.0, 5.0, 9.0]
        solution2.objectives = [-1.0, 5.0, 8.0]

        self.assertEqual(1, self.comparator.compare(solution, solution2))

    def test_should_dominance_comparator_work_properly_case_3(self):
        """ Case d: solution1 has objectives [-1.0, 5.0, 9.0] and solution2 has [-2.0, 5.0, 10.0]
        """
        solution = FloatSolution(3, 3, 0, [], [])
        solution2 = FloatSolution(3, 3, 0, [], [])

        solution.objectives = [-1.0, 5.0, 9.0]
        solution2.objectives = [-2.0, 5.0, 10.0]

        self.assertEqual(0, self.comparator.compare(solution, solution2))

    def test_should_dominance_comparator_work_properly_with_constrains_case_1(self):
        """ Case 1: solution1 has a higher degree of constraint violation than solution 2
        """
        solution1 = FloatSolution(3, 3, 0, [], [])
        solution2 = FloatSolution(3, 3, 0, [], [])
        solution1.attributes["overall_constraint_violation"] = -0.1
        solution2.attributes["overall_constraint_violation"] = -0.3

        solution1.objectives = [-1.0, 5.0, 9.0]
        solution2.objectives = [-2.0, 5.0, 10.0]

        self.assertEqual(-1, self.comparator.compare(solution1, solution2))

    def test_should_dominance_comparator_work_properly_with_constrains_case_2(self):
        """ Case 2: solution1 has a lower degree of constraint violation than solution 2
        """
        solution1 = FloatSolution(3, 3, 0, [], [])
        solution2 = FloatSolution(3, 3, 0, [], [])
        solution1.attributes["overall_constraint_violation"] = -0.3
        solution2.attributes["overall_constraint_violation"] = -0.1

        solution1.objectives = [-1.0, 5.0, 9.0]
        solution2.objectives = [-2.0, 5.0, 10.0]

        self.assertEqual(1, self.comparator.compare(solution1, solution2))
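Taken together, the tests pin down the comparator's convention: -1 when the first solution dominates, 1 when the second one does, 0 when neither dominates, with the overall constraint violation checked before the objectives. A self-contained sketch of that convention (an illustration consistent with the tests, not jMetalPy's actual implementation):

def dominance_compare(obj1, obj2, violation1=0.0, violation2=0.0) -> int:
    """Return -1 if the first objective vector dominates, 1 if the second does, 0 otherwise."""
    # Constraint handling first: the less violated solution wins outright.
    if violation1 != violation2:
        return -1 if violation1 > violation2 else 1
    best_is_first = any(a < b for a, b in zip(obj1, obj2))
    best_is_second = any(b < a for a, b in zip(obj1, obj2))
    if best_is_first and not best_is_second:
        return -1
    if best_is_second and not best_is_first:
        return 1
    return 0


print(dominance_compare([-1.0, 5.0, 9.0], [2.0, 6.0, 15.0]))    # -1, matches case A
print(dominance_compare([-1.0, 5.0, 9.0], [-2.0, 5.0, 10.0]))   #  0, matches case 3
print(dominance_compare([-1.0, 5.0, 9.0], [-2.0, 5.0, 10.0],
                        violation1=-0.1, violation2=-0.3))      # -1, matches constraints case 1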
Example #12
class MOQPSO(ParticleSwarmOptimization):
    def __init__(self,
                 problem: FloatProblem,
                 swarm_size: int,
                 max_evaluations: int,
                 mutation: Mutation[FloatSolution],
                 leaders: BoundedArchive[FloatSolution],
                 evaluator: Evaluator[FloatSolution] = SequentialEvaluator[
                     FloatSolution](),
                 reference_point=None,
                 levy: int = 0,
                 levy_decay: int = 0):
        """ This class implements the Multi-Objective variant of chaotic Quantum Behaved Particle Swarm Optimization
        :param problem: The problem to solve.
        :param swarm_size: Swarm size.
        :param max_evaluations: Maximum number of evaluations.
        :param mutation: Mutation operator.
        :param leaders: Archive for leaders.
        :param evaluator: An evaluator object to evaluate the solutions in the population.
        :param levy: enable the Lévy walk (non-zero turns it on).
        :param levy_decay: enable decay of the Lévy step size over evaluations (non-zero turns it on).
        """
        super(MOQPSO, self).__init__()
        self.problem = problem
        self.swarm_size = swarm_size
        self.max_evaluations = max_evaluations
        self.mutation = mutation
        self.leaders = leaders
        self.evaluator = evaluator
        self.levy = levy
        self.levy_decay = levy_decay
        self.prev_gbest = None
        self.hypervolume_calculator = HyperVolume(reference_point)

        self.evaluations = 0
        self.beta_swarm = 1.2
        self.g = 0.95
        self.particle_history = {}
        self.objective_history = {0: [], 1: []}
        self.dominance_comparator = DominanceComparator()
        self.constrictors = [
            (problem.upper_bound[i] - problem.lower_bound[i]) / 5000.0
            for i in range(problem.number_of_variables)
        ]

        self.prev_hypervolume = 0
        self.current_hv = 0

        self.hv_changes = []

        self.beta = 3 / 2
        self.sigma = (gamma(1 + self.beta) * sin(pi * self.beta / 2) / (gamma(
            (1 + self.beta) / 2) * self.beta * 2**((self.beta - 1) / 2)))**(
                1 / self.beta)

    def init_progress(self) -> None:
        self.evaluations = 0
        self.leaders.compute_density_estimator()

    def update_progress(self) -> None:
        self.evaluations += 1
        self.leaders.compute_density_estimator()

        observable_data = {
            'evaluations': self.evaluations,
            'computing time': self.get_current_computing_time(),
            'population': self.leaders.solution_list,
            'reference_front': self.problem.reference_front
        }

        self.observable.notify_all(**observable_data)

    def is_stopping_condition_reached(self) -> bool:
        completion = self.evaluations / float(self.max_evaluations)
        condition1 = self.evaluations >= self.max_evaluations
        tolerance_cond = False
        '''fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x,y,z = [],[],[]
        for particle in self.swarm:
            x.append(particle.variables[0])
            y.append(particle.variables[1])
            z.append(particle.objectives[0])
        ax.scatter(x, y, z, c='r', marker='o')
        plt.show()
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        x,y,z = [],[],[]
        for particle in self.swarm:
            x.append(particle.variables[2])
            y.append(particle.variables[3])
            z.append(particle.objectives[1])
        ax.scatter(x, y, z, c='r', marker='o')
        plt.show()
        '''
        for variableno in range(4):
            if variableno not in list(self.particle_history.keys()):
                self.particle_history[variableno] = []
            temp = []
            for particle in self.swarm:
                temp.append(particle.variables[variableno])
            self.particle_history[variableno].append(temp)
        temp1 = []
        for particle in self.swarm:
            temp1.append(particle.objectives[0])
        self.objective_history[0].append(temp1)
        self.beta_swarm = 1.2 - (self.evaluations /
                                 self.max_evaluations) * (0.7)
        if (self.prev_gbest is not None):
            gbest = self.evaluator.evaluate([self.prev_gbest], self.problem)
            gbest = gbest[0]
            alpha = 1 / gbest.number_of_objectives
            old_score = 0
            for i in range(gbest.number_of_objectives):
                old_score += alpha * gbest.objectives[i]
            gbest = self.select_global_best()
            gbest = self.evaluator.evaluate([gbest], self.problem)
            gbest = gbest[0]
            new_score = 0
            for i in range(gbest.number_of_objectives):
                new_score += alpha * gbest.objectives[i]
            if np.abs(new_score - old_score) < 10e-6:
                tolerance_cond = True
        condition2 = completion > 0.1 and tolerance_cond
        self.prev_hypervolume = self.current_hv
        if condition1 or condition2:
            xs = np.array(self.particle_history[0])
            ys = np.array(self.particle_history[1])
            fig = plt.figure()
            ax = fig.add_subplot(111)
            sct, = ax.plot([], [], "o", markersize=2)

            def update(ifrm, xa, ya):
                sct.set_data(xa[ifrm], ya[ifrm])

            ax.set_xlim(-0.2, 1.2)
            ax.set_ylim(-0.2, 1.2)
            ani = animation.FuncAnimation(fig,
                                          update,
                                          self.evaluations,
                                          fargs=(xs, ys),
                                          interval=1000 / 1,
                                          repeat=False)
            plt.show()
            xs = np.array(self.particle_history[2])
            ys = np.array(self.particle_history[3])
            fig = plt.figure()
            ax = fig.add_subplot(111)
            sct, = ax.plot([], [], "o", markersize=2)
            ax.set_xlim(-0.2, 1.2)
            ax.set_ylim(-0.2, 1.2)
            ani = animation.FuncAnimation(fig,
                                          update,
                                          self.evaluations,
                                          fargs=(xs, ys),
                                          interval=1000 / 1,
                                          repeat=False)
            plt.show()
        return condition1 or condition2

    def create_initial_swarm(self) -> List[FloatSolution]:
        #swarm = lorenz_map(self.swarm_size)
        '''
        new_solution = FloatSolution(number_of_variables, number_of_objectives, number_of_constraints,
                                     lower_bound, upper_bound)
        
        
        '''
        swarm = self.problem.create_solution(self.swarm_size)
        return swarm

    def evaluate_swarm(self,
                       swarm: List[FloatSolution]) -> List[FloatSolution]:
        return self.evaluator.evaluate(swarm, self.problem)

    def initialize_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(particle)

    def initialize_particle_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            particle.attributes['local_best'] = copy(particle)

    def initialize_velocity(self, swarm: List[FloatSolution]) -> None:
        pass  # Velocity initialized in the constructor

    def update_velocity(self, swarm: List[FloatSolution]) -> None:
        pass

    def update_position(self, swarm: List[FloatSolution]) -> None:
        self.best_global = self.select_global_best()
        #print(self.leaders.solution_list)
        #input()
        self.prev_gbest = self.best_global
        self.current_hv = self.hypervolume_calculator.compute(
            self.leaders.solution_list)
        self.hv_changes.append(self.current_hv)
        # print("Iteration : {} HV: {}".format(self.evaluations, self.current_hv))
        mbest = []
        for i in range(swarm[0].number_of_variables):
            mbest.append([])
        for i in range(self.swarm_size):
            particle = swarm[i]
            for vari in range(swarm[i].number_of_variables):
                mbest[vari].append(
                    copy(swarm[i].attributes['local_best'].variables[vari]))
        for i in range(len(mbest)):
            mbest[i] = sum(mbest[i]) / self.swarm_size

        for i in range(self.swarm_size):
            particle = swarm[i]
            #eu_dist = np.linalg.norm(np.array(particle.variables)-np.array(self.prev_gbest.variables))
            best_particle = copy(swarm[i].attributes['local_best'])
            #best_global = self.select_global_best()
            # print(best_global)

            for j in range(particle.number_of_variables):
                psi_1 = random_uniform(0, 1)
                psi_2 = random_uniform(0, 1)
                P = (psi_1 * best_particle.variables[j] +
                     psi_2 * self.best_global.variables[j]) / (psi_1 + psi_2)
                u = random_uniform(0, 1)

                #levy part here
                levy_decayed = 1
                if self.levy:
                    l_u = np.random.normal(0, 1) * self.sigma
                    l_v = np.random.normal(0, 1)
                    step = l_u / abs(l_v)**(1 / self.beta)
                    stepsize = 0.01 * step * (1 /
                                              (0.000001 + particle.variables[j]
                                               - self.prev_gbest.variables[j]))
                    levy_decayed = stepsize
                    if self.levy_decay:
                        levy_decayed *= 5 * (
                            0.001)**(self.evaluations /
                                     (self.max_evaluations * 0.5)) + 1
                        #if self.evaluations == int(self.max_evaluations*0.5):
                        #    levy_decayed = 1

                if random_uniform(0, 1) > 0.5:
                    particle.variables[
                        j] = P - self.beta_swarm * self.constrictors[
                            j] * mbest[j] * np.log(1 / u) * levy_decayed
                else:
                    particle.variables[
                        j] = P + self.beta_swarm * self.constrictors[
                            j] * mbest[j] * np.log(1 / u) * levy_decayed

                particle.variables[j] = max(self.problem.lower_bound[j],
                                            particle.variables[j])
                particle.variables[j] = min(self.problem.upper_bound[j],
                                            particle.variables[j])

    def perturbation(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            if (i % 6) == 0:
                self.mutation.execute(swarm[i])

    def update_global_best(self, swarm: List[FloatSolution]) -> None:
        for particle in swarm:
            self.leaders.add(copy(particle))

    def update_particle_best(self, swarm: List[FloatSolution]) -> None:
        for i in range(self.swarm_size):
            flag = self.dominance_comparator.compare(
                swarm[i], swarm[i].attributes['local_best'])
            if flag != 1:
                swarm[i].attributes['local_best'] = copy(swarm[i])

    def get_result(self) -> List[FloatSolution]:
        return self.leaders.solution_list

    def select_global_best(self) -> FloatSolution:
        leaders = self.leaders.solution_list

        if len(leaders) > 2:
            particles = random.sample(leaders, 2)

            if self.leaders.comparator.compare(particles[0], particles[1]) < 1:
                best_global = copy(particles[0])
            else:
                best_global = copy(particles[1])
        else:
            best_global = copy(self.leaders.solution_list[0])

        return best_global

    def get_hypervolume_history(self):
        return self.hv_changes
Example #13
    def __init__(self):
        super(NonDominatedSolutionListArchive, self).__init__()
        self.comparator = DominanceComparator()
Example #14
    def __init__(self):
        super(NonDominatedSolutionListArchive, self).__init__()
        self.comparator = DominanceComparator()
Example #15
    def __init__(self, comparator: Comparator = DominanceComparator()):
        super(BinaryTournamentSelection, self).__init__()
        self.comparator = comparator
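BinaryTournamentSelection draws two random candidates from the front and returns the one preferred by the supplied comparator. A hedged usage sketch, again reusing jMetalPy's FloatSolution as constructed in Example #11; the population objectives are made up:

selection = BinaryTournamentSelection(comparator=DominanceComparator())

population = []
for objectives in ([1.0, 4.0], [2.0, 3.0], [0.5, 2.0]):
    s = FloatSolution(2, 2, 0, [], [])
    s.objectives = objectives
    population.append(s)

winner = selection.execute(population)
print(winner.objectives)   # stochastic; [0.5, 2.0] wins whenever it enters the tournament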