Example 1
    def __init__(
        self,
        problem: Problem,
        population_size: int,
        mutation: Mutation,
        crossover: Crossover,
        number_of_cores: int,
        client,
        selection: Selection = BinaryTournamentSelection(
            MultiComparator([
                FastNonDominatedRanking.get_comparator(),
                CrowdingDistance.get_comparator()
            ])),
        termination_criterion: TerminationCriterion = store.default_termination_criteria,
        dominance_comparator: DominanceComparator = DominanceComparator()):
        super(DistributedNSGAII, self).__init__()
        self.problem = problem
        self.population_size = population_size
        self.mutation_operator = mutation
        self.crossover_operator = crossover
        self.selection_operator = selection
        self.dominance_comparator = dominance_comparator

        self.termination_criterion = termination_criterion
        self.observable.register(termination_criterion)

        self.number_of_cores = number_of_cores
        self.client = client
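The constructor above expects an already-connected Dask client plus the number of cores to use. A minimal wiring sketch, assuming a local Dask cluster, the stock ZDT1 problem and illustrative operator settings (the stopping-criterion keyword differs between jMetalPy releases):

from dask.distributed import Client, LocalCluster

from jmetal.operator import PolynomialMutation, SBXCrossover
from jmetal.problem import ZDT1
from jmetal.util.termination_criterion import StoppingByEvaluations

problem = ZDT1()
number_of_cores = 8  # illustrative; match it to the cluster size
client = Client(LocalCluster(n_workers=number_of_cores))

algorithm = DistributedNSGAII(
    problem=problem,
    population_size=100,
    mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
    crossover=SBXCrossover(probability=1.0, distribution_index=20),
    number_of_cores=number_of_cores,
    client=client,
    termination_criterion=StoppingByEvaluations(max_evaluations=25000),
)
algorithm.run()
front = algorithm.get_result()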
Example 2
 def __init__(
     self,
     problem: DynamicProblem[S],
     population_size: int,
     offspring_population_size: int,
     mutation: Mutation,
     crossover: Crossover,
     selection: Selection = BinaryTournamentSelection(
         MultiComparator([
             FastNonDominatedRanking.get_comparator(),
             CrowdingDistance.get_comparator()
         ])),
     termination_criterion: TerminationCriterion = store.default_termination_criteria,
     population_generator: Generator = store.default_generator,
     population_evaluator: Evaluator = store.default_evaluator,
     dominance_comparator: DominanceComparator = DominanceComparator()):
     super(DynamicNSGAII, self).__init__(
         problem=problem,
         population_size=population_size,
         offspring_population_size=offspring_population_size,
         mutation=mutation,
         crossover=crossover,
         selection=selection,
         population_evaluator=population_evaluator,
         population_generator=population_generator,
         termination_criterion=termination_criterion,
         dominance_comparator=dominance_comparator)
     self.completed_iterations = 0
     self.start_computing_time = 0
     self.total_computing_time = 0
Example 3
    def __init__(
        self,
        problem: Problem,
        population_size: int,
        offspring_population_size: int,
        mutation: Mutation,
        crossover: Crossover,
        termination_criterion: TerminationCriterion = store.default_termination_criteria,
        population_generator: Generator = store.default_generator,
        population_evaluator: Evaluator = store.default_evaluator,
        dominance_comparator: Comparator = store.default_comparator,
    ):
        """
        :param problem: The problem to solve.
        :param population_size: Size of the population.
        :param mutation: Mutation operator (see :py:mod:`jmetal.operator.mutation`).
        :param crossover: Crossover operator (see :py:mod:`jmetal.operator.crossover`).
        """
        multi_comparator = MultiComparator(
            [StrengthRanking.get_comparator(), KNearestNeighborDensityEstimator.get_comparator()]
        )
        selection = BinaryTournamentSelection(comparator=multi_comparator)

        super(SPEA2, self).__init__(
            problem=problem,
            population_size=population_size,
            offspring_population_size=offspring_population_size,
            mutation=mutation,
            crossover=crossover,
            selection=selection,
            termination_criterion=termination_criterion,
            population_evaluator=population_evaluator,
            population_generator=population_generator,
        )
        self.dominance_comparator = dominance_comparator
Example 4
    def __init__(self,
                 reference_directions,
                 problem: Problem,
                 mutation: Mutation,
                 crossover: Crossover,
                 population_size: int = None,
                 selection: Selection = BinaryTournamentSelection(
                     MultiComparator([FastNonDominatedRanking.get_comparator(),
                                      CrowdingDistance.get_comparator()])),
                 termination_criterion: TerminationCriterion = store.default_termination_criteria,
                 population_generator: Generator = store.default_generator,
                 population_evaluator: Evaluator = store.default_evaluator,
                 dominance_comparator: Comparator = store.default_comparator):
        self.reference_directions = reference_directions.compute()
        if not population_size:
            population_size = len(self.reference_directions)
        if self.reference_directions.shape[1] != problem.number_of_objectives:
            raise Exception('Dimensionality of reference points must be equal to the number of objectives')

        super(NSGAIII, self).__init__(
            problem=problem,
            population_size=population_size,
            offspring_population_size=population_size,
            mutation=mutation,
            crossover=crossover,
            selection=selection,
            termination_criterion=termination_criterion,
            population_evaluator=population_evaluator,
            population_generator=population_generator,
            dominance_comparator=dominance_comparator
        )

        self.extreme_points = None
        self.ideal_point = np.full(self.problem.number_of_objectives, np.inf)
        self.worst_point = np.full(self.problem.number_of_objectives, -np.inf)
Example 5
    def _run_so(self):
        """ Runs a single objective EA optimization ()
        """
        self.ea_problem.reset_initial_population_counter()
        if self.algorithm_name == 'SA':
            print("Running SA")
            self.mutation.probability = 1.0
            algorithm = SimulatedAnnealing(
                problem=self.ea_problem,
                mutation=self.mutation,
                termination_criterion=StoppingByEvaluations(max_evaluations=self.max_evaluations)
            )

        else:
            print("Running GA")
            algorithm = GeneticAlgorithm(
                problem=self.ea_problem,
                population_size=self.population_size,
                offspring_population_size=self.population_size,
                mutation=self.mutation,
                crossover=self.crossover,
                selection=BinaryTournamentSelection(),
                termination_criterion=StoppingByEvaluations(
                    max_evaluations=self.max_evaluations)
            )

        algorithm.observable.register(observer=PrintObjectivesStatObserver())
        algorithm.run()

        result = algorithm.solutions
        return result
Example 6
    def __init__(self,
                 problem: Problem,
                 population_size: int,
                 offspring_population_size: int,
                 mutation: Mutation,
                 crossover: Crossover,
                 selection: Selection = BinaryTournamentSelection(
                     MultiComparator([
                         FastNonDominatedRanking.get_comparator(),
                         CrowdingDistance.get_comparator()
                     ])),
                 termination_criterion: TerminationCriterion = store.default_termination_criteria,
                 population_generator: Generator = store.default_generator,
                 population_evaluator: Evaluator = store.default_evaluator,
                 dominance_comparator: Comparator = store.default_comparator,
                 target_value_threshold: List[float] = None,
                 target_pattern: List[int] = None):
        """
        NSGA-II implementation as described in

        * K. Deb, A. Pratap, S. Agarwal and T. Meyarivan, "A fast and elitist
          multiobjective genetic algorithm: NSGA-II," in IEEE Transactions on Evolutionary Computation,
          vol. 6, no. 2, pp. 182-197, Apr 2002. doi: 10.1109/4235.996017

        NSGA-II is a genetic algorithm (GA), i.e. it belongs to the evolutionary algorithms (EAs)
        family. The implementation of NSGA-II provided in jMetalPy follows the evolutionary
        algorithm template described in the algorithm module (:py:mod:`jmetal.core.algorithm`).

        .. note:: A steady-state version of this algorithm can be run by setting the offspring size to 1.

        :param problem: The problem to solve.
        :param population_size: Size of the population.
        :param mutation: Mutation operator (see :py:mod:`jmetal.operator.mutation`).
        :param crossover: Crossover operator (see :py:mod:`jmetal.operator.crossover`).
        :param selection: Selection operator (see :py:mod:`jmetal.operator.selection`).
        """
        super(NSGAII, self).__init__(
            problem=problem,
            population_size=population_size,
            offspring_population_size=offspring_population_size,
            mutation=mutation,
            crossover=crossover,
            selection=selection,
            termination_criterion=termination_criterion,
            population_evaluator=population_evaluator,
            population_generator=population_generator)
        self.dominance_comparator = dominance_comparator
        self.generation = 0
        self.target_pattern = target_pattern
        self.problem_solved = False
        self.target_value_threshold = target_value_threshold

        self.file_pareto_front = os.path.join(
            os.getcwd(), time.strftime("%Y_%m_%d") + '_PARETO_')
        if not os.path.exists(self.file_pareto_front):
            os.mkdir(self.file_pareto_front)
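As the docstring notes, the steady-state variant only requires offspring_population_size=1. A minimal sketch, assuming the stock ZDT1 problem and illustrative operator settings; the extra target_value_threshold and target_pattern parameters of this variant are left at their defaults:

from jmetal.operator import PolynomialMutation, SBXCrossover
from jmetal.problem import ZDT1
from jmetal.util.termination_criterion import StoppingByEvaluations

problem = ZDT1()
algorithm = NSGAII(
    problem=problem,
    population_size=100,
    offspring_population_size=1,  # steady-state: one offspring per iteration
    mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
    crossover=SBXCrossover(probability=0.9, distribution_index=20),
    termination_criterion=StoppingByEvaluations(max_evaluations=25000),
)
algorithm.run()
front = algorithm.get_result()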
Example 7
 def test_NSGAII(self):
     NSGAII(
         problem=self.problem,
         population_size=self.population_size,
         offspring_population_size=self.offspring_size,
         mutation=self.mutation,
         crossover=self.crossover,
         selection=BinaryTournamentSelection(comparator=RankingAndCrowdingDistanceComparator()),
         termination_criterion=StoppingByEvaluations(max=1000)
     ).run()
Example 8
def configure_experiment(problems: dict, n_run: int):
    jobs = []
    max_evaluations = 25000

    for run in range(n_run):
        for problem_tag, problem in problems.items():
            jobs.append(
                Job(
                    algorithm=NSGAII(
                        problem=problem,
                        population_size=100,
                        offspring_population_size=100,
                        mutation=PolynomialMutation(
                            probability=1.0 / problem.number_of_variables,
                            distribution_index=20),
                        crossover=SBXCrossover(probability=1.0,
                                               distribution_index=20),
                        selection=BinaryTournamentSelection(
                            comparator=RankingAndCrowdingDistanceComparator()),
                        termination_criterion=StoppingByEvaluations(
                            max=max_evaluations)),
                    algorithm_tag='NSGAII',
                    problem_tag=problem_tag,
                    run=run,
                ))
            jobs.append(
                Job(
                    algorithm=GDE3(problem=problem,
                                   population_size=100,
                                   cr=0.5,
                                   f=0.5,
                                   termination_criterion=StoppingByEvaluations(
                                       max=max_evaluations)),
                    algorithm_tag='GDE3',
                    problem_tag=problem_tag,
                    run=run,
                ))
            jobs.append(
                Job(
                    algorithm=SMPSO(
                        problem=problem,
                        swarm_size=100,
                        mutation=PolynomialMutation(
                            probability=1.0 / problem.number_of_variables,
                            distribution_index=20),
                        leaders=CrowdingDistanceArchive(100),
                        termination_criterion=StoppingByEvaluations(
                            max=max_evaluations)),
                    algorithm_tag='SMPSO',
                    problem_tag=problem_tag,
                    run=run,
                ))

    return jobs
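The jobs built by configure_experiment are meant to be handed to jMetalPy's experiment runner. A sketch of the typical usage, assuming the ZDT benchmark problems and an illustrative output directory:

from jmetal.lab.experiment import Experiment
from jmetal.problem import ZDT1, ZDT2

jobs = configure_experiment(problems={'ZDT1': ZDT1(), 'ZDT2': ZDT2()}, n_run=25)

experiment = Experiment(output_dir='data', jobs=jobs)
experiment.run()
# Quality-indicator summaries can afterwards be produced from the output directory,
# e.g. with jmetal.lab.experiment.generate_summary_from_experiment.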
Example 9
    def __init__(
        self,
        problem: Problem,
        reference_point: Solution,
        population_size: int,
        offspring_population_size: int,
        mutation: Mutation,
        crossover: Crossover,
        termination_criterion: TerminationCriterion = store.default_termination_criteria,
        population_generator: Generator = store.default_generator,
        population_evaluator: Evaluator = store.default_evaluator,
        dominance_comparator: Comparator = store.default_comparator,
    ):
        """This is an implementation of the Hypervolume Estimation Algorithm for Multi-objective Optimization
        proposed in:

        * J. Bader and E. Zitzler. HypE: An Algorithm for Fast Hypervolume-Based Many-Objective
        Optimization. TIK Report 286, Computer Engineering and Networks Laboratory (TIK), ETH
        Zurich, November 2008.

        It uses the exact hypervolume-based indicator formulation, which, once computed, guides both
        the environmental selection and the binary tournament selection operator.

        Please note that, as per the publication above, the evaluator and replacement should not be
        changed. It also requires the problem to have a reference_point with objective values defined, e.g.

        problem = ZDT1()
        reference_point = FloatSolution(problem.number_of_variables,problem.number_of_objectives, [0], [1])
        reference_point.objectives = [1., 1.]
        """

        selection = BinaryTournamentSelection(
            comparator=SolutionAttributeComparator(key="fitness",
                                                   lowest_is_best=False))
        self.ranking_fitness = RankingAndFitnessSelection(
            population_size,
            dominance_comparator=dominance_comparator,
            reference_point=reference_point)
        self.reference_point = reference_point
        self.dominance_comparator = dominance_comparator

        super(HYPE, self).__init__(
            problem=problem,
            population_size=population_size,
            offspring_population_size=offspring_population_size,
            mutation=mutation,
            crossover=crossover,
            selection=selection,
            termination_criterion=termination_criterion,
            population_evaluator=population_evaluator,
            population_generator=population_generator,
        )
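Following the reference_point requirement spelled out in the docstring, a minimal sketch of instantiating HYPE on ZDT1 (the FloatSolution construction mirrors the docstring; operator settings are illustrative):

from jmetal.core.solution import FloatSolution
from jmetal.operator import PolynomialMutation, SBXCrossover
from jmetal.problem import ZDT1
from jmetal.util.termination_criterion import StoppingByEvaluations

problem = ZDT1()
reference_point = FloatSolution(problem.number_of_variables, problem.number_of_objectives, [0], [1])
reference_point.objectives = [1.0, 1.0]  # worst acceptable value per objective

algorithm = HYPE(
    problem=problem,
    reference_point=reference_point,
    population_size=100,
    offspring_population_size=100,
    mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
    crossover=SBXCrossover(probability=1.0, distribution_index=20),
    termination_criterion=StoppingByEvaluations(max_evaluations=25000),
)
algorithm.run()
front = algorithm.get_result()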
Example 10
    def __init__(
        self,
        problem: Problem,
        population_size: int,
        neighborhood: Neighborhood,
        archive: BoundedArchive,
        mutation: Mutation,
        crossover: Crossover,
        selection: Selection = BinaryTournamentSelection(
            MultiComparator([
                FastNonDominatedRanking.get_comparator(),
                CrowdingDistance.get_comparator()
            ])),
        termination_criterion: TerminationCriterion = store.default_termination_criteria,
        population_generator: Generator = store.default_generator,
        population_evaluator: Evaluator = store.default_evaluator,
        dominance_comparator: Comparator = store.default_comparator,
    ):
        """
        MOCell implementation as described in:

        :param problem: The problem to solve.
        :param population_size: Size of the population.
        :param mutation: Mutation operator (see :py:mod:`jmetal.operator.mutation`).
        :param crossover: Crossover operator (see :py:mod:`jmetal.operator.crossover`).
        :param selection: Selection operator (see :py:mod:`jmetal.operator.selection`).
        """
        super(MOCell, self).__init__(
            problem=problem,
            population_size=population_size,
            offspring_population_size=1,
            mutation=mutation,
            crossover=crossover,
            selection=selection,
            termination_criterion=termination_criterion,
            population_evaluator=population_evaluator,
            population_generator=population_generator,
        )
        self.dominance_comparator = dominance_comparator
        self.neighborhood = neighborhood
        self.archive = archive
        self.current_individual = 0
        self.current_neighbors = []

        self.comparator = MultiComparator([
            FastNonDominatedRanking.get_comparator(),
            CrowdingDistance.get_comparator()
        ])
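MOCell evolves one individual per grid cell, so besides the usual operators it needs a neighborhood structure and a bounded archive. A minimal sketch, assuming jMetalPy's C9 grid neighborhood and a crowding-distance archive on ZDT1 (grid size, archive size and operator settings are illustrative):

from jmetal.operator import PolynomialMutation, SBXCrossover
from jmetal.problem import ZDT1
from jmetal.util.archive import CrowdingDistanceArchive
from jmetal.util.neighborhood import C9
from jmetal.util.termination_criterion import StoppingByEvaluations

problem = ZDT1()
algorithm = MOCell(
    problem=problem,
    population_size=100,
    neighborhood=C9(10, 10),               # 10x10 toroidal grid, one solution per cell
    archive=CrowdingDistanceArchive(100),  # bounded external archive
    mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
    crossover=SBXCrossover(probability=1.0, distribution_index=20),
    termination_criterion=StoppingByEvaluations(max_evaluations=25000),
)
algorithm.run()
front = algorithm.get_result()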
Example 11
    def __init__(
        self,
        problem: Problem,
        population_size: int,
        offspring_population_size: int,
        mutation: Mutation,
        crossover: Crossover,
        kappa: float,
        termination_criterion: TerminationCriterion = store.default_termination_criteria,
        population_generator: Generator = store.default_generator,
        population_evaluator: Evaluator = store.default_evaluator,
    ):
        """Epsilon IBEA implementation as described in

        * Zitzler, Eckart, and Simon Künzli. "Indicator-based selection in multiobjective search."
        In International Conference on Parallel Problem Solving from Nature, pp. 832-842. Springer,
        Berlin, Heidelberg, 2004.

        https://link.springer.com/chapter/10.1007/978-3-540-30217-9_84

        IBEA is a genetic algorithm (GA), i.e. it belongs to the evolutionary algorithms (EAs)
        family. The multi-objective search in IBEA is guided by a fitness associated to every solution,
        which is in turn controlled by a binary quality indicator. This implementation uses the so-called
        additive epsilon indicator, along with a binary tournament mating selector.

        :param problem: The problem to solve.
        :param population_size: Size of the population.
        :param mutation: Mutation operator (see :py:mod:`jmetal.operator.mutation`).
        :param crossover: Crossover operator (see :py:mod:`jmetal.operator.crossover`).
        :param kappa: Weight in the fitness computation.
        """

        selection = BinaryTournamentSelection(
            comparator=SolutionAttributeComparator(key="fitness", lowest_is_best=False)
        )
        self.kappa = kappa

        super(IBEA, self).__init__(
            problem=problem,
            population_size=population_size,
            offspring_population_size=offspring_population_size,
            mutation=mutation,
            crossover=crossover,
            selection=selection,
            termination_criterion=termination_criterion,
            population_evaluator=population_evaluator,
            population_generator=population_generator,
        )
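A minimal sketch of instantiating this IBEA on ZDT1; kappa=1.0 and the operator settings are illustrative choices, not values prescribed by the paper:

from jmetal.operator import PolynomialMutation, SBXCrossover
from jmetal.problem import ZDT1
from jmetal.util.termination_criterion import StoppingByEvaluations

problem = ZDT1()
algorithm = IBEA(
    problem=problem,
    kappa=1.0,  # scaling factor applied to the additive epsilon indicator in the fitness
    population_size=100,
    offspring_population_size=100,
    mutation=PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20),
    crossover=SBXCrossover(probability=1.0, distribution_index=20),
    termination_criterion=StoppingByEvaluations(max_evaluations=25000),
)
algorithm.run()
front = algorithm.get_result()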
Example 12
    def __init__(self,
                 problem: MultiTransfer.MultiTransferProblem,
                 population_size: int,
                 offspring_population_size: int,
                 mutation: Mutation,
                 crossover: Crossover,
                 selection: Selection = BinaryTournamentSelection(
                     MultiComparator([FastNonDominatedRanking.get_comparator(),
                                      CrowdingDistance.get_comparator()])),
                 termination_criterion: TerminationCriterion = store.default_termination_criteria,
                 population_generator: Generator = store.default_generator,
                 population_evaluator: Evaluator = store.default_evaluator,
                 dominance_comparator: Comparator = store.default_comparator):
        """
        NSGA-II implementation as described in

        * K. Deb, A. Pratap, S. Agarwal and T. Meyarivan, "A fast and elitist
          multiobjective genetic algorithm: NSGA-II," in IEEE Transactions on Evolutionary Computation,
          vol. 6, no. 2, pp. 182-197, Apr 2002. doi: 10.1109/4235.996017

        NSGA-II is a genetic algorithm (GA), i.e. it belongs to the evolutionary algorithms (EAs)
        family. The implementation of NSGA-II provided in jMetalPy follows the evolutionary
        algorithm template described in the algorithm module (:py:mod:`jmetal.core.algorithm`).

        .. note:: A steady-state version of this algorithm can be run by setting the offspring size to 1.

        :param problem: The problem to solve.
        :param population_size: Size of the population.
        :param mutation: Mutation operator (see :py:mod:`jmetal.operator.mutation`).
        :param crossover: Crossover operator (see :py:mod:`jmetal.operator.crossover`).
        :param selection: Selection operator (see :py:mod:`jmetal.operator.selection`).
        """
        super(NSGAII_MEDA, self).__init__(
            problem=problem,
            population_size=population_size,
            offspring_population_size=offspring_population_size,
            mutation=mutation,
            crossover=crossover,
            selection=selection,
            termination_criterion=termination_criterion,
            population_evaluator=population_evaluator,
            population_generator=population_generator
        )
        self.dominance_comparator = dominance_comparator
Example 13
 def __init__(self, islands_len, iterval, migration_rate, random_groups, random_destination, max_evaluations, problem):
     self.islands_len = islands_len
     self.iterval = iterval
     self.max_evaluations = max_evaluations
     self.problem = problem
     self.migration = Migration(
         migration_rate, random_groups, random_destination)
      self.islands = {}
      for i in range(islands_len):
         # possible to override problem later self.problem
         self.islands[str(i)] = GeneticAlgorithm(
             problem=self.problem,
             population_size=100,
             offspring_population_size=100,
             mutation=UniformMutation(0.006, 20.0),
             crossover=SBXCrossover(0.3, 19.0),
             selection=BinaryTournamentSelection(),
             termination_criterion=StoppingByEvaluations(
                 max_evaluations=iterval)
         )
     print(self.islands)
Example 14
def evaluate(crossover_algo, problem):
  alldata = []
  series = []
  for x in range(10):
    algorithm = GeneticAlgorithm(
        problem=problem,
        population_size=100,
        offspring_population_size=100,
        mutation=PolynomialMutation(1.0 / problem.number_of_variables, 20.0),
        crossover=crossover_algo,
        selection=BinaryTournamentSelection(),
        termination_criterion=StoppingByEvaluations(max_evaluations=500000)
    )
    data = []
    dataobserver = DataObserver(1.0, data)
    algorithm.observable.register(observer=dataobserver)
    algorithm.run()
    result = algorithm.get_result().objectives[0]
    series.append(result) 
    alldata.append(data)

  numpy_array = np.array(alldata)
  transpose = numpy_array.T
  transpose_list = transpose.tolist()

    
  fig = plt.figure(figsize=(60, 42))
  ax = fig.add_axes([0, 0, 1, 1])
  bp = ax.boxplot(transpose_list)
  plt.show()

  print(stats.kruskal(transpose_list[0],transpose_list[1],transpose_list[-1]))
  series = [series] 
  print(np.average(series))

  sp.posthoc_dunn([transpose_list[0],transpose_list[1],transpose_list[-1]], p_adjust = 'holm')
Example 15
    def __init__(self,
                 problem: Problem,
                 population_size: int,
                 offspring_population_size: int,
                 mutation: Mutation,
                 crossover: Crossover,
                 selection: Selection = BinaryTournamentSelection(
                     MultiComparator([SolutionAttributeComparator(key="fitness")])),
                 termination_criterion: TerminationCriterion = store.default_termination_criteria,
                 population_generator: Generator = store.default_generator,
                 population_evaluator: Evaluator = store.default_evaluator,
                 dominance_comparator: Comparator = store.default_comparator):

        super(MOGA, self).__init__(
            problem=problem,
            population_size=population_size,
            offspring_population_size=offspring_population_size,
            mutation=mutation,
            crossover=crossover,
            selection=selection,
            termination_criterion=termination_criterion,
            population_evaluator=population_evaluator,
            population_generator=population_generator
        )
        self.dominance_comparator = dominance_comparator
Example 16
    def run(self) -> List[S]:
        pool_1_size = self.population_size
        pool_2_size = self.population_size

        selection_operator_1 = BinaryTournamentSelection()
        crossover_operator_1 = IntegerSBXCrossover(1.0, 20.0)
        mutation_operator_1 = IntegerPolynomialMutation(
            1.0 / self.problem.number_of_variables, 20.0)
        selection_operator_2 = DifferentialEvolutionSelection()
        crossover_operator_2 = DifferentialEvolutionCrossover(0.2, 0.5, 0.5)

        dominance = DominanceComparator()

        max_iterations = self.max_iterations
        iterations = 0

        parent_1: List[IntegerSolution] = [None, None]

        generational_hv: List[float] = []

        current_gen = 0
        """Create the initial subpopulation pools and evaluate them"""
        pool_1: List[IntegerSolution] = []
        for i in range(pool_1_size):
            pool_1.append(self.problem.create_solution())
            pool_1[i] = self.problem.evaluate(pool_1[i])

        pool_2: List[IntegerSolution] = []
        for i in range(pool_2_size):
            pool_2.append(self.problem.create_solution())
            pool_2[i] = self.problem.evaluate(pool_2[i])

        evaluations = pool_1_size + pool_2_size

        mix = self.mix_interval

        problem = self.problem

        h = HyperVolume(reference_point=[1] *
                        self.problem.number_of_objectives)

        initial_population = True
        """The main evolutionary cycle"""
        while iterations < max_iterations:
            combi: List[IntegerSolution] = []
            if not initial_population:
                offspring_pop_1: List[IntegerSolution] = []
                offspring_pop_2: List[IntegerSolution] = []
                """Evolve pool 1"""
                for i in range(pool_1_size):
                    parent_1[0] = selection_operator_1.execute(pool_1)
                    parent_1[1] = selection_operator_1.execute(pool_1)

                    child_1: IntegerSolution = crossover_operator_1.execute(
                        parent_1)[0]
                    child_1 = mutation_operator_1.execute(child_1)

                    child_1 = problem.evaluate(child_1)
                    evaluations += 1

                    offspring_pop_1.append(child_1)
                """Evolve pool 2"""
                for i in range(pool_2_size):
                    parent_2: List[
                        IntegerSolution] = selection_operator_2.execute(pool_2)

                    crossover_operator_2.current_individual = pool_2[i]
                    child_2 = crossover_operator_2.execute(parent_2)
                    child_2 = problem.evaluate(child_2[0])

                    evaluations += 1

                    result = dominance.compare(pool_2[i], child_2)

                    if result == -1:
                        offspring_pop_2.append(pool_2[i])
                    elif result == 1:
                        offspring_pop_2.append(child_2)
                    else:
                        offspring_pop_2.append(child_2)
                        offspring_pop_2.append(pool_2[i])

                ind_1 = pool_1[random.randint(0, pool_1_size - 1)]
                ind_2 = pool_2[random.randint(0, pool_2_size - 1)]

                offspring_pop_1.append(ind_1)
                offspring_pop_2.append(ind_2)

                offspring_pop_1.extend(pool_1)
                pool_1 = self.r.replace(offspring_pop_1[:pool_1_size],
                                        offspring_pop_1[pool_1_size:])

                pool_2 = self.r.replace(offspring_pop_2[:pool_2_size],
                                        offspring_pop_2[pool_2_size:])

                mix -= 1
                if mix == 0:
                    """Time to perform fitness sharing"""
                    mix = self.mix_interval
                    combi = combi + pool_1 + pool_2
                    # print("Combi size: ", len(combi))
                    """pool1size/10"""

                    combi = self.r.replace(
                        combi[:int(pool_1_size / 10)],
                        combi[int(pool_1_size / 10):len(combi)],
                    )
                    """
                    print(
                        "Sizes: ",
                        len(pool_1) + len(combi),
                        len(pool_2) + len(combi),
                        "\n",
                    )
                    """
                    pool_1 = self.r.replace(pool_1, combi)

                    pool_2 = self.r.replace(pool_2, combi)

            if initial_population:
                initial_population = False

            iterations += 1
            print("Iterations: ", str(iterations))
            """
            hval_1 = h.compute([s.objectives for s in pool_1])
            hval_2 = h.compute([s.objectives for s in pool_2])
            print("hval_1: ", str(hval_1))
            print("hval_2: ", str(hval_2), "\n")
            """

            new_gen = int(evaluations / self.report_interval)
            if new_gen > current_gen:
                combi = combi + pool_1 + pool_2

                combi = self.r.replace(combi[:(2 * pool_1_size)],
                                       combi[(2 * pool_1_size):])

                hval = h.compute([s.objectives for s in combi])
                for i in range(current_gen, new_gen, 1):
                    generational_hv.append(hval)

                current_gen = new_gen
        """#Write runtime generational HV to file"""
        """Return the first non dominated front"""
        combi_ini: List[IntegerSolution] = []
        combi_ini.extend(pool_1)
        combi_ini.extend(pool_2)
        combi_ini = self.r.replace(
            combi_ini[:pool_1_size + pool_2_size],
            combi_ini[pool_1_size + pool_2_size:],
        )
        return combi_ini
Example 17
from jmetal.util.termination_criterion import StoppingByEvaluations

if __name__ == '__main__':
    problem = TSP(instance='../../resources/TSP_instances/kroA100.tsp')

    print('Cities: ', problem.number_of_variables)

    algorithm = GeneticAlgorithm(
        problem=problem,
        population_size=100,
        offspring_population_size=100,
        mutation=PermutationSwapMutation(1.0 / problem.number_of_variables),
        crossover=PMXCrossover(0.8),
        selection=BinaryTournamentSelection(
            MultiComparator([
                FastNonDominatedRanking.get_comparator(),
                CrowdingDistance.get_comparator()
            ])),
        termination_criterion=StoppingByEvaluations(max=2500000))

    algorithm.observable.register(observer=PrintObjectivesObserver(1000))

    algorithm.run()
    result = algorithm.get_result()

    print('Algorithm: {}'.format(algorithm.get_name()))
    print('Problem: {}'.format(problem.get_name()))
    print('Solution: {}'.format(result.variables))
    print('Fitness: {}'.format(result.objectives[0]))
    print('Computing time: {}'.format(algorithm.total_computing_time))
Example 18
         1323, 1146, 5192, 6547, 343, 7584, 3765, 8660, 9318,
         5098, 5185, 9253, 4495, 892, 5080, 5297, 9275, 7515,
         9729, 6200, 2138, 5480, 860, 8295, 8327, 9629, 4212,
         3087, 5276, 9250, 1835, 9241, 1790, 1947, 8146, 8328,
         973, 1255, 9733, 4314, 6912, 8007, 8911, 6802, 5102,
         5451, 1026, 8029, 6628, 8121, 5509, 3603, 6094, 4447,
         683, 6996, 3304, 3130, 2314, 7788, 8689, 3253, 5920,
         3660, 2489, 8153, 2822, 6132, 7684, 3032, 9949, 59,
         6669, 6334]

    problem = SubsetSum(C, W)

    algorithm = GeneticAlgorithm(
        problem=problem,
        population_size=100,
        offspring_population_size=1,
        mutation=BitFlipMutation(probability=0.1),
        crossover=SPXCrossover(probability=0.8),
        selection=BinaryTournamentSelection(),
        termination_criterion=StoppingByEvaluations(max_evaluations=25000)
    )

    algorithm.run()
    subset = algorithm.get_result()

    print('Algorithm: {}'.format(algorithm.get_name()))
    print('Problem: {}'.format(problem.get_name()))
    print('Solution: {}'.format(subset.variables))
    print('Fitness: {}'.format(subset.objectives[0]))
    print('Computing time: {}'.format(algorithm.total_computing_time))
Example 19
def run() -> None:
    problem = Ackley(number_of_variables=150)
    # problem = Griewank(number_of_variables=150)
    # problem = Schwefel(number_of_variables=150)
    # problem = SchafferF7(number_of_variables=150)
    mutation = PolynomialMutation(probability=1.0 / problem.number_of_variables, distribution_index=20)
    local_search = MemeticLocalSearch(problem, mutation, StoppingByEvaluations(250))
    local_search2 = MemeticLocalSearch(problem, mutation, StoppingByEvaluations(500))

    max_evaluations = 1000000

    drawing_class = DrawingClass(registered_runs=6)

    target_path = os.path.join(RESULTS_DIR, f"test_{datetime.now().isoformat()}.json")
    first_execution_unit = ExecutionUnit(
        algorithm_cls=MemeticCognitiveAlgorithm,
        problem_name="Ackley",
        drawing_fun=drawing_class.draw_avg_function,
        drawing_series_labels=["RUN1", "RUN1"]
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 1000,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "species1": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "species2": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "local_search": local_search,
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 1000,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "species1": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "species2": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "local_search": local_search,
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    )

    second_execution_unit = ExecutionUnit(
        algorithm_cls=MemeticCognitiveAlgorithm,
        problem_name="Ackley",
        drawing_fun=drawing_class.draw_avg_function,
        drawing_series_labels=["RUN2", "RUN2"]
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 2500,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "species1": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "species2": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search2,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "local_search": local_search,
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 2500,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "species1": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "species2": Species(
                mutation=mutation,
                crossover=SBXCrossover(probability=1.0, distribution_index=20),
                selection=BinaryTournamentSelection(),
                local_search=local_search2,
                termination_criterion=StoppingByEvaluations(max_evaluations=1000)
            ),
            "local_search": local_search,
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    )

    # modify genetic to get history (see MemeticCognitiveAlgorithm)
    third_execution_unit = ExecutionUnit(
        algorithm_cls=GeneticAlgorithm,
        problem_name="Ackley",
        drawing_fun=drawing_class.draw_avg_function,
        drawing_series_labels=["GENETIC", "GENETIC"]
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 1000,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    ).register_run(
        parameters={
            "problem": problem,
            "population_size": 5000,
            "offspring_population_size": 1000,
            "mutation": mutation,
            "crossover": SBXCrossover(probability=1.0, distribution_index=20),
            "selection": BinaryTournamentSelection(),
            "termination_criterion": StoppingByEvaluations(max_evaluations=max_evaluations)
        }
    )

    runner = MultiAlgorithmRunner(
        execution_units=[
            first_execution_unit, second_execution_unit, third_execution_unit
        ],
        drawing_properties=DrawingProperties(
            title='Memetic1', target_location=os.path.join(RESULTS_DIR, "photo.png"))
    )
    print("Runner starts evaluation.")
    results = runner.run_all()
    print("Results")
    for run_result in results.run_results:
        print(run_result)
    save_execution_history(execution_history=results, path=target_path)
Example 20
    problem.reference_front = read_solutions(
        filename='../../../resources/reference_front/{}.pf'.format(
            problem.get_name()))

    reference_point = [0.8, 0.5]

    max_evaluations = 25000
    algorithm = NSGAII(
        problem=problem,
        population_size=100,
        offspring_population_size=100,
        mutation=PolynomialMutation(probability=1.0 /
                                    problem.number_of_variables,
                                    distribution_index=20),
        crossover=SBXCrossover(probability=1.0, distribution_index=20),
        selection=BinaryTournamentSelection(
            comparator=RankingAndCrowdingDistanceComparator()),
        dominance_comparator=GDominanceComparator(reference_point),
        termination_criterion=StoppingByEvaluations(max=max_evaluations))

    algorithm.observable.register(observer=ProgressBarObserver(
        max=max_evaluations))
    algorithm.observable.register(
        observer=VisualizerObserver(reference_front=problem.reference_front,
                                    reference_point=(reference_point)))

    algorithm.run()
    front = algorithm.get_result()

    # Plot front
    plot_front = Plot(plot_title='Pareto front approximation',
                      axis_labels=problem.obj_labels,
Example 21
    def run(self) -> List[S]:
        # selection operator 1
        selection_operator_1 = BinaryTournamentSelection()
        # selection operator 2
        selection_operator_2 = DifferentialEvolutionSelection()
        # crossover operator 1
        crossover_operator_1 = SBXCrossover(1.0, 20.0)
        # crossover operator 2
        crossover_operator_2 = DifferentialEvolutionCrossover(0.2, 0.5, 0.5)
        # crossover operator 3
        crossover_operator_3 = DifferentialEvolutionCrossover(1.0, 0.5, 0.5)
        # mutation operator 1
        mutation_operator_1 = PolynomialMutation(
            1.0 / self.problem.number_of_variables, 20.0)
        # dominance comparator
        dominance = DominanceComparator()

        # array that stores the "generational" HV quality
        generational_hv: List[float] = []

        parent_1: List[FloatSolution] = [None, None]
        parent_2: List[FloatSolution] = []
        parent_3: List[FloatSolution] = []

        # initialize some local and global variables
        pool_1: List[FloatSolution] = []
        pool_2: List[FloatSolution] = []

        # size of elite subset used for fitness sharing between subpopulations
        nrOfDirectionalSolutionsToEvolve = int(self.population_size / 5)
        # subpopulation 1
        pool_1_size = int(self.population_size -
                          (nrOfDirectionalSolutionsToEvolve / 2))
        # subpopulation 2
        pool_2_size = int(self.population_size -
                          (nrOfDirectionalSolutionsToEvolve / 2))

        print(
            str(pool_1_size) + " - " + str(nrOfDirectionalSolutionsToEvolve) +
            " - " + str(self.mix_interval))

        evaluations = 0
        current_gen = 0
        directionalArchiveSize = 2 * self.population_size
        weights = self.__create_uniform_weights(
            directionalArchiveSize, self.problem.number_of_objectives)

        directionalArchive = self.__create_directional_archive(weights)
        neighbourhoods = self.__create_neighbourhoods(directionalArchive,
                                                      self.population_size)

        nrOfReplacements = 1
        iniID = 0

        # Create the initial pools
        # pool1
        pool_1: List[FloatSolution] = []
        for _ in range(pool_1_size):
            new_solution = self.problem.create_solution()
            new_solution = self.problem.evaluate(new_solution)
            evaluations += 1
            pool_1.append(new_solution)

            self.__update_extreme_values(new_solution)
            dr = directionalArchive[iniID]
            dr.curr_sol = new_solution
            iniID += 1
        # pool2
        pool_2: List[FloatSolution] = []
        for _ in range(pool_2_size):
            new_solution = self.problem.create_solution()
            new_solution = self.problem.evaluate(new_solution)
            evaluations += 1
            pool_2.append(new_solution)

            self.__update_extreme_values(new_solution)
            dr = directionalArchive[iniID]
            dr.curr_sol = new_solution
            iniID += 1
        # directional archive initialization
        pool_A: List[FloatSolution] = []
        while iniID < directionalArchiveSize:
            new_solution = self.problem.create_solution()
            new_solution = self.problem.evaluate(new_solution)
            evaluations += 1
            pool_A.append(new_solution)

            self.__update_extreme_values(new_solution)
            dr = directionalArchive[iniID]
            dr.curr_sol = new_solution
            iniID += 1

        mix = self.mix_interval
        h = HyperVolume(reference_point=[1] *
                        self.problem.number_of_objectives)

        insertionRate: List[float] = [0, 0, 0]
        bonusEvals: List[int] = [0, 0, nrOfDirectionalSolutionsToEvolve]
        testRun = True

        # record the generational HV of the initial population
        combiAll: List[FloatSolution] = []
        cGen = int(evaluations / self.report_interval)
        if cGen > 0:
            combiAll = pool_1 + pool_2 + pool_A
            combiAll = self.r.replace(
                combiAll[:pool_1_size + pool_2_size],
                combiAll[pool_1_size + pool_2_size:],
            )
            hval = h.compute([s.objectives for s in combiAll])
            for _ in range(cGen):
                generational_hv.append(hval)
            current_gen = cGen

        # the main loop of the algorithm
        while evaluations < self.max_evaluations:
            offspringPop1: List[FloatSolution] = []
            offspringPop2: List[FloatSolution] = []
            offspringPop3: List[FloatSolution] = []

            dirInsertPool1: List[FloatSolution] = []
            dirInsertPool2: List[FloatSolution] = []
            dirInsertPool3: List[FloatSolution] = []

            # evolve pool1 - using SPEA2 evolutionary model
            nfe: int = 0
            while nfe < (pool_1_size + bonusEvals[0]):
                parent_1[0] = selection_operator_1.execute(pool_1)
                parent_1[1] = selection_operator_1.execute(pool_1)

                child1a: FloatSolution = crossover_operator_1.execute(
                    parent_1)[0]
                child1a = mutation_operator_1.execute(child1a)

                child1a = self.problem.evaluate(child1a)
                evaluations += 1
                nfe += 1

                offspringPop1.append(child1a)
                dirInsertPool1.append(child1a)

            # evolve pool2 - using DEMO SP evolutionary model
            i: int = 0
            unselectedIDs: List[int] = []
            for ID in range(len(pool_2)):
                unselectedIDs.append(ID)

            nfe = 0
            while nfe < (pool_2_size + bonusEvals[1]):
                index = random.randint(0, len(unselectedIDs) - 1)
                i = unselectedIDs[index]
                unselectedIDs.pop(index)

                parent_2 = selection_operator_2.execute(pool_2)

                crossover_operator_2.current_individual = pool_2[i]
                child2 = crossover_operator_2.execute(parent_2)
                child2 = self.problem.evaluate(child2[0])

                evaluations += 1
                nfe += 1

                result = dominance.compare(pool_2[i], child2)

                if result == -1:  # solution i dominates child
                    offspringPop2.append(pool_2[i])
                elif result == 1:  # child dominates
                    offspringPop2.append(child2)
                else:  # the two solutions are non-dominated
                    offspringPop2.append(child2)
                    offspringPop2.append(pool_2[i])

                dirInsertPool2.append(child2)

                if len(unselectedIDs) == 0:
                    for ID in range(len(pool_2)):
                        unselectedIDs.append(random.randint(
                            0,
                            len(pool_2) - 1))

            # evolve pool3 - Directional Decomposition DE/rand/1/bin
            IDs = self.__compute_neighbourhood_Nfe_since_last_update(
                neighbourhoods, directionalArchive,
                nrOfDirectionalSolutionsToEvolve)

            nfe = 0
            for j in range(len(IDs)):
                if nfe < bonusEvals[2]:
                    nfe += 1
                else:
                    break

                cID = IDs[j]

                chosenSol: FloatSolution = None
                if directionalArchive[cID].curr_sol is not None:
                    chosenSol = directionalArchive[cID].curr_sol
                else:
                    chosenSol = pool_1[0]
                    print("error!")

                parent_3: List[FloatSolution] = [None, None, None]

                r1 = random.randint(0, len(neighbourhoods[cID]) - 1)
                r2 = random.randint(0, len(neighbourhoods[cID]) - 1)
                r3 = random.randint(0, len(neighbourhoods[cID]) - 1)
                while r2 == r1:
                    r2 = random.randint(0, len(neighbourhoods[cID]) - 1)
                while r3 == r1 or r3 == r2:
                    r3 = random.randint(0, len(neighbourhoods[cID]) - 1)

                parent_3[0] = directionalArchive[r1].curr_sol
                parent_3[1] = directionalArchive[r2].curr_sol
                parent_3[2] = directionalArchive[r3].curr_sol

                crossover_operator_3.current_individual = chosenSol
                child3 = crossover_operator_3.execute(parent_3)[0]
                child3 = mutation_operator_1.execute(child3)

                child3 = self.problem.evaluate(child3)
                evaluations += 1

                dirInsertPool3.append(child3)

            # compute directional improvements
            # pool1
            improvements = 0
            for j in range(len(dirInsertPool1)):
                testSol = dirInsertPool1[j]
                self.__update_extreme_values(testSol)
                improvements += self.__update_neighbourhoods(
                    directionalArchive, testSol, nrOfReplacements)
            insertionRate[0] += (1.0 * improvements) / len(dirInsertPool1)

            # pool2
            improvements = 0
            for j in range(len(dirInsertPool2)):
                testSol = dirInsertPool2[j]
                self.__update_extreme_values(testSol)
                improvements += self.__update_neighbourhoods(
                    directionalArchive, testSol, nrOfReplacements)
            insertionRate[1] += (1.0 * improvements) / len(dirInsertPool2)

            # pool3
            improvements = 0
            for j in range(len(dirInsertPool3)):
                testSol = dirInsertPool3[j]
                self.__update_extreme_values(testSol)
                improvements += self.__update_neighbourhoods(
                    directionalArchive, testSol, nrOfReplacements)
            # In Java, dividing a float by zero yields NaN; in Python it raises
            # ZeroDivisionError, so the empty-pool case is handled explicitly.
            # NaN (rather than None) keeps the rate comparisons below well defined.
            if len(dirInsertPool3) == 0:
                insertionRate[2] = float('nan')
            else:
                insertionRate[2] += (1.0 * improvements) / len(dirInsertPool3)

            for dr in directionalArchive:
                offspringPop3.append(dr.curr_sol)

            offspringPop1 = offspringPop1 + pool_1
            pool_1 = self.r.replace(offspringPop1[:pool_1_size],
                                    offspringPop1[pool_1_size:])
            pool_2 = self.r.replace(offspringPop2[:pool_2_size],
                                    offspringPop2[pool_2_size:])

            combi: List[FloatSolution] = []
            mix -= 1

            if mix == 0:
                mix = self.mix_interval
                combi = combi + pool_1 + pool_2 + offspringPop3
                print("Combi size: " + str(len(combi)))

                combi = self.r.replace(
                    combi[:nrOfDirectionalSolutionsToEvolve],
                    combi[nrOfDirectionalSolutionsToEvolve:],
                )

                insertionRate[0] /= self.mix_interval
                insertionRate[1] /= self.mix_interval
                if not math.isnan(insertionRate[2]):
                    insertionRate[2] /= self.mix_interval
                """
                print(
                    "Insertion rates: "
                    + str(insertionRate[0])
                    + " - "
                    + str(insertionRate[1])
                    + " - "
                    + str(insertionRate[2])
                    + " - Test run:"
                    + str(testRun)
                )
                """
                if testRun:
                    if (insertionRate[0] > insertionRate[1]) and (
                            insertionRate[0] > insertionRate[2]):
                        print("SPEA2 win - bonus run!")
                        bonusEvals[0] = nrOfDirectionalSolutionsToEvolve
                        bonusEvals[1] = 0
                        bonusEvals[2] = 0
                    if (insertionRate[1] > insertionRate[0]) and (
                            insertionRate[1] > insertionRate[2]):
                        print("DE win - bonus run!")
                        bonusEvals[0] = 0
                        bonusEvals[1] = nrOfDirectionalSolutionsToEvolve
                        bonusEvals[2] = 0
                    if (insertionRate[2] > insertionRate[0]) and (
                            insertionRate[2] > insertionRate[1]):
                        print("Directional win - no bonus!")
                        bonusEvals[0] = 0
                        bonusEvals[1] = 0
                        bonusEvals[2] = nrOfDirectionalSolutionsToEvolve
                else:
                    print("Test run - no bonus!")
                    bonusEvals[0] = 0
                    bonusEvals[1] = 0
                    bonusEvals[2] = nrOfDirectionalSolutionsToEvolve

                testRun = not testRun

                insertionRate[0] = 0.0
                insertionRate[1] = 0.0
                insertionRate[2] = 0.0

                pool_1 = pool_1 + combi
                pool_2 = pool_2 + combi
                print("Sizes: " + str(len(pool_1)) + " " + str(len(pool_2)))

                pool_1 = self.r.replace(pool_1[:pool_1_size],
                                        pool_1[pool_1_size:])
                pool_2 = self.r.replace(pool_2[:pool_2_size],
                                        pool_2[pool_2_size:])

                self.__clear_Nfe_history(directionalArchive)

            hVal1 = h.compute([s.objectives for s in pool_1])
            hVal2 = h.compute([s.objectives for s in pool_2])
            hVal3 = h.compute([s.objectives for s in offspringPop3])

            newGen = int(evaluations / self.report_interval)

            if newGen > current_gen:
                print("Hypervolume: " + str(newGen) + " - " + str(hVal1) +
                      " - " + str(hVal2) + " - " + str(hVal3))
                combi = combi + pool_1 + pool_2 + offspringPop3
                combi = self.r.replace(combi[:self.population_size * 2],
                                       combi[self.population_size * 2:])
                hval = h.compute([s.objectives for s in combi])
                for j in range(current_gen, newGen):
                    generational_hv.append(hval)
                current_gen = newGen

        # return the final combined non-dominated set of maximum size = (populationSize * 2)
        combiAll: List[FloatSolution] = []
        combiAll = combiAll + pool_1 + pool_2 + pool_A
        combiAll = self.r.replace(combiAll[:self.population_size * 2],
                                  combiAll[self.population_size * 2:])
        return combiAll