Example #1
    def _do(self, problem, pop, n_survive, algorithm=None, **kwargs):

        if isinstance(self.eliminate_duplicates, bool) and self.eliminate_duplicates:
            pop = DefaultDuplicateElimination(func=lambda p: p.get("F")).do(pop)

        elif isinstance(self.eliminate_duplicates, DuplicateElimination):
            _, no_candidates, candidates = DefaultDuplicateElimination(func=lambda pop: pop.get("F")).do(pop,
                                                                                                         return_indices=True)
            _, _, is_duplicate = self.eliminate_duplicates.do(pop[candidates], pop[no_candidates], return_indices=True,
                                                              to_itself=False)
            elim = set(np.array(candidates)[is_duplicate])
            pop = pop[[k for k in range(len(pop)) if k not in elim]]

        if problem.n_obj == 1:
            pop = FitnessSurvival().do(problem, pop, n_survive=len(pop))
            elites = pop[:self.n_elites]
            non_elites = pop[self.n_elites:]
        else:
            I = NonDominatedSorting().do(pop.get("F"), only_non_dominated_front=True)
            elites = pop[I]
            non_elites = pop[[k for k in range(len(pop)) if k not in I]]

        elites.set("type", ["elite"] * len(elites))
        non_elites.set("type", ["non_elite"] * len(non_elites))

        return pop
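
The duplicate elimination above filters individuals whose objective vectors F coincide. As a rough illustration of what that amounts to, here is a small standalone sketch (the helper name find_duplicate_rows is hypothetical; this is not pymoo's DefaultDuplicateElimination):

    import numpy as np

    def find_duplicate_rows(F, eps=1e-16):
        # mark every row of F that duplicates an earlier row (within eps in Euclidean distance)
        F = np.asarray(F, dtype=float)
        D = np.linalg.norm(F[:, None, :] - F[None, :, :], axis=-1)
        is_dup = np.full(len(F), False)
        for i in range(1, len(F)):
            is_dup[i] = np.any(D[i, :i] <= eps)
        return is_dup

    print(find_duplicate_rows([[1.0, 2.0], [1.0, 2.0], [3.0, 1.0]]))  # [False  True False]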
Example #2
    def _do(self, problem, pop, off, algorithm=None, **kwargs):

        if self.cnt is None:
            self.cnt = np.zeros(len(pop), dtype=int)

        cnt = self.cnt

        cv = pop.get("CV")[:, 0]

        # cnt = self.cnt
        cnt = algorithm.n_gen - pop.get("n_gen") - 1

        # make sure we never replace the best solution if we would consider feasibility first
        best = FitnessSurvival().do(problem,
                                    Population.merge(pop, off),
                                    n_survive=1)[0]

        eps = np.zeros(len(pop))

        for k, t in enumerate(cnt):

            # cycle = (t // (4 * self.t))
            # max_eps = (2 ** cycle) * self.eps

            max_eps = self.eps

            t = t % (4 * self.t)

            if t < self.t:
                eps[k] = cv[k] + (max_eps - cv[k]) * (t / self.t)
            elif t < 2 * self.t:
                eps[k] = max_eps
            elif t < 3 * self.t:
                eps[k] = max_eps * (1 - ((t % self.t) / self.t))
            else:
                eps[k] = 0.0

        eps_is_zero = np.where(eps <= 0)[0]

        # print(len(eps_is_zero))

        repl = np.full(len(pop), False)
        for k in range(len(pop)):

            if pop[k] == best:
                repl[k] = False
            elif off[k] == best:
                repl[k] = True

            else:
                if rel_eps_constr(pop[k], off[k], eps[k]) <= 0:
                    repl[k] = True

        # self.cnt[repl] = 0
        # self.cnt[~repl] += 1

        return repl
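
The loop over cnt implements a cyclic epsilon relaxation of the constraint violation: it ramps up to max_eps, holds, ramps back down, and then enforces strict feasibility. A minimal standalone restatement of that schedule (the function name eps_schedule is made up for illustration):

    def eps_schedule(t, cv, max_eps, T):
        # piecewise schedule over a cycle of length 4*T, mirroring the loop above
        t = t % (4 * T)
        if t < T:                 # ramp from the current violation up to max_eps
            return cv + (max_eps - cv) * (t / T)
        elif t < 2 * T:           # hold at max_eps
            return max_eps
        elif t < 3 * T:           # ramp back down towards zero
            return max_eps * (1 - ((t % T) / T))
        else:                     # strictly feasible phase
            return 0.0

    print([round(eps_schedule(t, cv=0.0, max_eps=1.0, T=5), 2) for t in range(20)])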
Example #3
    def _advance(self, infills=None, **kwargs):
        assert infills is not None, "This algorithm uses the AskAndTell interface, thus infills must be provided."

        # get the indices where each offspring is originating from
        I = infills.get("index")

        # replace the corresponding parents with their offspring if the offspring is an improvement
        self.pop[I] = ImprovementReplacement().do(self.problem, self.pop[I],
                                                  infills)

        # sort the population by fitness to make the selection simpler for mating (not an actual survival, just sorting)
        self.pop = FitnessSurvival().do(self.problem, self.pop)
Example #4
    def __init__(self,
                 n_offsprings=200,
                 pop_size=None,
                 rule=1.0 / 7.0,
                 phi=1.0,
                 gamma=0.85,
                 sampling=FloatRandomSampling(),
                 survival=FitnessSurvival(),
                 display=SingleObjectiveDisplay(),
                 **kwargs):
        """
        Evolutionary Strategy (ES)

        Parameters
        ----------
        n_offsprings : int
            The number of individuals created in each iteration.
        pop_size : int
            The number of individuals surviving from the offspring population (non-elitist).
        rule : float
            The ratio of individuals surviving. From it, either `n_offsprings` or `pop_size` is calculated automatically.
        phi : float
            Expected rate of convergence (usually 1.0).
        gamma : float
            If not `None`, some individuals are created using the differentials with this as a length scale.
        sampling : object
            The sampling method for creating the initial population.
        """

        if pop_size is None and n_offsprings is not None:
            pop_size = int(np.ceil(n_offsprings * rule))
        elif n_offsprings is None and pop_size is not None:
            n_offsprings = int(np.floor(pop_size / rule))

        assert pop_size is not None and n_offsprings is not None, "You have to provide at least pop_size or n_offsprings."
        assert n_offsprings >= 2 * pop_size, "The number of offsprings should be at least double the population size."

        super().__init__(pop_size=pop_size,
                         n_offsprings=n_offsprings,
                         sampling=sampling,
                         survival=survival,
                         display=display,
                         advance_after_initial_infill=True,
                         **kwargs)

        self.default_termination = SingleObjectiveDefaultTermination()
        self.phi = phi
        self.gamma = gamma

        self.tau, self.taup, self.sigma_max = None, None, None
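
The constructor derives whichever of pop_size and n_offsprings was left out from the survival ratio rule. The same logic in isolation (derive_sizes is a hypothetical helper, not part of pymoo):

    import numpy as np

    def derive_sizes(n_offsprings=None, pop_size=None, rule=1.0 / 7.0):
        # fill in the missing one of (pop_size, n_offsprings) from the survival ratio
        if pop_size is None and n_offsprings is not None:
            pop_size = int(np.ceil(n_offsprings * rule))
        elif n_offsprings is None and pop_size is not None:
            n_offsprings = int(np.floor(pop_size / rule))
        assert pop_size is not None and n_offsprings is not None, "Provide pop_size or n_offsprings."
        assert n_offsprings >= 2 * pop_size, "n_offsprings should be at least double pop_size."
        return pop_size, n_offsprings

    print(derive_sizes(n_offsprings=200))  # (29, 200) with the default 1/7 rule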
Example #5
    def _infill(self):
        problem, particles, pbest = self.problem, self.particles, self.pop

        (X, V) = particles.get("X", "V")
        P_X = pbest.get("X")

        sbest = self._social_best()
        S_X = sbest.get("X")

        Xp, Vp = pso_equation(X, P_X, S_X, V, self.V_max, self.w, self.c1,
                              self.c2)

        # if the problem has boundaries to be considered
        if problem.has_bounds():

            for k in range(20):
                # find the individuals which are still infeasible
                m = is_out_of_bounds_by_problem(problem, Xp)

                # re-apply the PSO update equation for those particles
                Xp[m], Vp[m] = pso_equation(X[m], P_X[m], S_X[m], V[m],
                                            self.V_max, self.w, self.c1,
                                            self.c2)

            # if still infeasible do a random initialization
            Xp = repair_random_init(Xp, X, *problem.bounds())

        # create the offspring population
        off = Population.new(X=Xp, V=Vp)

        # try to improve the current best with a perturbation
        if self.pertube_best:
            k = FitnessSurvival().do(problem,
                                     pbest,
                                     n_survive=1,
                                     return_indices=True)[0]
            eta = int(np.random.uniform(20, 30))
            mutant = PolynomialMutation(eta).do(problem, pbest[[k]])[0]
            off[k].set("X", mutant.X)

        self.repair.do(problem, off)

        self.sbest = sbest.copy()

        return off
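
pso_equation above produces new positions and velocities from the current positions, personal bests, and the social best. A sketch of the canonical PSO update it corresponds to (this is the textbook formulation, not necessarily pymoo's exact implementation):

    import numpy as np

    def pso_update(X, P_X, S_X, V, V_max, w, c1, c2):
        # inertia term plus cognitive (personal best) and social (swarm best) attraction
        r1, r2 = np.random.random(X.shape), np.random.random(X.shape)
        Vp = w * V + c1 * r1 * (P_X - X) + c2 * r2 * (S_X - X)
        Vp = np.clip(Vp, -V_max, V_max)    # limit the velocity
        Xp = X + Vp                        # move the particles
        return Xp, Vp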
Example #6
    def _do(self, problem, pop, n_survive, out=None, **kwargs):
        F = pop.get("F")

        if F.shape[1] != 1:
            raise ValueError("FitnessSurvival can only be used for single-objective problems!")

        # this basically sorts the population by constraint and objective value
        pop = FitnessSurvival().do(problem, pop, n_survive=len(pop))

        # calculate the distance from each individual to another - pre-processing for the clearing
        # NOTE: the distance is normalized by the maximum distance possible
        X = pop.get("X").astype(float)
        D = norm_eucl_dist(problem, X, X)
        if self.norm_by_dim:
            D = D / (problem.n_var ** 0.5)

        # initialize the clearing strategy
        clearing = EpsilonClearing(D, self.epsilon)

        # initialize the iteration and rank counters at the beginning
        iter, rank = 1, 1

        # also keep track of the solutions found in the first iteration
        iter_one = None

        # while fewer individuals have been selected than should survive
        while len(clearing.selected()) < n_survive:

            # get all the remaining indices
            remaining = clearing.remaining()

            # if no individuals are left because of clearing - perform a reset
            if len(remaining) == 0 or (self.n_max_each_iter is not None and rank > self.n_max_each_iter):
                # reset and retrieve the newly available indices
                clearing.reset()
                remaining = clearing.remaining()

                # increase the iteration counter and start over from rank 1
                iter += 1
                rank = 1

                # get the individuals of the first iteration - needed for niche assignment
                iter_one = np.where(pop.get("iter") == 1)[0] if iter_one is None else iter_one

            # since the population is ordered by F and CV it is always the first index
            k = remaining[0]

            # set the attribute to the selected individual
            pop[k].set("iter", iter)
            pop[k].set("rank", rank)

            # in the first iteration set the niche counter for each solution equal to rank
            if iter == 1:
                pop[k].set("niche", rank)
            else:
                closest_iter_one = iter_one[D[k][iter_one].argmin()]
                niche = pop[closest_iter_one].get("niche")
                pop[k].set("niche", niche)

            clearing.select(k)
            rank += 1

        # retrieve all individuals being selected
        S = clearing.selected()

        return pop[S]
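
EpsilonClearing is only used through remaining(), selected(), select() and reset(). A minimal sketch of such a data structure, inferred from that usage (not pymoo's implementation):

    import numpy as np

    class SimpleEpsilonClearing:
        def __init__(self, D, epsilon):
            # D is a pairwise (normalized) distance matrix, epsilon the clearing radius
            self.D, self.epsilon = D, epsilon
            self.S = []                              # indices selected so far
            self.C = np.full(len(D), False)          # temporarily cleared individuals

        def selected(self):
            return self.S

        def remaining(self):
            return [i for i in range(len(self.D)) if i not in self.S and not self.C[i]]

        def select(self, k):
            # selecting k clears every individual within epsilon of it
            self.S.append(k)
            self.C = self.C | (self.D[k] <= self.epsilon)

        def reset(self):
            # make the cleared (but not yet selected) individuals available again
            self.C = np.full(len(self.D), False)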
Example #7
    def _initialize_advance(self, infills=None, **kwargs):
        super()._initialize_advance(infills=infills, **kwargs)

        self.evaluator.eval(self.problem, infills, algorithm=self)
        self.x0 = FitnessSurvival().do(self.problem, infills, n_survive=1)[0]
Example #8
    def _initialize_advance(self, infills=None, **kwargs):
        self.pop = FitnessSurvival().do(self.problem,
                                        infills,
                                        n_survive=len(infills))
Example #9
    def step(self):

        # initialize all ants to be used in this iteration
        ants = []
        for k in range(self.n_ants):
            ant = self.ant()
            ant._initialize(self.problem, self.pheromones)
            ants.append(ant)

        active = list(range(self.n_ants))

        while len(active) > 0:

            for k in active:
                ant = ants[k]

                if ant.has_next():
                    ant.next()

                    if self.local_update:
                        e = ant.last()
                        if e is None or e.pheromone is None:
                            raise Exception(
                                "For a local update the ant has to set the pheromones when notified."
                            )
                        else:
                            self.pheromones.set(
                                e.key,
                                self.pheromones.get(e.key) * ant.alpha +
                                e.pheromone * ant.alpha)
                            # self.pheromones.update(e.key, e.pheromone * ant.alpha)

                else:
                    ant.finalize()
                    active = [i for i in active if i != k]

        colony = Population.create(*ants)

        # this evaluation can be disabled or faked if evaluate_each_ant is false - then the finalize method of the
        # ant has to set the objective and/or constraint values accordingly
        self.evaluator.eval(self.problem, colony)
        set_cv(colony)
        set_feasibility(colony)

        # set the current best including the new colony
        opt = FitnessSurvival().do(self.problem, Population.merge(colony, self.opt), 1)

        # do the evaporation after this iteration
        self.pheromones.evaporate()

        # select the ants to be used for the global pheromone update
        if self.global_update == "all":
            ants_to_update = colony
        elif self.global_update == "it-best":
            ants_to_update = FitnessSurvival().do(self.problem, colony, 1)
        elif self.global_update == "best":
            ants_to_update = self.opt
        else:
            raise Exception(
                "Unknown value for global updating the pheromones!")

        # now spread the pheromones for each ant depending on performance
        for ant in ants_to_update:
            for e in ant.path:
                if e.pheromone is None:
                    raise Exception(
                        "The ant has to set the pheromone of each entry in the path."
                    )
                else:
                    self.pheromones.update(e.key, e.pheromone * self.pheromones.rho)

        self.pop, self.off = colony, colony
        self.opt = opt
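
The pheromone object is accessed through get(), set(), update(), evaporate() and rho. A dictionary-backed sketch matching that interface (names and defaults are assumptions, not pymoo's class):

    class SimplePheromones:
        def __init__(self, tau0=1.0, rho=0.1):
            self.tau0, self.rho = tau0, rho   # initial pheromone level and evaporation rate
            self.data = {}

        def get(self, key):
            return self.data.get(key, self.tau0)

        def set(self, key, value):
            self.data[key] = value

        def update(self, key, delta):
            # deposit: add pheromone on top of the current amount
            self.data[key] = self.get(key) + delta

        def evaporate(self):
            # decay every stored entry by the factor (1 - rho)
            for key in self.data:
                self.data[key] = (1 - self.rho) * self.data[key]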
Example #10
    def _local_advance(self, **kwargs):

        # the simplex has n_var + 1 points; n is chosen so that pop[n + 1] is the worst point (matches the equations in the paper)
        xl, xu = self.problem.bounds()
        pop, n = self.pop, self.problem.n_var - 1

        # calculate the centroid
        centroid = pop[:n + 1].get("X").mean(axis=0)

        # -------------------------------------------------------------------------------------------
        # REFLECT
        # -------------------------------------------------------------------------------------------

        # check the maximum alpha until the bounds are hit
        alphaU = max_alpha(centroid, (centroid - pop[n + 1].X), xl, xu)

        # reflect the point, capping the factor where the bounds are hit, clip to the bounds (floating point), then evaluate
        x_reflect = centroid + min(self.alpha, alphaU) * (centroid - pop[n + 1].X)
        x_reflect = set_to_bounds_if_outside_by_problem(self.problem, x_reflect)
        reflect = self.evaluator.eval(self.problem, Individual(X=x_reflect), algorithm=self)

        # whether a shrink is necessary or not - decided during this step
        shrink = False

        better_than_current_best = is_better(reflect, pop[0])
        better_than_second_worst = is_better(reflect, pop[n])
        better_than_worst = is_better(reflect, pop[n + 1])

        # if better than the current best - check for expansion
        if better_than_current_best:

            # -------------------------------------------------------------------------------------------
            # EXPAND
            # -------------------------------------------------------------------------------------------

            # the maximum expansion until the bounds are hit
            betaU = max_alpha(centroid, (x_reflect - centroid), xl, xu)

            # expand using the factor, capping it at the bounds, and clip in case of floating point issues
            x_expand = centroid + min(self.beta, betaU) * (x_reflect - centroid)
            x_expand = set_to_bounds_if_outside_by_problem(self.problem, x_expand)

            # if the expansion is almost equal to reflection (if boundaries were hit) - no evaluation
            if np.allclose(x_expand, x_reflect, atol=1e-16):
                expand = reflect
            else:
                expand = self.evaluator.eval(self.problem, Individual(X=x_expand), algorithm=self)

            # if the expansion further improved take it - otherwise use the reflection
            if is_better(expand, reflect):
                pop[n + 1] = expand
            else:
                pop[n + 1] = reflect

        # if the new point is not better than the best, but better than second worst - just keep it
        elif not better_than_current_best and better_than_second_worst:
            pop[n + 1] = reflect

        # if better than the worst but not better than the second worst - outside contraction
        elif not better_than_second_worst and better_than_worst:

            # -------------------------------------------------------------------------------------------
            # Outside Contraction
            # -------------------------------------------------------------------------------------------

            x_contract_outside = centroid + self.gamma * (x_reflect - centroid)
            contract_outside = self.evaluator.eval(self.problem, Individual(X=x_contract_outside), algorithm=self)

            if is_better(contract_outside, reflect):
                pop[n + 1] = contract_outside
            else:
                shrink = True

        # if the reflection was worse than the worst - inside contraction
        else:

            # -------------------------------------------------------------------------------------------
            # Inside Contraction
            # -------------------------------------------------------------------------------------------

            x_contract_inside = centroid - self.gamma * (x_reflect - centroid)
            contract_inside = self.evaluator.eval(self.problem, Individual(X=x_contract_inside), algorithm=self)

            if is_better(contract_inside, pop[n + 1]):
                pop[n + 1] = contract_inside
            else:
                shrink = True

        # -------------------------------------------------------------------------------------------
        # Shrink (only if necessary)
        # -------------------------------------------------------------------------------------------

        if shrink:
            for i in range(1, len(pop)):
                pop[i].X = pop[0].X + self.delta * (pop[i].X - pop[0].X)
            pop[1:] = self.evaluator.eval(self.problem, pop[1:], algorithm=self)

        self.pop = FitnessSurvival().do(self.problem, pop, n_survive=len(pop))
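
max_alpha is used above to cap the reflection and expansion steps at the box bounds. A sketch of what such a helper computes (max_step_until_bounds is a hypothetical stand-in, not pymoo's max_alpha):

    import numpy as np

    def max_step_until_bounds(x, direction, xl, xu):
        # largest alpha >= 0 such that x + alpha * direction stays inside [xl, xu]
        alphas = []
        for xi, di, li, ui in zip(x, direction, xl, xu):
            if di > 0:
                alphas.append((ui - xi) / di)
            elif di < 0:
                alphas.append((li - xi) / di)
        return min(alphas) if len(alphas) > 0 else np.inf

    print(max_step_until_bounds(np.array([0.5, 0.5]), np.array([1.0, -0.25]),
                                np.array([0.0, 0.0]), np.array([1.0, 1.0])))  # 0.5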
Example #11
    def _local_initialize_advance(self, infills=None, **kwargs):
        # sort the current simplex by fitness
        self.pop = FitnessSurvival().do(self.problem, infills, n_survive=len(infills))