Example #1
    def _do(self, problem, pop, n_survive, **kwargs):
        feasible, infeasible = split_by_feasibility(
            pop, sort_infeasbible_by_cv=True)

        # decide how many feasible and infeasible solutions shall survive
        n_feasible = min(len(feasible), int(n_survive * self.feasible_perc))
        n_infeasible = n_survive - n_feasible

        # let the fitness survival select among the feasible solutions
        if n_feasible > 0:
            ret = FitnessSurvival().do(problem, pop[feasible], n_feasible)
        else:
            ret = None

        # stash the objectives and append the constraint violation as an additional objective
        pop_infeasible = pop[infeasible]
        F = pop_infeasible.get("F")
        pop_infeasible.set("__F", F)
        pop_infeasible.set("F", np.column_stack([F, pop_infeasible.get("CV")]))

        # rank the infeasible solutions by non-dominated sorting and crowding, then restore "F"
        S = RankAndCrowdingSurvival().do(problem, pop_infeasible, n_infeasible)
        S.set("F", S.get("__F"))

        if ret is None:
            ret = S
        else:
            ret = ret.merge(S)

        return ret
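In this survival the original objectives are stashed under "__F", the constraint violation is appended as an extra objective for the infeasible solutions, and RankAndCrowdingSurvival ranks the augmented population before "F" is restored. For reference, here is a minimal sketch of calling the survival directly on an unconstrained population, following the Population().new("F", ...) pattern of Example #5 and assuming pymoo's ~0.4 module layout for the imports:

import numpy as np

from pymoo.algorithms.nsga2 import RankAndCrowdingSurvival
from pymoo.factory import get_problem
from pymoo.model.population import Population

problem = get_problem("zdt1")

# random objective vectors standing in for an evaluated population
F = np.random.random((100, problem.n_obj))
pop = Population().new("F", F)

# keep the 50 best individuals by non-dominated rank and crowding distance
survivors = RankAndCrowdingSurvival().do(problem, pop, 50)
print(len(survivors), survivors.get("F").shape)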
Example #2
    def _do(self, problem, pop, n_survive, out=None, algorithm=None, **kwargs):
        X, F = pop.get("X", "F")
        if F.shape[1] != 1:
            raise ValueError(
                "FitnessSurvival can only used for single objective single!")

        n_neighbors = 5

        # calculate the normalized euclidean distances from each solution to another
        D = norm_eucl_dist(problem, X, X, fill_diag_with_inf=True)

        # set the neighborhood for each individual
        for k, individual in enumerate(pop):

            # the neighbors in the current population
            neighbors = pop[D[k].argsort()[:n_neighbors]]

            # get the neighbors of the current individual and merge
            N = individual.get("neighbors")
            if N is not None:
                rec = []
                h = set()
                for n in N:
                    for entry in n.get("neighbors"):
                        if entry not in h:
                            rec.append(entry)
                            h.add(entry)

                neighbors = Population.merge(neighbors, rec)

            # keep only the closest solutions to the individual
            _D = norm_eucl_dist(problem, individual.X[None, :],
                                neighbors.get("X"))[0]

            # find only the closest neighbors
            closest = _D.argsort()[:n_neighbors]

            individual.set("crowding", _D[closest].mean())
            individual.set("neighbors", neighbors[closest])

        # debug output: objective value and crowding estimate of the current best solution
        best = F[:, 0].argmin()
        print(F[best], pop[best].get("crowding"))

        # plt.scatter(F[:, 0], pop.get("crowding"))
        # plt.show()

        pop.set("_F", pop.get("F"))
        pop.set("F", np.column_stack([F, -pop.get("crowding")]))
        pop = RankAndCrowdingSurvival().do(problem, pop, n_survive)
        pop.set("F", pop.get("_F"))

        return pop
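The crowding value assigned above is essentially the mean (normalized) distance of each solution to its few closest neighbors in design space, maintained incrementally through the stored "neighbors" attribute, and it is then traded off against the objective by RankAndCrowdingSurvival. Below is a self-contained numpy sketch of the same distance-based crowding estimate, without the normalization by the problem bounds and without the neighbor bookkeeping (the helper name knn_crowding is made up here):

import numpy as np

def knn_crowding(X, n_neighbors=5):
    # pairwise euclidean distances in design space
    D = np.sqrt(((X[:, None, :] - X[None, :, :]) ** 2).sum(axis=2))
    np.fill_diagonal(D, np.inf)

    # mean distance to the n_neighbors closest other solutions
    return np.sort(D, axis=1)[:, :n_neighbors].mean(axis=1)

X = np.random.random((20, 3))
print(knn_crowding(X))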
Example #3
    def _next(self):

        # make a step and create the offsprings
        self.off = self._step()

        # evaluate the offsprings
        self.evaluator.eval(self.problem, self.off, algorithm=self)

        survivors = []

        for k in range(self.pop_size):
            parent, off = self.pop[k], self.off[k]

            rel = get_relation(parent, off)

            if rel == 0:
                survivors.extend([parent, off])
            elif rel == -1:
                survivors.append(off)
            else:
                survivors.append(parent)

        survivors = Population.create(*survivors)

        if len(survivors) > self.pop_size:
            survivors = RankAndCrowdingSurvival().do(self.problem, survivors,
                                                     self.pop_size)

        self.pop = survivors
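The survival in this example hinges on the pairwise dominance relation returned by get_relation: when neither individual dominates the other both are kept, otherwise only the dominating one survives, and RankAndCrowdingSurvival trims the result back to the population size. Here is a simplified stand-in for that relation, operating on plain objective vectors and ignoring constraints (pymoo ships its own helper; the name get_relation_sketch is made up here):

import numpy as np

def get_relation_sketch(a_F, b_F):
    # 1 if a dominates b, -1 if b dominates a, 0 if they are mutually non-dominated
    if np.all(a_F <= b_F) and np.any(a_F < b_F):
        return 1
    if np.all(b_F <= a_F) and np.any(b_F < a_F):
        return -1
    return 0

print(get_relation_sketch(np.array([1.0, 2.0]), np.array([2.0, 3.0])))  # 1
print(get_relation_sketch(np.array([1.0, 3.0]), np.array([2.0, 2.0])))  # 0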
Example #4
    def _potential_optimal(self):
        pop = self.pop

        if len(pop) == 1:
            return pop

        # get the intervals of each individual
        _F, _CV, xl, xu = pop.get("F", "CV", "xl", "xu")
        nF = normalize(_F)
        F = nF + self.penalty * _CV

        # get the length of the interval of each solution
        nxl, nxu = norm_bounds(pop, self.problem)
        length = (nxu - nxl) / 2

        val = length.max(axis=1)

        # (a) non-dominated with respect to interval
        obj = np.column_stack([-val, F])
        I = NonDominatedSorting().do(obj, only_non_dominated_front=True)
        candidates, F, xl, xu, val = pop[I], F[I], xl[I], xu[I], val[I]

        # import matplotlib.pyplot as plt
        # plt.scatter(obj[:, 0], obj[:, 1])
        # plt.scatter(obj[I, 0], obj[I, 1], color="red")
        # plt.show()

        if len(candidates) == 1:
            return candidates
        else:
            if len(candidates) > self.n_max_candidates:
                candidates = RankAndCrowdingSurvival().do(
                    self.problem, candidates, self.n_max_candidates)

            return candidates
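Here RankAndCrowdingSurvival only acts as a tie-breaker: the intervals are first reduced to the non-dominated front of (negated interval length, penalized objective), and the survival trims that set further when it still exceeds n_max_candidates. A toy illustration of the non-dominated filter on such a two-column array, assuming pymoo's NonDominatedSorting lives under pymoo.util.nds.non_dominated_sorting:

import numpy as np
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting

# column 0: negated interval length (larger intervals preferred),
# column 1: penalized objective value (smaller is better)
obj = np.array([[-0.50, 0.20],
                [-0.25, 0.10],
                [-0.10, 0.30],
                [-0.50, 0.40]])

I = NonDominatedSorting().do(obj, only_non_dominated_front=True)
print(I)  # indices of the potentially optimal intervals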
Example #5
def calc_pareto_front(problem, ref_dirs):
    n_pareto_points = 200
    np.random.seed(1)

    pf = problem.pareto_front(n_pareto_points=n_pareto_points, use_cache=False)
    # survival = ReferenceDirectionSurvival(ref_dirs)
    survival = RankAndCrowdingSurvival()

    for i in range(1000):
        _pf = problem.pareto_front(n_pareto_points=n_pareto_points,
                                   use_cache=False)
        F = np.row_stack([pf, _pf])

        pop = Population().new("F", F)
        pop = survival.do(problem, pop, n_pareto_points // 2)

        pf = pop.get("F")

    return pf
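calc_pareto_front repeatedly resamples the analytical front, stacks it onto the current approximation, and lets RankAndCrowdingSurvival keep a well-spread half, so the result is a diverse subset of n_pareto_points // 2 points. A hedged usage sketch, assuming get_problem from pymoo.factory; ref_dirs is only needed by the commented-out ReferenceDirectionSurvival, so None is passed:

from pymoo.factory import get_problem

problem = get_problem("zdt1")
pf = calc_pareto_front(problem, ref_dirs=None)
print(pf.shape)  # (100, 2) for this bi-objective problem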
Example #6
    def __init__(self,
                 display=MOCSDisplay(),
                 sampling=FloatRandomSampling(),
                 survival=RankAndCrowdingSurvival(),
                 eliminate_duplicates=DefaultDuplicateElimination(),
                 termination=None,
                 pop_size=100,
                 beta=1.5,
                 alfa=0.1,
                 pa=0.35,
                 **kwargs):
        """

        Parameters
        ----------
        display : {display}
        sampling : {sampling}
        survival : {survival}
        eliminate_duplicates : {eliminate_duplicates}
        termination : {termination}

        pop_size : The number of nests (solutions)

        beta : The input parameter of Mantegna's algorithm, used to simulate
            sampling from a Levy distribution

        alfa : The step-size scaling factor, usually O(L/100) where L is the
            scale of the problem

        pa : The switching probability; a fraction pa of the nests is
            abandoned in every iteration
        """

        super().__init__(display=display,
                         sampling=sampling,
                         survival=survival,
                         eliminate_duplicates=eliminate_duplicates,
                         termination=termination,
                         pop_size=pop_size,
                         beta=beta,
                         alfa=alfa,
                         pa=pa,
                         **kwargs)
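This constructor wires RankAndCrowdingSurvival in as the default survival operator of a multi-objective cuckoo search and forwards the Levy-flight parameters (beta, alfa, pa) to the parent class. A hedged usage sketch follows; the class name MOCS and its import path are assumptions made purely for illustration, while minimize and get_problem are the usual pymoo entry points:

from pymoo.factory import get_problem
from pymoo.optimize import minimize

# hypothetical import -- point this at wherever the class defining the
# constructor above actually lives
from pymoo.algorithms.mocs import MOCS

algorithm = MOCS(pop_size=100, beta=1.5, alfa=0.1, pa=0.35)
res = minimize(get_problem("zdt1"), algorithm, ("n_gen", 200), seed=1, verbose=False)
print(res.F.shape)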