Example #1
    def _decide(self):

        # get the beginning and the end of the window
        current = self.history[0][0]
        last = self.history[-1][0]

        if self.xl is not None and self.xu is not None:
            current = normalize(current, x_min=self.xl, x_max=self.xu)
            last = normalize(last, x_min=self.xl, x_max=self.xu)

        # analyze the change in X space, always measured between the closest pairs of solutions
        I = vectorized_cdist(current, last).argmin(axis=1)
        avg_dist = np.sqrt((current - last[I])**2).mean()

        # whether the change was less than x space tolerance
        x_tol = avg_dist < self.x_tol

        # now check the F space
        current = self.history[0][1].min()
        last = self.history[-1][1].min()

        # the absolute difference of current to last f
        f_tol_abs = last - current < self.f_tol_abs

        # now the relative tolerance which is usually more important
        f_tol = last / self.F_min - current / self.F_min < self.f_tol

        return not (x_tol or f_tol_abs or f_tol)
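
The x-space part of this check pairs every solution of the oldest window entry with its closest counterpart in the newest entry and averages the movement. Below is a minimal standalone sketch of just that step; the `pairwise_dist` helper is a local stand-in for `vectorized_cdist`, and the window data is made up.

import numpy as np

def pairwise_dist(A, B):
    # local stand-in for vectorized_cdist: all pairwise Euclidean distances
    return np.sqrt(((A[:, None, :] - B[None, :, :]) ** 2).sum(axis=2))

# oldest and newest population of the sliding window (toy data, already normalized)
current = np.random.random((20, 3))
last = current + 1e-4 * np.random.random((20, 3))

# match each solution with its closest counterpart in the other population
I = pairwise_dist(current, last).argmin(axis=1)

# average absolute per-variable change, exactly as in the example above
avg_dist = np.sqrt((current - last[I]) ** 2).mean()
print(avg_dist, avg_dist < 1e-3)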
Example #2
    def _do_continue(self, algorithm):
        pop, problem = algorithm.pop, algorithm.problem

        X, F = pop.get("X", "F")

        ftol = np.abs(F[1:] - F[0]).max() <= self.ftol

        # if the problem has bounds we can normalize the x space to be more accurate
        if problem.has_bounds():
            xtol = np.abs((X[1:] - X[0]) / (problem.xu - problem.xl)).max() <= self.xtol
        else:
            xtol = np.abs(X[1:] - X[0]).max() <= self.xtol

        # degenerate simplex check - get all edges and the minimum and maximum length
        D = vectorized_cdist(X, X)
        val = D[np.triu_indices(len(pop), 1)]
        min_e, max_e = val.min(), val.max()

        # degenerate if the maximum edge length is tiny or the min/max edge ratio collapses
        is_degenerated = max_e < 1e-16 or min_e / max_e < 1e-16

        max_iter = algorithm.n_gen > self.n_max_iter
        max_evals = algorithm.evaluator.n_eval > self.n_max_evals

        return not (ftol or xtol or max_iter or max_evals or is_degenerated)
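
The degenerate-simplex test can be illustrated in isolation: take the pairwise distances between the simplex vertices, keep each edge once via the strict upper triangle, and flag the simplex when the shortest edge is vanishingly small relative to the longest. A small sketch, again with a local stand-in for `vectorized_cdist` and toy vertices:

import numpy as np

def pairwise_dist(A, B):
    # local stand-in for vectorized_cdist
    return np.sqrt(((A[:, None, :] - B[None, :, :]) ** 2).sum(axis=2))

# a 2-D simplex where two vertices almost coincide (toy data)
X = np.array([[0.0, 0.0],
              [1.0, 0.0],
              [1e-18, 1e-18]])

D = pairwise_dist(X, X)
edges = D[np.triu_indices(len(X), 1)]      # each edge length exactly once
min_e, max_e = edges.min(), edges.max()

is_degenerated = max_e < 1e-16 or min_e / max_e < 1e-16
print(edges, is_degenerated)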
Example #3
def closest_point_variance(z):
    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.row_stack([z, row])

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, 1)

    return D.min(axis=1).var()
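
This metric rewards uniformly spread point sets: the variance of the nearest-neighbor distances is zero for a perfectly regular set and grows when points cluster. A self-contained sketch comparing a regular grid with random points; `pairwise_dist` is a local stand-in for `vectorized_cdist`, and the function mirrors the example above.

import numpy as np

def pairwise_dist(A, B):
    # local stand-in for vectorized_cdist
    return np.sqrt(((A[:, None, :] - B[None, :, :]) ** 2).sum(axis=2))

def closest_point_variance(z):
    # as in the example above: append the unit vectors if they are missing
    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.vstack([z, row])
    D = pairwise_dist(z, z)
    np.fill_diagonal(D, 1)
    return D.min(axis=1).var()

# a regular grid has zero nearest-neighbor variance, random points do not
grid = np.stack(np.meshgrid(np.linspace(0, 1, 5), np.linspace(0, 1, 5)), axis=-1).reshape(-1, 2)
rand = np.random.random((25, 2))
print(closest_point_variance(grid), closest_point_variance(rand))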
Example #4
    def _calc(self, F):

        if self.normalize:
            def dist(A, B):
                return np.sqrt(np.sum(np.square((A - B) / self.N), axis=1))

            D = vectorized_cdist(self.pareto_front, F, dist)

        else:
            D = cdist(self.pareto_front, F)

        return np.mean(np.min(D, axis=1))
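
The returned value is the average distance from each reference (Pareto-front) point to its closest solution in F, i.e. an IGD-style indicator. A minimal sketch of the unnormalized branch with made-up data:

import numpy as np
from scipy.spatial.distance import cdist

# toy reference front and approximation set
pareto_front = np.array([[0.0, 1.0], [0.5, 0.5], [1.0, 0.0]])
F = np.array([[0.1, 0.9], [0.6, 0.6], [0.9, 0.2]])

# distance of every reference point to its closest solution, then averaged
D = cdist(pareto_front, F)
print(np.mean(np.min(D, axis=1)))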
Example #5
    def _do(self, F):

        # a factor to normalize the distances by (1.0 disables that by default)
        norm = 1.0

        # if zero_to_one is disabled this can be used to normalize the distance calculation itself
        if self.norm_by_dist:
            assert self.ideal is not None and self.nadir is not None, "If norm_by_dist is enabled ideal and nadir must be set!"
            norm = self.nadir - self.ideal

        D = vectorized_cdist(self.pf, F, func_dist=self.dist_func, norm=norm)
        return np.mean(np.min(D, axis=self.axis))
Example #6
def predict_by_nearest_neighbors(X, F, X_pred, n_nearest, problem):
    D = vectorized_cdist(X_pred, X, func_dist=norm_euclidean_distance(problem))
    nearest = np.argsort(D, axis=1)[:, :n_nearest]

    I = np.arange(len(D))[None, :].repeat(n_nearest, axis=0).T
    dist_to_nearest = D[I, nearest]

    w = dist_to_nearest / dist_to_nearest.sum(axis=1)[:, None]

    F_pred = (F[:, 0][nearest] * w).sum(axis=1)
    F_uncert = dist_to_nearest.mean(axis=1)

    return F_pred, F_uncert
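
A self-contained sketch of the same idea on a toy 1-D problem. Plain Euclidean distances replace `norm_euclidean_distance(problem)`, the weighting follows the example as written (weights proportional to the neighbor distances), and all data is made up.

import numpy as np

# toy training data from a known function and a few points to predict
X = np.random.random((50, 1))
F = np.sin(6 * X[:, 0])
X_pred = np.random.random((10, 1))
n_nearest = 5

# pairwise Euclidean distances between prediction and training points
D = np.sqrt(((X_pred[:, None, :] - X[None, :, :]) ** 2).sum(axis=2))
nearest = np.argsort(D, axis=1)[:, :n_nearest]

rows = np.arange(len(X_pred))[:, None]
dist_to_nearest = D[rows, nearest]

# weights proportional to the distance to each neighbor, as in the example above
w = dist_to_nearest / dist_to_nearest.sum(axis=1)[:, None]

F_pred = (F[nearest] * w).sum(axis=1)
F_uncert = dist_to_nearest.mean(axis=1)
print(F_pred, F_uncert)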
Example #7
def closest_point_variance_mod(z):
    n_points, n_dim = z.shape

    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.row_stack([z, row])

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, np.inf)

    k = int(np.ceil(np.sqrt(n_dim)))
    I = D.argsort(axis=1)[:, k - 1]

    return D[np.arange(n_points), I].var()
Example #8
    def __init__(self, ref_dirs, alpha=2.0) -> None:
        super().__init__(filter_infeasible=True)
        n_dim = ref_dirs.shape[1]

        self.alpha = alpha
        self.niches = None
        self.V, self.gamma = None, None
        self.ideal, self.nadir = np.full(n_dim, np.inf), None

        self.ref_dirs = ref_dirs
        self.extreme_ref_dirs = np.where(np.any(vectorized_cdist(self.ref_dirs, np.eye(n_dim)) == 0, axis=1))[0]

        self.V = calc_V(self.ref_dirs)
        self.gamma = calc_gamma(self.V)
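
The `extreme_ref_dirs` line marks reference directions that coincide exactly with a coordinate axis, i.e. whose distance to some row of the identity matrix is zero. A small sketch with a local stand-in for `vectorized_cdist` and toy directions:

import numpy as np

def pairwise_dist(A, B):
    # local stand-in for vectorized_cdist
    return np.sqrt(((A[:, None, :] - B[None, :, :]) ** 2).sum(axis=2))

# toy reference directions in 3-D; the first and third lie on a unit axis
ref_dirs = np.array([[1.0, 0.0, 0.0],
                     [0.5, 0.5, 0.0],
                     [0.0, 0.0, 1.0],
                     [1/3, 1/3, 1/3]])

n_dim = ref_dirs.shape[1]
extreme = np.where(np.any(pairwise_dist(ref_dirs, np.eye(n_dim)) == 0, axis=1))[0]
print(extreme)  # -> [0 2]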
Example #9
    def _calc(self, F):

        if self.normalize:

            def dist(A, B):
                return np.sqrt(np.sum(np.square((A - B) / self.N), axis=1))

            D = vectorized_cdist(self.pareto_front, F, dist)
            #_D = cdist(self.pareto_front, F, metric=lambda u, v: np.sqrt((((u - v)/self.N) ** 2).sum()))
        else:
            D = cdist(self.pareto_front, F)

        #np.all(np.abs(cdist(self.pareto_front, F) - D) < 1e-4)

        return np.mean(np.min(D, axis=1))
Example #10
def mean_mean(z):
    for row in np.eye(z.shape[1]):
        if not np.any(np.all(row == z, axis=1)):
            z = np.row_stack([z, row])
    n_points, n_dim = z.shape

    D = vectorized_cdist(z, z)
    np.fill_diagonal(D, np.inf)

    k = n_dim - 1
    I = D.argsort(axis=1)[:, :k]

    first = np.column_stack([np.arange(n_points) for _ in range(k)])

    val = np.mean(D[first, I], axis=1)

    return val.mean()
Example #11
def subset_max_energy(X, n_survive):
    A = X

    def energy_if_replaced(A, B, i, j):
        _A = np.copy(A)
        _A[i] = B[j]
        return calc_potential_energy(_A)

    P = np.random.permutation(len(X))

    C = X[P[n_survive:]]
    X = X[P[:n_survive]]

    D_cand = np.sqrt(squared_dist(C, X))
    closest_to = D_cand.argmin(axis=1)

    has_improved = True
    energy = calc_potential_energy(X)

    while has_improved:

        has_improved = False

        for j in np.random.permutation(n_survive):
            I = np.where(closest_to == j)[0]

            if len(I) == 0:
                continue

            vals = np.array([energy_if_replaced(X, C, j, i) for i in I])

            _energy, i = vals.min(), I[vals.argmin()]

            if _energy < energy:
                point = X[j]
                X[j] = C[i]
                C[i] = point

                D_cand = np.sqrt(squared_dist(C, X))
                closest_to = D_cand.argmin(axis=1)

                energy = _energy
                has_improved = True

    return vectorized_cdist(X, A).argmin(axis=1)
Example #12
    def _do_continue(self, algorithm):
        do_continue = self.default.do_continue(algorithm)

        # if the default says do not continue just follow that
        if not do_continue:
            return False

        # additionally check for degenerated simplex
        else:
            X = algorithm.pop.get("X")

            # degenerate simplex check - get all edges and the minimum and maximum length
            D = vectorized_cdist(X, X)
            val = D[np.triu_indices(len(X), 1)]
            min_e, max_e = val.min(), val.max()

            # degenerate if the maximum edge length is tiny or the min/max edge ratio collapses
            is_degenerated = max_e < 1e-16 or min_e / max_e < 1e-16

            return not is_degenerated
Example #13
    def _do(self, problem, pop, n_survive, out=None, algorithm=None, **kwargs):
        X, F = pop.get("X", "F")
        if F.shape[1] != 1:
            raise ValueError("FitnessSurvival can only used for single objective single!")

        # the final indices of surviving individuals
        survivors = []

        # calculate the normalized distance
        D = vectorized_cdist(X, X)
        # np.fill_diagonal(D, np.inf)
        norm = np.linalg.norm(problem.xu - problem.xl)
        D /= norm

        # find the best solution in the population
        S = np.argmin(F[:, 0])

        # create the data structure to work with in order to flag survivors
        survivors = []
        remaining = [k for k in range(len(pop)) if k != S]

        while len(survivors) < n_survive:

            # the extreme point for decision making
            farthest = D[S, :].argmax()

            # sort by distance to best
            delta_x = D[S, :] / D[S, farthest]
            delta_f = (F[:, 0] - F[S, 0]) / (F[farthest, 0] - F[S, 0])
            f = np.column_stack([-delta_x, delta_f])
            z = np.array([-1, 0])
            p = 2

            val = ((f - z) ** p).sum(axis=1) ** (1 / p)
            survivors = val.argsort()[:n_survive]
            pop[survivors].set("v", val[survivors])



            plt.figure(figsize=(5, 5))
            plt.scatter(X, F, color="black", alpha=0.8, s=20, label='pop')
            plt.scatter(X[survivors], F[survivors], color="red", label="survivors")
            v = np.round(pop[survivors].get("v"), 3)

            for i in range(len(survivors)):
                x = X[survivors][i]
                y = F[survivors][i]
                plt.text(x, y, v[i], fontsize=9)



            plt.scatter(X[farthest], F[farthest], color="green", label="farthest")

            _curve = curve(problem)
            plt.plot(_curve[:, 0], _curve[:, 1], color="black")
            plt.xlabel("X")
            plt.ylabel("F")
            plt.legend()
            plt.show()

            return pop[survivors]
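            # NOTE: the function returns above; everything below is unreachable leftover experimental code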

            survivors.append(remaining[val.argmin()])
            remaining = [k for k in range(len(pop)) if k != S]

            plt.scatter(X, F)
            plt.scatter(X[survivors], F[survivors], color="red", marker='x')

            _curve = curve(problem)
            plt.plot(_curve[:, 0], _curve[:, 1], color="black")
            plt.xlabel("X")
            plt.ylabel("F")
            plt.show()

        return pop[fronts[0]]

        plt.scatter(delta_x, delta_f)
        plt.scatter(delta_x[nds], delta_f[nds], color="red")
        plt.xlabel("D")
        plt.ylabel("F")
        plt.show()

        pop[S].set("rank", 0)

        # initialize utility data structures
        survivors = [S]
        remaining = [k for k in range(len(pop)) if k != S]

        n_neighbors = 10
        cnt = 1

        while len(survivors) < n_survive:

            closest = D[survivors, :][:, remaining].argmin(axis=0)

            delta_f = F[remaining, 0] - F[np.argmin(F[:, 0]), 0]
            delta_x = D[closest, remaining]
            fitness = delta_f / delta_x

            S = remaining[np.argmin(fitness)]

            if algorithm.n_gen == 20:
                sc = Scatter(title=algorithm.n_gen)
                sc.add(curve(problem), plot_type="line", color="black")
                sc.add(np.column_stack([pop.get("X"), F[:, 0]]), color="purple")
                sc.add(np.column_stack([pop[survivors].get("X"), pop[survivors].get("F")]), color="red", s=40,
                       marker="x")
                sc.do()
                plt.ylim(0, 2)
                plt.show()
                plt.close()

            # update the survivors and remaining individuals
            individual = pop[S]
            neighbors = pop[D[S].argsort()[:n_neighbors]]

            # if individual has had neighbors before update them
            N = individual.get("neighbors")
            if N is not None:
                neighbors = Population.merge(neighbors, N)
                neighbors = neighbors[neighbors.get("F")[:, 0].argsort()[:n_neighbors]]

            individual.set("neighbors", neighbors)
            individual.set("rank", cnt)

            survivors.append(S)
            remaining = [k for k in remaining if k != S]

            cnt += 1

        return pop[survivors]
Example #14
    def _next(self):

        # all places visited so far
        _X, _F, _evaluated_by_algorithm = self.evaluator.history.get("X", "F", "algorithm")

        # collect attributes from each algorithm and determine whether it has to be replaced or not
        pop, F, n_evals = [], [], []
        for k, algorithm in enumerate(self.algorithms):

            # collect some data from the current algorithms
            _pop = algorithm.pop

            # whether the algorithm has terminated or not
            has_finished = algorithm.termination.has_terminated(algorithm)

            # whether the area was already explored before
            closest_dist_to_others = vectorized_cdist(_pop.get("X"), _X[_evaluated_by_algorithm != algorithm],
                                                      func_dist=norm_euclidean_distance(self.problem))
            too_close_to_others = (closest_dist_to_others.min(axis=1) < 1e-3).all()

            # whether the algorithm is the current best - if yes it will not be replaced
            current_best = self.evaluator.opt.get("F") == _pop.get("F").min()

            # algorithm not really useful anymore
            if not current_best and (has_finished or too_close_to_others):
                # find a suitable x0 which is far from the others or has promising predicted values
                self.sampling.criterion = lambda X: vectorized_cdist(X, _X).min()
                X = self.sampling.do(self.problem, self.n_initial_samples).get("X")

                # distance in x space to other existing points
                x_dist = vectorized_cdist(X, _X, func_dist=norm_euclidean_distance(self.problem)).min(axis=1)
                f_pred, f_uncert = predict_by_nearest_neighbors(_X, _F, X, 5, self.problem)
                fronts = NonDominatedSorting().do(np.column_stack([- x_dist, f_pred, f_uncert]))
                I = np.random.choice(fronts[0])

                # I = vectorized_cdist(X, _X, func_dist=norm_euclidean_distance(self.problem)).min(axis=1).argmax()

                # choose the one with the largest distance to current solutions
                x0 = X[[I]]

                # replace the current algorithm
                algorithm = get_algorithm("nelder-mead",
                                          problem=self.problem,
                                          x0=x0,
                                          termination=NelderAndMeadTermination(x_tol=1e-3, f_tol=1e-3),
                                          evaluator=self.evaluator,
                                          )
                algorithm.initialize()
                self.algorithms[k] = algorithm

            pop.append(algorithm.pop)
            F.append(algorithm.pop.get("F"))
            n_evals.append(self.evaluator.algorithms[algorithm])

        # get the values of all algorithms as arrays
        F, n_evals = np.array(F), np.array(n_evals)
        rewards = 1 - normalize(F.min(axis=1))[:, 0]
        n_evals_total = self.evaluator.n_eval - self.evaluator.algorithms[self]

        # calculate the upper confidence bound
        ucb = rewards + 0.95 * np.sqrt(np.log(n_evals_total) / n_evals)

        I = ucb.argmax()
        self.algorithms[I].next()

        # create the population object with all algorithms
        self.pop = Population.create(*pop)

        # update the current optimum
        self.opt = self.evaluator.opt
Example #15
def average_distance_to_other_points(ref_dirs):
    D = vectorized_cdist(ref_dirs, ref_dirs)
    D = D[np.triu_indices(len(ref_dirs), 1)]
    return D.mean()
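
`np.triu_indices(n, 1)` keeps each unordered pair of reference directions exactly once, so the mean is taken over unique pairs and ignores the zero diagonal. A short sketch with a local stand-in for `vectorized_cdist` and toy directions:

import numpy as np

def pairwise_dist(A, B):
    # local stand-in for vectorized_cdist
    return np.sqrt(((A[:, None, :] - B[None, :, :]) ** 2).sum(axis=2))

# three reference directions on the 2-D unit simplex (toy data)
ref_dirs = np.array([[1.0, 0.0], [0.5, 0.5], [0.0, 1.0]])

D = pairwise_dist(ref_dirs, ref_dirs)
# strict upper triangle: each unordered pair once, diagonal excluded
pairs = D[np.triu_indices(len(ref_dirs), 1)]
print(pairs.mean())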
Example #16
    def _do(self, problem, pop, n_survive, out=None, algorithm=None, **kwargs):
        X, F = pop.get("X", "F")
        if F.shape[1] != 1:
            raise ValueError(
                "FitnessSurvival can only be used for single-objective problems!")

        # calculate the normalized distance
        D = vectorized_cdist(X, X)
        # np.fill_diagonal(D, np.inf)
        norm = np.linalg.norm(problem.xu - problem.xl)
        D /= norm

        # find the best solution in the population
        S = np.argmin(F[:, 0])

        # create the data structure to work with in order to flag survivors
        survivors = [S]
        remaining = [k for k in range(len(pop)) if k != S]

        # assign all solutions to the minimum first
        assigned_to = np.full(len(pop), S)
        dist = D[S, :]

        # never select more than should actually survive
        while len(survivors) < n_survive:

            rem = np.array(remaining)
            vals = np.full(len(pop), np.inf)

            for S in survivors:
                I = rem[assigned_to[rem] == S]
                if len(I) > 0:
                    vals[I] = calc_metric(dist[I], F[I], p=2)

            select = vals.argmin()

            reassign = np.logical_and(D[select] < dist,
                                      F[:, 0] >= F[select, 0])
            assigned_to[reassign] = select

            survivors.append(select)
            remaining = [k for k in remaining if k != select]

            plt.scatter(X, F)
            plt.scatter(X[survivors], F[survivors], color="red", marker='x')

            _curve = curve(problem)
            plt.plot(_curve[:, 0], _curve[:, 1], color="black")
            plt.xlabel("X")
            plt.ylabel("F")
            plt.show()

        print(survivors)

        # set the neighborhood for the local search for each survivor
        # for k in survivors:
        #
        #     individual = pop[k]
        #     # if individual has had neighbors before update them
        #     N = individual.get("neighbors")
        #     if N is not None:
        #         neighbors = Population.merge(neighbors, N)
        #         neighbors = neighbors[neighbors.get("F")[:, 0].argsort()[:10]]
        #
        #     individual.set("neighbors", neighbors)

        return pop[survivors]
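        # NOTE: the function returns above; the remaining code is unreachable leftover experimental code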

        # do the non-dominated sorting
        val = np.column_stack([-D[S, :], F[:, 0]])
        fronts = NonDominatedSorting().do(val)

        # for each of the fronts regarding the dummy objectives
        for k, front in enumerate(fronts):

            if len(survivors) + len(front) <= n_survive:
                survivors.extend(front)
            # if we have found the splitting front
            else:
                S = F[front, 0].argmin()
                survivors.append(front[S])

                # the extreme point for decision making
                _D = D[front, :][:, front]
                farthest = _D[S].argmax()

                # sort by distance to best
                delta_x = _D[S, :] / _D[S, farthest]
                delta_f = (F[front, 0] - F[S, 0]) / (F[front[farthest], 0] -
                                                     F[S, 0])
                f = np.column_stack([-delta_x, delta_f])
                z = np.array([-1, 0])
                p = 2

                val = ((f - z)**p).sum(axis=1)**(1 / p)
                I = val.argsort()[:n_survive]
                pop[front[I]].set("v", val[I])

                survivors.extend(front[I])

        plt.scatter(X, F)
        plt.scatter(X[survivors], F[survivors], color="red", marker='x')

        _curve = curve(problem)
        plt.plot(_curve[:, 0], _curve[:, 1], color="black")
        plt.xlabel("X")
        plt.ylabel("F")
        plt.show()

        return pop[fronts[0]]

        X, F = pop.get("X", "F")
        if F.shape[1] != 1:
            raise ValueError(
                "FitnessSurvival can only be used for single-objective problems!")

        # the final indices of surviving individuals
        survivors = []

        # calculate the normalized distance
        D = vectorized_cdist(X, X)
        # np.fill_diagonal(D, np.inf)
        norm = np.linalg.norm(problem.xu - problem.xl)
        D /= norm

        # find the best solution in the population
        S = np.argmin(F[:, 0])

        # create the data structure to work with in order to flag survivors
        survivors = []
        remaining = [k for k in range(len(pop)) if k != S]

        while len(survivors) < n_survive:

            plt.figure(figsize=(5, 5))
            plt.scatter(X, F, color="black", alpha=0.8, s=20, label='pop')
            plt.scatter(X[survivors],
                        F[survivors],
                        color="red",
                        label="survivors")
            v = np.round(pop[survivors].get("v"), 3)

            for i in range(len(survivors)):
                x = X[survivors][i]
                y = F[survivors][i]
                plt.text(x, y, v[i], fontsize=9)

            plt.scatter(X[farthest],
                        F[farthest],
                        color="green",
                        label="survivors")

            _curve = curve(problem)
            plt.plot(_curve[:, 0], _curve[:, 1], color="black")
            plt.xlabel("X")
            plt.ylabel("F")
            plt.legend()
            plt.show()

            return pop[survivors]

            survivors.append(remaining[val.argmin()])
            remaining = [k for k in range(len(pop)) if k != S]

            plt.scatter(X, F)
            plt.scatter(X[survivors], F[survivors], color="red", marker='x')

            _curve = curve(problem)
            plt.plot(_curve[:, 0], _curve[:, 1], color="black")
            plt.xlabel("X")
            plt.ylabel("F")
            plt.show()

        return pop[fronts[0]]

        plt.scatter(delta_x, delta_f)
        plt.scatter(delta_x[nds], delta_f[nds], color="red")
        plt.xlabel("D")
        plt.ylabel("F")
        plt.show()

        pop[S].set("rank", 0)

        # initialize utility data structures
        survivors = [S]
        remaining = [k for k in range(len(pop)) if k != S]

        n_neighbors = 10
        cnt = 1

        while len(survivors) < n_survive:

            closest = D[survivors, :][:, remaining].argmin(axis=0)

            delta_f = F[remaining, 0] - F[np.argmin(F[:, 0]), 0]
            delta_x = D[closest, remaining]
            fitness = delta_f / delta_x

            S = remaining[np.argmin(fitness)]

            if algorithm.n_gen == 20:
                sc = Scatter(title=algorithm.n_gen)
                sc.add(curve(problem), plot_type="line", color="black")
                sc.add(np.column_stack([pop.get("X"), F[:, 0]]),
                       color="purple")
                sc.add(np.column_stack(
                    [pop[survivors].get("X"), pop[survivors].get("F")]),
                       color="red",
                       s=40,
                       marker="x")
                sc.do()
                plt.ylim(0, 2)
                plt.show()
                plt.close()

            # update the survivors and remaining individuals
            individual = pop[S]
            neighbors = pop[D[S].argsort()[:n_neighbors]]

            # if individual has had neighbors before update them
            N = individual.get("neighbors")
            if N is not None:
                neighbors = Population.merge(neighbors, N)
                neighbors = neighbors[neighbors.get("F")[:, 0].argsort()
                                      [:n_neighbors]]

            individual.set("neighbors", neighbors)
            individual.set("rank", cnt)

            survivors.append(S)
            remaining = [k for k in remaining if k != S]

            cnt += 1

        return pop[survivors]
Example #17
    def _calc(self, F):
        D = vectorized_cdist(self.pf, F, func_dist=self.dist_func, norm=self.range)
        return np.mean(np.min(D, axis=self.axis))
Example #18
    def _do(self, problem, pop, n_survive, out=None, algorithm=None, **kwargs):
        X, F = pop.get("X", "F")
        if F.shape[1] != 1:
            raise ValueError("FitnessSurvival can only used for single objective single!")


        # calculate the normalized distance
        D = vectorized_cdist(X, X)
        np.fill_diagonal(D, np.inf)
        norm = np.linalg.norm(problem.xu - problem.xl)
        D /= norm

        # find the best solution in the population
        S = np.argmin(F[:, 0])
        pop[S].set("rank", 0)

        # initialize utility data structures
        survivors = [S]
        remaining = [k for k in range(len(pop)) if k != S]

        n_neighbors = 10
        cnt = 1

        while len(survivors) < n_survive:

            closest = D[survivors, :][:, remaining].argmin(axis=0)

            delta_f = F[remaining, 0] - F[np.argmin(F[:, 0]), 0]
            delta_x = D[closest, remaining]
            fitness = delta_f / delta_x

            S = remaining[np.argmin(fitness)]

            if algorithm.n_gen == 20:

                sc = Scatter(title=algorithm.n_gen)
                sc.add(curve(problem), plot_type="line", color="black")
                sc.add(np.column_stack([pop.get("X"), F[:, 0]]), color="purple")
                sc.add(np.column_stack([pop[survivors].get("X"), pop[survivors].get("F")]), color="red", s=40, marker="x")
                sc.do()
                plt.ylim(0, 2)
                plt.show()
                plt.close()

            # update the survivors and remaining individuals
            individual = pop[S]
            neighbors = pop[D[S].argsort()[:n_neighbors]]

            # if individual has had neighbors before update them
            N = individual.get("neighbors")
            if N is not None:
                neighbors = Population.merge(neighbors, N)
                neighbors = neighbors[neighbors.get("F")[:, 0].argsort()[:n_neighbors]]

            individual.set("neighbors", neighbors)
            individual.set("rank", cnt)

            survivors.append(S)
            remaining = [k for k in remaining if k != S]

            cnt += 1

        return pop[survivors]