Example #1
    def _potential_optimal(self):
        pop = self.pop

        if len(pop) == 1:
            return pop

        # get the intervals of each individual
        _F, _CV, xl, xu = pop.get("F", "CV", "xl", "xu")
        nF = normalize(_F)
        F = nF + self.penalty * _CV

        # get the length of the interval of each solution
        nxl, nxu = norm_bounds(pop, self.problem)
        length = (nxu - nxl) / 2
        val = length.mean(axis=1)

        # (a) non-dominated set with respect to interval
        obj = np.column_stack([-val, F])

        # an unlimited archive size can cause issues - thus truncate if necessary
        if len(pop) > self.n_max_archive:
            # find the rank of each individual
            _, rank = NonDominatedSorting().do(obj, return_rank=True)

            # calculate the number of solutions after truncation and filter the best ones out
            n_truncated = int(self.archive_reduct * self.n_max_archive)
            I = np.argsort(rank)[:n_truncated]

            # also update all the utility variables defined so far to match the truncation
            pop, F, nxl, nxu, length, val, obj = pop[I], F[I], nxl[I], nxu[I], length[I], val[I], obj[I]
            self.pop = pop

        I = NonDominatedSorting().do(obj, only_non_dominated_front=True)
        candidates, F, xl, xu, val = pop[I], F[I], xl[I], xu[I], val[I]

        # import matplotlib.pyplot as plt
        # plt.scatter(obj[:, 0], obj[:, 1])
        # plt.scatter(obj[I, 0], obj[I, 1], color="red")
        # plt.show()

        # expanding all candidates in every iteration can cause issues - if necessary, truncate using rank and crowding distance
        if len(candidates) == 1:
            return candidates
        else:
            if len(candidates) > self.n_max_candidates:
                candidates = RankAndCrowdingSurvival().do(self.problem, pop, n_survive=self.n_max_candidates)

            return candidates
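Note: `NonDominatedSorting().do(...)` is used throughout these examples in three modes. A minimal standalone sketch (assuming a recent pymoo, where the class lives in `pymoo.util.nds.non_dominated_sorting`; older releases expose it under a slightly different module path):

import numpy as np
from pymoo.util.nds.non_dominated_sorting import NonDominatedSorting

# five points of a 2-objective minimization problem
F = np.array([[1.0, 4.0], [2.0, 2.0], [4.0, 1.0], [3.0, 3.0], [4.0, 4.0]])
nds = NonDominatedSorting()

# default: a list of index arrays, one per front (best front first)
fronts = nds.do(F)                            # [array([0, 1, 2]), array([3]), array([4])]

# only the indices of the first (non-dominated) front
I = nds.do(F, only_non_dominated_front=True)  # array([0, 1, 2])

# fronts plus a per-individual rank (0 = non-dominated)
fronts, rank = nds.do(F, return_rank=True)    # rank == [0, 0, 0, 1, 2]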
Example #2
def filter_optimum(pop, least_infeasible=False):
    # first only choose feasible solutions
    ret = pop[pop.get("feasible")[:, 0]]

    # if at least one feasible solution was found
    if len(ret) > 0:

        # then check the objective values
        F = ret.get("F")

        if F.shape[1] > 1:
            I = NonDominatedSorting().do(F, only_non_dominated_front=True)
            ret = ret[I]

        else:
            ret = ret[np.argmin(F)]

    # no feasible solution was found
    else:
        # if the flag is enabled, report the least infeasible solution
        if least_infeasible:
            ret = pop[np.argmin(pop.get("CV"))]
        # otherwise just return None
        else:
            ret = None

    if isinstance(ret, Individual):
        ret = Population().create(ret)

    return ret
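`filter_optimum` keeps the whole non-dominated front in the multi-objective case. For intuition, the front is equivalent to the following pure-NumPy dominance filter (a hedged O(n²) sketch for minimization; not the algorithm pymoo actually uses, and `pareto_mask` is a hypothetical helper name):

import numpy as np

def pareto_mask(F):
    # True for rows of F that no other row dominates (minimization)
    mask = np.ones(len(F), dtype=bool)
    for i in range(len(F)):
        # j dominates i if it is <= everywhere and < somewhere
        dominated = np.all(F <= F[i], axis=1) & np.any(F < F[i], axis=1)
        if dominated.any():
            mask[i] = False
    return mask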
Example #3
    def _updateDA(self, pop, Hd, n_survive):
        """Update the Diversity archive (DA)"""
        niche_Hd, FV = self._associate(Hd)
        niche_CA, _ = self._associate(pop)

        itr = 1
        S = []
        while len(S) < n_survive:
            for i in range(n_survive):
                current_ca, = np.where(niche_CA == i)
                if len(current_ca) < itr:
                    for _ in range(itr - len(current_ca)):
                        current_da = np.where(niche_Hd == i)[0]
                        if current_da.size > 0:
                            F = Hd[current_da].get('F')
                            nd = NonDominatedSorting().do(
                                F,
                                only_non_dominated_front=True,
                                n_stop_if_ranked=0)
                            i_best = current_da[nd[np.argmin(
                                FV[current_da[nd]])]]
                            niche_Hd[i_best] = -1
                            if len(S) < n_survive:
                                S.append(i_best)
                        else:
                            break
                if len(S) == n_survive:
                    break
            itr += 1
        return Hd[S]
Example #4
    def _do(self, Hm, n_select, n_parents=2, **kwargs):
        algorithm = kwargs['algorithm']

        n_pop = len(Hm) // 2

        _, rank = NonDominatedSorting().do(Hm.get('F'), return_rank=True)

        Pc = (rank[:n_pop] == 0).sum() / len(Hm)
        Pd = (rank[n_pop:] == 0).sum() / len(Hm)
        PC = len(algorithm.opt) / n_pop

        # number of random individuals needed
        n_random = n_select * n_parents * self.pressure
        n_perms = math.ceil(n_random / n_pop)
        # get random permutations and reshape them
        P = random_permuations(n_perms, n_pop)[:n_random]
        P = np.reshape(P, (n_select * n_parents, self.pressure))
        if Pc <= Pd:
            # Choose from DA
            P[::n_parents, :] += n_pop
        pf = np.random.random(n_select)
        P[1::2, 1][pf >= PC] += n_pop

        # compare using tournament function
        S = self.f_comp(Hm, P, **kwargs)

        return np.reshape(S, (n_select, n_parents))
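`random_permuations` (the spelling is pymoo's own) concatenates several shuffles of `range(n_pop)` so that every individual appears equally often among the tournament slots before the reshape to `(n_select * n_parents, pressure)`. A minimal equivalent sketch:

import numpy as np

def random_permutations(n_perms, length):
    # concatenate n_perms independent permutations of range(length)
    return np.concatenate([np.random.permutation(length) for _ in range(n_perms)])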
Example #5
    def _updateDA(self, pop, Hd, n_survive):
        niche_Hd = self._association(Hd.get('F'))
        niche_CA = self._association(pop.get('F'))

        itr = 1
        S = []
        while len(S) < n_survive:
            for i in range(n_survive):
                current_ca, = np.where(niche_CA == i)
                if len(current_ca) < itr:
                    for _ in range(itr - len(current_ca)):
                        current_da = np.where(niche_Hd == i)[0]
                        if current_da.size > 0:
                            F = Hd[current_da].get('F')
                            nd = NonDominatedSorting().do(
                                F,
                                only_non_dominated_front=True,
                                n_stop_if_ranked=0)
                            FV = self._get_decomposition(F[nd])
                            i_best = current_da[nd[np.argmin(FV)]]
                            niche_Hd[i_best] = -1
                            if len(S) < n_survive:
                                S.append(i_best)
                        else:
                            break
                if len(S) == n_survive:
                    break
            itr += 1
        return Hd[S]
Example #6
    def _get_sampling(self, X, Y):
        '''
        Initialize population from data.

        Parameters
        ----------
        X: np.array
            Design variables.
        Y: np.array
            Performance values.

        Returns
        -------
        sampling: np.array or pymoo.model.sampling.Sampling
            Initial population or a sampling method for generating initial population.
        '''
        if self.pop_init_method == 'lhs':
            sampling = LatinHypercubeSampling()
        elif self.pop_init_method == 'nds':
            sorted_indices = NonDominatedSorting().do(Y)
            pop_size = self.algo_kwargs['pop_size']
            sampling = X[np.concatenate(sorted_indices)][:pop_size]
            # NOTE: use lhs if current samples are not enough
            if len(sampling) < pop_size:
                rest_sampling = lhs(X.shape[1], pop_size - len(sampling))
                sampling = np.vstack([sampling, rest_sampling])
        elif self.pop_init_method == 'random':
            sampling = FloatRandomSampling()
        else:
            raise NotImplementedError

        return sampling
Example #7
    def _get_sampling(self, X, Y, bound=None, mode=0):
        '''
        Initialize population from data
        '''
        if self.pop_init_method == 'lhs':
            sampling = LatinHypercubeSampling()
        elif self.pop_init_method == 'nds':
            sorted_indices = NonDominatedSorting().do(Y)
            pop_size = self.algo_kwargs['pop_size']
            sampling = X[np.concatenate(sorted_indices)][:pop_size]
            # NOTE: use lhs if current samples are not enough
            if len(sampling) < pop_size:
                rest_sampling = lhs(X.shape[1], pop_size - len(sampling))
                if bound is not None:
                    # scale the unit-cube samples to the given bounds and round
                    rest_sampling = np.round(bound[0] + rest_sampling * (bound[1] - bound[0]))
                sampling = np.vstack([sampling, rest_sampling])
        elif self.pop_init_method == 'random':
            sampling = FloatRandomSampling()
        else:
            raise NotImplementedError

        return sampling
Example #8
 def __init__(self, weights_dir: str, logger: Logger, targets: List[OptimizationTarget],
              mutations: List[AbstractPbtMutation], each_epochs: int, grace_epochs: int,
              save_ema: bool, elitist: bool):
     super().__init__()
     self._nds = NonDominatedSorting()
     self._data = {
         'saves': dict(),         # {key: PbtSave}
         'replacements': [],      # ReplacementPbtEvent
     }
     self.weights_dir = weights_dir
     self.logger = logger
     self.targets = targets
     self.mutations = mutations
     self.each_epochs = each_epochs
     self.grace_epochs = grace_epochs
     self.save_ema = save_ema
     self.elitist = elitist
Example #9
    def test_association(self):
        problem = C1DTLZ3(n_var=12, n_obj=3)
        ca_x = np.loadtxt(path_to_test_resources('ctaea', 'c1dtlz3', 'case3', 'preCA.x'))
        CA = Population.create(ca_x)
        self.evaluator.eval(problem, CA)

        da_x = np.loadtxt(path_to_test_resources('ctaea', 'c1dtlz3', 'case3', 'preDA.x'))
        DA = Population.create(da_x)
        self.evaluator.eval(problem, DA)

        off_x = np.loadtxt(path_to_test_resources('ctaea', 'c1dtlz3', 'case3', 'offspring.x'))
        off = Population.create(off_x)
        self.evaluator.eval(problem, off)

        true_assoc = np.loadtxt(path_to_test_resources('ctaea', 'c1dtlz3', 'case3', 'feasible_rank0.txt'))
        true_niche = true_assoc[:, 1]
        true_id = true_assoc[:, 0]
        sorted_id = np.argsort(true_id)

        survival = CADASurvival(self.ref_dirs)
        mixed = CA.merge(off)
        survival.ideal_point = np.min(np.vstack((DA.get("F"), mixed.get("F"))), axis=0)

        fronts = NonDominatedSorting().do(mixed.get("F"), n_stop_if_ranked=len(self.ref_dirs))
        I = np.concatenate(fronts)
        niche, _ = survival._associate(mixed[I])
        sorted_I = np.argsort(I)

        assert (niche[sorted_I] == true_niche[sorted_id]).all()
Example #10
    def _do(self, problem, pop, n_survive, algorithm=None, **kwargs):

        if isinstance(self.eliminate_duplicates, bool) and self.eliminate_duplicates:
            pop = DefaultDuplicateElimination(func=lambda p: p.get("F")).do(pop)

        elif isinstance(self.eliminate_duplicates, DuplicateElimination):
            _, no_candidates, candidates = DefaultDuplicateElimination(func=lambda pop: pop.get("F")).do(pop,
                                                                                                         return_indices=True)
            _, _, is_duplicate = self.eliminate_duplicates.do(pop[candidates], pop[no_candidates], return_indices=True,
                                                              to_itself=False)
            elim = set(np.array(candidates)[is_duplicate])
            pop = pop[[k for k in range(len(pop)) if k not in elim]]

        if problem.n_obj == 1:
            pop = FitnessSurvival().do(problem, pop, len(pop))
            elites = pop[:self.n_elites]
            non_elites = pop[self.n_elites:]
        else:
            I = NonDominatedSorting().do(pop.get("F"), only_non_dominated_front=True)
            elites = pop[I]
            non_elites = pop[[k for k in range(len(pop)) if k not in I]]

        elites.set("type", ["elite"] * len(elites))
        non_elites.set("type", ["non_elite"] * len(non_elites))

        return pop
Example #11
def filter_optimum(pop, least_infeasible=False):

    # first only choose feasible solutions
    ret = pop[pop.collect(lambda ind: ind.feasible)[:, 0]]

    # if at least one feasible solution was found
    if len(ret) > 0:

        # then check the objective values
        F = ret.get("F")

        if F.shape[1] > 1:
            I = NonDominatedSorting().do(ret.get("F"), only_non_dominated_front=True)
            ret = ret[I]

        else:
            ret = ret[np.argmin(F)]

    # no feasible solution was found
    else:
        # if the flag is enabled, report the least infeasible solution
        if least_infeasible:
            ret = pop[np.argmin(pop.get("CV"))]
        # otherwise just return None
        else:
            ret = None

    return ret
Example #12
    def _potential_optimal(self):
        pop = self.pop

        if len(pop) == 1:
            return pop

        # get the intervals of each individual
        _F, _CV, xl, xu = pop.get("F", "CV", "xl", "xu")
        nF = normalize(_F)
        F = nF + self.penalty * _CV

        # get the length of the interval of each solution
        nxl, nxu = norm_bounds(pop, self.problem)
        length = (nxu - nxl) / 2

        val = length.max(axis=1)

        # (a) non-dominated with respect to interval
        obj = np.column_stack([-val, F])
        I = NonDominatedSorting().do(obj, only_non_dominated_front=True)
        candidates, F, xl, xu, val = pop[I], F[I], xl[I], xu[I], val[I]

        # import matplotlib.pyplot as plt
        # plt.scatter(obj[:, 0], obj[:, 1])
        # plt.scatter(obj[I, 0], obj[I, 1], color="red")
        # plt.show()

        if len(candidates) == 1:
            return candidates
        else:
            if len(candidates) > self.n_max_candidates:
                candidates = RankAndCrowdingSurvival().do(
                    self.problem, pop, self.n_max_candidates)

            return candidates
Example #13
    def _do(self, problem, pop, n_survive, D=None, **kwargs):

        # get the objective space values and objects
        F = pop.get("F").astype(float, copy=False)

        # the final indices of surviving individuals
        survivors = []

        # do the non-dominated sorting until splitting front
        fronts = NonDominatedSorting().do(F, n_stop_if_ranked=n_survive)

        for k, front in enumerate(fronts):

            # calculate the crowding distance of the front
            crowding_of_front = calc_crowding_distance(F[front, :])

            # save rank and crowding in the individual class
            for j, i in enumerate(front):
                pop[i].set("rank", k)
                pop[i].set("crowding", crowding_of_front[j])

            # current front sorted by crowding distance if splitting
            if len(survivors) + len(front) > n_survive:
                I = randomized_argsort(crowding_of_front, order='descending', method='numpy')
                I = I[:(n_survive - len(survivors))]

            # otherwise take the whole front unsorted
            else:
                I = np.arange(len(front))

            # extend the survivors by all or selected individuals
            survivors.extend(front[I])

        return pop[survivors]
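`calc_crowding_distance` above is pymoo's helper. A minimal sketch of the standard NSGA-II crowding distance it implements (boundary points get infinity; interior points accumulate the normalized gap between their two neighbors along each objective; details such as duplicate handling differ in pymoo):

import numpy as np

def crowding_distance(F):
    n, m = F.shape
    d = np.zeros(n)
    for j in range(m):
        order = np.argsort(F[:, j])
        d[order[0]] = d[order[-1]] = np.inf      # boundary points are kept first
        span = F[order[-1], j] - F[order[0], j]
        if span > 0 and n > 2:
            # gap between each interior point's two neighbors, normalized by the span
            d[order[1:-1]] += (F[order[2:], j] - F[order[:-2], j]) / span
    return d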
Example #14
    def test_restricted_mating_selection(self):
        np.random.seed(200)
        selection = RestrictedMating(func_comp=comp_by_cv_dom_then_random)

        problem = C3DTLZ4(n_var=12, n_obj=3)
        ca_x = np.loadtxt(path_to_test_resources('ctaea', 'c3dtlz4', 'case2', 'preCA.x'))
        CA = Population.create(ca_x)
        self.evaluator.eval(problem, CA)

        da_x = np.loadtxt(path_to_test_resources('ctaea', 'c3dtlz4', 'case2', 'preDA.x'))
        DA = Population.create(da_x)
        self.evaluator.eval(problem, DA)

        Hm = CA.merge(DA)
        n_pop = len(CA)

        _, rank = NonDominatedSorting().do(Hm.get('F'), return_rank=True)

        Pc = (rank[:n_pop] == 0).sum()/len(Hm)
        Pd = (rank[n_pop:] == 0).sum()/len(Hm)

        P = selection.do(Hm, len(CA))

        assert P.shape == (91, 2)
        if Pc > Pd:
            assert (P[:, 0] < n_pop).all()
        else:
            assert (P[:, 0] >= n_pop).all()
        assert (P[:, 1] >= n_pop).any()
        assert (P[:, 1] < n_pop).any()
Example #15
    def _solve(self, X, Y, batch_size):

        # initialize population
        if len(X) < self.pop_size:
            X = np.vstack([X, lhs(X.shape[1], self.pop_size - len(X))])
        elif len(X) > self.pop_size:
            sorted_indices = np.concatenate(NonDominatedSorting().do(Y))
            X = X[sorted_indices[:self.pop_size]]
        self.algo.initialization.sampling = X

        res = minimize(self.problem, self.algo, ('n_gen', self.n_gen))

        X_candidate, Y_candidate, algo = res.pop.get('X'), res.pop.get('F'), res.algorithm
        G = Y_candidate

        _, curr_pset_idx = find_pareto_front(Y, return_index=True)
        curr_pset = X[curr_pset_idx]

        G_s = algo._decomposition.do(
            G, weights=self.ref_dirs,
            ideal_point=algo.ideal_point)  # scalarized acquisition value

        # build candidate pool Q
        Q_x, Q_dir, Q_g, Q_gs = [], [], [], []
        X_added = curr_pset.copy()
        for x, ref_dir, g, gs in zip(X_candidate, self.ref_dirs, G, G_s):
            if (x != X_added).any(axis=1).all():
                Q_x.append(x)
                Q_dir.append(ref_dir)
                Q_g.append(g)
                Q_gs.append(gs)
                X_added = np.vstack([X_added, x])
        Q_x, Q_dir, Q_g, Q_gs = np.array(Q_x), np.array(Q_dir), np.array(Q_g), np.array(Q_gs)

        min_batch_size = min(batch_size,
                             len(Q_x))  # in case Q is smaller than batch size

        if min_batch_size == 0:
            indices = np.random.choice(len(X_candidate),
                                       batch_size,
                                       replace=False)
            return X_candidate[indices], Y_candidate[indices]

        # k-means clustering on X with weight vectors
        labels = KMeans(n_clusters=min_batch_size).fit_predict(
            np.column_stack([Q_x, Q_dir]))

        # select the point in each cluster with the lowest scalarized acquisition value
        X_candidate, Y_candidate = [], []
        for i in range(min_batch_size):
            indices = np.where(labels == i)[0]
            top_idx = indices[np.argmin(Q_gs[indices])]
            X_candidate.append(Q_x[top_idx])
            Y_candidate.append(Q_g[top_idx])

        return np.array(X_candidate), np.array(Y_candidate)
Example #16
def main(args):
    # preferences
    if args.prefer is not None:
        preferences = {}
        for p in args.prefer.split("+"):
            k, v = p.split("#")
            if k == 'top1':
                preferences[k] = 100 - float(v)  # assuming top-1 accuracy
            else:
                preferences[k] = float(v)
        weights = np.fromiter(preferences.values(), dtype=float)

    archive = json.load(open(args.expr))['archive']
    subnets = [v[0] for v in archive]
    top1 = [v[1] for v in archive]
    sec_obj = [v[2] for v in archive]
    sort_idx = np.argsort(top1)
    F = np.column_stack((top1, sec_obj))[sort_idx, :]
    front = NonDominatedSorting().do(F, only_non_dominated_front=True)
    pf = F[front, :]
    ps = np.array(subnets)[sort_idx][front]

    if args.prefer is not None:
        # choose the architectures that are closest to the preferences
        I = get_decomposition("asf").do(pf, weights).argsort()[:args.n]
    else:
        # choose the architectures with highest trade-off
        dm = HighTradeoffPoints(n_survive=args.n)
        I = dm.do(pf)

    # always add the most accurate architecture
    I = np.append(I, 0)

    # create the supernet
    from evaluator import OFAEvaluator
    supernet = OFAEvaluator(model_path=args.supernet_path)

    for idx in I:
        save = os.path.join(args.save, "net-flops@{:.0f}".format(pf[idx, 1]))
        os.makedirs(save, exist_ok=True)
        subnet, _ = supernet.sample({
            'ks': ps[idx]['ks'],
            'e': ps[idx]['e'],
            'd': ps[idx]['d']
        })
        with open(os.path.join(save, "net.subnet"), 'w') as handle:
            json.dump(ps[idx], handle)
        supernet.save_net_config(save, subnet, "net.config")
        supernet.save_net(save, subnet, "net.inherited")

    if _DEBUG:
        print(ps[I])
        plot = Scatter()
        plot.add(pf, alpha=0.2)
        plot.add(pf[I, :], color="red", s=100)
        plot.show()

    return
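The "asf" decomposition used in the example above is the achievement scalarizing function. In one common textbook form (pymoo's exact implementation may differ, e.g. in the utopian offset or whether weights multiply or divide) it scores each point by its worst weighted deviation from the ideal point, so the `argsort` picks the architectures closest to the stated preferences. A hedged sketch:

import numpy as np

def asf(F, weights, ideal=None):
    # achievement scalarizing function, one common form (minimization):
    # score = max_j (f_j - z_j) / w_j, smaller is better
    z = F.min(axis=0) if ideal is None else ideal
    return np.max((F - z) / weights, axis=1)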
Example #17
    def _potential_optimal(self):
        pop = self.pop

        if len(pop) == 1:
            return pop

        # get the intervals of each individual
        _F, _CV, xl, xu = pop.get("F", "CV", "xl", "xu")
        nF = normalize(_F)
        F = nF + self.penalty * _CV

        # get the length of the interval of each solution
        nxl, nxu = norm_bounds(pop, self.problem)
        length = (nxu - nxl) / 2

        val = length.max(axis=1)

        # (a) non-dominated with respect to interval
        obj = np.column_stack([-val, F])
        I = NonDominatedSorting().do(obj, only_non_dominated_front=True)
        candidates, F, xl, xu, val = pop[I], F[I], xl[I], xu[I], val[I]

        # import matplotlib.pyplot as plt
        # plt.scatter(obj[:, 0], obj[:, 1])
        # plt.scatter(obj[I, 0], obj[I, 1], color="red")
        # plt.show()

        if len(candidates) == 1:
            return candidates

        else:
            # TODO: The second condition needs to be implemented here. Exact implementation still unclear.

            n_max_candidates = 10

            if len(candidates) > n_max_candidates:
                I = list(
                    np.random.choice(np.arange(len(candidates)),
                                     n_max_candidates - 1))
                k = np.argmin(F[:, 0])
                if k not in I:
                    I.append(k)
                candidates = candidates[I]

            return candidates
Example #18
 def _calc_hv(ref_pt, F, normalized=True):
     # calculate hypervolume on the non-dominated set of F
     front = NonDominatedSorting().do(F, only_non_dominated_front=True)
     nd_F = F[front, :]
     ref_point = 1.01 * ref_pt
     hv = get_performance_indicator("hv", ref_point=ref_point).calc(nd_F)
     if normalized:
         hv = hv / np.prod(ref_point)
     return hv
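For two objectives, the hypervolume these snippets delegate to a library has a simple closed form, which makes results like the normalized value above easy to sanity-check by hand. A hedged sketch for a non-dominated minimization front (`hv_2d` is a hypothetical helper, not a pymoo API):

import numpy as np

def hv_2d(F, ref):
    # F: non-dominated 2-objective minimization front, ref: reference point
    F = F[np.argsort(F[:, 0])]             # ascending f1, hence descending f2
    right = np.append(F[1:, 0], ref[0])    # right edge of each vertical slab
    return float(np.sum((right - F[:, 0]) * (ref[1] - F[:, 1])))

# e.g. two points (1,3) and (2,1) with ref (4,4): 1*1 + 2*3 = 7
assert hv_2d(np.array([[1.0, 3.0], [2.0, 1.0]]), np.array([4.0, 4.0])) == 7.0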
Example #19
    def _do(self, F):
        if self.nds:
            non_dom = NonDominatedSorting().do(F, only_non_dominated_front=True)
            F = np.copy(F[non_dom, :])

        # calculate the hypervolume using a vendor library
        hv = _HyperVolume(self.ref_point)
        val = hv.compute(F)
        return val
Example #20
    def _next(self, archive, predictor, K):
        """ searching for next K candidate for high-fidelity evaluation (lower level) """

        # the following lines correspond to Algo 1 line 10 / Fig. 3(b) in the paper
        # get non-dominated architectures from archive
        F = np.column_stack(([x[1] for x in archive], [x[2] for x in archive]))
        front = NonDominatedSorting().do(F, only_non_dominated_front=True)
        # non-dominated arch bit-strings
        nd_X = np.array([self.search_space.encode(x[0])
                         for x in archive])[front]

        # initialize the candidate finding optimization problem
        problem = AuxiliarySingleLevelProblem(
            self.search_space, predictor, self.sec_obj, {
                'n_classes': self.n_classes,
                'model_path': self.supernet_path
            })

        # initiate a multi-objective solver to optimize the problem
        method = get_algorithm(
            "nsga2",
            pop_size=40,
            sampling=nd_X,  # initialize with current nd archs
            crossover=get_crossover("int_two_point", prob=0.9),
            mutation=get_mutation("int_pm", eta=1.0),
            eliminate_duplicates=True)

        # kick-off the search
        res = minimize(problem,
                       method,
                       termination=('n_gen', 20),
                       save_history=True,
                       verbose=True)

        # check for duplicates
        not_duplicate = np.logical_not([
            x in [x[0] for x in archive]
            for x in [self.search_space.decode(x) for x in res.pop.get("X")]
        ])

        # the following lines correspond to Algo 1 line 11 / Fig. 3(c)-(d) in the paper
        # form a subset selection problem to short list K from pop_size
        indices = self._subset_selection(res.pop[not_duplicate], F[front, 1],
                                         K)
        pop = res.pop[not_duplicate][indices]

        candidates = []
        for x in pop.get("X"):
            candidates.append(self.search_space.decode(x))

        # decode integer bit-string to config and also return predicted top1_err
        return candidates, predictor.predict(pop.get("X"))
Example #21
    def test_rank_and_crowding_distance(self):
        for i, D in enumerate(self.data):

            survivor_and_last_front = np.where(D['rank'] != -1.0)[0]
            crowding = D['crowding'][survivor_and_last_front]
            rank = D['rank'][survivor_and_last_front].astype(int)
            F = D['F'][survivor_and_last_front, :]

            fronts, _rank = NonDominatedSorting().do(F, return_rank=True)
            _rank += 1
            _crowding = np.full(len(F), np.nan)
            for front in fronts:
                _crowding[front] = calc_crowding_distance(F[front])
            _crowding[np.isinf(_crowding)] = 1e14

            is_equal = np.all(rank == _rank)
            if not is_equal:
                index = np.where(rank != _rank)
                print(index)
                print(D['rank'][index])
                print(D['F'][index])

            self.assertTrue(is_equal)

            is_equal = np.all(np.abs(_crowding - crowding) < 0.001)
            if not is_equal:

                index = np.where(np.abs(_crowding - crowding) > 0.001)[0]
                index = index[np.argsort(rank[index])]

                # only an error if it is not a duplicate F value
                for i_not_equal in index:

                    if len(np.where(np.all(F[i_not_equal, :] == F, axis=1))[0]) == 1:
                        print("-" * 30)
                        print("Generation: ", i)
                        print("Is rank equal: ", np.all(rank == _rank))

                        print(index)
                        print(rank[index])
                        print(F[index])
                        print(
                            np.concatenate(
                                [_crowding[:, None], crowding[:, None]],
                                axis=1)[index, :])
                        print()

                        self.assertTrue(is_equal)
Example #22
    def _initialize_advance(self, infills=None, **kwargs):
        super()._initialize_advance(infills, **kwargs)
        self.ideal = np.min(self.pop.get("F"), axis=0)

        # retrieve the current population
        self.npc_pop = self.pop.copy(deep=True)

        # get the objective space values and objects
        npc_objs = self.npc_pop.get("F")

        fronts, rank = NonDominatedSorting().do(npc_objs, return_rank=True)
        front_0_index = fronts[0]

        # put the nondominated individuals of the NPC population into the PC population
        self.pc_pop = self.npc_pop[front_0_index].copy(deep=True)
Example #23
File: hv.py Project: mbeza/pymoo-1
    def _calc(self, F):

        # only consider the non-dominated solutions for HV
        non_dom = NonDominatedSorting().do(F, only_non_dominated_front=True)
        _F = np.copy(F[non_dom, :])

        if self.normalize:
            # because we normalize now the reference point is (1,...1)
            ref_point = np.ones(F.shape[1])
            hv = _HyperVolume(ref_point)
            _F = normalize(_F, x_min=self.ideal_point, x_max=self.nadir_point)
        else:
            hv = _HyperVolume(self.ref_point)

        val = hv.compute(_F)
        return val
Example #24
def filter_optimum(pop):

    # first only choose feasible solutions
    pop = pop[pop.collect(lambda ind: ind.feasible)[:, 0]]

    # if at least one feasible solution was found
    if len(pop) > 0:

        # then check the objective values
        F = pop.get("F")

        if F.shape[1] > 1:
            I = NonDominatedSorting().do(pop.get("F"),
                                         only_non_dominated_front=True)
            pop = pop[I]

        else:
            pop = pop[np.argmin(F)]

    else:
        pop = None

    return pop
Example #25
    def _updateCA(self, pop, n_survive):
        """Update the Convergence archive (CA)"""
        CV = pop.get("CV").flatten()

        Sc = pop[CV == 0]  # Feasible population
        if len(Sc) == n_survive:  # Exactly n_survive feasible individuals
            F = Sc.get("F")
            fronts, rank = NonDominatedSorting().do(F, return_rank=True)
            Sc.set('rank', rank)
            self.opt = Sc[fronts[0]]
            return Sc
        elif len(Sc) < n_survive:  # Not enough feasible individuals
            remainder = n_survive - len(Sc)
            # Solve sub-problem CV, tche
            SI = pop[CV > 0]
            f1 = SI.get("CV")
            _, f2 = self._associate(SI)
            sub_F = np.column_stack([f1, f2])
            fronts = NonDominatedSorting().do(sub_F,
                                              n_stop_if_ranked=remainder)
            I = []
            for front in fronts:
                if len(I) + len(front) <= remainder:
                    I.extend(front)
                else:
                    n_missing = remainder - len(I)
                    last_front_CV = np.argsort(f1.flatten()[front])
                    I.extend(front[last_front_CV[:n_missing]])
            SI = SI[I]
            S = Sc.merge(SI)
            F = S.get("F")
            fronts, rank = NonDominatedSorting().do(F, return_rank=True)
            S.set('rank', rank)
            self.opt = S[fronts[0]]
            return S
        else:  # Too many feasible individuals
            F = Sc.get("F")
            # Filter by non-dominated sorting
            fronts, rank = NonDominatedSorting().do(F,
                                                    return_rank=True,
                                                    n_stop_if_ranked=n_survive)
            I = np.concatenate(fronts)
            S, rank, F = Sc[I], rank[I], F[I]
            if len(S) > n_survive:
                # Remove individual in most crowded niche and with worst fitness
                niche_of_individuals, FV = self._associate(S)
                index, count = np.unique(niche_of_individuals,
                                         return_counts=True)
                survivors = np.full(S.shape[0], True)
                while survivors.sum() > n_survive:
                    crowdest_niches, = np.where(count == count.max())
                    worst_idx = None
                    worst_niche = None
                    worst_fit = -1
                    for crowdest_niche in crowdest_niches:
                        crowdest, = np.where(
                            (niche_of_individuals == index[crowdest_niche])
                            & survivors)
                        niche_worst = crowdest[FV[crowdest].argmax()]
                        dist_to_max_fit = cdist(F[[niche_worst], :],
                                                F).flatten()
                        dist_to_max_fit[niche_worst] = np.inf
                        dist_to_max_fit[~survivors] = np.inf
                        min_d_to_max_fit = dist_to_max_fit.min()

                        dist_in_niche = squareform(pdist(F[crowdest]))
                        np.fill_diagonal(dist_in_niche, np.inf)

                        delta_d = dist_in_niche - min_d_to_max_fit
                        min_d_i = np.unravel_index(
                            np.argmin(delta_d, axis=None), dist_in_niche.shape)
                        if (delta_d[min_d_i] < 0) or (
                                delta_d[min_d_i] == 0 and
                            (FV[crowdest[list(min_d_i)]] > niche_worst).any()):
                            min_d_i = list(min_d_i)
                            np.random.shuffle(min_d_i)
                            closest = crowdest[min_d_i]
                            niche_worst = closest[np.argmax(FV[closest])]
                        if FV[niche_worst] > worst_fit:
                            worst_fit = FV[niche_worst]
                            worst_idx = niche_worst
                            worst_niche = crowdest_niche
                    survivors[worst_idx] = False
                    count[worst_niche] -= 1
                S, rank = S[survivors], rank[survivors]
            S.set('rank', rank)
            self.opt = S[rank == 0]
            return S
Example #26
    def _do(self, problem, pop, n_survive, D=None, **kwargs):

        # attributes to be set after the survival
        F = pop.get("F")

        # find or usually update the new ideal point - from feasible solutions
        self.ideal_point = np.min(np.vstack((self.ideal_point, F)), axis=0)
        self.worst_point = np.max(np.vstack((self.worst_point, F)), axis=0)

        # calculate the fronts of the population
        fronts, rank = NonDominatedSorting().do(F,
                                                return_rank=True,
                                                n_stop_if_ranked=n_survive)
        non_dominated, last_front = fronts[0], fronts[-1]

        # find the extreme points for normalization
        self.extreme_points = get_extreme_points_c(
            F[non_dominated, :],
            self.ideal_point,
            extreme_points=self.extreme_points)

        # find the intercepts for normalization and do backup if gaussian elimination fails
        worst_of_population = np.max(F, axis=0)
        worst_of_front = np.max(F[non_dominated, :], axis=0)

        self.nadir_point = get_nadir_point(self.extreme_points,
                                           self.ideal_point, self.worst_point,
                                           worst_of_population, worst_of_front)

        #  consider only the population until we come to the splitting front
        I = np.concatenate(fronts)
        pop, rank, F = pop[I], rank[I], F[I]

        # update the front indices for the current population
        counter = 0
        for i in range(len(fronts)):
            for j in range(len(fronts[i])):
                fronts[i][j] = counter
                counter += 1
        last_front = fronts[-1]

        # associate individuals to niches
        niche_of_individuals, dist_to_niche, dist_matrix = \
            associate_to_niches(F, self.ref_dirs, self.ideal_point, self.nadir_point)

        pop.set('rank', rank, 'niche', niche_of_individuals, 'dist_to_niche',
                dist_to_niche)

        # if we need to select individuals to survive
        if len(pop) > n_survive:

            # if there is only one front
            if len(fronts) == 1:
                n_remaining = n_survive
                until_last_front = np.array([], dtype=int)
                niche_count = np.zeros(len(self.ref_dirs), dtype=int)

            # if some individuals already survived
            else:
                until_last_front = np.concatenate(fronts[:-1])
                niche_count = calc_niche_count(
                    len(self.ref_dirs), niche_of_individuals[until_last_front])
                n_remaining = n_survive - len(until_last_front)

            S = niching(pop[last_front], n_remaining, niche_count,
                        niche_of_individuals[last_front],
                        dist_to_niche[last_front])

            survivors = np.concatenate(
                (until_last_front, last_front[S].tolist()))
            pop = pop[survivors]

        return pop
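`calc_niche_count` above simply histograms the niche memberships of the individuals that already survived, so `niching` knows which reference directions are under-represented. A minimal equivalent sketch (`niche_count` is a hypothetical helper name):

import numpy as np

def niche_count(n_niches, niche_of_individuals):
    # number of already-surviving individuals associated with each niche
    count = np.zeros(n_niches, dtype=int)
    idx, c = np.unique(niche_of_individuals, return_counts=True)
    count[idx] = c
    return count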
Example #27
    def _advance(self, **kwargs):
        repair, crossover, mutation = self.repair, self.mating.crossover, self.mating.mutation

        pc_pop = self.pc_pop.copy(deep=True)
        npc_pop = self.npc_pop.copy(deep=True)

        ##############################################################
        # PC evolving
        ##############################################################

        # Normalise both populations according to the PC individuals
        pc_pop, npc_pop = normalize_bothpop(pc_pop, npc_pop)

        PCObj = pc_pop.get("F")
        NPCObj = npc_pop.get("F")
        pc_size = PCObj.shape[0]
        npc_size = NPCObj.shape[0]

        ######################################################
        # Calculate the Euclidean distance among individuals
        ######################################################
        d = cdist(PCObj, PCObj, 'euclidean')
        d[d == 0] = np.inf

        # Determine the size of the niche
        if pc_size == 1:
            radius = 0
        else:
            radius = determine_radius(d, pc_size, self.pc_capacity)

        # calculate the radius for individual exploration
        r = pc_size / self.pc_capacity * radius

        ########################################################
        # find the promising individuals in PC for exploration
        ########################################################

        # promising_num: record how many promising individuals in PC
        promising_num = 0
        # count: record how many NPC individuals are located in each PC individual's niche
        count = np.array([])

        d2 = cdist(PCObj, NPCObj, 'euclidean')

        # Count of True elements in each row (each individual in PC) of 2D Numpy Array
        count = np.count_nonzero(d2 <= r, axis=1)

        # Check if the niche has no NPC individual or has only one NPC individual
        # Record the indices of promising individuals.
        # Since np.nonzero() returns a tuple of arrays, we change the type of promising_index to a numpy.ndarray.
        promising_index = np.nonzero(count <= 1)
        promising_index = np.asarray(promising_index).flatten()

        # Record total number of promising individuals in PC for exploration
        promising_num = len(promising_index)

        ########################################
        # explore these promising individuals
        ########################################

        original_size = pc_size
        off = Individual()

        if promising_num > 0:
            for i in range(promising_num):
                if original_size > 1:
                    parents = Population.new(2)

                    # The explored individual is considered as one parent
                    parents[0] = pc_pop[promising_index[i]]

                    # The remaining parent will be selected randomly from the PC population
                    rnd = np.random.permutation(pc_size)

                    for j in rnd:
                        if j != promising_index[i]:
                            parents[1] = pc_pop[j]
                            break

                    index = np.array([0, 1])
                    parents_shape = index[None, :]

                    # do recombination and create an offspring
                    off = crossover.do(self.problem, parents, parents_shape)[0]

                else:
                    off = pc_pop[0]

                # mutation
                off = Population.create(off)
                off = mutation.do(self.problem, off)

                # evaluate the offspring
                self.evaluator.eval(self.problem, off)

                # update the PC population by the offspring
                self.pc_pop = update_PCpop(self.pc_pop, off)

                # update the ideal point
                self.ideal = np.min(np.vstack([self.ideal,
                                               off.get("F")]),
                                    axis=0)

                # update at most one solution in NPC population
                self.npc_pop = update_NPCpop(self.npc_pop, off, self.ideal,
                                             self.ref_dirs, self.decomp)

        ########################################################
        # NPC evolution based on MOEA/D
        ########################################################

        # iterate for each member of the population in random order
        for i in np.random.permutation(len(self.npc_pop)):
            # get the parents using the neighborhood selection
            P = self.selection.do(self.npc_pop,
                                  1,
                                  self.mating.crossover.n_parents,
                                  k=[i])

            # perform a mating using the default operators (recombination & mutation) - if more than one offspring just pick the first
            off = self.mating.do(self.problem, self.npc_pop, 1, parents=P)[0]

            off = Population.create(off)

            # evaluate the offspring
            self.evaluator.eval(self.problem, off, algorithm=self)

            # update the PC population by the offspring
            self.pc_pop = update_PCpop(self.pc_pop, off)

            # update the ideal point
            self.ideal = np.min(np.vstack([self.ideal, off.get("F")]), axis=0)

            # now do the actual replacement if the offspring is better
            self.npc_pop = self._replace(i, off)

        ########################################################
        # population maintenance operation in the PC evolution
        ########################################################

        current_pop = Population.merge(self.pc_pop, self.npc_pop)
        current_pop = Population.merge(current_pop, self.pop)

        # filter duplicate in the population
        pc_pop = self.eliminate_duplicates.do(current_pop)

        pc_size = len(pc_pop)

        if (pc_size > self.pc_capacity):

            # get the objective space values and objects
            pc_objs = pc_pop.get("F")

            fronts, rank = NonDominatedSorting().do(pc_objs, return_rank=True)
            front_0_index = fronts[0]

            # put the nondominated individuals of the NPC population into the PC population
            self.pc_pop = pc_pop[front_0_index]

            if len(self.pc_pop) > self.pc_capacity:
                self.pc_pop = maintain_PCpop(self.pc_pop, self.pc_capacity)

        self.pop = self.pc_pop.copy(deep=True)
Example #28
    def optimize(self, jobs, nodes, base_allocations, node_template):
        """
        Run one optimization cycle of the Pollux scheduling policy.

        Arguments:
            jobs (dict): map from job keys to `JobInfo` objects which
                correspond to the incomplete jobs which should be optimized.
            nodes (dict): map from node keys to `NodeInfo` objects which
                correspond to the existing nodes in the cluster.
            base_allocations (dict): map from job keys to their current
                resource allocations, in the form of a list of a node key for
                each replica.
            node_template (NodeInfo): represents a node which can be requested,
                used to decide the cluster size for cluster auto-scaling.

        Returns:
            tuple: a map from job keys to their optimized resource allocations
                (in the form of a list of a node key for each replica), and
                the desired number of nodes for cluster auto-scaling.
        """
        jobs = OrderedDict(  # Sort jobs in FIFO order.
            sorted(jobs.items(), key=lambda kv: kv[1].creation_timestamp))
        nodes = OrderedDict(  # Sort preemptible nodes last.
            sorted(nodes.items(), key=lambda kv: (kv[1].preemptible, kv[0])))
        base_state = np.concatenate(
            (self._allocations_to_state(base_allocations, jobs, nodes),
             np.zeros((len(jobs), len(nodes)), dtype=int)),
            axis=1)
        if self._prev_states is None:
            states = np.expand_dims(base_state, 0)
        else:
            states = self._adapt_prev_states(jobs, nodes)
        problem = Problem(list(jobs.values()),
                          list(nodes.values()) + len(nodes) * [node_template],
                          base_state)
        algorithm = NSGA2(
            pop_size=100,
            # pymoo expects a flattened 2-D array.
            sampling=states.reshape(states.shape[0], -1),
            crossover=Crossover(),
            mutation=Mutation(),
            repair=Repair(),
        )
        result = pymoo.optimize.minimize(problem, algorithm, ("n_gen", 100))
        states = result.X.reshape(result.X.shape[0], len(jobs), 2 * len(nodes))
        self._prev_states = copy.deepcopy(states)
        self._prev_jobs = copy.deepcopy(jobs)
        self._prev_nodes = copy.deepcopy(nodes)
        # Get the pareto front.
        nds = NonDominatedSorting().do(result.F, only_non_dominated_front=True)
        states = states[nds]
        values = result.F[nds]
        # Construct return values.
        utilities = problem.get_cluster_utilities(states)
        desired_nodes = self._desired_nodes(utilities, values, nodes)
        idx = self._select_result(values, min(len(nodes), desired_nodes))
        LOG.info("\n" + "-" * 80)
        for i, state in enumerate(states):
            out = "Solution {}:\n".format(i)
            out += "{}\n".format(state)
            out += "Value: {}\n".format(values[i].tolist())
            out += "Utility: {}\n".format(utilities[i])
            out += "-" * 80
            LOG.info(out)
        return (self._state_to_allocations(states[idx], jobs, nodes)
                if idx is not None else {}), desired_nodes
Example #29
def main():
    # get argument values
    args = get_args()

    # get reference point
    if args.ref_point is None:
        args.ref_point = get_ref_point(args.problem, args.n_var, args.n_obj,
                                       args.n_init_sample)

    t0 = time()

    # set seed
    np.random.seed(args.seed)

    # build problem, get initial samples
    problem, true_pfront, X_init, Y_init = build_problem(
        args.problem, args.n_var, args.n_obj, args.n_init_sample,
        args.n_process)
    args.n_var, args.n_obj, args.algo = problem.n_var, problem.n_obj, 'nsga2'

    # save arguments and setup logger
    save_args(args)
    logger = setup_logger(args)
    print(problem)

    # initialize data exporter
    exporter = DataExport(X_init, Y_init, args)

    # initialize population
    if args.pop_init_method == 'lhs':
        sampling = LatinHypercubeSampling()
    elif args.pop_init_method == 'nds':
        sorted_indices = NonDominatedSorting().do(Y_init)
        sampling = X_init[np.concatenate(sorted_indices)][:args.batch_size]
        if len(sampling) < args.batch_size:
            rest_sampling = lhs(X_init.shape[1],
                                args.batch_size - len(sampling))
            sampling = np.vstack([sampling, rest_sampling])
    elif args.pop_init_method == 'random':
        sampling = FloatRandomSampling()
    else:
        raise NotImplementedError

    # initialize evolutionary algorithm
    ea_algorithm = NSGA2(pop_size=args.batch_size, sampling=sampling)

    # find Pareto front
    res = minimize(problem,
                   ea_algorithm, ('n_gen', args.n_iter),
                   save_history=True)
    X_history = np.array([algo.pop.get('X') for algo in res.history])
    Y_history = np.array([algo.pop.get('F') for algo in res.history])

    # update data exporter
    for X_next, Y_next in zip(X_history, Y_history):
        exporter.update(X_next, Y_next)

    # export all result to csv
    exporter.write_csvs()
    if true_pfront is not None:
        exporter.write_truefront_csv(true_pfront)

    # statistics
    final_hv = calc_hypervolume(exporter.Y, exporter.ref_point)
    print('========== Result ==========')
    print('Total runtime: %.2fs' % (time() - t0))
    print('Total evaluations: %d, hypervolume: %.4f\n' %
          (args.batch_size * args.n_iter, final_hv))

    # close logger
    if logger is not None:
        logger.close()
Example #30
    def _next(self):

        # all places visited so far
        _X, _F, _evaluated_by_algorithm = self.evaluator.history.get("X", "F", "algorithm")

        # collect attributes from each algorithm and determine whether it has to be replaced or not
        pop, F, n_evals = [], [], []
        for k, algorithm in enumerate(self.algorithms):

            # collect some data from the current algorithms
            _pop = algorithm.pop

            # if the algorithm has terminated or not
            has_finished = algorithm.termination.has_terminated(algorithm)

            # if the area was already explored before
            closest_dist_to_others = vectorized_cdist(_pop.get("X"), _X[_evaluated_by_algorithm != algorithm],
                                                      func_dist=norm_euclidean_distance(self.problem))
            too_close_to_others = (closest_dist_to_others.min(axis=1) < 1e-3).all()

            # whether the algorithm is the current best - if yes it will not be replaced
            current_best = self.evaluator.opt.get("F") == _pop.get("F").min()

            # algorithm not really useful anymore
            if not current_best and (has_finished or too_close_to_others):
                # find a suitable x0 which is far from other or has good expectations
                self.sampling.criterion = lambda X: vectorized_cdist(X, _X).min()
                X = self.sampling.do(self.problem, self.n_initial_samples).get("X")

                # distance in x space to other existing points
                x_dist = vectorized_cdist(X, _X, func_dist=norm_euclidean_distance(self.problem)).min(axis=1)
                f_pred, f_uncert = predict_by_nearest_neighbors(_X, _F, X, 5, self.problem)
                fronts = NonDominatedSorting().do(np.column_stack([- x_dist, f_pred, f_uncert]))
                I = np.random.choice(fronts[0])

                # I = vectorized_cdist(X, _X, func_dist=norm_euclidean_distance(self.problem)).min(axis=1).argmax()

                # choose the one with the largest distance to current solutions
                x0 = X[[I]]

                # replace the current algorithm
                algorithm = get_algorithm("nelder-mead",
                                          problem=self.problem,
                                          x0=x0,
                                          termination=NelderAndMeadTermination(x_tol=1e-3, f_tol=1e-3),
                                          evaluator=self.evaluator,
                                          )
                algorithm.initialize()
                self.algorithms[k] = algorithm

            pop.append(algorithm.pop)
            F.append(algorithm.pop.get("F"))
            n_evals.append(self.evaluator.algorithms[algorithm])

        # get the values of all algorithms as arrays
        F, n_evals = np.array(F), np.array(n_evals)
        rewards = 1 - normalize(F.min(axis=1))[:, 0]
        n_evals_total = self.evaluator.n_eval - self.evaluator.algorithms[self]

        # calculate the upper confidence bound
        ucb = rewards + 0.95 * np.sqrt(np.log(n_evals_total) / n_evals)

        I = ucb.argmax()
        self.algorithms[I].next()

        # create the population object with all algorithms
        self.pop = Population.create(*pop)

        # update the current optimum
        self.opt = self.evaluator.opt
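The selection at the end of this example follows the UCB1 shape from multi-armed bandits: each algorithm's reward is padded with an exploration bonus that grows for rarely-advanced algorithms. A standalone sketch under that assumption (`ucb1_scores` is a hypothetical helper; 0.95 stands in for the exploration constant used above, and the code above counts total evaluations rather than summing pulls):

import numpy as np

def ucb1_scores(mean_rewards, n_pulls, c=0.95):
    # upper confidence bound per arm: exploit the mean reward, explore rarely-pulled arms
    total = n_pulls.sum()
    return mean_rewards + c * np.sqrt(np.log(total) / n_pulls)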