Example #1
class PSO(Algorithm):
    def __init__(self,
                 pop_size=25,
                 sampling=LatinHypercubeSampling(),
                 w=0.9,
                 c1=2.0,
                 c2=2.0,
                 adaptive=True,
                 initial_velocity="random",
                 max_velocity_rate=0.20,
                 pertube_best=True,
                 display=PSODisplay(),
                 **kwargs):
        """

        Parameters
        ----------
        pop_size : The size of the swarm being used.

        sampling : {sampling}

        adaptive : bool
            Whether w, c1, and c2 are changed dynamically over time. The update uses the spread from the global
            optimum to determine suitable values.

        w : float
            The inertia weight to be used in each iteration for the velocity update. This can be interpreted
            as the momentum term regarding the velocity. If `adaptive=True` this is only the
            initially used value.

        c1 : float
            The cognitive impact (personal best) during the velocity update. If `adaptive=True` this is only the
            initially used value.

        c2 : float
            The social impact (global best) during the velocity update. If `adaptive=True` this is only the
            initially used value.

        initial_velocity : str - ('random' or 'zero')
            How the initial velocity of each particle should be assigned. Either 'random', which creates a
            random velocity vector, or 'zero', which lets the particles find their direction through the
            velocity update equation alone.


        max_velocity_rate : float
            The maximum velocity rate. It is determined per variable (and not per vector). A rate is used
            here because the value is normalized with respect to the `xl` and `xu` defined in the problem.

        pertube_best : bool
            Some studies have proposed mutating the global best solution because it has been found to improve
            convergence. This means the population size is reduced by one particle, and one additional function
            evaluation is spent per iteration to perturb the best solution found so far.

        """

        super().__init__(display=display, **kwargs)

        self.initialization = Initialization(sampling)

        self.pop_size = pop_size
        self.adaptive = adaptive
        self.pertube_best = pertube_best
        self.default_termination = SingleObjectiveDefaultTermination()
        self.V_max = None
        self.initial_velocity = initial_velocity
        self.max_velocity_rate = max_velocity_rate

        self.w = w
        self.c1 = c1
        self.c2 = c2

    def initialize(self, problem, **kwargs):
        super().initialize(problem, **kwargs)
        self.V_max = self.max_velocity_rate * (problem.xu - problem.xl)

    def _initialize(self):
        pop = self.initialization.do(self.problem,
                                     self.pop_size,
                                     algorithm=self)
        self.evaluator.eval(self.problem, pop, algorithm=self)

        if self.pertube_best:
            pop = FitnessSurvival().do(self.problem, pop, self.pop_size - 1)

        if self.initial_velocity == "random":
            init_V = np.random.random(
                (len(pop), self.problem.n_var)) * self.V_max[None, :]
        elif self.initial_velocity == "zero":
            init_V = np.zeros((len(pop), self.problem.n_var))
        else:
            raise ValueError("initial_velocity must be either 'random' or 'zero'.")

        pop.set("V", init_V)
        pop.set("pbest", pop)
        self.pop, self.off = pop, pop

        self.f = None
        self.strategy = None

    def _next(self):
        self._step()

        if self.adaptive:
            self._adapt()

    def _step(self):
        pop = self.pop
        X, F, V = pop.get("X", "F", "V")

        # get the personal best of each particle
        pbest = Population.create(*pop.get("pbest"))
        P_X, P_F = pbest.get("X", "F")

        # get the GLOBAL best solution - other variants such as local best can be implemented here too
        best = self.opt.repeat(len(pop))
        G_X = best.get("X")

        # calculate the inertia term of the velocity update
        inertia = self.w * V

        # calculate random values for the updates
        r1 = np.random.random((len(pop), self.problem.n_var))
        r2 = np.random.random((len(pop), self.problem.n_var))

        cognitive = self.c1 * r1 * (P_X - X)
        social = self.c2 * r2 * (G_X - X)

        # calculate the velocity vector
        _V = inertia + cognitive + social
        _V = repair_out_of_bounds_manually(_V, -self.V_max, self.V_max)

        # update the values of each particle
        _X = X + _V
        _X = repair_out_of_bounds(self.problem, _X)

        # evaluate the offspring population
        off = Population(len(pop)).set("X", _X, "V", _V, "pbest", pbest)
        self.evaluator.eval(self.problem, off, algorithm=self)

        # check whether a solution has improved or not - also consider constraints here
        has_improved = ImprovementReplacement().do(self.problem,
                                                   pbest,
                                                   off,
                                                   return_indices=True)

        # replace the personal best of each particle if it has improved
        off[has_improved].set("pbest", off[has_improved])
        off.set("best", best)
        pop = off

        # try to improve the current best with a perturbation
        if self.pertube_best:
            pbest = Population.create(*pop.get("pbest"))
            k = FitnessSurvival().do(self.problem,
                                     pbest,
                                     1,
                                     return_indices=True)[0]
            eta = int(np.random.uniform(5, 30))
            mutant = PolynomialMutation(eta).do(self.problem, pbest[[k]])[0]
            self.evaluator.eval(self.problem, mutant, algorithm=self)

            # if the mutant is in fact better - replace the personal best
            if is_better(mutant, pop[k]):
                pop[k].set("pbest", mutant)

        self.pop = pop

    def _adapt(self):
        pop = self.pop

        X, F, best = pop.get("X", "F", "best")
        best = Population.create(*best)
        w, c1, c2 = self.w, self.c1, self.c2

        # get the average distance from one to another for normalization
        D = norm_eucl_dist(self.problem, X, X)
        mD = D.sum(axis=1) / (len(pop) - 1)
        _min, _max = mD.min(), mD.max()

        # get the average distance to the global best
        g_D = norm_euclidean_distance(self.problem)(best.get("X"), X).mean()
        f = (g_D - _min) / (_max - _min + 1e-32)

        S = np.array([
            S1_exploration(f),
            S2_exploitation(f),
            S3_convergence(f),
            S4_jumping_out(f)
        ])
        strategy = S.argmax() + 1

        delta = 0.05 + (np.random.random() * 0.05)

        if strategy == 1:
            c1 += delta
            c2 -= delta
        elif strategy == 2:
            c1 += 0.5 * delta
            c2 -= 0.5 * delta
        elif strategy == 3:
            c1 += 0.5 * delta
            c2 += 0.5 * delta
        elif strategy == 4:
            c1 -= delta
            c2 += delta

        c1 = max(1.5, min(2.5, c1))
        c2 = max(1.5, min(2.5, c2))

        if c1 + c2 > 4.0:
            total = c1 + c2
            c1 = 4.0 * (c1 / total)
            c2 = 4.0 * (c2 / total)

        w = 1 / (1 + 1.5 * np.exp(-2.6 * f))

        self.f = f
        self.strategy = strategy
        self.c1 = c1
        self.c2 = c2
        self.w = w

    def _set_optimum(self, force=False):
        pbest = Population.create(*self.pop.get("pbest"))
        self.opt = filter_optimum(pbest, least_infeasible=True)
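
For context, here is a minimal usage sketch that runs the class above through pymoo's functional interface. The import paths (`pymoo.optimize`, `pymoo.factory`) and the problem name are assumptions and vary across pymoo versions.

# A minimal usage sketch (assumed API, pymoo ~0.4 era): solve a bounded
# single-objective test problem with the PSO class defined above.
from pymoo.optimize import minimize
from pymoo.factory import get_problem

problem = get_problem("rastrigin")           # hypothetical choice of test problem
algorithm = PSO(pop_size=25, adaptive=True)  # the class from this example

res = minimize(problem, algorithm, seed=1, verbose=False)
print(res.X, res.F)  # best decision vector and objective value found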
Example #2
class GeneticAlgorithm(Algorithm):

    def __init__(self,
                 pop_size=None,
                 sampling=None,
                 selection=None,
                 crossover=None,
                 mutation=None,
                 survival=None,
                 n_offsprings=None,
                 eliminate_duplicates=DefaultDuplicateElimination(),
                 repair=None,
                 individual=Individual(),
                 **kwargs
                 ):

        super().__init__(**kwargs)

        # the population size used
        self.pop_size = pop_size

        # the survival for the genetic algorithm
        self.survival = survival

        # number of offspring to generate through recombination
        self.n_offsprings = n_offsprings

        # if the number of offspring is not set - set it equal to the population size
        if self.n_offsprings is None:
            self.n_offsprings = pop_size

        # the object to be used to represent an individual - either individual or derived class
        self.individual = individual

        # set the duplicate detection class - a boolean value chooses the default duplicate detection
        if isinstance(eliminate_duplicates, bool):
            if eliminate_duplicates:
                self.eliminate_duplicates = DefaultDuplicateElimination()
            else:
                self.eliminate_duplicates = None
        else:
            self.eliminate_duplicates = eliminate_duplicates

        self.initialization = Initialization(sampling,
                                             individual=individual,
                                             repair=repair,
                                             eliminate_duplicates=self.eliminate_duplicates)

        self.mating = Mating(selection,
                             crossover,
                             mutation,
                             repair=repair,
                             eliminate_duplicates=self.eliminate_duplicates,
                             n_max_iterations=100)

        # other run specific data updated whenever solve is called - to share them in all algorithms
        self.n_gen = None
        self.pop = None
        self.off = None

    def _initialize(self):

        # create the initial population
        pop = self.initialization.do(self.problem, self.pop_size, algorithm=self)

        # then evaluate using the objective function
        self.evaluator.eval(self.problem, pop, algorithm=self)

        # a dummy survival call to set attributes that are required for the mating selection
        if self.survival:
            pop = self.survival.do(self.problem, pop, len(pop), algorithm=self)

        self.pop = pop

    def _next(self):

        # do the mating using the current population
        self.off = self.mating.do(self.problem, self.pop, n_offsprings=self.n_offsprings, algorithm=self)

        # if the mating could not generate any new offspring (duplicate elimination might make that happen)
        if len(self.off) == 0:
            self.termination.force_termination = True
            return

        # if the desired number of offspring could not be created
        elif len(self.off) < self.n_offsprings:
            if self.verbose:
                print("WARNING: Mating could not produce the required number of (unique) offspring!")

        # evaluate the offspring
        self.evaluator.eval(self.problem, self.off, algorithm=self)

        # merge the offspring with the current population
        self.pop = self.pop.merge(self.off)

        # then do the survival selection
        if self.survival:
            self.pop = self.survival.do(self.problem, self.pop, self.pop_size, algorithm=self)

    def _finalize(self):
        pass
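
To illustrate how the operator slots plug together, here is a sketch that assembles the base class above into a concrete single-objective GA. The factory helpers and operator keys are assumptions from the pymoo ~0.4 era and may differ in other versions.

# Hypothetical wiring of the base class above into a concrete GA.
# Factory helpers and operator keys are assumptions (pymoo ~0.4 era).
from pymoo.factory import get_sampling, get_selection, get_crossover, get_mutation

ga = GeneticAlgorithm(
    pop_size=100,
    sampling=get_sampling("real_random"),           # random initial population
    selection=get_selection("random"),              # mating selection
    crossover=get_crossover("real_sbx", prob=0.9),  # simulated binary crossover
    mutation=get_mutation("real_pm", eta=20),       # polynomial mutation
    survival=FitnessSurvival(),                     # elitist survival, as in the PSO example
    eliminate_duplicates=True,
)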
Example #3
class ParetoDiscovery(Algorithm):
    '''
    The Pareto discovery algorithm introduced by: Schulz, Adriana, et al. "Interactive exploration of design trade-offs." ACM Transactions on Graphics (TOG) 37.4 (2018): 1-14.
    '''
    def __init__(self,
                 pop_size=None,
                 sampling=None,
                 survival=None,
                 eliminate_duplicates=DefaultDuplicateElimination(),
                 repair=None,
                 individual=Individual(),
                 n_cell=None,
                 cell_size=10,
                 buffer_origin=None,
                 buffer_origin_constant=1e-2,
                 delta_b=0.2,
                 label_cost=10,
                 delta_p=10,
                 delta_s=0.3,
                 n_grid_sample=100,
                 n_process=cpu_count(),
                 **kwargs):
        '''
        Inputs (essential parameters):
            pop_size: population size
            sampling: initial sample data or sampling method to obtain initial population
            n_cell: number of cells in performance buffer
            cell_size: maximum number of samples inside each cell of performance buffer
            buffer_origin: origin of performance buffer
            buffer_origin_constant: when an evaluated value surpasses the buffer origin, adjust the origin accordingly and subtract this constant
            delta_b: unary energy normalization constant for sparse approximation, see section 6.4
            label_cost: for reducing number of unique labels in sparse approximation, see section 6.4
            delta_p: factor of perturbation in stochastic sampling, see section 6.2.2
            delta_s: scaling factor for choosing reference point in local optimization, see section 6.2.3
            n_grid_sample: number of samples on local manifold (grid), see section 6.3.1
            n_process: number of processes for parallelization
        '''
        super().__init__(**kwargs)

        self.pop_size = pop_size
        self.survival = survival
        self.individual = individual

        if isinstance(eliminate_duplicates, bool):
            if eliminate_duplicates:
                self.eliminate_duplicates = DefaultDuplicateElimination()
            else:
                self.eliminate_duplicates = None
        else:
            self.eliminate_duplicates = eliminate_duplicates

        self.initialization = Initialization(
            sampling,
            individual=individual,
            repair=repair,
            eliminate_duplicates=self.eliminate_duplicates)

        self.n_gen = None
        self.pop = None
        self.off = None

        self.approx_set = None
        self.approx_front = None
        self.fam_lbls = None

        self.buffer = None
        if n_cell is None:
            n_cell = self.pop_size
        self.buffer_args = {
            'cell_num': n_cell,
            'cell_size': cell_size,
            'origin': buffer_origin,
            'origin_constant': buffer_origin_constant,
            'delta_b': delta_b,
            'label_cost': label_cost
        }

        self.delta_p = delta_p
        self.delta_s = delta_s
        self.n_grid_sample = n_grid_sample
        self.n_process = n_process
        self.patch_id = 0

        self.constr_func = None

    def _initialize(self):
        # create the initial population
        pop = self.initialization.do(self.problem,
                                     self.pop_size,
                                     algorithm=self)
        pop_x = pop.get('X').copy()

        # check if problem has constraints other than bounds
        pop_constr = self.problem.evaluate_constraint(pop_x)
        if pop_constr is not None:
            self.constr_func = self.problem.evaluate_constraint
            pop = pop[pop_constr <= 0]

            while len(pop) < self.pop_size:
                new_pop = self.initialization.do(self.problem,
                                                 self.pop_size,
                                                 algorithm=self)
                new_pop_x = new_pop.get('X').copy()
                new_pop = new_pop[
                    self.problem.evaluate_constraint(new_pop_x) <= 0]
                pop = pop.merge(new_pop)

            pop = pop[:self.pop_size]
            pop_x = pop.get('X').copy()

        pop_f = self.problem.evaluate(pop_x, return_values_of=['F'])

        # initialize buffer
        self.buffer = get_buffer(self.problem.n_obj, **self.buffer_args)
        self.buffer.origin = self.problem.normalization.do(
            y=self.buffer.origin.reshape(1, -1))[0]
        patch_ids = np.full(
            self.pop_size,
            self.patch_id)  # NOTE: patch_ids here might not make sense
        self.patch_id += 1
        self.buffer.insert(pop_x, pop_f, patch_ids)

        # update population by the best samples in the buffer
        pop = pop.new('X', self.buffer.sample(self.pop_size))

        # evaluate population using the objective function
        self.evaluator.eval(self.problem, pop, algorithm=self)

        # NOTE: check if need survival here
        if self.survival:
            pop = self.survival.do(self.problem, pop, len(pop), algorithm=self)

        self.pop = pop

        # sys.stdout.write('ParetoDiscovery optimizing: generation %i' % self.n_gen)
        # sys.stdout.flush()

    def _next(self):
        '''
        Core algorithm part in each iteration, see algorithm 1.
        --------------------------------------
        xs = stochastic_sampling(B, F, X)
        for x in xs:
            D = select_direction(B, x)
            x_opt = local_optimization(D, F, X)
            M = first_order_approximation(x_opt, F, X)
            update_buffer(B, F(M))
        --------------------------------------
        where F is problem evaluation, X is design constraints
        '''
        # update optimization progress
        # sys.stdout.write('\b' * len(str(self.n_gen - 1)) + str(self.n_gen))
        # sys.stdout.flush()

        # stochastic sampling by adding a local perturbation
        xs = self._stochastic_sampling()

        # parallelize the core pareto discovery process by multiprocessing, see _pareto_discover()
        # including select_direction, local_optimization, first_order_approximation in the algorithm illustration above
        x_batch = np.array_split(xs, self.n_process)
        queue = Queue()
        process_count = 0
        for x in x_batch:
            if len(x) > 0:
                p = Process(target=_pareto_discover,
                            args=(x, self.problem.evaluate,
                                  [self.problem.xl,
                                   self.problem.xu], self.constr_func,
                                  self.delta_s, self.buffer.origin,
                                  self.buffer.origin_constant,
                                  self.n_grid_sample, queue))
                p.start()
                process_count += 1

        # gather results (new samples, new patch ids, new origin of performance buffer) from parallel discovery
        new_origin = self.buffer.origin
        x_samples_all = []
        patch_ids_all = []
        for _ in range(process_count):
            x_samples, patch_ids, sample_num, origin = queue.get()
            if x_samples is not None:
                x_samples_all.append(x_samples)
                patch_ids_all.append(
                    np.array(patch_ids) + self.patch_id
                )  # assign corresponding global patch ids to samples
                self.patch_id += sample_num
            new_origin = np.minimum(new_origin, origin)

        # evaluate all new samples and adjust the origin point of the buffer
        x_samples_all = np.vstack(x_samples_all)
        y_samples_all = self.problem.evaluate(x_samples_all,
                                              return_values_of=['F'])
        new_origin = np.minimum(np.min(y_samples_all, axis=0), new_origin)
        patch_ids_all = np.concatenate(patch_ids_all)

        # update buffer
        self.buffer.move_origin(new_origin)
        self.buffer.insert(x_samples_all, y_samples_all, patch_ids_all)

        # update population by the best samples in the buffer
        self.pop = self.pop.new('X', self.buffer.sample(self.pop_size))
        self.evaluator.eval(self.problem, self.pop, algorithm=self)

    def _stochastic_sampling(self):
        '''
        Stochastic sampling around current population to initialize each iteration to avoid local minima, see section 6.2.2
        '''
        xs = self.pop.get('X').copy()

        # sampling loop
        num_target = xs.shape[0]
        xs_final = np.zeros((0, xs.shape[1]), xs.dtype)

        while xs_final.shape[0] < num_target:
            # generate stochastic direction
            d = np.random.random(xs.shape)
            d /= np.expand_dims(np.linalg.norm(d, axis=1), axis=1)

            # generate random scaling factor
            delta = np.random.random() * self.delta_p

            # generate new stochastic samples
            xs_perturb = xs + 1.0 / (
                2**delta
            ) * d  # NOTE: is this scaling form reasonable? maybe better use relative scaling?
            xs_perturb = np.clip(xs_perturb, self.problem.xl, self.problem.xu)

            if self.constr_func is None:
                xs_final = xs_perturb
            else:
                # check constraint values
                constr = self.constr_func(xs_perturb)
                flag = constr <= 0
                if np.any(flag):
                    xs_final = np.vstack((xs_final, xs_perturb[flag]))

        return xs_final[:num_target]

    def propose_next_batch(self, curr_pfront, ref_point, batch_size,
                           normalization):
        '''
        Propose the next batch to evaluate for active learning.
        Greedily propose the sample with max HV until all families are visited; allow only samples with max HV from unvisited families.
        '''
        approx_x, approx_y = normalization.undo(self.approx_set,
                                                self.approx_front)
        labels = self.fam_lbls

        X_next = []
        Y_next = []
        family_lbls = []

        if len(approx_x) >= batch_size:
            # approximation result is enough to propose all candidates
            curr_X_next, curr_Y_next, labels_next = propose_next_batch(
                curr_pfront, ref_point, approx_y, approx_x, batch_size, labels)
            X_next.append(curr_X_next)
            Y_next.append(curr_Y_next)
            family_lbls.append(labels_next)

        else:
            # approximation result is not enough to propose all candidates
            # so propose all result as candidates, and propose others from buffer
            # NOTE: may consider re-expanding manifolds to produce more approximation result, but may not be necessary
            X_next.append(approx_x)
            Y_next.append(approx_y)
            family_lbls.extend(labels)
            remain_batch_size = batch_size - len(approx_x)
            buffer_xs, buffer_ys = self.buffer.flattened()
            buffer_xs, buffer_ys = normalization.undo(buffer_xs, buffer_ys)
            prop_X_next, prop_Y_next = propose_next_batch_without_label(
                curr_pfront, ref_point, buffer_ys, buffer_xs,
                remain_batch_size)
            X_next.append(prop_X_next)
            Y_next.append(prop_Y_next)
            family_lbls.extend(np.full(remain_batch_size, -1))

        X_next = np.vstack(X_next)
        Y_next = np.vstack(Y_next)
        return X_next, Y_next, family_lbls

    def get_sparse_front(self, normalization):
        '''
        Get sparse approximation of Pareto front and set
        '''
        approx_x, approx_y = normalization.undo(self.approx_set,
                                                self.approx_front)
        labels = self.fam_lbls

        return labels, approx_x, approx_y

    def _finalize(self):
        # set population as all samples in performance buffer
        pop_x, pop_y = self.buffer.flattened()
        self.pop = self.pop.new('X', pop_x, 'F', pop_y)
        # get sparse front approximation
        self.fam_lbls, self.approx_set, self.approx_front = self.buffer.sparse_approximation(
        )
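
The `_next` method above fans the discovery work out with `multiprocessing.Process` and gathers one result per started worker through a shared `Queue`. Here is a self-contained sketch of that fan-out/gather pattern; the worker is a hypothetical stand-in, not the real `_pareto_discover`.

# Self-contained sketch of the fan-out/gather pattern used in _next above.
# _fake_worker is a hypothetical stand-in for _pareto_discover.
import numpy as np
from multiprocessing import Process, Queue, cpu_count

def _fake_worker(x_chunk, queue):
    # pretend to discover new samples near the given chunk
    queue.put(x_chunk + 0.01 * np.random.randn(*x_chunk.shape))

if __name__ == "__main__":
    xs = np.random.rand(32, 4)
    queue, processes = Queue(), []
    for chunk in np.array_split(xs, cpu_count()):
        if len(chunk) > 0:
            p = Process(target=_fake_worker, args=(chunk, queue))
            p.start()
            processes.append(p)
    results = np.vstack([queue.get() for _ in processes])  # one result per started worker
    for p in processes:
        p.join()
    print(results.shape)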
Example #4
class PSO(Algorithm):
    def __init__(self,
                 pop_size=20,
                 w=0.9,
                 c1=2.0,
                 c2=2.0,
                 sampling=LatinHypercubeSampling(),
                 adaptive=True,
                 pertube_best=True,
                 display=PSODisplay(),
                 repair=None,
                 individual=Individual(),
                 **kwargs):
        """

        Parameters
        ----------
        pop_size : {pop_size}
        sampling : {sampling}

        """

        super().__init__(display=display, **kwargs)

        self.initialization = Initialization(sampling,
                                             individual=individual,
                                             repair=repair)

        self.pop_size = pop_size
        self.adaptive = adaptive
        self.pertube_best = pertube_best
        self.default_termination = SingleObjectiveDefaultTermination()
        self.V_max = None

        self.w = w
        self.c1 = c1
        self.c2 = c2

    def initialize(self, problem, **kwargs):
        super().initialize(problem, **kwargs)
        self.V_max = 0.2 * (problem.xu - problem.xl)

    def _initialize(self):
        pop = self.initialization.do(self.problem,
                                     self.pop_size,
                                     algorithm=self)
        self.evaluator.eval(self.problem, pop, algorithm=self)

        if self.pertube_best:
            pop = FitnessSurvival().do(self.problem, pop, self.pop_size - 1)

        pop.set("V", np.zeros((len(pop), self.problem.n_var)))
        pop.set("pbest", pop)
        self.pop = pop

        self.f = None
        self.strategy = None

    def _next(self):
        self._step()

        if self.adaptive:
            self._adapt()

    def _step(self):
        pop = self.pop
        X, F, V = pop.get("X", "F", "V")

        # get the personal best of each particle
        pbest = Population.create(*pop.get("pbest"))
        P_X, P_F = pbest.get("X", "F")

        # get the global best solution
        best = self.opt.repeat(len(pop))
        G_X = best.get("X")

        # calculate the inertia term of the velocity update
        inertia = self.w * V

        # calculate random values for the updates
        r1 = np.random.random((len(pop), self.problem.n_var))
        r2 = np.random.random((len(pop), self.problem.n_var))

        cognitive = self.c1 * r1 * (P_X - X)
        social = self.c2 * r2 * (G_X - X)

        # calculate the velocity vector
        _V = inertia + cognitive + social
        _V = repair_out_of_bounds_manually(_V, -self.V_max, self.V_max)

        # update the values of each particle
        _X = X + _V
        _X = repair_out_of_bounds(self.problem, _X)

        # evaluate the offspring population
        off = Population(len(pop)).set("X", _X, "V", _V, "pbest", pbest)
        self.evaluator.eval(self.problem, off, algorithm=self)

        # check whether a solution has improved or not - also consider constraints here
        has_improved = ImprovementReplacement().do(self.problem,
                                                   pbest,
                                                   off,
                                                   return_indices=True)

        # replace the personal best of each particle if it has improved
        off[has_improved].set("pbest", off[has_improved])
        off.set("best", best)
        pop = off

        # try to improve the current best with a perturbation
        if self.pertube_best:
            opt = FitnessSurvival().do(self.problem,
                                       Population.create(*pop.get("pbest")), 1)
            eta = int(np.random.uniform(5, 30))
            mutant = PolynomialMutation(eta).do(self.problem, opt)
            self.evaluator.eval(self.problem, mutant, algorithm=self)
            if ImprovementReplacement().do(self.problem,
                                           opt,
                                           mutant,
                                           return_indices=True)[0]:
                k = [i for i, e in enumerate(pop.get("pbest")) if e == opt][0]
                pop[k].set("pbest", mutant)

        self.pop = pop

    def _adapt(self):
        pop = self.pop

        X, F, best = pop.get("X", "F", "best")
        best = Population.create(*best)
        w, c1, c2 = self.w, self.c1, self.c2

        # get the average distance from one to another for normalization
        D = norm_eucl_dist(self.problem, X, X)
        mD = D.sum(axis=1) / (len(pop) - 1)
        _min, _max = mD.min(), mD.max()

        # get the average distance to the global best
        g_D = norm_euclidean_distance(self.problem)(best.get("X"), X).mean()
        f = (g_D - _min) / (_max - _min + 1e-32)

        S = np.array([
            S1_exploration(f),
            S2_exploitation(f),
            S3_convergence(f),
            S4_jumping_out(f)
        ])
        strategy = S.argmax() + 1

        delta = 0.05 + (np.random.random() * 0.05)

        if strategy == 1:
            c1 += delta
            c2 -= delta
        elif strategy == 2:
            c1 += 0.5 * delta
            c2 -= 0.5 * delta
        elif strategy == 3:
            c1 += 0.5 * delta
            c2 += 0.5 * delta
        elif strategy == 4:
            c1 -= delta
            c2 += delta

        c1 = max(1.5, min(2.5, c1))
        c2 = max(1.5, min(2.5, c2))

        if c1 + c2 > 4.0:
            total = c1 + c2
            c1 = 4.0 * (c1 / total)
            c2 = 4.0 * (c2 / total)

        w = 1 / (1 + 1.5 * np.exp(-2.6 * f))

        self.f = f
        self.strategy = strategy
        self.c1 = c1
        self.c2 = c2
        self.w = w

    def _set_optimum(self, force=False):
        pbest = Population.create(*self.pop.get("pbest"))
        self.opt = filter_optimum(pbest, least_infeasible=True)
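
Stripped of the Population bookkeeping, the core of `_step` above reduces to the canonical PSO velocity and position update. A short numpy sketch of just those equations (function and argument names here are illustrative, not the class's exact code path):

# Bare velocity/position update as performed in _step above, without the
# Population bookkeeping (names are illustrative).
import numpy as np

def pso_update(X, V, P_X, G_X, w=0.9, c1=2.0, c2=2.0, V_max=None):
    r1, r2 = np.random.random(X.shape), np.random.random(X.shape)
    V_new = w * V + c1 * r1 * (P_X - X) + c2 * r2 * (G_X - X)
    if V_max is not None:
        V_new = np.clip(V_new, -V_max, V_max)  # velocity clamping
    return X + V_new, V_new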
Example #5
class CuckooSearch(Algorithm):
    def __init__(self,
                 display=CSDisplay(),
                 sampling=FloatRandomSampling(),
                 survival=FitnessSurvival(),
                 eliminate_duplicates=DefaultDuplicateElimination(),
                 termination=SingleObjectiveDefaultTermination(),
                 pop_size=100,
                 beta=1.5,
                 alfa=0.01,
                 pa=0.35,
                 **kwargs):
        """

        Parameters
        ----------
        display : {display}
        sampling : {sampling}
        survival : {survival}
        eliminate_duplicates: This does not exist in the original paper/book.
            Without it, the solutions might become too biased towards the current global best solution,
            because the global random walk uses the global best solution as the reference.

        termination : {termination}

        pop_size : The number of nests (solutions)

        beta : The input parameter of Mantegna's algorithm to simulate
            sampling from a Levy distribution

        alfa : The step-size scaling factor, usually 0.01, so that the
            step size is scaled down to O(L/100), where L is the scale
            (range of bounds) of the problem.

        pa   : The switch probability; a pa fraction of the nests will be
            abandoned in every iteration
        """

        super().__init__(**kwargs)

        self.initialization = Initialization(sampling)
        self.survival = survival
        self.display = display
        self.pop_size = pop_size
        self.default_termination = termination
        self.eliminate_duplicates = eliminate_duplicates

        # the scale will be multiplied by the problem scale after the problem is given in setup
        self.alfa = alfa
        self.scale = alfa
        self.pa = pa
        self.beta = beta
        a = math.gamma(1. + beta) * math.sin(math.pi * beta / 2.)
        b = beta * math.gamma((1. + beta) / 2.) * 2**((beta - 1.) / 2)
        self.sig = (a / b)**(1. / (2 * beta))

    def setup(self, problem, **kwargs):
        super().setup(problem, **kwargs)
        x_lower, x_upper = self.problem.bounds()
        if x_lower is not None and x_upper is not None:
            self.scale = self.alfa * (x_upper - x_lower)
        else:
            self.scale = self.alfa

    def _initialize(self):
        pop = self.initialization.do(
            self.problem,
            self.pop_size,
            algorithm=self,
            eliminate_duplicates=self.eliminate_duplicates)
        self.evaluator.eval(self.problem, pop, algorithm=self)

        if self.survival:
            pop = self.survival.do(self.problem, pop, len(pop), algorithm=self)
        self.pop = pop

    def _next(self):
        self._step()

    def _get_levy_step(self, shape):
        # Mantegna's algorithm for simulating Levy sampling
        U = np.random.normal(0, self.sig, shape)
        V = abs(np.random.normal(0, 1, shape))**(1. / self.beta)
        return U / V

    def _get_global_step_size(self, X):
        step = self._get_levy_step(X.shape)
        step_size = self.scale * step
        return step_size

    def _get_local_directional_vector(self, X):
        # local random walk (abandon nests) for a pa fraction of the nests
        # find 2 random, distinct solutions for the local random walk (nest_i ~ nest_i + (nest_j - nest_k))
        Xjk_idx = np.random.rand(len(X), len(X)).argpartition(2, axis=1)[:, :2]
        Xj_idx = Xjk_idx[:, 0]
        Xk_idx = Xjk_idx[:, 1]
        Xj = X[Xj_idx]
        Xk = X[Xk_idx]

        # calculate the Heaviside function (i.e., whether the local search will be done for nest_i or not)
        # then duplicate the H column as many times as the number of decision variables
        H = (np.random.rand(len(X)) < self.pa).astype(float)
        H = np.tile(H, (self.problem.n_var, 1)).transpose()

        # calculate d (scale * (X_j - X_k)); however, X.S. Yang's MathWorks implementation differs from the book
        # by replacing the scale with a random number in [0, 1]; we use the book version here (a0)
        dir_vec = np.random.rand(*X.shape) * (Xj - Xk) * H
        return dir_vec

    def _step(self):
        pop = self.pop
        X = pop.get("X")
        F = pop.get("F")

        #Levy Flight
        best = self.opt
        G_X = best.get("X")

        step_size = self._get_global_step_size(X)
        _X = X + np.random.rand(*X.shape) * step_size * (G_X - X)
        _X = set_to_bounds_if_outside_by_problem(self.problem, _X)

        #Evaluate
        off = Population(len(pop)).set("X", _X)
        self.evaluator.eval(self.problem, off, algorithm=self)

        # replace the worse pop with the better off, index by index
        # this method also considers replacement with less constraint violation,
        # which the original paper doesn't have
        ImprovementReplacement().do(self.problem, pop, off, inplace=True)

        #Local Random Walk
        dir_vec = self._get_local_directional_vector(X)
        _X = X + dir_vec
        _X = set_to_bounds_if_outside_by_problem(self.problem, _X)
        off = Population(len(pop)).set("X", _X)
        self.evaluator.eval(self.problem, off, algorithm=self)

        # append offspring to the population and then sort for elitism (survival)
        self.pop = Population.merge(pop, off)
        self.pop = self.survival.do(self.problem,
                                    self.pop,
                                    self.pop_size,
                                    algorithm=self)
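
The sigma computed in `__init__` and the ratio in `_get_levy_step` implement Mantegna's algorithm for drawing heavy-tailed, Levy-like steps. A standalone sketch using the same formulas as the class above, with a quick sanity check of the heavy tail:

# Standalone sketch of Mantegna's algorithm as used above: heavy-tailed
# steps approximating a Levy-stable distribution with index beta.
import math
import numpy as np

def levy_step(shape, beta=1.5):
    a = math.gamma(1. + beta) * math.sin(math.pi * beta / 2.)
    b = beta * math.gamma((1. + beta) / 2.) * 2 ** ((beta - 1.) / 2)
    sig = (a / b) ** (1. / (2 * beta))  # same sigma formula as the class above
    U = np.random.normal(0, sig, shape)
    V = np.abs(np.random.normal(0, 1, shape)) ** (1. / beta)
    return U / V

steps = levy_step((100000,))
print(np.median(np.abs(steps)), np.abs(steps).max())  # heavy tail: max >> median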
Example #6
class MMGA(Algorithm):
    def __init__(self,
                 pop_size=200,
                 n_parallel=10,
                 sampling=LatinHypercubeSampling(),
                 display=SingleObjectiveDisplay(),
                 repair=None,
                 individual=Individual(),
                 **kwargs):
        """

        Parameters
        ----------
        pop_size : {pop_size}
        n_parallel : number of local solvers run in parallel
        sampling : {sampling}

        """

        super().__init__(display=display, **kwargs)

        self.initialization = Initialization(sampling,
                                             individual=individual,
                                             repair=repair)

        self.pop_size = pop_size
        self.n_parallel = n_parallel
        self.each_pop_size = pop_size // n_parallel

        self.solvers = None
        self.niches = []

        def cmaes(problem, x):
            solver = CMAES(x0=x, tolfun=1e-11, tolx=1e-3, restarts=0)
            solver.initialize(problem)
            solver.next()
            return solver

        def nelder_mead(problem, x):
            solver = NelderMead(X=x)
            solver.initialize(problem)
            solver._initialize()
            solver.n_gen = 1
            solver.next()
            return solver

        self.func_create_solver = nelder_mead

        self.default_termination = SingleObjectiveDefaultTermination()

    def _initialize(self):
        self.pop = self.initialization.do(self.problem,
                                          self.pop_size,
                                          algorithm=self)
        self.evaluator.eval(self.problem, self.pop, algorithm=self)

        X = self.pop.get("X")
        D = norm_eucl_dist(self.problem, X, X)
        S = select_by_clearing(self.pop, D, self.n_parallel,
                               func_select_by_objective)

        self.solvers = []
        for s in S:
            solver = self.func_create_solver(self.problem, self.pop[s].X)
            self.solvers.append(solver)

    def _next(self):
        n_evals = np.array(
            [solver.evaluator.n_eval for solver in self.solvers])
        ranks = np.array([solver.opt[0].F[0]
                          for solver in self.solvers]).argsort() + 1

        rws = RouletteWheelSelection(ranks, larger_is_better=False)
        S = rws.next()
        self.solvers[S].next()

        print(n_evals.sum(), n_evals)

        solver = self.solvers[S]
        if solver.termination.force_termination or solver.termination.has_terminated(solver):
            self.niches.append(solver)
            print(solver.opt.get("F"), solver.opt.get("X"))
            self.solvers[S] = None

        # restart any solver that has finished from a new random point
        for k in range(self.n_parallel):
            if self.solvers[k] is None:
                x = FloatRandomSampling().do(self.problem, 1)[0].get("X")
                self.solvers[k] = self.func_create_solver(self.problem, x)

    def _set_optimum(self, force=False):
        self.opt = Population()
        for solver in self.niches:
            self.opt = Population.merge(self.opt, solver.opt)
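
`_next` budgets iterations across the local solvers with a rank-based roulette wheel: better-performing solvers are picked more often. A minimal numpy sketch of that selection rule, assuming `larger_is_better=False` means a lower rank receives a higher probability:

# Minimal sketch of rank-based roulette-wheel selection as used in _next
# above (assumed semantics of RouletteWheelSelection with larger_is_better=False).
import numpy as np

def roulette_pick(fitness):
    ranks = fitness.argsort().argsort() + 1  # 1 = best (lowest) objective value
    weights = 1.0 / ranks                    # favor better-ranked solvers
    return np.random.choice(len(fitness), p=weights / weights.sum())

picked = roulette_pick(np.array([3.2, 0.5, 1.7]))  # index 1 is picked most often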
Example #7
class RandomAlgorithm(Algorithm):

    def __init__(self,
                 pop_size=None,
                 sampling=None,
                 eliminate_duplicates=DefaultDuplicateElimination(),
                 individual=Individual(),
                 **kwargs
                 ):

        super().__init__(**kwargs)

        # the population size used
        self.pop_size = pop_size

        # number of offspring (a fresh sample of pop_size in each iteration)
        self.n_offsprings = pop_size

        # the object to be used to represent an individual - either individual or derived class
        self.individual = individual

        # set the duplicate detection class - a boolean value chooses the default duplicate detection
        if isinstance(eliminate_duplicates, bool):
            if eliminate_duplicates:
                self.eliminate_duplicates = DefaultDuplicateElimination()
            else:
                self.eliminate_duplicates = NoDuplicateElimination()
        else:
            self.eliminate_duplicates = eliminate_duplicates

        # always use the no-repair object since no repair is performed here
        self.repair = NoRepair()

        self.initialization = Initialization(sampling,
                                             individual=individual,
                                             repair=self.repair,
                                             eliminate_duplicates=self.eliminate_duplicates)

        self.n_gen = None
        self.pop = None
        self.off = None

    def _initialize(self):
        # create the initial population
        pop = self.initialization.do(self.problem, self.pop_size, algorithm=self)
        pop.set("n_gen", self.n_gen)

        # then evaluate using the objective function
        self.evaluator.eval(self.problem, pop, algorithm=self)

        self.pop, self.off = pop, pop

    def _next(self):
        # sampling again
        self.off = self.initialization.do(self.problem, self.pop_size, algorithm=self)
        self.off.set("n_gen", self.n_gen)

        # evaluate the offspring
        self.evaluator.eval(self.problem, self.off, algorithm=self)

        self.pop = self.off
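
This class is pure random search: every generation discards the old population and evaluates a fresh sample. The same baseline in a few self-contained lines (a sketch; the objective, bounds, and budget are illustrative assumptions):

# Self-contained random-search baseline equivalent to the class above
# (objective, bounds, and budget here are illustrative assumptions).
import numpy as np

def random_search(f, xl, xu, pop_size=50, n_gen=100):
    best_x, best_f = None, np.inf
    for _ in range(n_gen):
        X = xl + np.random.rand(pop_size, len(xl)) * (xu - xl)  # resample the population
        F = np.apply_along_axis(f, 1, X)
        i = F.argmin()
        if F[i] < best_f:
            best_x, best_f = X[i], F[i]
    return best_x, best_f

sphere = lambda x: float(np.sum(x ** 2))
print(random_search(sphere, np.full(3, -5.0), np.full(3, 5.0)))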