Example 1
    def _get_sampling(self, X, Y):
        '''
        Initialize population from data.

        Parameters
        ----------
        X: np.array
            Design variables.
        Y: np.array
            Performance values.

        Returns
        -------
        sampling: np.array or pymoo.model.sampling.Sampling
            Initial population or a sampling method for generating initial population.
        '''
        if self.pop_init_method == 'lhs':
            sampling = LatinHypercubeSampling()
        elif self.pop_init_method == 'nds':
            sorted_indices = NonDominatedSorting().do(Y)
            pop_size = self.algo_kwargs['pop_size']
            sampling = X[np.concatenate(sorted_indices)][:pop_size]
            # NOTE: use lhs if current samples are not enough
            if len(sampling) < pop_size:
                rest_sampling = lhs(X.shape[1], pop_size - len(sampling))
                sampling = np.vstack([sampling, rest_sampling])
        elif self.pop_init_method == 'random':
            sampling = FloatRandomSampling()
        else:
            raise NotImplementedError

        return sampling
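A minimal usage sketch of how the returned value might be consumed, mirroring the NSGA-II script in Example 14 below: either a Sampling operator or a plain array of seed designs is handed directly to a pymoo algorithm. The names problem, optimizer, X and Y are assumptions for illustration; optimizer stands for any object exposing the method above.

from pymoo.algorithms.nsga2 import NSGA2
from pymoo.optimize import minimize

# seed the evolutionary search with the initial population produced above
sampling = optimizer._get_sampling(X, Y)
algorithm = NSGA2(pop_size=optimizer.algo_kwargs['pop_size'], sampling=sampling)
res = minimize(problem, algorithm, ('n_gen', 20), save_history=True)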
Example 2
    def __init__(self,
                 pop_size=100,
                 sampling=LatinHypercubeSampling(iterations=100,
                                                 criterion="maxmin"),
                 variant="DE/rand/1/bin",
                 CR=0.5,
                 F=0.3,
                 dither="vector",
                 jitter=False,
                 **kwargs):
        """

        Parameters
        ----------

        pop_size : {pop_size}

        sampling : {sampling}

        variant : {{DE/(rand|best)/1/(bin/exp)}}
         The DE variant to be used, written as DE/x/y/z, where x denotes how the individuals to be
         perturbed are selected, y the number of difference vectors used, and z the crossover type.
         One of the most common variants is DE/rand/1/bin.

        F : float
         The weight to be used during the crossover.

        CR : float
         The probability that an individual exchanges variable values with the donor vector.

        dither : {{'no', 'scalar', 'vector'}}
         A strategy to introduce adaptive weights (F) during a run. The option allows
         the same dither to be used within one iteration ('scalar') or a different one for
         each individual ('vector').

        jitter : bool
         Another strategy for adaptive weights (F). Here, only a very small value is added to or
         subtracted from the weight used for the crossover for each individual.


        """

        _, self.var_selection, self.var_n, self.var_mutation = variant.split("/")

        if self.var_mutation == "exp":
            mutation = ExponentialCrossover(CR)
        elif self.var_mutation == "bin":
            mutation = UniformCrossover(CR)

        super().__init__(pop_size=pop_size,
                         sampling=sampling,
                         selection=RandomSelection(),
                         crossover=DifferentialEvolutionCrossover(
                             weight=F, dither=dither, jitter=jitter),
                         mutation=mutation,
                         survival=None,
                         **kwargs)

        self.func_display_attrs = disp_single_objective
Example 3
    def _get_sampling(self, X, Y, bound=None, mode=0):
        '''
        Initialize population from data
        '''
        if self.pop_init_method == 'lhs':
            sampling = LatinHypercubeSampling()
        elif self.pop_init_method == 'nds':
            sorted_indices = NonDominatedSorting().do(Y)
            pop_size = self.algo_kwargs['pop_size']
            sampling = X[np.concatenate(sorted_indices)][:pop_size]
            # NOTE: use lhs if current samples are not enough
            if len(sampling) < pop_size:
                rest_sampling = lhs(X.shape[1], pop_size - len(sampling))
                if bound is not None:
                    # scale the unit-hypercube samples to the given bounds and round them
                    rest_sampling = np.round(bound[0] + rest_sampling * (bound[1] - bound[0]))
                sampling = np.vstack([sampling, rest_sampling])
        elif self.pop_init_method == 'random':
            sampling = FloatRandomSampling()
        else:
            raise NotImplementedError

        return sampling
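The bound branch assumes lhs returns samples in the unit hypercube (as pyDOE's lhs does) and rescales them to [bound[0], bound[1]] before rounding, which is convenient when the design variables are integers. A small sketch of that transform under the pyDOE assumption, with made-up bounds:

import numpy as np
from pyDOE import lhs  # assumed source of lhs(); any sampler on [0, 1]^d works

xl = np.array([0, 0, 0])    # hypothetical lower bounds
xu = np.array([10, 5, 2])   # hypothetical upper bounds

unit = lhs(3, 4)                          # 4 Latin hypercube samples in [0, 1]^3
scaled = np.round(xl + unit * (xu - xl))  # same transform as the 'bound' branch above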
Example 4
def de(
        pop_size=100,
        sampling=LatinHypercubeSampling(criterion="maxmin", iterations=100),
        variant="DE/rand+best/1/bin",
        CR=0.5,
        F=0.75,
        **kwargs):
    """

    Parameters
    ----------
    pop_size : {pop_size}
    sampling : {sampling}
    variant : str

    CR : float

    F : float

    Returns
    -------
    de : :class:`~pymoo.model.algorithm.Algorithm`
        Returns a DifferentialEvolution algorithm object.

    """

    _, _selection, _n, _mutation = variant.split("/")

    return DifferentialEvolution(pop_size=pop_size,
                                 sampling=sampling,
                                 selection=RandomSelection(),
                                 crossover=DifferentialEvolutionCrossover(weight=F),
                                 mutation=DifferentialEvolutionMutation(_mutation, CR),
                                 **kwargs)
Example 5
    def __init__(self,
                 n_points_per_iteration=100,
                 sampling=LatinHypercubeSampling(),
                 display=SingleObjectiveDisplay(),
                 **kwargs):
        super().__init__(display=display, **kwargs)
        self.n_points_per_iteration = n_points_per_iteration
        self.sampling = sampling
Example 6
def de(
        pop_size=100,
        sampling=LatinHypercubeSampling(iterations=100, criterion="maxmin"),
        variant="DE/rand/1/bin",
        CR=0.5,
        F=0.3,
        dither="vector",
        jitter=False,
        **kwargs):
    """

    Parameters
    ----------

    pop_size : {pop_size}

    sampling : {sampling}

    variant : {{DE/(rand|best)/1/(bin/exp)}}
        The DE variant to be used, written as DE/x/y/z, where x denotes how the individuals to be
        perturbed are selected, y the number of difference vectors used, and z the crossover type.
        One of the most common variants is DE/rand/1/bin.

    F : float
        The weight to be used during the crossover.

    CR : float
        The probability that an individual exchanges variable values with the donor vector.

    dither : {{'no', 'scalar', 'vector'}}
        A strategy to introduce adaptive weights (F) during a run. The option allows
        the same dither to be used within one iteration ('scalar') or a different one for
        each individual ('vector').

    jitter : bool
        Another strategy for adaptive weights (F). Here, only a very small value is added to or
        subtracted from the weight used for the crossover for each individual.


    Returns
    -------
    de : :class:`~pymoo.model.algorithm.Algorithm`
        Returns a DifferentialEvolution algorithm object.

    """

    _, _selection, _n, _mutation = variant.split("/")

    return DifferentialEvolution(
        variant,
        CR,
        F,
        dither,
        jitter,
        pop_size=pop_size,
        sampling=sampling,
        **kwargs)
Example 7
    def __init__(self, n_initial_samples=50, n_parallel_searches=5, **kwargs):
        super().__init__(**kwargs)

        # the number of parallel (local) searches to maintain
        self.n_parallel_searches = n_parallel_searches

        # the initial sampling to be used
        self.sampling = LatinHypercubeSampling(iterations=100)
        self.n_initial_samples = n_initial_samples

        # create a global evaluator that keeps track of the inner algorithms as well
        self.evaluator = GlobalEvaluator()

        # objects used during the optimization
        self.algorithms = []

        # display the single-objective metrics
        self.func_display_attrs = disp_single_objective
Example 8
    def __init__(self,
                 x0=None,
                 sampling=LatinHypercubeSampling(),
                 n_sample_points="auto",
                 **kwargs):

        super().__init__(**kwargs)
        self.x0 = x0
        self.sampling = sampling
        self.n_sample_points = n_sample_points
Example 9
    def __init__(self,
                 pop_size=100,
                 sampling=LatinHypercubeSampling(),
                 variant="DE/rand/1/bin",
                 CR=0.5,
                 F=0.3,
                 dither="vector",
                 jitter=False,
                 display=SingleObjectiveDisplay(),
                 **kwargs):
        """

        Parameters
        ----------

        pop_size : {pop_size}

        sampling : {sampling}

        variant : {{DE/(rand|best)/1/(bin/exp)}}
         The DE variant to be used, written as DE/x/y/z, where x denotes how the individuals to be
         perturbed are selected, y the number of difference vectors used, and z the crossover type.
         One of the most common variants is DE/rand/1/bin.

        F : float
         The weight to be used during the crossover.

        CR : float
         The probability that an individual exchanges variable values with the donor vector.

        dither : {{'no', 'scalar', 'vector'}}
         A strategy to introduce adaptive weights (F) during a run. The option allows
         the same dither to be used within one iteration ('scalar') or a different one for
         each individual ('vector').

        jitter : bool
         Another strategy for adaptive weights (F). Here, only a very small value is added to or
         subtracted from the weight used for the crossover for each individual.

        """

        mating = DifferentialEvolutionMating(variant=variant,
                                             CR=CR,
                                             F=F,
                                             dither=dither,
                                             jitter=jitter)

        super().__init__(pop_size=pop_size,
                         sampling=sampling,
                         mating=mating,
                         survival=None,
                         display=display,
                         **kwargs)

        self.default_termination = SingleObjectiveDefaultTermination()
Example 10
    def __init__(self,
                 pop_size=200,
                 n_parallel=10,
                 sampling=LatinHypercubeSampling(),
                 display=SingleObjectiveDisplay(),
                 repair=None,
                 individual=Individual(),
                 **kwargs):
        """

        Parameters
        ----------
        pop_size : {pop_size}
        n_parallel : int
            The number of parallel local searches (niches) the population is split into.
        sampling : {sampling}

        """

        super().__init__(display=display, **kwargs)

        self.initialization = Initialization(sampling,
                                             individual=individual,
                                             repair=repair)

        self.pop_size = pop_size
        self.n_parallel = n_parallel
        self.each_pop_size = pop_size // n_parallel

        self.solvers = None
        self.niches = []

        def cmaes(problem, x):
            solver = CMAES(x0=x, tolfun=1e-11, tolx=1e-3, restarts=0)
            solver.initialize(problem)
            solver.next()
            return solver

        def nelder_mead(problem, x):
            solver = NelderMead(X=x)
            solver.initialize(problem)
            solver._initialize()
            solver.n_gen = 1
            solver.next()
            return solver

        self.func_create_solver = nelder_mead

        self.default_termination = SingleObjectiveDefaultTermination()
Example 11
    def __init__(self,
                 variant="DE/rand+best/1/bin",
                 CR=0.5,
                 F=0.75,
                 n_replace=None,
                 **kwargs):

        _, self.var_selection, self.var_n, self.var_mutation = variant.split("/")

        set_if_none(kwargs, 'pop_size', 200)
        set_if_none(kwargs, 'sampling', LatinHypercubeSampling(criterion="maxmin", iterations=100))
        set_if_none(kwargs, 'crossover', DifferentialEvolutionCrossover(weight=F))
        set_if_none(kwargs, 'selection', RandomSelection())
        set_if_none(kwargs, 'mutation', DifferentialEvolutionMutation(self.var_mutation, CR))
        set_if_none(kwargs, 'survival', None)
        super().__init__(**kwargs)

        self.n_replace = n_replace
        self.func_display_attrs = disp_single_objective
Example 12
    def __init__(self,
                 variant,
                 CR,
                 F,
                 dither,
                 jitter,
                 **kwargs):

        _, self.var_selection, self.var_n, self.var_mutation = variant.split("/")

        set_if_none(kwargs, 'pop_size', 200)
        set_if_none(kwargs, 'sampling', LatinHypercubeSampling(criterion="maxmin", iterations=100))
        set_if_none(kwargs, 'crossover', DifferentialEvolutionCrossover(weight=F, dither=dither, jitter=jitter))
        set_if_none(kwargs, 'selection', RandomSelection())

        if self.var_mutation == "exp":
            set_if_none(kwargs, 'mutation', ExponentialCrossover(CR))
        elif self.var_mutation == "bin":
            set_if_none(kwargs, 'mutation', UniformCrossover(CR))

        set_if_none(kwargs, 'survival', None)
        super().__init__(**kwargs)

        self.func_display_attrs = disp_single_objective
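The set_if_none calls above only install defaults for keys the caller did not pass, so explicit keyword arguments always take precedence. A stand-alone sketch of the semantics being relied on (pymoo ships such a helper in its utilities; this version is an assumption for illustration, not a copy of it):

def set_if_none(kwargs, key, value):
    # install the default only if the caller did not supply the key
    if key not in kwargs:
        kwargs[key] = value

kwargs = {'pop_size': 50}
set_if_none(kwargs, 'pop_size', 200)   # caller's value wins -> kwargs['pop_size'] == 50
set_if_none(kwargs, 'survival', None)  # default installed   -> kwargs['survival'] is None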
Example 13
    def __init__(self,
                 pop_size=20,
                 w=0.9,
                 c1=2.0,
                 c2=2.0,
                 sampling=LatinHypercubeSampling(),
                 adaptive=True,
                 pertube_best=True,
                 display=PSODisplay(),
                 repair=None,
                 individual=Individual(),
                 **kwargs):
        """

        Parameters
        ----------
        pop_size : {pop_size}
        sampling : {sampling}

        """

        super().__init__(display=display, **kwargs)

        self.initialization = Initialization(sampling,
                                             individual=individual,
                                             repair=repair)

        self.pop_size = pop_size
        self.adaptive = adaptive
        self.pertube_best = pertube_best
        self.default_termination = SingleObjectiveDefaultTermination()
        self.V_max = None

        self.w = w
        self.c1 = c1
        self.c2 = c2
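A hedged usage sketch for this particle-swarm constructor, in the same spirit as the DE and RandomSearch scripts in Examples 16 and 20. The module path pymoo.algorithms.so_pso and the test problem are assumptions chosen to match pymoo's naming conventions:

from pymoo.algorithms.so_pso import PSO  # assumed module path
from pymoo.factory import get_problem
from pymoo.operators.sampling.latin_hypercube_sampling import LatinHypercubeSampling
from pymoo.optimize import minimize

problem = get_problem("rastrigin")

algorithm = PSO(pop_size=20,
                w=0.9, c1=2.0, c2=2.0,
                sampling=LatinHypercubeSampling(),
                adaptive=True)

res = minimize(problem, algorithm, seed=1, verbose=True)
print("Best solution found: \nX = %s\nF = %s" % (res.X, res.F))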
Example 14
def main():
    # get argument values
    args = get_args()

    # get reference point
    if args.ref_point is None:
        args.ref_point = get_ref_point(args.problem, args.n_var, args.n_obj,
                                       args.n_init_sample)

    t0 = time()

    # set seed
    np.random.seed(args.seed)

    # build problem, get initial samples
    problem, true_pfront, X_init, Y_init = build_problem(
        args.problem, args.n_var, args.n_obj, args.n_init_sample,
        args.n_process)
    args.n_var, args.n_obj, args.algo = problem.n_var, problem.n_obj, 'nsga2'

    # save arguments and setup logger
    save_args(args)
    logger = setup_logger(args)
    print(problem)

    # initialize data exporter
    exporter = DataExport(X_init, Y_init, args)

    # initialize population
    if args.pop_init_method == 'lhs':
        sampling = LatinHypercubeSampling()
    elif args.pop_init_method == 'nds':
        sorted_indices = NonDominatedSorting().do(Y_init)
        sampling = X_init[np.concatenate(sorted_indices)][:args.batch_size]
        if len(sampling) < args.batch_size:
            rest_sampling = lhs(X_init.shape[1],
                                args.batch_size - len(sampling))
            sampling = np.vstack([sampling, rest_sampling])
    elif args.pop_init_method == 'random':
        sampling = FloatRandomSampling()
    else:
        raise NotImplementedError

    # initialize evolutionary algorithm
    ea_algorithm = NSGA2(pop_size=args.batch_size, sampling=sampling)

    # find Pareto front
    res = minimize(problem,
                   ea_algorithm, ('n_gen', args.n_iter),
                   save_history=True)
    X_history = np.array([algo.pop.get('X') for algo in res.history])
    Y_history = np.array([algo.pop.get('F') for algo in res.history])

    # update data exporter
    for X_next, Y_next in zip(X_history, Y_history):
        exporter.update(X_next, Y_next)

    # export all results to csv
    exporter.write_csvs()
    if true_pfront is not None:
        exporter.write_truefront_csv(true_pfront)

    # statistics
    final_hv = calc_hypervolume(exporter.Y, exporter.ref_point)
    print('========== Result ==========')
    print('Total runtime: %.2fs' % (time() - t0))
    print('Total evaluations: %d, hypervolume: %.4f\n' %
          (args.batch_size * args.n_iter, final_hv))

    # close logger
    if logger is not None:
        logger.close()
Example 15
    def _do(self):
        x = sample(LatinHypercubeSampling(), self.n_points - self.n_dim, self.n_dim)
        x = map_onto_unit_simplex(x, "kraemer")
        x = np.row_stack([x, np.eye(self.n_dim)])
        return x
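The method builds reference directions on the unit simplex: Latin hypercube samples are mapped onto the simplex and the corners (one extreme direction per objective) are appended. A simplified stand-in that uses plain row normalization instead of the Kraemer mapping, just to illustrate the resulting structure:

import numpy as np

n_dim, n_points = 3, 10
rng = np.random.default_rng(0)

# stand-in for LHS + Kraemer mapping: random points normalized onto the unit simplex
x = rng.random((n_points - n_dim, n_dim))
x = x / x.sum(axis=1, keepdims=True)

# append the simplex corners so every objective gets an extreme direction
ref_dirs = np.vstack([x, np.eye(n_dim)])
assert np.allclose(ref_dirs.sum(axis=1), 1.0)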
Example 16
from pymoo.algorithms.so_de import DE
from pymoo.factory import get_problem
from pymoo.operators.sampling.latin_hypercube_sampling import LatinHypercubeSampling
from pymoo.optimize import minimize

problem = get_problem("ackley", n_var=10)

algorithm = DE(pop_size=100,
               sampling=LatinHypercubeSampling(iterations=100,
                                               criterion="maxmin"),
               variant="DE/rand/1/bin",
               CR=0.5,
               F=0.3,
               dither="vector",
               jitter=False)

res = minimize(problem, algorithm, seed=1, verbose=True)

print("Best solution found: \nX = %s\nF = %s" % (res.X, res.F))
Example 17
def main():
    # get argument values
    args = get_args()
    # get reference point
    if args.ref_point is None:
        args.ref_point = get_ref_point(args.problem, args.n_var, args.n_obj,
                                       args.n_init_sample)

    t0 = time()

    # set seed
    np.random.seed(args.seed)

    # build problem, get initial samples
    jsonFile = "/home/picarib/Desktop/cdnet/config/json/sbd_custom.json"
    configDirPath = "/home/picarib/Desktop/cdnet/config/sbd_custom/"
    dataPath = "/home/picarib/Desktop/cdnet/data/"

    config = loadJSON(jsonFile)
    interval = 1 if "custom" not in config["RequestModels"] else config[
        "RequestModels"]["custom"]["interval"]
    isLoadRTable = config["isLoadRTable"]
    isLoadSeparatorRank = config["isLoadSeparatorRank"]
    mode = config["RoutingMode"]  # [no-cache, no-color, tag-color, full-color]
    fileSize = config["FileSize"]
    runReqNums = config["RunReqNums"] if "RunReqNums" in config else -1
    warmUpReqNums = config["WarmUpReqNums"] if "WarmUpReqNums" in config else -1
    colorNums = config["colorNums"]
    separatorRankIncrement = config["separatorRankIncrement"]

    colorList = [
        ''.join(random.choices(string.ascii_uppercase + string.digits, k=6))
        for i in range(colorNums)
    ]

    topo = NetTopology(config, configDirPath, mode, warmUpReqNums, fileSize,
                       colorList)
    topo.build()

    extra_params = (topo, fileSize, mode, colorList, runReqNums, warmUpReqNums,
                    separatorRankIncrement)
    problem, X_init, Y_init = build_problem(args.problem,
                                            args.n_var,
                                            args.n_obj,
                                            args.n_init_sample,
                                            args.n_process,
                                            extra_params=extra_params)
    args.n_var, args.n_obj, args.algo = problem.n_var, problem.n_obj, 'moeda'

    # save arguments and setup logger
    save_args(args)
    logger = setup_logger(args)
    print(problem)

    # initialize evolutionary algorithm
    ref_dir = get_reference_directions("das-dennis", 2, n_partitions=15)
    args.batch_size = min(len(ref_dir), args.batch_size)
    # initialize population
    if args.pop_init_method == 'lhs':
        sampling = LatinHypercubeSampling()
    elif args.pop_init_method == 'nds':
        sorted_indices = NonDominatedSorting().do(Y_init)
        sampling = X_init[np.concatenate(sorted_indices)][:args.batch_size]
        if len(sampling) < args.batch_size:
            rest_sampling = lhs(X_init.shape[1],
                                args.batch_size - len(sampling))
            sampling = np.vstack([sampling, rest_sampling])
    elif args.pop_init_method == 'random':
        sampling = FloatRandomSampling()
    else:
        raise NotImplementedError

    ea_algorithm = MOEAD(ref_dir,
                         n_neighbors=args.batch_size,
                         sampling=sampling,
                         crossover=get_crossover("int_one_point"),
                         mutation=get_mutation("int_pm", prob=1.0 / 13),
                         decomposition="pbi",
                         seed=1)

    # initialize data exporter
    exporter = DataExport(X_init, Y_init, args)

    # find Pareto front
    res = minimize(problem,
                   ea_algorithm, ('n_gen', args.n_iter),
                   save_history=True)
    X_history = np.array([algo.pop.get('X') for algo in res.history])
    Y_history = np.array([algo.pop.get('F') for algo in res.history])

    # update data exporter
    for X_next, Y_next in zip(X_history, Y_history):
        exporter.update(X_next, Y_next)

    # export all results to csv
    exporter.write_csvs()

    # statistics
    final_hv = calc_hypervolume(exporter.Y, exporter.ref_point)
    print('========== Result ==========')
    print('Total runtime: %.2fs' % (time() - t0))
    print('Total evaluations: %d, hypervolume: %.4f\n' %
          (args.batch_size * args.n_iter, final_hv))

    # close logger
    if logger is not None:
        logger.close()
Example 18
class SingleObjectiveGlobalOptimization(Algorithm):
    def __init__(self, n_initial_samples=50, n_parallel_searches=5, **kwargs):
        super().__init__(**kwargs)

        # the number of parallel (local) searches to maintain
        self.n_parallel_searches = n_parallel_searches

        # the initial sampling to be used
        self.sampling = LatinHypercubeSampling(iterations=100)
        self.n_initial_samples = n_initial_samples

        # create a global evaluator that keeps track of the inner algorithms as well
        self.evaluator = GlobalEvaluator()

        # objects used during the optimization
        self.algorithms = []

        # display the single-objective metrics
        self.func_display_attrs = disp_single_objective

    def _initialize(self):
        pop = pop_from_sampling(self.problem, self.sampling,
                                self.n_initial_samples)
        evaluate_if_not_done_yet(self.evaluator,
                                 self.problem,
                                 pop,
                                 algorithm=self)

        for i in np.argsort(pop.get("F")[:, 0]):
            algorithm = get_algorithm("nelder-mead",
                                      problem=self.problem,
                                      x0=pop[i],
                                      termination=NelderAndMeadTermination(
                                          xtol=1e-3, ftol=1e-3),
                                      evaluator=self.evaluator)
            algorithm.initialize()
            self.algorithms.append(algorithm)

        self.pop = pop

    def _next(self):

        # all places visited so far
        _X, _F, _evaluated_by_algorithm = self.evaluator.history.get(
            "X", "F", "algorithm")

        # collect attributes from each algorithm and determine whether it has to be replaced or not
        pop, F, n_evals = [], [], []
        for k, algorithm in enumerate(self.algorithms):

            # collect some data from the current algorithms
            _pop = algorithm.pop

            # if the algorithm has terminated or not
            has_finished = algorithm.termination.has_finished(algorithm)

            # if the area was already explored before
            closest_dist_to_others = vectorized_cdist(
                _pop.get("X"),
                _X[_evaluated_by_algorithm != algorithm],
                func_dist=norm_euclidean_distance(self.problem))
            too_close_to_others = (closest_dist_to_others.min(axis=1) <
                                   1e-3).all()

            # whether the algorithm is the current best - if yes it will not be replaced
            current_best = self.evaluator.opt.get("F") == _pop.get("F").min()

            # algorithm not really useful anymore
            if not current_best and (has_finished or too_close_to_others):
                # find a suitable x0 which is far from other or has good expectations
                self.sampling.criterion = lambda X: vectorized_cdist(X, _X).min()
                X = self.sampling.do(self.problem,
                                     self.n_initial_samples).get("X")

                # distance in x space to other existing points
                x_dist = vectorized_cdist(X,
                                          _X,
                                          func_dist=norm_euclidean_distance(
                                              self.problem)).min(axis=1)
                f_pred, f_uncert = predict_by_nearest_neighbors(
                    _X, _F, X, 5, self.problem)
                fronts = NonDominatedSorting().do(
                    np.column_stack([-x_dist, f_pred, f_uncert]))
                I = np.random.choice(fronts[0])

                # I = vectorized_cdist(X, _X, func_dist=norm_euclidean_distance(self.problem)).min(axis=1).argmax()

                # pick one of the non-dominated candidates as the new starting point
                x0 = X[[I]]

                # replace the current algorithm
                algorithm = get_algorithm(
                    "nelder-mead",
                    problem=self.problem,
                    x0=x0,
                    termination=NelderAndMeadTermination(xtol=1e-3, ftol=1e-3),
                    evaluator=self.evaluator,
                )
                algorithm.initialize()
                self.algorithms[k] = algorithm

            pop.append(algorithm.pop)
            F.append(algorithm.pop.get("F"))
            n_evals.append(self.evaluator.algorithms[algorithm])

        # get the values of all algorithms as arrays
        F, n_evals = np.array(F), np.array(n_evals)
        rewards = 1 - normalize(F.min(axis=1))[:, 0]
        n_evals_total = self.evaluator.n_eval - self.evaluator.algorithms[self]

        # calculate the upper confidence bound
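        # (UCB1-style bandit choice: the reward term grows as a search's best objective value
        #  improves, and the square-root term is an exploration bonus that favours searches
        #  that have received few evaluations so far)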
        ucb = rewards + 0.95 * np.sqrt(np.log(n_evals_total) / n_evals)

        I = ucb.argmax()
        self.algorithms[I].next()

        # create the population object with all algorithms
        self.pop = Population.create(*pop)

        # update the current optimum
        self.opt = self.evaluator.opt
Example 19
    def __init__(self,
                 pop_size=25,
                 sampling=LatinHypercubeSampling(),
                 w=0.9,
                 c1=2.0,
                 c2=2.0,
                 adaptive=True,
                 initial_velocity="random",
                 max_velocity_rate=0.20,
                 pertube_best=True,
                 display=PSODisplay(),
                 **kwargs):
        """

        Parameters
        ----------
        pop_size : The size of the swarm being used.

        sampling : {sampling}

        adaptive : bool
            Whether w, c1, and c2 are changed dynamically over time. The update uses the spread from the global
            optimum to determine suitable values.

        w : float
            The inertia weight to be used in each iteration for the velocity update. This can be interpreted
            as the momentum term regarding the velocity. If `adaptive=True` this is only the
            initially used value.

        c1 : float
            The cognitive impact (personal best) during the velocity update. If `adaptive=True` this is only the
            initially used value.
        c2 : float
            The social impact (global best) during the velocity update. If `adaptive=True` this is only the
            initially used value.

        initial_velocity : str - ('random', or 'zero')
            How the initial velocity of each particle should be assigned. Either 'random' which creates a
            random velocity vector or 'zero' which makes the particles start to find the direction through the
            velocity update equation.


        max_velocity_rate : float
            The maximum velocity rate. It is determined per variable (not per vector). A rate is used here
            because the value is normalized with respect to the `xl` and `xu` defined in the problem.

        pertube_best : bool
            Some studies have proposed to mutate the global best because this has been found to improve
            convergence. This means the population size is reduced by one particle, and one additional
            function evaluation is spent on perturbing the best solution found so far.

        """

        super().__init__(display=display, **kwargs)

        self.initialization = Initialization(sampling)

        self.pop_size = pop_size
        self.adaptive = adaptive
        self.pertube_best = pertube_best
        self.default_termination = SingleObjectiveDefaultTermination()
        self.V_max = None
        self.initial_velocity = initial_velocity
        self.max_velocity_rate = max_velocity_rate

        self.w = w
        self.c1 = c1
        self.c2 = c2
Example 20
from pymoo.algorithms.so_random_search import RandomSearch
from pymoo.factory import get_problem
from pymoo.operators.sampling.latin_hypercube_sampling import LatinHypercubeSampling
from pymoo.optimize import minimize

problem = get_problem("ackley")

algorithm = RandomSearch(n_points_per_iteration=100,
                         sampling=LatinHypercubeSampling())

res = minimize(problem, algorithm, ("n_gen", 5), seed=1, verbose=False)

print("Best solution found: \nX = %s\nF = %s" % (res.X, res.F))