def run_iteration(self, task, c, population_fitness, best_x, best_fitness, **params):
    r"""Core function of EvolutionStrategyMpL algorithm.

    Args:
        task (Task): Optimization task.
        c (numpy.ndarray): Current population.
        population_fitness (numpy.ndarray): Current populations fitness/function values.
        best_x (numpy.ndarray): Global best individual.
        best_fitness (float): Global best individuals fitness/function value.
        **params (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
            1. New population.
            2. New populations function/fitness values.
            3. New global best solution.
            4. New global best solutions fitness/objective value.
            5. Additional arguments:
                * ki (int): Number of successful mutations.
    """
    ki = params.pop('ki')
    # Every k-th iteration: adapt the mutation strengths, then reset the
    # success counter.
    if (task.iters + 1) % self.k == 0:
        self.update_rho(c, ki)
        ki = 0
    # Generate lambda offspring via random mutation of the parents.
    offspring = [IndividualES(x=self.mutate_rand(c, task), task=task, rng=self.rng)
                 for _ in range(self.lam)]
    # (mu + lambda) selection: pool parents and offspring, keep the mu fittest.
    pool = np.append(objects_to_array(offspring), c)
    order = np.argsort([ind.f for ind in pool])
    survivors = objects_to_array([pool[i] for i in order[:self.mu]])
    ki += self.change_count(c, survivors)
    survivors_fitness = np.asarray([ind.f for ind in survivors])
    best_x, best_fitness = self.get_best(survivors, survivors_fitness, best_x, best_fitness)
    return survivors, survivors_fitness, best_x, best_fitness, {'ki': ki}
def run_iteration(self, task, population, population_fitness, best_x, best_fitness, **params):
    r"""Core function of Camel Algorithm.

    Args:
        task (Task): Optimization task.
        population (numpy.ndarray[Camel]): Current population of Camels.
        population_fitness (numpy.ndarray[float]): Current population fitness/function values.
        best_x (numpy.ndarray): Current best Camel.
        best_fitness (float): Current best Camel fitness/function value.
        **params (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, dict]:
            1. New population.
            2. New population function/fitness value.
            3. New global best solution.
            4. New global best fitness/objective value.
            5. Additional arguments.
    """
    # Each camel walks toward the best, may be refreshed at an oasis, and
    # finally goes through its life cycle — three sequential passes.
    walked = objects_to_array([self.walk(camel, best_x, task) for camel in population])
    refreshed = objects_to_array([self.oasis(camel) for camel in walked])
    caravan = objects_to_array([self.life_cycle(camel, task) for camel in refreshed])
    caravan_fitness = np.asarray([camel.f for camel in caravan])
    best_x, best_fitness = self.get_best(caravan, caravan_fitness, best_x, best_fitness)
    return caravan, caravan_fitness, best_x, best_fitness, {}
def run_iteration(self, task, c, population_fitness, best_x, best_fitness, **params):
    r"""Core function of EvolutionStrategyML algorithm.

    Args:
        task (Task): Optimization task.
        c (numpy.ndarray): Current population.
        population_fitness (numpy.ndarray): Current population fitness/function values.
        best_x (numpy.ndarray): Global best individual.
        best_fitness (float): Global best individuals fitness/function value.
        **params (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
            1. New population.
            2. New populations fitness/function values.
            3. New global best solution.
            4. New global best solutions fitness/objective value.
            5. Additional arguments.
    """
    # FIX: the random generator was passed as ``rand=self.rng``; IndividualES
    # is constructed with ``rng=self.rng`` elsewhere in this file
    # (EvolutionStrategyMpL.run_iteration), so use the same keyword here.
    cn = objects_to_array([
        IndividualES(x=self.mutate_rand(c, task), task=task, rng=self.rng)
        for _ in range(self.lam)
    ])
    # (mu, lambda) style selection: the next generation is drawn from the
    # offspring only.
    c = self.new_pop(cn)
    fc = np.asarray([x.f for x in c])
    best_x, best_fitness = self.get_best(c, fc, best_x, best_fitness)
    return c, fc, best_x, best_fitness, {}
def run_iteration(self, task, c, population_fitness, best_x, best_fitness, **params):
    r"""Core function of EvolutionStrategy(1+1) algorithm.

    Args:
        task (Task): Optimization task.
        c (Individual): Current position.
        population_fitness (float): Current position function/fitness value.
        best_x (numpy.ndarray): Global best position.
        best_fitness (float): Global best function/fitness value.
        **params (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[Individual, float, Individual, float, Dict[str, Any]]:
            1. Initialized individual.
            2. Initialized individual fitness/function value.
            3. New global best solution.
            4. New global best solutions fitness/objective value.
            5. Additional arguments:
                * ki (int): Number of successful rho update.
    """
    ki = params.pop('ki')
    # Every k-th iteration: adapt the step size rho and reset the success counter.
    if (task.iters + 1) % self.k == 0:
        c.rho, ki = self.update_rho(c.rho, ki), 0
    # Sample mu mutated candidates around the current position.
    trials = objects_to_array([task.repair(self.mutate(c.x, c.rho), self.rng)
                               for _ in range(self.mu)])
    trial_fitness = np.asarray([task.eval(trial) for trial in trials])
    ib = np.argmin(trial_fitness)
    if trial_fitness[ib] < c.f:
        # Accept the best trial and count the success.
        c.x, c.f = trials[ib], trial_fitness[ib]
        ki += 1
        if trial_fitness[ib] < best_fitness:
            best_x, best_fitness = self.get_best(trials[ib], trial_fitness[ib], best_x, best_fitness)
    return c, c.f, best_x, best_fitness, {'ki': ki}
def init_school(self, task):
    """Initialize fish school with uniform distribution.

    Args:
        task (Task): Optimization task.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray, float, numpy.ndarray]:
            1. Initial individual step (scaled to the task range).
            2. Initial volitive step (scaled to the task range).
            3. Total school weight.
            4. Initialized school of fish.
    """
    individual_step = self.step_individual_init * task.range
    volitive_step = self.step_volitive_init * task.range
    # Every fish starts at half of the maximum weight.
    half_weight = self.w_scale / 2.0
    fishes = [Fish(weight=half_weight, task=task, e=True, rng=self.rng)
              for _ in range(self.population_size)]
    total_weight = self.population_size * half_weight
    return individual_step, volitive_step, total_weight, objects_to_array(fishes)
def new_pop(self, pop):
    r"""Return new population.

    Args:
        pop (numpy.ndarray): Current population.

    Returns:
        numpy.ndarray: New population of ``self.mu`` individuals.
    """
    if self.mu < self.lam:
        # Keep only the mu fittest individuals. The argsort is computed here
        # rather than unconditionally (as before), since the other branch
        # never uses it.
        pop_s = np.argsort([i.f for i in pop])
        return objects_to_array([pop[i] for i in pop_s[:self.mu]])
    # mu >= lambda: replicate the population (in its given order) until mu
    # individuals are collected; the final chunk may be shorter than lambda.
    new_population = []
    for i in range(int(ceil(float(self.mu) / self.lam))):
        remaining = self.mu - i * self.lam
        new_population.extend(pop[:self.lam if remaining >= self.lam else remaining])
    return objects_to_array(new_population)
def evolve(self, pop, xb, task, **kwargs):
    r"""Evolve population with the help multiple mutation strategies.

    Args:
        pop (numpy.ndarray[Individual]): Current population.
        xb (Individual): Current best individual.
        task (Task): Optimization task.

    Returns:
        numpy.ndarray[Individual]: New population of individuals.
    """
    # Apply the configured (multi-)strategy operator to each population index.
    offspring = []
    for idx in range(len(pop)):
        offspring.append(self.strategy(pop, idx, xb, self.differential_weight,
                                       self.crossover_probability, self.rng, task,
                                       self.individual_type, self.strategies))
    return objects_to_array(offspring)
def init_pop_individual(task, population_size, individual_type, **_kwargs):
    r"""Custom population initialization function for numpy individual type.

    Args:
        task (Task): Optimization task.
        population_size (int): Population size.
        individual_type (Type[Individual]): Type of individual in population.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray[float]):
            1. Initialized population.
            2. Initialized populations fitness/function values.
    """
    # Every individual starts at the origin; a fresh zero vector is created
    # per individual so none of them share state.
    individuals = [individual_type(x=np.zeros(task.dimension), task=task)
                   for _ in range(population_size)]
    fitness = np.asarray([ind.f for ind in individuals])
    return objects_to_array(individuals), fitness
def evolve(self, pop, xb, task, **_kwargs):
    r"""Evolve current population.

    Args:
        pop (numpy.ndarray[Individual]): Current population.
        xb (Individual): Global best individual.
        task (Task): Optimization task.

    Returns:
        numpy.ndarray: New population.
    """
    # Self-adapt each individual's control parameters before mutation.
    new_pop = objects_to_array([self.adaptive_gen(e) for e in pop])
    for i, e in enumerate(new_pop):
        new_pop[i].x = self.strategy(new_pop, i, e.differential_weight, e.crossover_probability,
                                     rng=self.rng, x_b=xb)
    for e in new_pop:
        # FIX: was ``rng=self.random`` — that passes a bound method instead of
        # the random generator; use ``self.rng`` as the strategy call above does.
        e.evaluate(task, rng=self.rng)
    return new_pop
def heuristicInit(task, NP, rnd, **kwargs):
    r"""Heuristically initialize a population biased toward under-represented classes.

    Each individual is a vector in [0, 1]^D, sampled as Gaussian noise around a
    per-instance bias (1 minus the instance's class frequency). Out-of-range
    components are resampled until all lie inside [0, 1].

    Args:
        task (Task): Optimization task; ``task.benchmark.y_train`` must hold
            the training class labels.
        NP (int): Population size.
        rnd (numpy.random.RandomState): Random generator.
        **kwargs (Dict[str, Any]): Additional arguments (ignored).

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray]:
            1. Initialized population.
            2. Initialized populations fitness/function values.
    """
    target_stats = Counter(task.benchmark.y_train)
    # NOTE(review): indexing class_perc with .take(y_train) assumes labels are
    # 0..k-1 and first appear in label order — verify against the data loader.
    class_perc = 1 - np.array(list(target_stats.values())) / sum(target_stats.values())
    instance_perc = class_perc.take(task.benchmark.y_train)
    # FIX: use the supplied generator ``rnd`` instead of the global np.random,
    # so seeded runs are reproducible.
    pop = rnd.normal(0, 0.25, (NP, task.D)) + instance_perc
    out_of_range = (pop < 0) | (pop > 1)
    while np.any(out_of_range):
        # Resample only the offending components around their bias values.
        pop[out_of_range] = rnd.normal(0, 0.25, np.count_nonzero(out_of_range)) \
            + np.tile(instance_perc, (NP, 1))[out_of_range]
        out_of_range = (pop < 0) | (pop > 1)
    pop = objects_to_array([
        Individual(task=task, rnd=rnd, e=True, x=pop[i]) for i in range(NP)
    ])
    return pop, np.asarray([x.f for x in pop])
def init_pop(self, task, population_size, rng, individual_type, **_kwargs):
    r"""Initialize starting population.

    Args:
        task (Task): Optimization task.
        population_size (int): Number of camels in population.
        rng (numpy.random.Generator): Random number generator.
        individual_type (Type[Individual]): Individual type.

    Returns:
        Tuple[numpy.ndarray[Camel], numpy.ndarray[float]]:
            1. Initialize population of camels.
            2. Initialized populations function/fitness values.
    """
    # Each camel starts with the configured initial endurance and supply.
    camels = [
        individual_type(endurance_init=self.endurance_init, supply_init=self.supply_init,
                        task=task, rng=rng, e=True)
        for _ in range(population_size)
    ]
    fitness = np.asarray([camel.f for camel in camels])
    return objects_to_array(camels), fitness