def runIteration(self, task, c, fpop, xb, fxb, ki, **dparams):
    r"""Core function of EvolutionStrategyMpL algorithm.

    Args:
        task (Task): Optimization task.
        c (numpy.ndarray): Current population.
        fpop (numpy.ndarray): Current population's fitness/function values.
        xb (numpy.ndarray): Global best individual.
        fxb (float): Global best individual's fitness/function value.
        ki (int): Number of successful mutations.
        **dparams (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
            1. New population.
            2. New population's function/fitness values.
            3. New global best solution.
            4. New global best solution's fitness/objective value.
            5. Additional arguments:
                * ki (int): Number of successful mutations.

    """
    if task.Iters % self.k == 0:
        _, ki = self.updateRho(c, ki), 0
    cn = objects2array([IndividualES(x=self.mutateRand(c, task), task=task, rnd=self.Rand) for _ in range(self.lam)])
    cn = append(cn, c)
    cn = objects2array([cn[i] for i in argsort([i.f for i in cn])[:self.mu]])
    ki += self.changeCount(c, cn)
    fcn = asarray([x.f for x in cn])
    xb, fxb = self.getBest(cn, fcn, xb, fxb)
    return cn, fcn, xb, fxb, {'ki': ki}
def runIteration(self, task, caravan, fcaravan, cb, fcb, **dparams):
    r"""Core function of Camel Algorithm.

    Args:
        task (Task): Optimization task.
        caravan (numpy.ndarray[Camel]): Current population of Camels.
        fcaravan (numpy.ndarray[float]): Current population's fitness/function values.
        cb (Camel): Current best Camel.
        fcb (float): Current best Camel's fitness/function value.
        **dparams (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, dict]:
            1. New population.
            2. New population's function/fitness values.
            3. New global best solution.
            4. New global best fitness/objective value.
            5. Additional arguments.

    """
    ncaravan = objects2array([self.walk(c, cb, task) for c in caravan])
    ncaravan = objects2array([self.oasis(c, self.rand(), self.alpha) for c in ncaravan])
    ncaravan = objects2array([self.lifeCycle(c, self.mu, task) for c in ncaravan])
    fncaravan = asarray([c.f for c in ncaravan])
    cb, fcb = self.getBest(ncaravan, fncaravan, cb, fcb)
    return ncaravan, fncaravan, cb, fcb, {}
def newPop(self, pop):
    r"""Return new population.

    Args:
        pop (numpy.ndarray): Current population.

    Returns:
        numpy.ndarray: New population.

    """
    pop_s = argsort([i.f for i in pop])
    if self.mu < self.lam:
        return objects2array([pop[i] for i in pop_s[:self.mu]])
    npop = list()
    for i in range(int(ceil(float(self.mu) / self.lam))):
        npop.extend(pop[:self.lam if (self.mu - i * self.lam) >= self.lam else self.mu - i * self.lam])
    return objects2array(npop)
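# Illustrative sketch (not from the library): when lambda >= mu, `newPop` above is
# plain truncation selection -- sort the offspring by fitness and keep only the mu
# best.  The standalone snippet below shows the same idea with a hypothetical
# lightweight `Ind` record instead of the library's Individual class.
from collections import namedtuple
from numpy import argsort

Ind = namedtuple('Ind', ['x', 'f'])  # hypothetical stand-in for an individual

offspring = [Ind(x=0, f=3.2), Ind(x=1, f=0.5), Ind(x=2, f=1.7), Ind(x=3, f=2.9)]
mu = 2
order = argsort([i.f for i in offspring])       # indices sorted by fitness (ascending)
survivors = [offspring[i] for i in order[:mu]]  # keep the mu best offspring
print([s.f for s in survivors])                 # [0.5, 1.7]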
def runIteration(self, task, c, fpop, xb, fxb, **dparams):
    r"""Core function of EvolutionStrategyML algorithm.

    Args:
        task (Task): Optimization task.
        c (numpy.ndarray): Current population.
        fpop (numpy.ndarray): Current population's fitness/function values.
        xb (numpy.ndarray): Global best individual.
        fxb (float): Global best individual's fitness/function value.
        **dparams (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, Dict[str, Any]]:
            1. New population.
            2. New population's fitness/function values.
            3. New global best solution.
            4. New global best solution's fitness/objective value.
            5. Additional arguments.

    """
    cn = objects2array([IndividualES(x=self.mutateRand(c, task), task=task, rnd=self.Rand) for _ in range(self.lam)])
    c = self.newPop(cn)
    fc = asarray([x.f for x in c])
    xb, fxb = self.getBest(c, fc, xb, fxb)
    return c, fc, xb, fxb, {}
def aging(self, task, pop):
    r"""Apply aging to individuals.

    Args:
        task (Task): Optimization task.
        pop (numpy.ndarray[Individual]): Current population.

    Returns:
        numpy.ndarray[Individual]: New population.

    """
    fpop = asarray([x.f for x in pop])
    x_b, x_w = pop[argmin(fpop)], pop[argmax(fpop)]
    avg, npop = mean(fpop), []
    for x in pop:
        x.age += 1
        Lt = round(self.age(Lt_min=self.Lt_min, Lt_max=self.Lt_max, mu=self.mu, x_f=x.f, avg=avg, x_gw=x_w.f, x_gb=x_b.f))
        if x.age <= Lt:
            npop.append(x)
    if len(npop) == 0:
        npop = objects2array([self.itype(task=task, rnd=self.Rand, e=True) for _ in range(self.NP)])
    return npop
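# Illustrative sketch (assumption, not the library's `age` implementation): a common
# lifetime schedule in aging GAs is a linear rule that gives the worst individual
# Lt_min generations and the best individual Lt_max (minimization, lower f is better).
# The function name and arguments below are hypothetical stand-ins.
def linear_lifetime(Lt_min, Lt_max, x_f, x_gw, x_gb):
    """Hypothetical linear lifetime: Lt_min at the worst fitness, Lt_max at the best."""
    if x_gw == x_gb:                      # degenerate population, all equal fitness
        return (Lt_min + Lt_max) / 2.0
    eta = (Lt_max - Lt_min) / 2.0
    return Lt_min + 2.0 * eta * (x_gw - x_f) / (x_gw - x_gb)

print(linear_lifetime(1, 7, x_f=0.5, x_gw=3.0, x_gb=0.5))  # 7.0 -> best lives longest
print(linear_lifetime(1, 7, x_f=3.0, x_gw=3.0, x_gb=0.5))  # 1.0 -> worst is aged out first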
def runIteration(self, task, c, fpop, xb, fxb, ki, **dparams):
    r"""Core function of EvolutionStrategy(1+1) algorithm.

    Args:
        task (Task): Optimization task.
        c (Individual): Current position.
        fpop (float): Current position's function/fitness value.
        xb (Individual): Global best position.
        fxb (float): Global best function/fitness value.
        ki (int): Number of successful updates before rho update.
        **dparams (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[Individual, float, Individual, float, Dict[str, Any]]:
            1. Updated individual.
            2. Updated individual's fitness/function value.
            3. New global best solution.
            4. New global best solution's fitness/objective value.
            5. Additional arguments:
                * ki (int): Number of successful rho updates.

    """
    if task.Iters % self.k == 0:
        c.rho, ki = self.updateRho(c.rho, ki), 0
    cn = objects2array([task.repair(self.mutate(c.x, c.rho), self.Rand) for _i in range(self.mu)])
    cn_f = asarray([task.eval(cn[i]) for i in range(len(cn))])
    ib = argmin(cn_f)
    if cn_f[ib] < c.f:
        c.x, c.f, ki = cn[ib], cn_f[ib], ki + 1
        if cn_f[ib] < fxb:
            xb, fxb = self.getBest(cn[ib], cn_f[ib], xb, fxb)
    return c, c.f, xb, fxb, {'ki': ki}
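# Illustrative sketch (assumption about `updateRho`): (1+1)-ES step-size control is
# classically done with Rechenberg's 1/5 success rule -- enlarge the mutation
# strength rho when more than 1/5 of the last k mutations succeeded, shrink it
# otherwise.  The names below are hypothetical, not the library's API.
def one_fifth_rule(rho, ki, k, c=0.817):
    """Return the adapted mutation strength given ki successes out of k trials."""
    success_rate = ki / k
    if success_rate > 0.2:
        return rho / c      # too many successes: widen the search
    if success_rate < 0.2:
        return rho * c      # too few successes: narrow the search
    return rho              # exactly 1/5: keep rho unchanged

print(one_fifth_rule(rho=1.0, ki=4, k=10))  # ~1.224 -> step size grows
print(one_fifth_rule(rho=1.0, ki=1, k=10))  # 0.817  -> step size shrinks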
def popIncrement(self, pop, task):
    r"""Increment population.

    Args:
        pop (numpy.ndarray[Individual]): Current population.
        task (Task): Optimization task.

    Returns:
        numpy.ndarray[Individual]: Increased population.

    """
    deltapop = int(round(max(1, self.NP * self.deltaPopE(task.Iters))))
    return objects2array([self.itype(task=task, rnd=self.Rand, e=True) for _ in range(deltapop)])
def selection(self, pop, npop, **kwargs):
    r"""Operator for selection.

    Args:
        pop (numpy.ndarray[Individual]): Current population.
        npop (numpy.ndarray[Individual]): New population.
        **kwargs (Dict[str, Any]): Additional arguments.

    Returns:
        numpy.ndarray[Individual]: New selected individuals.

    """
    return objects2array([e if e.f < pop[i].f else pop[i] for i, e in enumerate(npop)])
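# Illustrative sketch (not library code): the selection above is one-to-one elitism --
# each trial individual replaces its parent only when it has a strictly better
# (lower) fitness.  Shown here with plain fitness values.
parents_f = [1.0, 2.0, 3.0]
trials_f = [1.5, 0.5, 3.0]
selected_f = [t if t < p else p for p, t in zip(parents_f, trials_f)]
print(selected_f)  # [1.0, 0.5, 3.0] -- only the second trial wins its slot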
def evolve(self, pop, xb, task, **kwargs):
    r"""Evolve population with the help of multiple mutation strategies.

    Args:
        pop (numpy.ndarray[Individual]): Current population.
        xb (Individual): Current best individual.
        task (Task): Optimization task.
        **kwargs (Dict[str, Any]): Additional arguments.

    Returns:
        numpy.ndarray[Individual]: New population of individuals.

    """
    return objects2array([self.CrossMutt(pop, i, xb, self.F, self.CR, self.Rand, task, self.itype, self.strategies) for i in range(len(pop))])
def evolve(self, pop, xb, task, **kwargs):
    r"""Evolve population.

    Args:
        pop (numpy.ndarray): Current population.
        xb (Individual): Current best individual.
        task (Task): Optimization task.
        **kwargs (Dict[str, Any]): Additional arguments.

    Returns:
        numpy.ndarray: New evolved population.

    """
    return objects2array([self.itype(x=self.CrossMutt(pop, i, xb, self.F, self.CR, self.Rand), task=task, rnd=self.Rand, e=True) for i in range(len(pop))])
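# Illustrative sketch (assumption): `CrossMutt` is a pluggable DE strategy; the
# classic DE/rand/1/bin variant combines a difference-vector mutation with binomial
# crossover.  This standalone version operates on a plain numpy matrix of candidate
# solutions and is not the library's implementation.
import numpy as np

def de_rand_1_bin(pop, i, F, CR, rnd):
    """Build a trial vector for individual i from three distinct random donors."""
    NP, D = pop.shape
    r1, r2, r3 = rnd.choice([j for j in range(NP) if j != i], 3, replace=False)
    mutant = pop[r1] + F * (pop[r2] - pop[r3])   # difference mutation
    jrand = rnd.randint(D)                       # force at least one gene from the mutant
    cross = rnd.rand(D) < CR
    cross[jrand] = True
    return np.where(cross, mutant, pop[i])       # binomial crossover

rnd = np.random.RandomState(42)
pop = rnd.uniform(-5, 5, (6, 3))                 # 6 individuals, 3 dimensions
print(de_rand_1_bin(pop, 0, F=0.5, CR=0.9, rnd=rnd))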
def postSelection(self, pop, task):
    r"""Post selection operator.

    Args:
        pop (numpy.ndarray[Individual]): Current population.
        task (Task): Optimization task.

    Returns:
        numpy.ndarray[Individual]: New population.

    """
    Gr, nNP = task.nFES // (self.pmax * len(pop)) + self.rp, len(pop) // 2
    if task.Iters == Gr and len(pop) > 3:
        return objects2array([pop[i] if pop[i].f < pop[i + nNP].f else pop[i + nNP] for i in range(nNP)])
    return pop
def evolve(self, pop, xb, task):
    r"""Evolve current population.

    Args:
        pop (numpy.ndarray[Individual]): Current population.
        xb (Individual): Global best individual.
        task (Task): Optimization task.

    Returns:
        numpy.ndarray: New population.

    """
    npop = objects2array([self.AdaptiveGen(e) for e in pop])
    for i, e in enumerate(npop):
        npop[i].x = self.CrossMutt(npop, i, xb, e.F, e.CR, rnd=self.Rand)
    return npop
def runIteration(self, task, caravan, fcaravan, cb, fcb, **dparams):
    r"""Core function of Camel Algorithm.

    Args:
        task (Task): Optimization task.
        caravan (numpy.ndarray[Camel]): Current population of Camels.
        fcaravan (numpy.ndarray[float]): Current population's fitness/function values.
        cb (Camel): Current best Camel.
        fcb (float): Current best Camel's fitness/function value.
        **dparams (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray[Camel], numpy.ndarray[float], Dict[str, Any]]:
            1. New population.
            2. New population's function/fitness values.
            3. Additional arguments.

    """
    ncaravan = objects2array([self.walk(c, cb, task) for c in caravan])
    ncaravan = objects2array([self.oasis(c, self.rand(), self.alpha) for c in ncaravan])
    ncaravan = objects2array([self.lifeCycle(c, self.mu, task) for c in ncaravan])
    return ncaravan, asarray([x.f for x in ncaravan]), {}
def evolve(self, pop, xb, task, **ukwargs):
    r"""Evolve current population.

    Args:
        pop (numpy.ndarray[Individual]): Current population.
        xb (Individual): Global best individual.
        task (Task): Optimization task.
        **ukwargs (Dict[str, Any]): Additional arguments.

    Returns:
        numpy.ndarray: New population.

    """
    npop = objects2array([self.AdaptiveGen(e) for e in pop])
    for i, e in enumerate(npop):
        npop[i].x = self.CrossMutt(npop, i, xb, e.F, e.CR, rnd=self.Rand)
    for e in npop:
        e.evaluate(task, rnd=self.Rand)
    return npop
def defaultIndividualInit(task, NP, rnd=rand, itype=None, **kwargs):
    r"""Initialize `NP` individuals of type `itype`.

    Args:
        task (Task): Optimization task.
        NP (int): Number of individuals in population.
        rnd (Optional[mtrand.RandomState]): Random number generator.
        itype (Optional[Individual]): Class of individual in population.
        **kwargs (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray[Individual], numpy.ndarray[float]]:
            1. Initialized individuals.
            2. Initialized individuals' function/fitness values.

    """
    pop = objects2array([itype(task=task, rnd=rnd, e=True) for _ in range(NP)])
    return pop, asarray([x.f for x in pop])
def popDecrement(self, pop, task):
    r"""Decrement population.

    Args:
        pop (numpy.ndarray): Current population.
        task (Task): Optimization task.

    Returns:
        numpy.ndarray[Individual]: Decreased population.

    """
    deltapop = int(round(max(1, self.NP * self.deltaPopC(task.Iters))))
    if len(pop) - deltapop <= 0:
        return pop
    ni = self.Rand.choice(len(pop), deltapop, replace=False)
    npop = []
    for i, e in enumerate(pop):
        if i not in ni:
            npop.append(e)
        elif self.rand() >= self.omega:
            npop.append(e)
    return objects2array(npop)
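# Illustrative note (not library code): an index drawn by `Rand.choice` above is only
# removed when `self.rand() < self.omega`, so each selected individual still survives
# with probability 1 - omega.  A quick standalone check of that survival rate:
import numpy as np

rnd, omega, trials = np.random.RandomState(0), 0.25, 100000
survived = sum(1 for _ in range(trials) if rnd.rand() >= omega)
print(survived / trials)  # close to 0.75, i.e. survival probability 1 - omega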
def selection(self, pop, npop, xb, fxb, task, **kwargs):
    r"""Operator for selection.

    Args:
        pop (numpy.ndarray): Current population.
        npop (numpy.ndarray): New population.
        xb (numpy.ndarray): Current global best solution.
        fxb (float): Current global best solution's fitness/objective value.
        task (Task): Optimization task.
        **kwargs (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray, float]:
            1. New selected individuals.
            2. New global best solution.
            3. New global best solution's fitness/objective value.

    """
    arr = objects2array([e if e.f < pop[i].f else pop[i] for i, e in enumerate(npop)])
    xb, fxb = self.getBest(arr, asarray([e.f for e in arr]), xb, fxb)
    return arr, xb, fxb
def postSelection(self, pop, task, xb, fxb, **kwargs):
    r"""Post selection operator.

    In this algorithm the post selection operator decrements the population at specific iterations/generations.

    Args:
        pop (numpy.ndarray): Current population.
        task (Task): Optimization task.
        xb (numpy.ndarray): Global best solution.
        fxb (float): Global best solution's fitness/objective value.
        **kwargs (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray, numpy.ndarray, float]:
            1. Changed current population.
            2. New global best solution.
            3. New global best solution's fitness/objective value.

    """
    Gr = task.nFES // (self.pmax * len(pop)) + self.rp
    nNP = len(pop) // 2
    if task.Iters == Gr and len(pop) > 3:
        pop = objects2array([pop[i] if pop[i].f < pop[i + nNP].f else pop[i + nNP] for i in range(nNP)])
    return pop, xb, fxb
def postSelection(self, pop, task, **kwargs):
    r"""Post selection operator.

    In this algorithm the post selection operator decrements the population at specific iterations/generations.

    Args:
        pop (numpy.ndarray[Individual]): Current population.
        task (Task): Optimization task.
        **kwargs (Dict[str, Any]): Additional arguments.

    Returns:
        numpy.ndarray[Individual]: Changed current population.

    """
    Gr = task.nFES // (self.pmax * len(pop)) + self.rp
    nNP = len(pop) // 2
    if task.Iters == Gr and len(pop) > 3:
        pop = objects2array([pop[i] if pop[i].f < pop[i + nNP].f else pop[i + nNP] for i in range(nNP)])
    return pop
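# Illustrative sketch (not library code): the halving step above pairs individual i
# with individual i + NP/2 and keeps the better of the two, so the population size
# drops from NP to NP/2 in a single post-selection step.
fitness = [4.0, 1.0, 3.0, 2.0, 5.0, 0.5]     # NP = 6, so the pairs are (0,3), (1,4), (2,5)
nNP = len(fitness) // 2
halved = [min(fitness[i], fitness[i + nNP]) for i in range(nNP)]
print(halved)  # [2.0, 1.0, 0.5]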
def initPop(self, task, NP, rnd, itype, **kwargs):
    r"""Initialize starting population.

    Args:
        task (Task): Optimization task.
        NP (int): Number of camels in population.
        rnd (mtrand.RandomState): Random number generator.
        itype (Individual): Individual type.
        **kwargs (Dict[str, Any]): Additional arguments.

    Returns:
        Tuple[numpy.ndarray[Camel], numpy.ndarray[float]]:
            1. Initialized population of camels.
            2. Initialized population's function/fitness values.

    """
    caravan = objects2array([itype(E_init=self.E_init, S_init=self.S_init, task=task, rnd=rnd, e=True) for _ in range(NP)])
    return caravan, asarray([c.f for c in caravan])
def init_school(self, task):
    """Initialize fish school with uniform distribution."""
    curr_step_individual = self.step_individual_init * (task.Upper - task.Lower)
    curr_step_volitive = self.step_volitive_init * (task.Upper - task.Lower)
    curr_weight_school = 0.0
    prev_weight_school = 0.0
    school = []
    positions = self.generate_uniform_coordinates(task)
    for idx in range(self.NP):
        fish = self.init_fish(positions[idx], task)
        school.append(fish)
        curr_weight_school += fish.weight
    prev_weight_school = curr_weight_school
    return curr_step_individual, curr_step_volitive, curr_weight_school, prev_weight_school, objects2array(school)
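# Illustrative sketch (assumption about `generate_uniform_coordinates`): the school
# is typically spawned uniformly at random inside the task bounds.  A standalone
# equivalent with plain numpy; `lower`, `upper`, `NP` and `D` are hypothetical
# stand-ins for the task bounds and population shape.
import numpy as np

def generate_uniform_positions(lower, upper, NP, D, rnd=np.random):
    """Sample NP position vectors of dimension D uniformly in [lower, upper]."""
    return rnd.uniform(lower, upper, (NP, D))

print(generate_uniform_positions(lower=-10.0, upper=10.0, NP=4, D=3))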