def test_weighted_wheel_selection():
    """Checks that weighted wheel selection yields a valid index for
    positive weights and `None` when every weight is zero."""

    positive_weights = [1, 2, 3, 4, 5, 6, 7, 8]
    assert general.weighted_wheel_selection(positive_weights) >= 0

    zero_weights = [0] * 8
    assert general.weighted_wheel_selection(zero_weights) is None
def update(self, space: Space, function: Function) -> None:
    """Wraps Cohort Intelligence over all agents and variables.

    Args:
        space: Space containing agents and update-related information.
        function: A Function object that will be used as the objective function.

    """

    # Collects every agent's fitness to drive the roulette selection
    fitness = [agent.fit for agent in space.agents]

    for i, agent in enumerate(space.agents):
        # Chooses a candidate to follow via weighted wheel selection
        selected = g.weighted_wheel_selection(fitness)

        # Shrinks / expands the per-agent sampling interval around the
        # selected candidate's position
        self.lower[i] = space.agents[selected].position - self.lower[i] * self.r / 2
        self.upper[i] = space.agents[selected].position - self.upper[i] * self.r / 2

        # Attempts `t` random variations inside the current interval
        for _ in range(self.t):
            # Works on a deep copy so the agent is only touched on improvement
            trial = copy.deepcopy(agent)

            for j, (lb, ub) in enumerate(zip(self.lower[i], self.upper[i])):
                # Samples the decision variable uniformly inside [lb, ub]
                trial.position[j] = rnd.generate_uniform_random_number(
                    lb, ub, agent.n_dimensions
                )

            # Keeps the trial inside the search-space bounds
            trial.clip_by_bound()

            # Evaluates the trial position
            trial.fit = function(trial.position)

            # Accepts the trial only when it improves the agent's fitness
            if trial.fit < agent.fit:
                agent.position = copy.deepcopy(trial.position)
                agent.fit = copy.deepcopy(trial.fit)
def update(self, space: Space, function: Function, iteration: int, n_iterations: int) -> None:
    """Wraps Multi-Verse Optimizer over all agents and variables (eq. 3.1-3.4).

    Args:
        space: Space containing agents and update-related information.
        function: A Function object that will be used as the objective function.
        iteration: Current iteration.
        n_iterations: Maximum number of iterations.

    """

    # Wormhole Existence Probability grows linearly with the iteration
    WEP = self.WEP_min + (iteration + 1) * ((self.WEP_max - self.WEP_min) / n_iterations)

    # Travelling Distance Rate decays with the iteration
    TDR = 1 - ((iteration + 1) ** (1 / self.p) / n_iterations ** (1 / self.p))

    # Normalizes every individual's fitness by the vector norm
    fitness = [agent.fit for agent in space.agents]
    norm = np.linalg.norm(fitness)
    norm_fitness = fitness / norm

    for i, agent in enumerate(space.agents):
        for j in range(agent.n_variables):
            # White-hole exchange: fitter universes donate variables
            r1 = r.generate_uniform_random_number()
            if r1 < norm_fitness[i]:
                # Roulette-wheel pick of the donating white hole
                white_hole = g.weighted_wheel_selection(norm_fitness)
                agent.position[j] = space.agents[white_hole].position[j]

            # Wormhole travel towards the best universe
            r2 = r.generate_uniform_random_number()
            if r2 < WEP:
                r3 = r.generate_uniform_random_number()

                # Random width sampled between the variable's bounds
                width = r.generate_uniform_random_number(agent.lb[j], agent.ub[j])
                offset = TDR * width

                # Moves above or below the best position with equal chance
                if r3 < 0.5:
                    agent.position[j] = space.best_agent.position[j] + offset
                else:
                    agent.position[j] = space.best_agent.position[j] - offset

        # Keeps the agent inside the search-space bounds
        agent.clip_by_bound()

        # Re-evaluates the agent at its new position
        agent.fit = function(agent.position)
def _update(self, agents, best_agent, function, WEP, TDR):
    """Method that wraps updates over all agents and variables (eq. 3.1-3.4).

    Args:
        agents (list): List of agents.
        best_agent (Agent): Global best agent.
        function (Function): A Function object that will be used as the objective function.
        WEP (float): Current iteration's Wormhole Existence Probability.
        TDR (float): Current iteration's Travelling Distance Rate.

    """

    # Gathers every individual's fitness and normalizes it by the vector norm
    fitness = [agent.fit for agent in agents]
    norm = np.linalg.norm(fitness)
    norm_fitness = fitness / norm

    for i, agent in enumerate(agents):
        for j in range(agent.n_variables):
            # White-hole exchange, gated by the agent's normalized fitness
            r1 = r.generate_uniform_random_number()
            if r1 < norm_fitness[i]:
                # Selects the donating white hole via roulette wheel
                white_hole = g.weighted_wheel_selection(norm_fitness)
                agent.position[j] = agents[white_hole].position[j]

            # Wormhole travel towards the best universe, gated by WEP
            r2 = r.generate_uniform_random_number()
            if r2 < WEP:
                r3 = r.generate_uniform_random_number()

                # Random width sampled between the variable's bounds
                width = r.generate_uniform_random_number(agent.lb[j], agent.ub[j])

                # Moves above or below the best position with equal chance
                if r3 < 0.5:
                    agent.position[j] = best_agent.position[j] + TDR * width
                else:
                    agent.position[j] = best_agent.position[j] - TDR * width

        # Keeps the agent inside the search-space bounds
        agent.clip_limits()

        # Re-evaluates the agent at its new position
        agent.fit = function(agent.position)