Example n. 1
def generate_levy_distribution(
    beta: Optional[float] = 0.1, size: Optional[int] = 1
) -> np.ndarray:
    """Generates a n-dimensional array based on a Lévy distribution.

    References:
        X.-S. Yang and S. Deb. Computers & Operations Research.
        Multiobjective Cuckoo Search for Design Optimization (2013).

    Args:
        beta: Skewness parameter.
        size: Size of array.

    Returns:
        (np.ndarray): Lévy distribution n-dimensional array.

    """

    # Calculates the equation's numerator and denominator
    num = gamma(1 + beta) * sin(pi * beta / 2)
    den = gamma((1 + beta) / 2) * beta * (2 ** ((beta - 1) / 2))

    # Calculates `sigma`
    sigma = (num / den) ** (1 / beta)

    # Calculates the `u` and `v` distributions
    u = r.generate_gaussian_random_number(0, sigma**2, size=size)
    v = r.generate_gaussian_random_number(size=size)

    # Calculates the Lévy distribution
    levy_array = u / np.fabs(v) ** (1 / beta)

    return levy_array
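
Note: the example above implements Mantegna's algorithm for sampling Lévy-stable steps. Below is a minimal, self-contained NumPy sketch of the same math that bypasses the library's random helpers; the function name `levy_steps` is illustrative, not part of the library.

import numpy as np
from math import gamma, pi, sin


def levy_steps(beta: float = 1.5, size: int = 10) -> np.ndarray:
    # Mantegna's scale factor for the numerator distribution
    num = gamma(1 + beta) * sin(pi * beta / 2)
    den = gamma((1 + beta) / 2) * beta * (2 ** ((beta - 1) / 2))
    sigma = (num / den) ** (1 / beta)

    # u ~ N(0, sigma^2) and v ~ N(0, 1)
    u = np.random.normal(0, sigma, size)
    v = np.random.normal(0, 1, size)

    # Heavy-tailed step lengths
    return u / np.abs(v) ** (1 / beta)


print(levy_steps(1.5, 5))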
Example n. 2
    def _update_strategy(self, strategy):
        """Updates the strategy (eq. 5-10).

        Args:
            strategy (np.array): A strategy array to be updated.

        Returns:
            The updated strategy.

        """

        # Calculates the number of variables and dimensions
        n_variables, n_dimensions = strategy.shape[0], strategy.shape[1]

        # Calculates the mutation strength
        tau = 1 / np.sqrt(2 * n_variables)

        # Calculates the mutation strength complementary
        tau_p = 1 / np.sqrt(2 * np.sqrt(n_variables))

        # Generates a gaussian random number
        r1 = r.generate_gaussian_random_number(size=(n_variables, n_dimensions))

        # Generates another gaussian random number
        r2 = r.generate_gaussian_random_number(size=(n_variables, n_dimensions))

        # Calculates the new strategy
        new_strategy = strategy * np.exp(tau_p * r1 + tau * r2)

        return new_strategy
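
The update above is the classic log-normal self-adaptation rule from evolution strategies: sigma' = sigma * exp(tau_p * r1 + tau * r2), which keeps step sizes positive. A hedged, NumPy-only sketch of the same update on a concrete array (the name `self_adapt` is illustrative):

import numpy as np


def self_adapt(strategy: np.ndarray) -> np.ndarray:
    n_variables, n_dimensions = strategy.shape

    # Standard ES learning rates
    tau = 1 / np.sqrt(2 * n_variables)
    tau_p = 1 / np.sqrt(2 * np.sqrt(n_variables))

    # Independent standard-normal perturbations
    r1 = np.random.normal(size=(n_variables, n_dimensions))
    r2 = np.random.normal(size=(n_variables, n_dimensions))

    # Multiplicative log-normal update keeps strategies positive
    return strategy * np.exp(tau_p * r1 + tau * r2)


print(self_adapt(np.full((3, 1), 0.5)))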
Example n. 3
    def _mutation(self, alpha, beta):
        """Performs the mutation over offsprings.

        Args:
            alpha (Agent): First offspring.
            beta (Agent): Second offspring.

        Returns:
            Two mutated offspring.

        """

        # For every decision variable
        for j in range(alpha.n_variables):
            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # If random number is smaller than probability of mutation
            if r1 < self.p_mutation:
                # Mutates the offspring
                alpha.position[j] *= r.generate_gaussian_random_number()

            # Generates another uniform random number
            r2 = r.generate_uniform_random_number()

            # If random number is smaller than probability of mutation
            if r2 < self.p_mutation:
                # Mutates the offspring
                beta.position[j] *= r.generate_gaussian_random_number()

        return alpha, beta
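
The per-variable loop applies an independent coin flip and a gaussian multiplicative factor to each decision variable. For reference, a vectorized NumPy sketch of the same mutation on a single position array (the 1-D shape and names are illustrative assumptions):

import numpy as np


def mutate(position: np.ndarray, p_mutation: float = 0.1) -> np.ndarray:
    # One uniform coin flip per decision variable
    mask = np.random.uniform(size=position.shape) < p_mutation

    # Multiply only the selected variables by a gaussian factor
    factors = np.where(mask, np.random.normal(size=position.shape), 1.0)
    return position * factors


print(mutate(np.array([1.0, 2.0, 3.0]), p_mutation=0.5))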
Example n. 4
def generate_levy_distribution(beta=0.1, size=1):
    """Generates a n-dimensional array based on a Lévy distribution.

    References:
        X.-S. Yang and S. Deb. Computers & Operations Research.
        Multiobjective Cuckoo Search for Design Optimization (2013).

    Args:
        beta (float): Skewness parameter.
        size (int): Size of array.

    Returns:
        A Lévy distribution n-dimensional array.

    """

    # Calculates the equation's numerator
    num = gamma(1 + beta) * sin(pi * beta / 2)

    # Calculates the equation's denominator
    den = gamma((1 + beta) / 2) * beta * (2 ** ((beta - 1) / 2))

    # Calculates the sigma for further distribution generation
    sigma = (num / den) ** (1 / beta)

    # Calculates the 'u' distribution
    u = r.generate_gaussian_random_number(size=size) * sigma

    # Calculates the 'v' distribution
    v = r.generate_gaussian_random_number(size=size)

    # Finally, we can calculate the Lévy distribution
    levy_array = u / np.fabs(v) ** (1 / beta)

    return levy_array
Example n. 5
    def _update_strategy(self, index):
        """Updates the strategy (eq. 5-10).

        Args:
            index (int): Index of current agent.

        Returns:
            The updated strategy.

        """

        # Calculates the number of variables and dimensions
        n_variables, n_dimensions = self.strategy.shape[1], self.strategy.shape[2]

        # Calculates the mutation strength and its complementary
        tau = 1 / np.sqrt(2 * n_variables)
        tau_p = 1 / np.sqrt(2 * np.sqrt(n_variables))

        # Generates gaussian random numbers
        r1 = r.generate_gaussian_random_number(size=(n_variables,
                                                     n_dimensions))
        r2 = r.generate_gaussian_random_number(size=(n_variables,
                                                     n_dimensions))

        # Calculates the new strategy
        self.strategy[index] *= np.exp(tau_p * r1 + tau * r2)

        return self.strategy[index]
Example n. 6
    def update(
        self, space: Space, function: Function, iteration: int, n_iterations: int
    ) -> None:
        """Wraps Self-Adaptive Global-Best Harmony Search over all agents and variables.

        Args:
            space: Space containing agents and update-related information.
            function: A Function object that will be used as the objective function.
            iteration: Current iteration.
            n_iterations: Maximum number of iterations.

        """

        # Updates harmony memory considering and pitch adjusting rates
        self.HMCR = r.generate_gaussian_random_number(self.HMCRm, 0.01)[0]
        self.PAR = r.generate_gaussian_random_number(self.PARm, 0.05)[0]

        # Stores updated values to lists
        self.HMCR_history.append(self.HMCR)
        self.PAR_history.append(self.PAR)

        # If current iteration is within the first half of the run
        if iteration < n_iterations // 2:
            # Updates the bandwidth parameter
            self.bw = (
                self.bw_max
                - ((self.bw_max - self.bw_min) / n_iterations) * 2 * iteration
            )
        else:
            # Replaces by the minimum bandwidth
            self.bw = self.bw_min

        # Generates a new harmony
        agent = self._generate_new_harmony(space.agents)

        # Checks agent limits
        agent.clip_by_bound()

        # Calculates the new harmony fitness
        agent.fit = function(agent.position)

        # Sorts agents
        space.agents.sort(key=lambda x: x.fit)

        # If the new harmony's fitness is better than the worst agent's
        if agent.fit < space.agents[-1].fit:
            # Updates the corresponding agent's position and fitness
            space.agents[-1].position = copy.deepcopy(agent.position)
            space.agents[-1].fit = copy.deepcopy(agent.fit)

        # Checks if learning period has reached its maximum
        if self.lp == self.LP:
            # Re-calculates the mean HMCR and PAR, and resets learning period
            self.HMCRm = np.mean(self.HMCR_history)
            self.PARm = np.mean(self.PAR_history)
            self.lp = 1
        else:
            # Increases learning period
            self.lp += 1
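
The bandwidth `bw` follows a piecewise schedule: linear decay from `bw_max` towards `bw_min` over the first half of the run, then a constant minimum. A small standalone sketch of just that schedule (parameter values are illustrative, not the library's defaults):

def bandwidth(iteration: int, n_iterations: int,
              bw_min: float = 0.0001, bw_max: float = 0.05) -> float:
    if iteration < n_iterations // 2:
        # Linear decay that reaches bw_min exactly at the halfway point
        return bw_max - ((bw_max - bw_min) / n_iterations) * 2 * iteration

    # Constant minimum bandwidth for the second half
    return bw_min


for t in (0, 25, 49, 50, 99):
    print(t, bandwidth(t, 100))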
Example n. 7
    def _update_strategy(
        self, index: int, lower_bound: np.ndarray, upper_bound: np.ndarray
    ) -> np.ndarray:
        """Updates the strategy and performs a clipping process to help its convergence (eq. 5.2).

        Args:
            index: Index of current agent.
            lower_bound: An array holding the lower bounds.
            upper_bound: An array holding the upper bounds.

        Returns:
            (np.ndarray): The updated strategy.

        """

        # Calculates the number of variables and dimensions
        n_variables, n_dimensions = self.strategy.shape[1], self.strategy.shape[2]

        # Generates a gaussian random number
        r1 = r.generate_gaussian_random_number(size=(n_variables, n_dimensions))

        # Calculates the new strategy
        self.strategy[index] += r1 * (np.sqrt(np.abs(self.strategy[index])))

        # For every decision variable
        for j, (lb, ub) in enumerate(zip(lower_bound, upper_bound)):
            # Uses the clip ratio to help the convergence
            self.strategy[index][j] = (
                np.clip(self.strategy[index][j], lb, ub) * self.clip_ratio
            )

        return self.strategy[index]
Example n. 8
    def _mutate_parent(self, agent, function, strategy):
        """Mutates a parent into a new child (eq. 5.1).

        Args:
            agent (Agent): An agent instance to be reproduced.
            function (Function): A Function object that will be used as the objective function.
            strategy (np.array): An array holding the strategies that conduct the searching process.

        Returns:
            A mutated child.

        """

        # Makes a deepcopy on selected agent
        a = copy.deepcopy(agent)

        # Generates a gaussian random number
        r1 = r.generate_gaussian_random_number()

        # Updates its position
        a.position += strategy * r1

        # Clips its limits
        a.clip_limits()

        # Calculates its fitness
        a.fit = function(a.position)

        return a
Example n. 9
    def _raining_process(self, agents: List[Agent], best_agent: Agent) -> None:
        """Performs the raining process (eq. 11-12).

        Args:
            agents: List of agents.
            best_agent: Global best agent.

        """

        # Iterates through all sea + rivers
        for i in range(0, self.nsr):
            # Iterates through all raindrops that belong to a specific sea or river
            for j in range(self.nsr, self.flows[i] + self.nsr):
                # Calculates the euclidean distance between sea and raindrop / stream
                distance = np.linalg.norm(best_agent.position - agents[j].position)

                # If distance is smaller than the evaporation condition
                if distance < self.d_max:
                    # If it is supposed to replace the sea streams' position
                    if i == 0:
                        # Updates position (eq. 12): samples around the sea with
                        # a standard-normal perturbation
                        r1 = r.generate_gaussian_random_number(
                            size=(agents[j].n_variables, agents[j].n_dimensions)
                        )
                        agents[j].position = best_agent.position + np.sqrt(0.1) * r1

                    # If it is supposed to replace the river streams' position
                    else:
                        # Updates position (eq. 11)
                        agents[j].fill_with_uniform()
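
Equation 12 re-seeds a stream close to the sea by adding gaussian noise with variance 0.1 to the sea's position. A one-function NumPy sketch of that single move, assuming a plain array position:

import numpy as np


def rain_near_sea(sea_position: np.ndarray) -> np.ndarray:
    # New stream: the sea's position plus noise with standard deviation sqrt(0.1)
    r1 = np.random.normal(size=sea_position.shape)
    return sea_position + np.sqrt(0.1) * r1


print(rain_near_sea(np.zeros(3)))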
Example n. 10
    def _update_strategy(self, strategy, lower_bound, upper_bound):
        """Updates the strategy and performs a clipping process to help its convergence (eq. 5.2).

        Args:
            strategy (np.array): A strategy array to be updated.
            lower_bound (np.array): An array holding the lower bounds.
            upper_bound (np.array): An array holding the upper bounds.

        Returns:
            The updated strategy.

        """

        # Calculates the number of variables and dimensions
        n_variables, n_dimensions = strategy.shape[0], strategy.shape[1]

        # Generates a gaussian random number
        r1 = r.generate_gaussian_random_number(size=(n_variables,
                                                     n_dimensions))

        # Calculates the new strategy
        new_strategy = strategy + r1 * (np.sqrt(np.abs(strategy)))

        # For every decision variable
        for j, (lb, ub) in enumerate(zip(lower_bound, upper_bound)):
            # Uses the clip ratio to help the convergence
            new_strategy[j] = np.clip(new_strategy[j], lb,
                                      ub) * self.clip_ratio

        return new_strategy
Example n. 11
    def _constraint_handle(self, agents, best_agent, function, iteration):
        """Performs the constraint handling procedure (eq. 11).

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            function (Function): A Function object that will be used as the objective function.
            iteration (int): Current iteration.

        """

        # Iterates through every agent
        for agent in agents:
            # Generates a random number
            r1 = r.generate_uniform_random_number()

            # If random number is smaller than 0.5
            if r1 < 0.5:
                # Generates a gaussian random number
                r2 = r.generate_gaussian_random_number()

                # Updates the agent's position
                agent.position = best_agent.position + \
                    (r2 / iteration) * (best_agent.position - agent.position)

            # Clips its limits
            agent.clip_limits()

            # Re-calculates its fitness
            agent.fit = function(agent.position)
Example n. 12
    def _mutate_parent(self, agent, index, function):
        """Mutates a parent into a new child (eq. 2).

        Args:
            agent (Agent): An agent instance to be reproduced.
            index (int): Index of current agent.
            function (Function): A Function object that will be used as the objective function.

        Returns:
            A mutated child.

        """

        # Makes a deepcopy on selected agent
        a = copy.deepcopy(agent)

        # Generates a gaussian random number
        r1 = r.generate_gaussian_random_number()

        # Updates its position
        a.position += self.strategy[index] * r1

        # Clips its limits
        a.clip_by_bound()

        # Calculates its fitness
        a.fit = function(a.position)

        return a
Example n. 13
    def _break_wave(self, wave, function, j):
        """Breaks current wave into a new one (eq. 10).

        Args:
            wave (Agent): Wave to be broken.
            function (Function): A function object.
            j (int): Index of dimension to be broken.

        Returns:
            Broken wave.

        """

        # Makes a deepcopy of current wave
        broken_wave = copy.deepcopy(wave)

        # Generates a gaussian random number
        r1 = r.generate_gaussian_random_number()

        # Updates the broken wave's position
        broken_wave.position[j] += r1 * self.beta * (j + 1)

        # Clips its limits
        broken_wave.clip_limits()

        # Re-calculates its fitness
        broken_wave.fit = function(broken_wave.position)

        return broken_wave
Example n. 14
    def update(self, space: Space, function: Function) -> None:
        """Wraps Artificial Flora over all agents and variables.

        Args:
            space: Space containing agents and update-related information.
            function: A Function object that will be used as the objective function.

        """

        # Sorts the agents
        space.agents.sort(key=lambda x: x.fit)

        # Creates a list of new agents
        new_agents = []

        # Iterates through all agents
        for i, agent in enumerate(space.agents):
            # Iterates through amount of branches
            for _ in range(self.m):
                # Makes a copy of current agent
                a = copy.deepcopy(agent)

                # Generates random numbers
                r1 = r.generate_uniform_random_number()
                r2 = r.generate_uniform_random_number()
                r3 = r.generate_uniform_random_number()

                # Calculates the new distance (eq. 1)
                distance = (self.g_distance[i] * r1 * self.c1 +
                            self.p_distance[i] * r2 * self.c2)

                # Generates a gaussian random number
                D = r.generate_gaussian_random_number(
                    0, distance, (space.n_variables, space.n_dimensions))

                # Updates offspring's position (eq. 5)
                a.position += D

                # Clips its limits
                a.clip_by_bound()

                # Evaluates its fitness
                a.fit = function(a.position)

                # Calculates the probability of selection (eq. 6)
                p = np.fabs(np.sqrt(a.fit / space.agents[-1].fit)) * self.Q

                # If random number is smaller than probability of selection
                if r3 < p:
                    # Appends the offsprings
                    new_agents.append(a)

            # Updates both grandparent and parent distances (eq. 2 and 3)
            self.g_distance[i] = self.p_distance[i]
            self.p_distance[i] = np.std(agent.position - a.position)

        # Randomly selects the agents
        idx = d.generate_choice_distribution(len(new_agents), None,
                                             space.n_agents)
        space.agents = [new_agents[i] for i in idx]
Example n. 15
    def update(self, space, iteration, n_iterations):
        """Wraps Whale Optimization Algorithm over all agents and variables.

        Args:
            space (Space): Space containing agents and update-related information.
            iteration (int): Current iteration.
            n_iterations (int): Maximum number of iterations.

        """

        # Linearly decreases the coefficient
        coefficient = 2 - 2 * iteration / (n_iterations - 1)

        # Iterates through all agents
        for agent in space.agents:
            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # Calculates the `A` coefficient
            A = 2 * coefficient * r1 - coefficient

            # Calculates the `C` coefficient
            C = 2 * r1

            # Generates a random number between 0 and 1
            p = r.generate_uniform_random_number()

            # If `p` is smaller than 0.5
            if p < 0.5:
                # If the absolute value of `A` is smaller than 1
                if np.fabs(A) < 1:
                    # Calculates the distance coefficient
                    D = np.fabs(C * space.best_agent.position - agent.position)

                    # Updates the agent's position
                    agent.position = space.best_agent.position - A * D

                # If the absolute value of `A` is bigger or equal to 1
                else:
                    # Generates a random-based agent
                    a = self._generate_random_agent(agent)

                    # Calculates the distance coefficient
                    D = np.fabs(C * a.position - agent.position)

                    # Updates the agent's position
                    agent.position = a.position - A * D

            # If `p` is bigger or equal to 0.5
            else:
                # Generates a gaussian random number for the spiral parameter
                l = r.generate_gaussian_random_number()

                # Calculates the distance coefficient
                D = np.fabs(space.best_agent.position - agent.position)

                # Updates the agent's position
                agent.position = D * np.exp(self.b * l) * np.cos(
                    2 * np.pi * l) + space.best_agent.position
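
The three branches are WOA's encircling move (|A| < 1), exploration around a random agent (|A| >= 1), and the logarithmic spiral. A condensed NumPy sketch of a single agent's update under stated assumptions: `x`, `best`, and `rand` are 1-D positions, `b` is the spiral constant, and `l` is drawn uniformly in [-1, 1] as in the original paper (the snippet above uses a gaussian draw instead):

import numpy as np


def woa_step(x, best, rand, coefficient, b=1.0):
    r1 = np.random.uniform()
    A = 2 * coefficient * r1 - coefficient
    C = 2 * r1

    if np.random.uniform() < 0.5:
        # Encircle the prey when |A| < 1, otherwise explore around a random agent
        target = best if np.fabs(A) < 1 else rand
        D = np.fabs(C * target - x)
        return target - A * D

    # Logarithmic spiral around the best agent
    l = np.random.uniform(-1, 1)
    D = np.fabs(best - x)
    return D * np.exp(b * l) * np.cos(2 * np.pi * l) + best


print(woa_step(np.zeros(2), np.ones(2), np.full(2, -1.0), coefficient=1.0))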
Example n. 16
    def _update(self, agents, best_agent, function, iteration, frequency, velocity, loudness, pulse_rate):
        """Method that wraps Bat Algorithm over all agents and variables.

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            function (Function): A function object.
            iteration (int): Current iteration number.
            frequency (np.array): Array of frequencies.
            velocity (np.array): Array of current velocities.
            loudness (np.array): Array of loudnesses.
            pulse_rate (np.array): Array of pulse rates.

        """

        # Declaring alpha constant
        alpha = 0.9

        # Iterate through all agents
        for i, agent in enumerate(agents):
            # Updating frequency
            frequency[i] = self._update_frequency(self.f_min, self.f_max)

            # Updating velocity
            velocity[i] = self._update_velocity(
                agent.position, best_agent.position, frequency[i], velocity[i])

            # Updating agent's position
            agent.position = self._update_position(agent.position, velocity[i])

            # Generating a random probability
            p = r.generate_uniform_random_number()

            # Generating a gaussian random number
            e = r.generate_gaussian_random_number()

            # Check if probability is bigger than current pulse rate
            if p > pulse_rate[i]:
                # Performing a local random walk (Equation 5)
                # We apply 0.001 to limit the step size
                agent.position = best_agent.position + \
                    0.001 * e * np.mean(loudness)

            # Checks agent limits
            agent.check_limits()

            # Evaluates agent
            agent.fit = function.pointer(agent.position)

            # Checks if probability is smaller than loudness and if fit is better
            if p < loudness[i] and agent.fit < best_agent.fit:
                # Copying the new solution to space's best agent
                best_agent = copy.deepcopy(agent)

                # Increasing pulse rate (Equation 6)
                pulse_rate[i] = self.r * (1 - np.exp(-alpha * iteration))

                # Decreasing loudness (Equation 6)
                loudness[i] = self.A * alpha
Example n. 17
    def update(self, space: Space, function: Function) -> None:
        """Wraps Algorithm of the Innovative Gunner over all agents and variables.

        Args:
            space: Space containing agents and update-related information.
            function: A Function object that will be used as the objective function.

        """

        # Calculates the maximum correction angles (eq. 18)
        a = r.generate_uniform_random_number()
        alpha_max = self.alpha * a
        beta_max = self.beta * a

        # Iterates through all agents
        for agent in space.agents:
            # Makes a deep copy of current agent
            a = copy.deepcopy(agent)

            # Samples correction angles
            alpha = r.generate_gaussian_random_number(
                0, alpha_max / 3, (agent.n_variables, agent.n_dimensions)
            )
            beta = r.generate_gaussian_random_number(
                0, beta_max / 3, (agent.n_variables, agent.n_dimensions)
            )

            # Calculates correction functions (eq. 16 and 17)
            g_alpha = np.where(alpha < 0, np.cos(alpha), 1 / np.cos(alpha))
            g_beta = np.where(beta < 0, np.cos(beta), 1 / np.cos(beta))

            # Updates temporary agent's position (eq. 15)
            a.position *= g_alpha * g_beta

            # Checks agent's limits
            a.clip_by_bound()

            # Re-evaluates the temporary agent
            a.fit = function(a.position)

            # If temporary agent's fitness is better than agent's fitness
            if a.fit < agent.fit:
                # Replaces its position and fitness
                agent.position = copy.deepcopy(a.position)
                agent.fit = copy.deepcopy(a.fit)
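
The correction functions of eq. 16-17 contract a coordinate by cos(angle) when the sampled angle is negative and expand it by 1/cos(angle) when it is positive, so positions can both shrink and grow. A tiny sketch of that mapping with illustrative values:

import numpy as np


def correction(angle: np.ndarray) -> np.ndarray:
    # Negative angles contract the position, positive angles expand it
    return np.where(angle < 0, np.cos(angle), 1 / np.cos(angle))


print(correction(np.array([-0.3, 0.0, 0.3])))  # approx. [0.955, 1.0, 1.047]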
Example n. 18
    def _update(self, agents, best_agent, coefficient):
        """Method that wraps Whale Optimization Algorithm updates.

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            coefficient (float): A linearly decreased coefficient.

        """

        # Iterates through all agents
        for agent in agents:
            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # Calculates the `A` coefficient
            A = 2 * coefficient * r1 - coefficient

            # Calculates the `C` coefficient
            C = 2 * r1

            # Generates a random number between 0 and 1
            p = r.generate_uniform_random_number()

            # If `p` is smaller than 0.5
            if p < 0.5:
                # If the absolute value of `A` is smaller than 1
                if np.fabs(A) < 1:
                    # Calculates the distance coefficient
                    D = np.fabs(C * best_agent.position - agent.position)

                    # Updates the agent's position
                    agent.position = best_agent.position - A * D

                # If the absolute value of `A` is bigger or equal to 1
                else:
                    # Generates a random-based agent
                    a = self._generate_random_agent(agent)

                    # Calculates the distance coefficient
                    D = np.fabs(C * a.position - agent.position)

                    # Updates the agent's position
                    agent.position = a.position - A * D

            # If `p` is bigger or equal to 0.5
            else:
                # Generates a gaussian random number for the spiral parameter
                l = r.generate_gaussian_random_number()

                # Calculates the distance coefficient
                D = np.fabs(best_agent.position - agent.position)

                # Updates the agent's position
                agent.position = D * np.exp(self.b * l) * np.cos(
                    2 * np.pi * l) + best_agent.position
Example n. 19
    def update(self, space: Space, function: Function, iteration: int) -> None:
        """Wraps Bat Algorithm over all agents and variables.

        Args:
            space: Space containing agents and update-related information.
            function: A Function object that will be used as the objective function.
            iteration: Current iteration.

        """

        # Declares alpha constant
        alpha = 0.9

        # Iterates through all agents
        for i, agent in enumerate(space.agents):
            # Updates frequency (eq. 2)
            # Note that we have to apply (min - max) instead of (max - min) or it will not converge
            beta = rnd.generate_uniform_random_number()
            self.frequency[i] = self.f_min + (self.f_min - self.f_max) * beta

            # Updates velocity (eq. 3)
            self.velocity[i] += (agent.position -
                                 space.best_agent.position) * self.frequency[i]

            # Updates agent's position (eq. 4)
            agent.position += self.velocity[i]

            # Generates random uniform and gaussian numbers
            p = rnd.generate_uniform_random_number()
            e = rnd.generate_gaussian_random_number()

            # Checks if probability is bigger than current pulse rate
            if p > self.pulse_rate[i]:
                # Performs a local random walk (eq. 5)
                # We apply 0.001 to limit the step size
                agent.position = space.best_agent.position + 0.001 * e * np.mean(
                    self.loudness)

            # Checks agent limits
            agent.clip_by_bound()

            # Evaluates agent
            agent.fit = function(agent.position)

            # Checks if probability is smaller than loudness and if fit is better
            if p < self.loudness[i] and agent.fit < space.best_agent.fit:
                # Copies the new solution to space's best agent
                space.best_agent = copy.deepcopy(agent)

                # Increases pulse rate (eq. 6 - left)
                self.pulse_rate[i] = self.r * (1 - np.exp(-alpha * iteration))

                # Decreases loudness (eq. 6 - right)
                self.loudness[i] = self.A * alpha
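
Per agent, the kernel is: sample a frequency, integrate it into a velocity relative to the best agent, update the position, and occasionally take a small random walk scaled by the mean loudness. A self-contained NumPy sketch of those steps for one agent; names are illustrative, and it uses the textbook (f_max - f_min) form rather than the sign flip noted in the snippet above:

import numpy as np


def bat_step(x, v, best, f_min=0.0, f_max=2.0,
             pulse_rate=0.5, mean_loudness=1.0):
    # Frequency sampled between its bounds (eq. 2)
    f = f_min + (f_max - f_min) * np.random.uniform()

    # Velocity relative to the best position (eq. 3) and position update (eq. 4)
    v = v + (x - best) * f
    x = x + v

    # Occasional local random walk around the best agent (eq. 5)
    if np.random.uniform() > pulse_rate:
        x = best + 0.001 * np.random.normal() * mean_loudness

    return x, v


print(bat_step(np.zeros(2), np.zeros(2), np.ones(2)))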
Example n. 20
    def update(
        self, space: Space, function: Function, iteration: int, n_iterations: int
    ) -> None:
        """Wraps Flying Squirrel Optimizer over all agents and variables.

        Args:
            space: Space containing agents and update-related information.
            function: A Function object that will be used as the objective function.
            iteration: Current iteration.
            n_iterations: Maximum number of iterations.

        """

        # Calculates the mean position of the population
        mean_position = np.mean([agent.position for agent in space.agents], axis=0)

        # Calculates the Sigma Reduction Factor (eq. 5)
        SRF = (-np.log(1 - (1 / np.sqrt(iteration + 2)))) ** 2

        # Calculates the Beta Expansion Factor
        BEF = self.beta + (2 - self.beta) * ((iteration + 1) / n_iterations)

        # Iterates through all agents
        for agent in space.agents:
            # Makes a deep copy of current agent
            a = copy.deepcopy(agent)

            # Iterates through all variables
            for j in range(agent.n_variables):
                # Calculates the random walk (eq. 2 and 3)
                random_step = r.generate_gaussian_random_number(mean_position[j], SRF)

                # Calculates the Lévy flight (eq. 6 to 18)
                levy_step = d.generate_levy_distribution(BEF)

                # Updates the agent's position
                a.position[j] += (
                    random_step
                    * levy_step
                    * (agent.position[j] - space.best_agent.position[j])
                )

            # Checks agent's limits
            a.clip_by_bound()

            # Re-evaluates the temporary agent
            a.fit = function(a.position)

            # If temporary agent's fitness is better than agent's fitness
            if a.fit < agent.fit:
                # Replaces its position and fitness
                agent.position = copy.deepcopy(a.position)
                agent.fit = copy.deepcopy(a.fit)
Example n. 21
    def _update(self, agents, best_agent, function, sigma):
        """Method that wraps updates over all agents and variables.

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            function (Function): A Function object that will be used as the objective function.
            sigma (list): Width between lower and upper bounds.

        """

        # Calculates a list of fitness per agent
        fitness = [
            1 / (1 + agent.fit) if agent.fit >= 0 else 1 + np.abs(agent.fit)
            for agent in agents
        ]

        # Calculates the total fitness
        total_fitness = np.sum(fitness)

        # Calculates the probability of each agent's fitness
        probs = [fit / total_fitness for fit in fitness]

        # Iterates through all agents
        for agent in agents:
            # For every decision variable
            for j in range(agent.n_variables):
                # Selects a random individual based on its probability
                s = d.generate_choice_distribution(len(agents), probs, 1)[0]

                # Calculates the lambda factor
                lambda_k = self.alpha / (1 + probs[s])

                # Updates the decision variable position
                agent.position[j] += lambda_k * (
                    (agents[s].position[j] + best_agent.position[j]) / 2 -
                    agent.position[j])

                # Generates a uniform random number
                r1 = r.generate_uniform_random_number()

                # If random number is smaller than probability of mutation
                if r1 < self.p_mutation:
                    # Mutates the decision variable position
                    agent.position[j] += sigma[j] * r.generate_gaussian_random_number()

            # Checks agent limits
            agent.clip_limits()

            # Calculates its fitness
            agent.fit = function(agent.position)
Example n. 22
    def update(self, space: Space, function: Function) -> None:
        """Wraps Satin Bowerbird Optimizer over all agents and variables (eq. 1-7).

        Args:
            space: Space containing agents and update-related information.
            function: A Function object that will be used as the objective function.

        """

        # Calculates a list of fitness per agent
        fitness = [
            1 / (1 + agent.fit) if agent.fit >= 0 else 1 + np.abs(agent.fit)
            for agent in space.agents
        ]

        # Calculates the total fitness
        total_fitness = np.sum(fitness)

        # Calculates the probability of each agent's fitness
        probs = [fit / total_fitness for fit in fitness]

        # Iterates through all agents
        for agent in space.agents:
            # For every decision variable
            for j in range(agent.n_variables):
                # Selects a random individual based on its probability
                s = d.generate_choice_distribution(len(space.agents), probs, 1)[0]

                # Calculates the lambda factor
                lambda_k = self.alpha / (1 + probs[s])

                # Updates the decision variable position
                agent.position[j] += lambda_k * (
                    (space.agents[s].position[j] + space.best_agent.position[j]) / 2
                    - agent.position[j]
                )

                # Generates a uniform random number
                r1 = r.generate_uniform_random_number()

                # If random number is smaller than probability of mutation
                if r1 < self.p_mutation:
                    # Mutates the decision variable position
                    agent.position[j] += (
                        self.sigma[j] * r.generate_gaussian_random_number()
                    )

            # Checks agent's limits
            agent.clip_by_bound()

            # Calculates its fitness
            agent.fit = function(agent.position)
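
The roulette probabilities come from a smoothed fitness: 1 / (1 + f) for non-negative fitness and 1 + |f| otherwise, normalized over the population, so smaller (better) fitness gets a larger share. A sketch of just that mapping:

import numpy as np


def selection_probs(fits: np.ndarray) -> np.ndarray:
    # Smaller non-negative fitness yields a larger smoothed value;
    # negative fitness is mapped through 1 + |f|
    smoothed = np.where(fits >= 0, 1 / (1 + fits), 1 + np.abs(fits))
    return smoothed / smoothed.sum()


print(selection_probs(np.array([0.0, 1.0, -2.0])))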
Example n. 23
    def _update(self, agents, function):
        """Method that wraps Simulated Annealing over all agents and variables.

        Args:
            agents (list): List of agents.
            function (Function): A function object.

        """

        # Iterate through all agents
        for agent in agents:
            # Mimics its position
            a = copy.deepcopy(agent)

            # Generating a random noise from a gaussian distribution
            noise = r.generate_gaussian_random_number(
                0, 0.1, size=(agent.n_variables, agent.n_dimensions))

            # Applying the noise
            a.position += noise

            # Check agent limits
            a.clip_limits()

            # Calculates the fitness for the temporary position
            a.fit = function(a.position)

            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # If new fitness is better than agent's fitness
            if a.fit < agent.fit:
                # Copy its position to the agent
                agent.position = copy.deepcopy(a.position)

                # And also copy its fitness
                agent.fit = copy.deepcopy(a.fit)

            # Checks if state should be updated or not
            elif r1 < np.exp(-(a.fit - agent.fit) / self.T):
                # Copy its position to the agent
                agent.position = copy.deepcopy(a.position)

                # And also copy its fitness
                agent.fit = copy.deepcopy(a.fit)

        # Decay the temperature
        self.T *= self.beta
Example n. 24
    def _update(self, agents):
        """Method that wraps Hill Climbing over all agents and variables.

        Args:
            agents (list): List of agents.

        """

        # Iterate through all agents
        for agent in agents:
            # Creates a gaussian noise vector
            noise = r.generate_gaussian_random_number(
                self.r_mean, self.r_var, size=(agent.n_variables, agent.n_dimensions))

            # Updating agent's position
            agent.position += noise
Example n. 25
    def _update_decomposition(self, agents, best_agent, function):
        """Method that wraps decomposition updates over all
        agents and variables (eq. 9).

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            function (Function): A Function object that will be used as the objective function.

        """

        # Iterate through all agents
        for agent in agents:
            # Makes a deep copy of current agent
            a = copy.deepcopy(agent)

            # Calculates the decomposition factor (eq. 10)
            D = 3 * r.generate_gaussian_random_number()

            # Generates the third random number
            r3 = r.generate_uniform_random_number()

            # First weight coefficient (eq. 11)
            e = r3 * int(r.generate_uniform_random_number(1, 2)) - 1

            # Second weight coefficient (eq. 12)
            _h = 2 * r3 - 1

            # Updates the new agent position
            a.position = best_agent.position + D * (e * best_agent.position -
                                                    _h * agent.position)

            # Check agent limits
            a.clip_limits()

            # Calculates the fitness for the temporary position
            a.fit = function(a.position)

            # If new fitness is better than agent's fitness
            if a.fit < agent.fit:
                # Copy its position to the agent
                agent.position = copy.deepcopy(a.position)

                # And also copy its fitness
                agent.fit = copy.deepcopy(a.fit)
Example n. 26
    def update(self, space: Space) -> None:
        """Wraps Hill Climbing over all agents and variables (p. 252).

        Args:
            space: Space containing agents and update-related information.

        """

        # Iterates through all agents
        for agent in space.agents:
            # Creates a gaussian noise vector
            noise = r.generate_gaussian_random_number(
                self.r_mean,
                self.r_var,
                size=(agent.n_variables, agent.n_dimensions))

            # Updates agent's position
            agent.position += noise
Example n. 27
    def update(self, space: Space, function: Function) -> None:
        """Wraps Simulated Annealing over all agents and variables.

        Args:
            space: Space containing agents and update-related information.
            function: A function object.

        """

        # Iterates through all agents
        for agent in space.agents:
            # Mimics its position
            a = copy.deepcopy(agent)

            # Generates a random noise from a gaussian distribution
            noise = r.generate_gaussian_random_number(
                0, 0.1, size=(agent.n_variables, agent.n_dimensions))

            # Applies the noise
            a.position += noise

            # Checks agent's limits
            a.clip_by_bound()

            # Calculates the fitness for the temporary position
            a.fit = function(a.position)

            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # If new fitness is better than agent's fitness
            if a.fit < agent.fit:
                # Copies its position and fitness to the agent
                agent.position = copy.deepcopy(a.position)
                agent.fit = copy.deepcopy(a.fit)

            # Checks if state should be updated or not
            elif r1 < np.exp(-(a.fit - agent.fit) / self.T):
                # Copies its position and fitness to the agent
                agent.position = copy.deepcopy(a.position)
                agent.fit = copy.deepcopy(a.fit)

        # Decays the temperature
        self.T *= self.beta
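
The accept/reject step is the Metropolis criterion: improvements are always kept, while worse candidates survive with probability exp(-delta / T), and the temperature decays geometrically. A minimal sketch of that rule (names are illustrative):

import numpy as np


def metropolis_accept(current_fit: float, candidate_fit: float, T: float) -> bool:
    if candidate_fit < current_fit:
        # Improvements are always accepted
        return True

    # Worse moves survive with probability exp(-delta / T)
    return np.random.uniform() < np.exp(-(candidate_fit - current_fit) / T)


T, beta = 1.0, 0.999
for _ in range(3):
    print(metropolis_accept(1.0, 1.2, T))
    T *= beta  # geometric cooling, as in the snippet above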
Example n. 28
    def _refract_wave(self, agent: Agent, best_agent: Agent,
                      function: Function, index: int) -> Tuple[float, float]:
        """Refract wave into a new position (eq. 8-9).

        Args:
            agent: Agent to be refracted.
            best_agent: Global best agent.
            function: A function object.
            index: Index of wave length.

        Returns:
            (Tuple[float, float]): New height and length values.

        """

        # Gathers current fitness
        current_fit = agent.fit

        # Iterates through all variables
        for j in range(agent.n_variables):
            # Calculates a mean value
            mean = (best_agent.position[j] + agent.position[j]) / 2

            # Calculates the standard deviation
            std = np.fabs(best_agent.position[j] - agent.position[j]) / 2

            # Generates a new position (eq. 8)
            agent.position[j] = r.generate_gaussian_random_number(mean, std)

        # Clips its limits
        agent.clip_by_bound()

        # Re-calculates its fitness
        agent.fit = function(agent.position)

        # Updates the new height to maximum height value
        new_height = self.h_max

        # Re-calculates the new length (eq. 9)
        new_length = self.length[index] * (current_fit /
                                           (agent.fit + c.EPSILON))

        return new_height, new_length
Example n. 29
    def _refract_wave(self, agent, best_agent, function, length):
        """Refract wave into a new position (eq. 8-9).

        Args:
            agent (Agent): Agent to be refracted.
            best_agent (Agent): Global best agent.
            function (Function): A function object.
            length (np.array): Array of wave lengths.

        Returns:
            New height and length values.

        """

        # Gathers current fitness
        current_fit = agent.fit

        # Iterates through all variables
        for j in range(agent.n_variables):
            # Calculates a mean value
            mean = (best_agent.position[j] + agent.position[j]) / 2

            # Calculates the standard deviation
            std = np.fabs(best_agent.position[j] - agent.position[j]) / 2

            # Generates a new position (eq. 8)
            agent.position[j] = r.generate_gaussian_random_number(mean, std)

        # Clips its limits
        agent.clip_limits()

        # Re-calculates its fitness
        agent.fit = function(agent.position)

        # Updates the new height to maximum height value
        new_height = self.h_max

        # Re-calculates the new length (eq. 9)
        new_length = length * (current_fit / agent.fit)

        return new_height, new_length
Example n. 30
    def _create_new_samples(self, agents, function):
        """Creates new agents based on current mean and standard deviation.

        Args:
            agents (list): List of agents.
            function (Function): A Function object that will be used as the objective function.

        """

        # Iterates through all agents
        for agent in agents:
            # Iterates through all decision variables
            for j, (m, s) in enumerate(zip(self.mean, self.std)):
                # For each decision variable, we generate gaussian numbers based on mean and std
                agent.position[j] = r.generate_gaussian_random_number(m, s, agent.n_dimensions)

            # Clips the agent limits
            agent.clip_by_bound()

            # Calculates its new fitness
            agent.fit = function(agent.position)
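
Each decision variable is resampled from its own gaussian N(mean_j, std_j), which is the sampling step of a univariate estimation-of-distribution scheme. A NumPy sketch of that resampling for a whole population at once (shapes and names are illustrative):

import numpy as np


def resample(mean: np.ndarray, std: np.ndarray, n_agents: int) -> np.ndarray:
    # One gaussian per decision variable, shared across all agents
    return np.random.normal(mean, std, size=(n_agents, mean.shape[0]))


mean = np.array([0.0, 5.0])
std = np.array([1.0, 0.1])
print(resample(mean, std, n_agents=3))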