Example no. 1
    def _nesting_phase(self, space: Space, n_crows: int):
        """Performs the nesting phase using the current number of crows.

        Args:
            space: Space containing agents and update-related information.
            n_crows: Number of crows.

        """

        # Gathers the crows
        crows = space.agents[:n_crows]

        # Iterates through all crows
        for i, crow in enumerate(crows):
            # Generates a random index
            idx = r.generate_integer_random_number(high=space.n_agents,
                                                   exclude_value=i)

            # Calculates the step from Lévy distribution (eq. 7)
            step = d.generate_levy_distribution(size=crow.n_variables)
            step = np.expand_dims(step, axis=1)

            # Updates the crow's position and clips its bounds (eq. 6 and 8)
            crow.position = 0.01 * step * (space.agents[idx].position -
                                           crow.position)
            crow.clip_by_bound()
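
All of these snippets lean on `d.generate_levy_distribution`. A common way to draw such heavy-tailed steps is Mantegna's algorithm; the sketch below is a minimal NumPy version under that assumption (the `levy_step` name is hypothetical, and opytimizer's actual implementation may differ):

import numpy as np
from math import gamma, pi, sin

def levy_step(beta: float = 1.5, size: int = 1) -> np.ndarray:
    # Mantegna's sigma scales the numerator's Gaussian samples
    sigma = (gamma(1 + beta) * sin(pi * beta / 2) /
             (gamma((1 + beta) / 2) * beta * 2 ** ((beta - 1) / 2))) ** (1 / beta)

    # A ratio of Gaussian draws produces Lévy-like heavy-tailed steps
    u = np.random.normal(0, sigma, size)
    v = np.random.normal(0, 1, size)

    return u / np.abs(v) ** (1 / beta)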
Example no. 2
    def update(
        self, space: Space, function: Function, iteration: int, n_iterations: int
    ) -> None:
        """Wraps Flying Squirrel Optimizer over all agents and variables.

        Args:
            space: Space containing agents and update-related information.
            function: A Function object that will be used as the objective function.
            iteration: Current iteration.
            n_iterations: Maximum number of iterations.

        """

        # Calculates the mean position of the population
        mean_position = np.mean([agent.position for agent in space.agents], axis=0)

        # Calculates the Sigma Reduction Factor (eq. 5)
        SRF = (-np.log(1 - (1 / np.sqrt(iteration + 2)))) ** 2

        # Calculates the Beta Expansion Factor
        BEF = self.beta + (2 - self.beta) * ((iteration + 1) / n_iterations)

        # Iterates through all agents
        for agent in space.agents:
            # Makes a deep copy of current agent
            a = copy.deepcopy(agent)

            # Iterates through all variables
            for j in range(agent.n_variables):
                # Calculates the random walk (eq. 2 and 3)
                random_step = r.generate_gaussian_random_number(mean_position[j], SRF)

                # Calculates the Lévy flight (eq. 6 to 18)
                levy_step = d.generate_levy_distribution(BEF)

                # Updates the agent's position
                a.position[j] += (
                    random_step
                    * levy_step
                    * (agent.position[j] - space.best_agent.position[j])
                )

            # Checks agent's limits
            a.clip_by_bound()

            # Re-evaluates the temporary agent
            a.fit = function(a.position)

            # If temporary agent's fitness is better than agent's fitness
            if a.fit < agent.fit:
                # Replace its position and fitness
                agent.position = copy.deepcopy(a.position)
                agent.fit = copy.deepcopy(a.fit)
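
To see how the two factors above evolve, the throwaway loop below reuses the exact formulas from the method; `beta = 1.5` and `n_iterations = 10` are hypothetical stand-ins for `self.beta` and the real budget. SRF shrinks over time (narrower Gaussian walks), while BEF grows from `beta` toward 2 (progressively less heavy-tailed Lévy steps):

import numpy as np

beta, n_iterations = 1.5, 10  # hypothetical hyperparameter and budget
for iteration in range(n_iterations):
    # Sigma Reduction Factor (eq. 5) and Beta Expansion Factor, as above
    SRF = (-np.log(1 - (1 / np.sqrt(iteration + 2)))) ** 2
    BEF = beta + (2 - beta) * ((iteration + 1) / n_iterations)
    print(f"t={iteration}: SRF={SRF:.4f}, BEF={BEF:.4f}")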
Example no. 3
    def _generate_new_nests(self, agents: List[Agent],
                            best_agent: Agent) -> List[Agent]:
        """Generate new nests (eq. 1).

        Args:
            agents: List of agents.
            best_agent: Global best agent.

        Returns:
            (List[Agent]): A new list of agents which can be seen as new nests.

        """

        # Makes a temporary copy of current agents
        new_agents = copy.deepcopy(agents)

        # Then, we iterate for every agent
        for new_agent in new_agents:
            # Calculates the Lévy distribution
            step = d.generate_levy_distribution(self.beta,
                                                new_agent.n_variables)

            # Expanding its dimension to perform entrywise multiplication
            step = np.expand_dims(step, axis=1)

            # Calculates the difference vector between local and best positions
            # Alpha controls the intensity of the step size
            step_size = self.alpha * step * (new_agent.position -
                                             best_agent.position)

            # Generates a random normal distribution
            g = r.generate_gaussian_random_number(size=new_agent.n_variables)

            # Expanding its dimension to perform entrywise multiplication
            g = np.expand_dims(g, axis=1)

            # Actually performs the random walk / flight
            new_agent.position += step_size * g

        return new_agents
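
For the broadcasting details without the `Agent` machinery, here is a NumPy-only sketch of the same eq. 1 step; the `(n_variables, n_dimensions)` position shape and the `levy_step` helper from the first sketch are assumptions:

import numpy as np

alpha, beta = 0.01, 1.5                      # hypothetical hyperparameters
position = np.random.uniform(-1, 1, (4, 1))  # assumed (n_variables, n_dimensions)
best = np.zeros((4, 1))

step = levy_step(beta, size=4)       # per-variable Lévy step (first sketch)
step = np.expand_dims(step, axis=1)  # (4,) -> (4, 1) for entrywise multiplication
g = np.random.normal(size=(4, 1))    # Gaussian factor of the walk

position = position + alpha * step * (position - best) * g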
Example no. 4
    def _global_pollination(self, agent_position, best_position):
        """Updates the agent's position based on a global pollination (Lévy's flight).

        Args:
            agent_position (np.ndarray): Agent's current position.
            best_position (np.ndarray): Best agent's current position.

        Returns:
            (np.ndarray): A new position based on equation 1 of the FPA paper.

        """

        # Generates a Lévy distribution
        step = d.generate_levy_distribution(self.beta)

        # Calculates the global pollination
        global_pollination = self.eta * step * (best_position - agent_position)

        # Calculates the new position based on previous global pollination
        new_position = agent_position + global_pollination

        return new_position
Example no. 5
    def _global_pollination(self, agent_position: np.ndarray,
                            best_position: np.ndarray) -> np.ndarray:
        """Updates the agent's position based on a global pollination (eq. 1).

        Args:
            agent_position: Agent's current position.
            best_position: Best agent's current position.

        Returns:
            (np.ndarray): A new position.

        """

        # Generates a Lévy distribution
        step = d.generate_levy_distribution(self.beta)

        # Calculates the global pollination
        global_pollination = self.eta * step * (best_position - agent_position)

        # Calculates the new position based on previous global pollination
        new_position = agent_position + global_pollination

        return new_position
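
Both versions of `_global_pollination` compute the same eq. 1 update, so a quick standalone check with plain NumPy is easy; the `eta` and `beta` values are hypothetical, and `levy_step` from the first sketch again stands in for `d.generate_levy_distribution`:

import numpy as np

eta, beta = 1.0, 1.5  # hypothetical hyperparameters
agent_position = np.array([0.5, -0.2, 0.1])
best_position = np.zeros(3)

step = levy_step(beta, size=1)  # single Lévy step, broadcast over variables
new_position = agent_position + eta * step * (best_position - agent_position)
print(new_position)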
Example no. 6
import opytimizer.math.distribution as d

# Generating a Lévy distribution
l = d.generate_levy_distribution(beta=0.5, size=10)
print(l)
Example no. 7
def test_generate_levy_distribution():
    levy_array = distribution.generate_levy_distribution(beta=0.1, size=5)

    assert levy_array.shape == (5,)
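
Since examples 8 and 9 pass a shape tuple as `size`, a companion test along these lines seems natural; this is a hypothetical addition that assumes `size` accepts tuples, as those calls suggest:

def test_generate_levy_distribution_2d():
    levy_array = distribution.generate_levy_distribution(beta=1.5, size=(4, 2))

    assert levy_array.shape == (4, 2)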
Example no. 8
    def _exploitation_phase(
        self,
        energy: float,
        jump: float,
        agents: List[Agent],
        current_agent: Agent,
        best_agent: Agent,
        function: Function,
    ) -> np.ndarray:
        """Performs the exploitation phase.

        Args:
            energy: Energy coefficient.
            jump: Jump strength.
            agents: List of agents.
            current_agent: Current agent to be updated (or not).
            best_agent: Population's best agent.
            function: A Function object.

        Returns:
            (np.ndarray): A location vector containing the updated position.

        """

        # Generates a uniform random number
        w = r.generate_uniform_random_number()

        # Without rapid dives
        if w >= 0.5:
            # Soft besiege
            if energy >= 0.5:
                # Calculates the delta's position
                delta = best_agent.position - current_agent.position

                # Calculates the location vector (eq. 4)
                location_vector = delta - energy * np.fabs(
                    jump * best_agent.position - current_agent.position)

                return location_vector

            # Hard besiege
            else:
                # Calculates the delta's position
                delta = best_agent.position - current_agent.position

                # Calculates the location vector (eq. 6)
                location_vector = best_agent.position - energy * np.fabs(delta)

                return location_vector

        # With rapid dives
        # Soft besiege
        if energy >= 0.5:
            # Calculates the `Y` position (eq. 7)
            Y = best_agent.position - energy * np.fabs(
                jump * best_agent.position - current_agent.position)

            # Generates the Lévy flight and random arrays (eq. 9)
            LF = d.generate_levy_distribution(
                1.5, (current_agent.n_variables, current_agent.n_dimensions))
            S = r.generate_uniform_random_number(
                size=(current_agent.n_variables, current_agent.n_dimensions))

            # Calculates the `Z` position (eq. 8)
            Z = Y + S * LF

            # Evaluates new positions
            Y_fit = function(Y)
            Z_fit = function(Z)

            # If `Y` position is better than current agent's one (eq. 10 - part 1)
            if Y_fit < current_agent.fit:
                return Y

            # If `Z` position is better than current agent's one (eq. 10 - part 2)
            if Z_fit < current_agent.fit:
                return Z

        # Hard besiege
        else:
            # Averages the population's position
            average = np.mean([x.position for x in agents], axis=0)

            # Calculates the `Y` position (eq. 12)
            Y = best_agent.position - energy * np.fabs(
                jump * best_agent.position - average)

            # Generates the Lévy flight and random arrays (eq. 9)
            LF = d.generate_levy_distribution(
                1.5, (current_agent.n_variables, current_agent.n_dimensions))
            S = r.generate_uniform_random_number(
                size=(current_agent.n_variables, current_agent.n_dimensions))

            # Calculates the `Z` position (eq. 13)
            Z = Y + S * LF

            # Evaluates new positions
            Y_fit = function(Y)
            Z_fit = function(Z)

            # If `Y` position is better than current agent's one (eq. 11 - part 1)
            if Y_fit < current_agent.fit:
                return Y

            # If `Z` position is better than current agent's one (eq. 11 - part 2)
            if Z_fit < current_agent.fit:
                return Z

        return current_agent.position
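
`_exploitation_phase` consumes `energy` and `jump` but does not compute them. In the Harris Hawks Optimization paper they are typically drawn as below; a minimal sketch of the standard HHO formulas, not necessarily opytimizer's exact wiring:

import numpy as np

iteration, n_iterations = 10, 100  # hypothetical loop state

E0 = 2 * np.random.uniform() - 1                  # initial energy in [-1, 1]
energy = 2 * E0 * (1 - iteration / n_iterations)  # escaping energy (HHO eq. 3)
jump = 2 * (1 - np.random.uniform())              # jump strength J in (0, 2]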
Example no. 9
    def update(self, space: Space, function: Function, iteration: int,
               n_iterations: int) -> None:
        """Wraps Aquila Optimizer over all agents and variables.

        Args:
            space: Space containing agents and update-related information.
            function: A Function object that will be used as the objective function.
            iteration: Current iteration.
            n_iterations: Maximum number of iterations.

        """

        # Calculates the mean position of space
        average = np.mean([agent.position for agent in space.agents], axis=0)

        # Iterates through all agents
        for agent in space.agents:
            # Makes a deep copy of current agent
            a = copy.deepcopy(agent)

            # Generates a random number
            r1 = r.generate_uniform_random_number()

            # If current iteration is smaller than or equal to 2/3 of maximum iterations
            if iteration <= ((2 / 3) * n_iterations):
                # Generates another random number
                r2 = r.generate_uniform_random_number()

                # If random number is smaller or equal to 0.5
                if r1 <= 0.5:
                    # Updates temporary agent's position (eq. 3)
                    a.position = space.best_agent.position * (
                        1 - (iteration / n_iterations)) + (
                            average - space.best_agent.position * r2)

                # If random number is bigger than 0.5
                else:
                    # Generates a Lévy distribution and a random integer
                    levy = d.generate_levy_distribution(
                        size=(agent.n_variables, agent.n_dimensions))
                    idx = r.generate_integer_random_number(
                        high=len(space.agents))

                    # Creates an evenly-spaced array of `n_variables`
                    # Also broadcasts it to the correct `n_dimensions` size
                    D = np.linspace(1, agent.n_variables, agent.n_variables)
                    D = np.repeat(np.expand_dims(D, -1),
                                  agent.n_dimensions,
                                  axis=1)

                    # Calculates current cycle value (eq. 10)
                    cycle = self.n_cycles + self.U * D

                    # Calculates `theta` (eq. 11)
                    theta = -self.w * D + (3 * np.pi) / 2

                    # Calculates `x` and `y` positioning (eq. 8 and 9)
                    x = cycle * np.sin(theta)
                    y = cycle * np.cos(theta)

                    # Updates temporary agent's position (eq. 5)
                    a.position = (space.best_agent.position * levy +
                                  space.agents[idx].position + (y - x) * r2)

            # If current iteration is bigger than 2/3 of maximum iterations
            else:
                # Generates another random number
                r2 = r.generate_uniform_random_number()

                # If random number is smaller or equal to 0.5
                if r1 <= 0.5:
                    # Expands both lower and upper bound dimensions
                    lb = np.expand_dims(agent.lb, -1)
                    ub = np.expand_dims(agent.ub, -1)

                    # Updates temporary agent's position (eq. 13)
                    a.position = (
                        (space.best_agent.position - average) * self.alpha -
                        r2 + ((ub - lb) * r2 + lb) * self.delta)

                # If random number is bigger than 0.5
                else:
                    # Calculates both motions (eq. 16 and 17)
                    G1 = 2 * r2 - 1
                    G2 = 2 * (1 - (iteration / n_iterations))

                    # Calculates quality function (eq. 15)
                    QF = iteration**(G1 / (1 - n_iterations)**2)

                    # Generates a Lévy distribution
                    levy = d.generate_levy_distribution(
                        size=(agent.n_variables, agent.n_dimensions))

                    # Updates temporary agent's position (eq. 14)
                    a.position = (QF * space.best_agent.position -
                                  (G1 * a.position * r2) - G2 * levy + r2 * G1)

            # Checks agent's limits
            a.clip_by_bound()

            # Calculates the fitness for the temporary position
            a.fit = function(a.position)

            # If new fitness is better than agent's fitness
            if a.fit < agent.fit:
                # Copies its position and fitness to the agent
                agent.position = copy.deepcopy(a.position)
                agent.fit = copy.deepcopy(a.fit)
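
The spiral terms in eq. 8 to 11 can be inspected in isolation; in the sketch below, `n_cycles`, `U` and `w` are hypothetical values standing in for the optimizer's `self.n_cycles`, `self.U` and `self.w` attributes:

import numpy as np

n_cycles, U, w = 10, 0.00565, 0.005  # hypothetical hyperparameters
n_variables, n_dimensions = 4, 1

# Evenly-spaced variable indices, broadcast over dimensions
D = np.linspace(1, n_variables, n_variables)
D = np.repeat(np.expand_dims(D, -1), n_dimensions, axis=1)

cycle = n_cycles + U * D          # eq. 10
theta = -w * D + (3 * np.pi) / 2  # eq. 11
x = cycle * np.sin(theta)         # eq. 8
y = cycle * np.cos(theta)         # eq. 9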