Example #1
    def _rellocation(self, agent: Agent, best_agent: Agent,
                     function: Function) -> None:
        """Performs the fox rellocation procedure.

        Args:
            agent: Current agent.
            best_agent: Best agent.
            function: A Function object that will be used as the objective function.

        """

        # Creates a temporary agent
        temp = copy.deepcopy(agent)

        # Calculates the square root of the euclidean distance between the agent and the best agent (eq. 1)
        distance = np.sqrt(
            g.euclidean_distance(temp.position, best_agent.position))

        # Randomly selects the scaling hyperparameter
        alpha = r.generate_uniform_random_number(0, distance)

        # Calculates individual reallocation (eq. 2)
        temp.position += alpha * np.sign(best_agent.position - temp.position)

        # Checks agent's limits
        temp.clip_by_bound()

        # Calculates the fitness for the temporary position
        temp.fit = function(temp.position)

        # If new fitness is better than agent's fitness
        if temp.fit < agent.fit:
            # Copies its position and fitness to the agent
            agent.position = copy.deepcopy(temp.position)
            agent.fit = copy.deepcopy(temp.fit)
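
For intuition, a minimal self-contained sketch of eq. 1-2 in plain NumPy, with bare arrays standing in for the `Agent` objects (all names here are illustrative, not the library's API):

import numpy as np

rng = np.random.default_rng()

position = rng.uniform(-5, 5, (3, 1))       # current agent's position
best_position = rng.uniform(-5, 5, (3, 1))  # best agent's position

# eq. 1: square root of the euclidean distance to the best agent
distance = np.sqrt(np.linalg.norm(position - best_position))

# eq. 2: step towards the best agent, scaled by a random alpha in [0, distance)
alpha = rng.uniform(0, distance)
position += alpha * np.sign(best_position - position)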
Example #2
    def _commensalism(
        self, agent_i: Agent, agent_j: Agent, best_agent: Agent, function: Function
    ) -> None:
        """Performs the commensalism operation.

        Args:
            agent_i: Selected `i` agent.
            agent_j: Selected `j` agent.
            best_agent: Global best agent.
            function: A Function object that will be used as the objective function.

        """

        # Copies a temporary agent from `i`
        a = copy.deepcopy(agent_i)

        # Generates a uniform random number
        r1 = r.generate_uniform_random_number(-1, 1)

        # Updates the agent's position (eq. 4)
        a.position += r1 * (best_agent.position - agent_j.position)

        # Checks its limits
        a.clip_by_bound()

        # Evaluates its new position
        a.fit = function(a.position)

        # If the new position is better than the current agent's position
        if a.fit < agent_i.fit:
            # Replaces the current agent's position and fitness
            agent_i.position = copy.deepcopy(a.position)
            agent_i.fit = copy.deepcopy(a.fit)
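
Eq. 4 itself reduces to a single vector update; a quick NumPy-only sketch (array names are hypothetical stand-ins for the agents):

import numpy as np

rng = np.random.default_rng()
pos_i, pos_j, pos_best = rng.uniform(-1, 1, (3, 4))

# eq. 4: agent `i` benefits from the interaction while `j` is unaffected
r1 = rng.uniform(-1, 1)
pos_i = pos_i + r1 * (pos_best - pos_j)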
Example #3
    def _parasitism(self, agent_i: Agent, agent_j: Agent, function: Function) -> None:
        """Performs the parasitism operation.

        Args:
            agent_i: Selected `i` agent.
            agent_j: Selected `j` agent.
            function: A Function object that will be used as the objective function.

        """

        # Creates a temporary parasite agent
        p = copy.deepcopy(agent_i)

        # Generates an integer random number
        r1 = r.generate_integer_random_number(0, agent_i.n_variables)

        # Updates its position on selected variable with a uniform random number
        p.position[r1] = r.generate_uniform_random_number(p.lb[r1], p.ub[r1])

        # Checks its limits
        p.clip_by_bound()

        # Evaluates its position
        p.fit = function(p.position)

        # If the new position is better than agent's `j` position
        if p.fit < agent_j.fit:
            # Replaces the agent's `j` position and fitness
            agent_j.position = copy.deepcopy(p.position)
            agent_j.fit = copy.deepcopy(p.fit)
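
A NumPy-only sketch of the parasite vector construction: a copy of the host with exactly one variable re-sampled uniformly inside its bounds (names are illustrative):

import numpy as np

rng = np.random.default_rng()
lb, ub = np.full(4, -1.0), np.full(4, 1.0)  # per-variable bounds

host = rng.uniform(lb, ub)
parasite = host.copy()

# Re-samples a single randomly chosen variable
r1 = rng.integers(0, parasite.shape[0])
parasite[r1] = rng.uniform(lb[r1], ub[r1])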
Example #4
    def _noticing(self, agent: Agent, function: Function,
                  alpha: float) -> None:
        """Performs the fox noticing procedure.

        Args:
            agent: Current agent.
            function: A Function object that will be used as the objective function.
            alpha: Scaling parameter.

        """

        # Defines the noticing parameter
        mu = r.generate_uniform_random_number()

        # If the noticing parameter is higher than 0.75
        if mu > 0.75:
            # If observation angle is different from zero
            if self.phi != 0:
                # Calculates fox observation radius (eq. 4 - top)
                radius = alpha * np.sin(self.phi) / self.phi

            # If observation angle equals zero
            else:
                # Calculates fox observation radius (eq. 4 - bottom)
                radius = self.theta

            # Generates `phi` values for all variables
            phi = r.generate_uniform_random_number(0, 2 * np.pi,
                                                   agent.n_variables)

            # Iterates through all decision variables
            for j in range(agent.n_variables):
                # Defines the total sum
                total_sum = 0

                # Iterates `k` from 0 to `j - 1`
                for k in range(j):
                    # Accumulates the sum
                    total_sum += np.sin(phi[k])

                # Updates the corresponding position (eq. 5)
                agent.position[j] += alpha * radius * (total_sum +
                                                       np.cos(phi[j]))

            # Checks agent's limits
            agent.clip_by_bound()

            # Re-evaluates its fitness
            agent.fit = function(agent.position)
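
The nested loop that builds the eq. 5 sums is quadratic in `n_variables`; a small sketch showing that the same partial sums can be precomputed with `np.cumsum` (plain NumPy, for illustration only):

import numpy as np

n_variables = 5
phi = np.random.uniform(0, 2 * np.pi, n_variables)

# Loop version, mirroring the code above: index j holds sum_{k < j} sin(phi[k])
loop_sums = np.array([np.sin(phi[:j]).sum() for j in range(n_variables)])

# Vectorized version: prepend a zero so index j sums over k < j
cum_sums = np.concatenate(([0.0], np.cumsum(np.sin(phi))[:-1]))

assert np.allclose(loop_sums, cum_sums)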
Example #5
    def _mutualism(
        self, agent_i: Agent, agent_j: Agent, best_agent: Agent, function: Function
    ) -> None:
        """Performs the mutualism operation.

        Args:
            agent_i: Selected `i` agent.
            agent_j: Selected `j` agent.
            best_agent: Global best agent.
            function: A Function object that will be used as the objective function.

        """

        # Copies temporary agents from `i` and `j`
        a = copy.deepcopy(agent_i)
        b = copy.deepcopy(agent_j)

        # Calculates the mutual vector (eq. 3)
        mutual_vector = (agent_i.position + agent_j.position) / 2

        # Calculates the beneficial factors
        BF_1, BF_2 = np.random.choice([1, 2], 2, replace=False)

        # Generates a uniform random number
        r1 = r.generate_uniform_random_number()

        # Re-calculates the new positions (eq. 1 and 2)
        a.position += r1 * (best_agent.position - mutual_vector * BF_1)
        b.position += r1 * (best_agent.position - mutual_vector * BF_2)

        # Checks their limits
        a.clip_by_bound()
        b.clip_by_bound()

        # Evaluates both agents
        a.fit = function(a.position)
        b.fit = function(b.position)

        # If new position is better than agent's `i` position
        if a.fit < agent_i.fit:
            # Replaces the agent's `i` position and fitness
            agent_i.position = copy.deepcopy(a.position)
            agent_i.fit = copy.deepcopy(a.fit)

        # If new position is better than agent's `j` position
        if b.fit < agent_j.fit:
            # Replaces the agent's `j` position and fitness
            agent_j.position = copy.deepcopy(b.position)
            agent_j.fit = copy.deepcopy(b.fit)
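
For context, here is a hedged sketch of how the three SOS operators above might be chained in a single update step; this driver and its signature are assumptions for illustration, not necessarily the library's actual `update` method:

    def update(self, space: Space, function: Function) -> None:
        """Hypothetical driver chaining the SOS operators (sketch only)."""

        for i, agent in enumerate(space.agents):
            # Picks a random partner `j != i` for the symbiotic interactions
            j = r.generate_integer_random_number(0, len(space.agents),
                                                 exclude_value=i)

            self._mutualism(agent, space.agents[j], space.best_agent, function)
            self._commensalism(agent, space.agents[j], space.best_agent, function)
            self._parasitism(agent, space.agents[j], function)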
Example #6
    def _create_agents(self, n_agents, n_variables, n_dimensions):
        """Creates and populates the agents array.

        Also defines a random best agent, only for initialization purposes.

        Args:
            n_agents (int): Number of agents.
            n_variables (int): Number of decision variables.
            n_dimensions (int): Dimension of search space.

        Returns:
            A list of agents and a best agent.

        """

        logger.debug('Running private method: create_agents().')

        # Creates a list of agents
        agents = []

        # Iterates through the number of agents
        for _ in range(n_agents):
            # Appends a new agent to the list
            agents.append(
                Agent(n_variables=n_variables, n_dimensions=n_dimensions))

        # Applies the first agent as the best one
        best_agent = copy.deepcopy(agents[0])

        return agents, best_agent
Example #7
    def _mutation(self, alpha: Agent) -> Agent:
        """Performs the mutation over an offspring (s. 3.4).

        Args:
            alpha: Offspring to be mutated.

        Returns:
            (Agent): The mutated offspring.

        """

        # Checks if the number of variables is greater than one
        if alpha.n_variables > 1:
            # Samples random integers
            r1 = r.generate_integer_random_number(0, alpha.n_variables)
            r2 = r.generate_integer_random_number(0,
                                                  alpha.n_variables,
                                                  exclude_value=r1)

            # Swaps the randomly selected variables; fancy indexing copies the
            # selected rows, so the swap is safe even when `position` is 2-D
            alpha.position[[r1, r2]] = alpha.position[[r2, r1]]

        return alpha
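
The fancy-indexed swap matters because with a 2-D `position` array, `position[r1]` is a row view; a plain NumPy snippet showing why the naive tuple swap is unsafe:

import numpy as np

p = np.arange(6.0).reshape(3, 2)
q = p.copy()

# Naive tuple swap assigns through views: row 0 is overwritten before it is
# read back, so rows 0 and 2 both end up holding the old row 2
p[0], p[2] = p[2], p[0]

# Fancy indexing copies the selected rows first, so the swap is correct
q[[0, 2]] = q[[2, 0]]

print(p)  # rows 0 and 2 are now identical
print(q)  # rows 0 and 2 genuinely swapped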
Example #8
    def _refract_wave(self, agent: Agent, best_agent: Agent,
                      function: Function, index: int) -> Tuple[float, float]:
        """Refract wave into a new position (eq. 8-9).

        Args:
            agent: Agent to be refracted.
            best_agent: Global best agent.
            function: A function object.
            index: Index of wave length.

        Returns:
            (Tuple[float, float]): New height and length values.

        """

        # Gathers current fitness
        current_fit = agent.fit

        # Iterates through all variables
        for j in range(agent.n_variables):
            # Calculates a mean value
            mean = (best_agent.position[j] + agent.position[j]) / 2

            # Calculates the standard deviation
            std = np.fabs(best_agent.position[j] - agent.position[j]) / 2

            # Generates a new position (eq. 8)
            agent.position[j] = r.generate_gaussian_random_number(mean, std)

        # Clips its limits
        agent.clip_by_bound()

        # Re-calculates its fitness
        agent.fit = function(agent.position)

        # Updates the new height to maximum height value
        new_height = self.h_max

        # Re-calculates the new length (eq. 9)
        new_length = self.length[index] * (current_fit /
                                           (agent.fit + c.EPSILON))

        return new_height, new_length
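
Eq. 8 is a per-variable gaussian re-sampling around the midpoint between the wave and the best wave; a self-contained NumPy sketch (array names are illustrative):

import numpy as np

rng = np.random.default_rng()
wave, best = rng.uniform(-1, 1, (2, 4))

# eq. 8: re-sample each variable around the midpoint between the two waves,
# with spread proportional to their per-variable separation
mean = (best + wave) / 2
std = np.fabs(best - wave) / 2
wave = rng.normal(mean, std)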
Example #9
    def _evaluate_location(self, agent: Agent, neighbour: Agent,
                           function: Function, index: int) -> None:
        """Evaluates a food source location and update its value if possible (eq. 2.2).

        Args:
            agent: An agent.
            neighbour: A neighbour agent.
            function: A function object.
            index: Index of trial.

        """

        # Generates a uniform random number
        r1 = r.generate_uniform_random_number(-1, 1)

        # Copies the actual food source location
        a = copy.deepcopy(agent)

        # Changes its location (eq. 2.2)
        a.position = agent.position + (agent.position -
                                       neighbour.position) * r1

        # Checks agent's limits
        a.clip_by_bound()

        # Evaluates its fitness
        a.fit = function(a.position)

        # Checks whether the fitness has improved
        if a.fit < agent.fit:
            # Resets the number of trials for this particular food source
            self.trial[index] = 0

            # Copies the new position and fitness
            agent.position = copy.deepcopy(a.position)
            agent.fit = copy.deepcopy(a.fit)

        # If not
        else:
            # Increases the trial counter
            self.trial[index] += 1
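
Downstream, the trial counter typically feeds ABC's abandonment rule; a hedged sketch of such a helper (the method name, the `n_trials` limit, and the use of `fill_with_uniform` are assumptions for illustration):

    def _abandon_food_source(self, agent: Agent, function: Function,
                             index: int) -> None:
        """Hypothetical helper (sketch): abandons exhausted food sources."""

        # Once a food source exceeds the trial limit, it is abandoned and
        # re-initialized uniformly inside its bounds
        if self.trial[index] > self.n_trials:
            self.trial[index] = 0
            agent.fill_with_uniform()
            agent.fit = function(agent.position)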
Example #10
    def _create_terminals(self):
        """Creates a list of terminals based on the Agent class.

        Returns:
            A list of terminals.

        """

        logger.debug('Running private method: create_terminals().')

        # Creates a list of terminals, which will be Agent instances
        self.terminals = [Agent(n_variables=self.n_variables, n_dimensions=self.n_dimensions)
                          for _ in range(self.n_terminals)]

        logger.debug('Terminals created.')
Example #11
    def _create_terminals(self):
        """Creates a list of terminals based on the Agent class.

        Returns:
            A list of terminals.

        """

        logger.debug('Running private method: create_terminals().')

        terminals = [Agent(n_variables=self.n_variables, n_dimensions=self.n_dimensions)
                     for _ in range(self.n_terminals)]

        logger.debug('Terminals created.')

        return terminals
Example #12
    def _create_agents(self):
        """Creates a list of agents and the best agent.

        Also defines a random best agent, only for initialization purposes.

        """

        logger.debug('Running private method: create_agents().')

        # Creates a list of agents
        self.agents = [
            Agent(self.n_variables, self.n_dimensions)
            for _ in range(self.n_agents)
        ]

        # Applies the first agent as the best one
        self.best_agent = copy.deepcopy(self.agents[0])
Example #13
    def __init__(self,
                 n_agents=1,
                 n_variables=1,
                 n_dimensions=1,
                 n_iterations=10):
        """Initialization method.

        Args:
            n_agents (int): Number of agents.
            n_variables (int): Number of decision variables.
            n_dimensions (int): Dimension of search space.
            n_iterations (int): Number of iterations.

        """

        # Number of agents
        self.n_agents = n_agents

        # Number of variables
        self.n_variables = n_variables

        # Number of dimensions
        self.n_dimensions = n_dimensions

        # Number of iterations
        self.n_iterations = n_iterations

        # List of agents
        self.agents = []

        # Best agent object
        self.best_agent = Agent()

        # Lower bounds
        self.lb = np.zeros(n_variables)

        # Upper bounds
        self.ub = np.ones(n_variables)

        # Indicates whether the space is built or not
        self.built = False
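
A minimal usage sketch of the initializer above, assuming the surrounding class is called `Space` (the name here is illustrative); a concrete subclass would normally create the agents and adjust the bounds before anything runs:

space = Space(n_agents=20, n_variables=2, n_dimensions=1, n_iterations=100)

print(len(space.agents))   # 0 until the space is built
print(space.lb, space.ub)  # default bounds: zeros and ones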
Example #14
from opytimizer.core.agent import Agent

# We need to define the number of decision variables
# and their dimensionality (single, complex, quaternion, octonion)
n_variables = 1
n_dimensions = 2

# Creates a new Agent
a = Agent(n_variables=n_variables, n_dimensions=n_dimensions)
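
Continuing the snippet, the agent's position is held as a NumPy array shaped `(n_variables, n_dimensions)`; checking the shape is a quick sanity test (the initial values depend on the library's initialization):

print(a.position.shape)  # -> (1, 2)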
Example #15
    def _update_position(
        self, agent: Agent, best_agent: Agent, function: Function, energy: float
    ) -> None:
        """Updates agent's position.

        Args:
            agent: An agent instance.
            best_agent: A best agent instance.
            function: A Function object that will be used as the objective function.
            energy: Current energy value.

        """

        # Makes a copy of agent
        a = copy.deepcopy(agent)

        # Calculates the signed distance between the agent and the best agent
        distance = agent.position - best_agent.position

        # Iterates through all decision variables
        for j in range(agent.n_variables):
            # Iterates through all dimensions
            for k in range(agent.n_dimensions):
                # If distance equals zero
                if distance[j][k] == 0:
                    # Updates the position by sampling a gaussian number
                    r1 = r.generate_gaussian_random_number(0, energy)
                    a.position[j][k] += self.direction[j][k] * r1

                # If distance is different from zero
                else:
                    # If distance is less than zero
                    if distance[j][k] < 0:
                        # Updates the position by adding an exponential number
                        a.position[j][k] += r.generate_exponential_random_number(
                            np.fabs(distance[j][k])
                        )

                    # If distance is greater than zero
                    else:
                        # Updates the position by subtracting an exponential number
                        a.position[j][k] -= r.generate_exponential_random_number(
                            distance[j][k]
                        )

        # Clips the temporary agent's limits
        a.clip_by_bound()

        # Evaluates its new position
        a.fit = function(a.position)

        # If temporary agent's fitness is better than current agent's fitness
        if a.fit < agent.fit:
            # Replaces position and fitness
            agent.position = copy.deepcopy(a.position)
            agent.fit = copy.deepcopy(a.fit)

            # Generates a random number
            r1 = r.generate_uniform_random_number()

            # If random number is smaller than probability of forking
            if r1 < self.p_fork:
                # Makes a new copy of current agent
                a = copy.deepcopy(agent)

                # Generates a random position
                a.fill_with_uniform()

                # Re-evaluates its position
                a.fit = function(a.position)

                # If new fitness is better than agent's fitness
                if a.fit < agent.fit:
                    # Replaces position and fitness
                    agent.position = copy.deepcopy(a.position)
                    agent.fit = copy.deepcopy(a.fit)
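
As a closing illustration, the per-element branching above can also be expressed vectorially in plain NumPy (array names are illustrative; the small epsilon only guards against a zero exponential scale):

import numpy as np

rng = np.random.default_rng()

distance = rng.uniform(-1, 1, (3, 2))  # agent position minus best position
direction = np.sign(rng.uniform(-1, 1, (3, 2)))
energy = 0.5

# Exponential steps scaled by |distance|, always directed towards the best
step = -np.sign(distance) * rng.exponential(np.fabs(distance) + 1e-10)

# Zero-distance entries fall back to a gaussian jitter along `direction`
jitter = direction * rng.normal(0, energy, distance.shape)

move = np.where(distance == 0, jitter, step)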