Example #1
    def _flight_mode(self, agent, neighbour, function):
        """Flies to a new location according to the flight mode (eq. 1).

        Args:
            agent (Agent): Current agent.
            neighbour (Agent): Selected neighbour.
            function (Function): A Function object that will be used as the objective function.

        Returns:
            The current or updated agent's position and fitness, along with a boolean that indicates
            whether the updated agent is better than the current one.

        """

        # Generates a random decision variable index
        j = r.generate_integer_random_number(0, agent.n_variables)

        # Generates a uniform random number
        r1 = r.generate_uniform_random_number(-1, 1)

        # Makes a deepcopy of current agent
        temp = copy.deepcopy(agent)

        # Updates temporary agent's position (eq. 1)
        temp.position[j] = agent.position[j] + (agent.position[j] -
                                                neighbour.position[j]) * r1

        # Clips its limits
        temp.clip_limits()

        # Re-calculates its fitness
        temp.fit = function(temp.position)

        # If its fitness is better than the current agent's
        if temp.fit < agent.fit:
            # Returns the temporary agent's position and fitness, along with True
            return temp.position, temp.fit, True

        # Returns the current agent's position and fitness, along with False
        return agent.position, agent.fit, False
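
Below is a minimal, library-free sketch of the same greedy move, assuming plain NumPy arrays and a toy sphere objective (the bounds and objective are illustrative, not part of the library):

import numpy as np

rng = np.random.default_rng()

def sphere(x):
    return float(np.sum(x ** 2))

agent_pos = rng.uniform(-5, 5, size=4)
neighbour_pos = rng.uniform(-5, 5, size=4)

# Perturbs a single decision variable towards/away from the neighbour (eq. 1)
j = rng.integers(0, agent_pos.size)
r1 = rng.uniform(-1, 1)
candidate = agent_pos.copy()
candidate[j] += (agent_pos[j] - neighbour_pos[j]) * r1
candidate = np.clip(candidate, -5, 5)

# Greedy acceptance: the move is kept only if it improves the fitness
if sphere(candidate) < sphere(agent_pos):
    agent_pos = candidate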
Example #2
    def _update_position(self, agents, best_agent, function):
        """It updates every star position and calculates their event's horizon cost (eq. 3).

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            function (Function): A function object.

        Returns:
            The cost of the event horizon.

        """

        # Event horizon's cost
        cost = 0

        # Iterates through all agents
        for agent in agents:
            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # Updates agent's position
            agent.position += r1 * (best_agent.position - agent.position)

            # Checks agents limits
            agent.clip_by_bound()

            # Evaluates agent
            agent.fit = function(agent.position)

            # If the new agent's fitness is better than the best agent's
            if agent.fit < best_agent.fit:
                # Swap their positions and their fitness
                agent.position, best_agent.position = best_agent.position, agent.position
                agent.fit, best_agent.fit = best_agent.fit, agent.fit

            # Increment the cost with current agent's fitness
            cost += agent.fit

        return cost
Example #3
    def _predation_phase(
        self,
        space: Space,
        n_crows: int,
        n_cuckoos: int,
        n_cats: int,
        iteration: int,
        n_iterations: int,
    ) -> None:
        """Performs the predation phase using the current number of cats.

        Args:
            space: Space containing agents and update-related information.
            n_crows: Number of crows.
            n_cuckoos: Number of cuckoos.
            n_cats: Number of cats.
            iteration: Current iteration.
            n_iterations: Maximum number of iterations.

        """

        # Gathers the cats
        cats = space.agents[n_crows + n_cuckoos:]

        # Calculates the constant
        constant = 2 - iteration / n_iterations

        # Iterates through all cats
        for i, cat in enumerate(cats):
            # Gets the corresponding cat's index
            idx = space.n_agents - n_cats + i

            # Updates the cat's velocity (eq. 13)
            r1 = r.generate_uniform_random_number()
            self.velocity[idx] += (r1 * constant *
                                   (space.best_agent.position - cat.position))

            # Updates the cat's position and clips its limits (eq. 14)
            cat.position += self.velocity[idx]
            cat.clip_by_bound()
Example #4
    def _generate_abandoned_nests(self, agents: List[Agent],
                                  prob: float) -> List[Agent]:
        """Generate a fraction of nests to be replaced.

        Args:
            agents: List of agents.
            prob: Probability of replacing worst nests.

        Returns:
            (List[Agent]): A new list of agents representing the nests to be replaced.

        """

        # Makes a temporary copy of current agents
        new_agents = copy.deepcopy(agents)

        # Generates a Bernoulli distribution array
        # It decides whether or not each nest will be replaced
        b = d.generate_bernoulli_distribution(1 - prob, len(agents))

        # Iterates through every new agent
        for j, new_agent in enumerate(new_agents):
            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # Then, we select two random nests
            k = r.generate_integer_random_number(0, len(agents) - 1)
            l = r.generate_integer_random_number(0,
                                                 len(agents) - 1,
                                                 exclude_value=k)

            # Calculates the random walk between these two nests
            step_size = r1 * (agents[k].position - agents[l].position)

            # Finally, we replace the old nest
            # Note it will only be replaced if 'b' is 1
            new_agent.position += step_size * b[j]

        return new_agents
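
A self-contained sketch of the abandoned-nests walk, with plain NumPy arrays standing in for Agent objects (population size, bounds and prob are illustrative):

import numpy as np

prob, n_nests, n_dims = 0.25, 6, 3
positions = np.random.uniform(-1, 1, size=(n_nests, n_dims))

# Bernoulli mask: each entry is 1 with probability 1 - prob
b = (np.random.uniform(size=n_nests) < 1 - prob).astype(float)

new_positions = positions.copy()
for j in range(n_nests):
    # Two distinct random nests define the random-walk direction
    k, l = np.random.choice(n_nests, size=2, replace=False)
    r1 = np.random.uniform()
    step_size = r1 * (positions[k] - positions[l])

    # The walk is applied only where the mask is 1
    new_positions[j] += step_size * b[j]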
Example #5
    def update(self, space: Space, iteration: int) -> None:
        """Wraps Pigeon-Inspired Optimization over all agents and variables.

        Args:
            space: Space containing agents and update-related information.
            iteration: Current iteration.

        """

        # Checks if the current iteration is smaller than the mapping operator's limit
        if iteration < self.n_c1:
            # Iterates through all agents
            for i, agent in enumerate(space.agents):
                # Updates current agent velocity (eq. 5)
                r1 = r.generate_uniform_random_number()
                self.velocity[i] = self.velocity[i] * np.exp(
                    -self.R *
                    (iteration + 1)) + r1 * (space.best_agent.position -
                                             agent.position)

                # Updates current agent position (eq. 6)
                agent.position += self.velocity[i]

        # Checks if the current iteration is smaller than the landmark operator's limit
        elif iteration < self.n_c2:
            # Calculates the number of possible pigeons (eq. 7)
            self.n_p = int(self.n_p / 2) + 1

            # Sorts agents according to their fitness
            space.agents.sort(key=lambda x: x.fit)

            # Calculates the center position
            center = self._calculate_center(space.agents[:self.n_p])

            # Iterates through all agents
            for agent in space.agents:
                # Updates current agent position
                agent.position = self._update_center_position(
                    agent.position, center)
Example #6
    def _calculate_lambda_i(self, n_sailfishes, n_sardines):
        """Calculates the lambda value (eq. 7).

        Args:
            n_sailfishes (int): Number of sailfishes.
            n_sardines (int): Number of sardines.

        Returns:
            Lambda value from current iteration.

        """

        # Calculates the prey density (eq. 8)
        PD = 1 - (n_sailfishes / (n_sailfishes + n_sardines))

        # Generates a uniform random number
        r1 = r.generate_uniform_random_number()

        # Calculates lambda
        lambda_i = 2 * r1 * PD - PD

        return lambda_i
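
For intuition, a standalone rendering of eq. 7-8 with hypothetical population counts; since r1 is uniform over [0, 1), lambda_i is uniform over [-PD, PD):

import numpy as np

n_sailfishes, n_sardines = 10, 30                      # hypothetical counts
PD = 1 - n_sailfishes / (n_sailfishes + n_sardines)    # prey density = 0.75

r1 = np.random.uniform()
lambda_i = 2 * r1 * PD - PD                            # uniform over [-0.75, 0.75)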
Example #7
    def _update_position(self, agents, best_agent, function):
        """It updates every star position and calculates their event's horizon cost.

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            function (Function): A function object.

        Returns:
            The cost of the event horizon.

        """

        # Event horizon's cost
        cost = 0

        # Iterates through all agents
        for agent in agents:
            # Generates a uniform random number
            r1 = r.generate_uniform_random_number(0, 1)

            # Updates agent's position according to Equation 3
            agent.position += r1 * (best_agent.position - agent.position)

            # Evaluates agent
            agent.fit = function.pointer(agent.position)

            # If the new agent's fitness is better than the best agent's
            if agent.fit < best_agent.fit:
                # Swap their positions
                agent.position, best_agent.position = best_agent.position, agent.position

                # Also swap their fitness
                agent.fit, best_agent.fit = best_agent.fit, agent.fit

            # Increment the cost with current agent's fitness
            cost += agent.fit

        return cost
Example #8
def generate_bernoulli_distribution(prob=0.0, size=1):
    """Generates a Bernoulli distribution based on an input probability.

    Args:
        prob (float): Probability of drawing a 1.
        size (int): Size of array.

    Returns:
        Bernoulli distribution n-dimensional array.

    """

    # Creates the Bernoulli array
    bernoulli_array = np.zeros(size)

    # Generates a random number
    r1 = r.generate_uniform_random_number(0, 1, size)

    # Masks the array based on input probability
    bernoulli_array[r1 < prob] = 1

    return bernoulli_array
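
The masking trick can be reproduced with plain NumPy; a short sketch (prob and size are arbitrary):

import numpy as np

prob, size = 0.25, 10
r1 = np.random.uniform(0, 1, size)

# Entries where the uniform draw falls below prob become 1
bernoulli_array = np.zeros(size)
bernoulli_array[r1 < prob] = 1

# Equivalent one-liner using a boolean cast
same_idea = (np.random.uniform(size=size) < prob).astype(float)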
Example #9
    def _ocean_current(self, agents, best_agent):
        """Calculates the ocean current (eq. 9).

        Args:
            agents (list): List of agents.
            best_agent (Agent): Best agent.

        Returns:
            A trend value for the ocean current.

        """

        # Generates a uniform random number
        r1 = r.generate_uniform_random_number()

        # Calculates the per-dimension mean location of all jellyfishes
        u = np.mean([agent.position for agent in agents], axis=0)

        # Calculates the ocean current (eq. 9)
        trend = best_agent.position - self.beta * r1 * u

        return trend
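
A self-contained sketch of eq. 9 with raw NumPy positions; beta and the toy sphere fitness are illustrative assumptions:

import numpy as np

beta = 3.0                                             # distribution coefficient (assumed)
positions = np.random.uniform(-1, 1, size=(20, 2))     # 20 jellyfishes in 2-D

# Best agent under a toy sphere objective
best_position = positions[np.argmin(np.sum(positions ** 2, axis=1))]

r1 = np.random.uniform()
u = np.mean(positions, axis=0)                         # per-dimension mean location
trend = best_position - beta * r1 * u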
Example #10
    def _update(self, agents, best_agent, function, n_iterations):
        """Method that wraps Firefly Algorithm over all agents and variables.

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            function (Function): A function object.
            n_iterations (int): Maximum number of iterations.

        """

        # Calculates the current iteration delta
        delta = 1 - ((10e-4) / 0.9)**(1 / n_iterations)

        # Applies the update to the alpha parameter
        self.alpha *= (1 - delta)

        # We copy a temporary list for iterating purposes
        temp_agents = copy.deepcopy(agents)

        # Iterates through 'i' agents
        for agent in agents:
            # Iterates through 'j' agents
            for temp in temp_agents:
                # Distance is calculated by the Euclidean distance between 'i' and 'j' (Equation 8)
                distance = g.euclidean_distance(agent.position, temp.position)

                # If 'i' fit is bigger than 'j' fit
                if agent.fit > temp.fit:
                    # Recalculates the attractiveness (Equation 6)
                    beta = self.beta * np.exp(-self.gamma * distance)

                    # Generates a uniform random number
                    r1 = r.generate_uniform_random_number()

                    # Updates agent's position (Equation 9)
                    agent.position = beta * (temp.position + agent.position) + \
                        self.alpha * (r1 - 0.5)
Example #11
    def _update_sailfish(self, agent, best_agent, best_sardine, lambda_i):
        """Updates the sailfish's position (eq. 6).

        Args:
            agent (Agent): Current agent.
            best_agent (Agent): Best sailfish.
            best_sardine (Agent): Best sardine.
            lambda_i (float): Lambda value.

        Returns:
            An updated position.

        """

        # Generates a random uniform number
        r1 = r.generate_uniform_random_number()

        # Calculates the new position
        new_position = best_sardine.position - lambda_i * \
            (r1 * (best_agent.position - best_sardine.position) / 2 - agent.position)

        return new_position
Example #12
    def _initialize_agents(self):
        """Initialize agents' position array with uniform random numbers.

        """

        logger.debug('Running private method: initialize_agents().')

        # Iterates through all agents
        for agent in self.agents:
            # Iterates through all decision variables
            for j in range(agent.n_variables):
                # For each decision variable, we generate uniform random numbers
                agent.position[j] = r.generate_uniform_random_number(
                    size=agent.n_dimensions)

                # Sets the agent's lower bound
                agent.lb[j] = 0

                # And also the upper bound
                agent.ub[j] = 1

        logger.debug('Agents initialized.')
Example #13
    def _initialize_agents(self):
        """Initialize agents' position array with grid values.

        """

        logger.debug('Running private method: initialize_agents().')

        # Iterates through all agents and grid options
        for agent, grid in zip(self.agents, self.grid):
            # Iterates through all decision variables
            for j, (lb, ub, g) in enumerate(zip(self.lb, self.ub, grid)):
                # For each decision variable, we use the grid value
                # (a uniform draw over [g, g] is just the constant g)
                agent.position[j] = r.generate_uniform_random_number(
                    g, g, agent.n_dimensions)

                # Sets the agent's lower bound
                agent.lb[j] = lb

                # And also the upper bound
                agent.ub[j] = ub

        logger.debug('Agents initialized.')
Example #14
    def _initialize_agents(self):
        """Initialize agents' position array with uniform random numbers.

        """

        logger.debug('Running private method: initialize_agents().')

        # Iterates through all agents
        for agent in self.agents:
            # Iterates through all decision variables
            for j, (lb, ub) in enumerate(zip(self.lb, self.ub)):
                # For each decision variable, we generate uniform random numbers
                agent.position[j] = r.generate_uniform_random_number(
                    lb, ub, size=agent.n_dimensions)

                # Sets the agent's lower bound
                agent.lb[j] = lb

                # And also the upper bound
                agent.ub[j] = ub

        logger.debug('Agents initialized.')
Example #15
    def _update_velocity(self, position, best_position, velocity, iteration):
        """Updates a particle velocity (eq. 5).

        Args:
            position (np.array): Agent's current position.
            best_position (np.array): Global best position.
            velocity (np.array): Agent's current velocity.
            iteration (int): Current iteration.

        Returns:
            A new velocity.

        """

        # Generates a random number
        r1 = r.generate_uniform_random_number()

        # Calculates new velocity
        new_velocity = velocity * np.exp(
            -self.R * (iteration + 1)) + r1 * (best_position - position)

        return new_velocity
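
The same update as a standalone NumPy snippet; R and all inputs below are assumed values for illustration:

import numpy as np

R = 0.2                                                # map-and-compass factor (assumed)
position = np.array([0.5, -1.0])
best_position = np.zeros(2)
velocity = np.array([0.1, 0.1])
iteration = 3

r1 = np.random.uniform()

# The old velocity decays exponentially over time, while r1 scales the pull
# towards the global best position
new_velocity = velocity * np.exp(-R * (iteration + 1)) + r1 * (best_position - position)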
Example #16
    def _separating_operator(self, agents, n_ci):
        """Performs the separating operator.

        Args:
            agents (list): List of agents.
            n_ci (int): Number of agents per clan.

        """

        # Iterates through every clan
        for i in range(self.n_clans):
            # Gets the agents for the specified clan
            clan_agents = self._get_agents_from_clan(agents, i, n_ci)

            # Gathers the worst agent in clan
            worst = clan_agents[-1]

            # Generates a new position for the worst agent in clan (eq. 4)
            for j, (lb, ub) in enumerate(zip(worst.lb, worst.ub)):
                # For each decision variable, we generate uniform random numbers
                worst.position[j] = r.generate_uniform_random_number(
                    lb, ub, size=worst.n_dimensions)
Example #17
    def _neighbour_motion(self, agents, idx, iteration, n_iterations, motion):
        """Performs the motion induced by other krill individuals (eq. 2).

        Args:
            agents (list): List of agents.
            idx (int): Selected agent.
            iteration (int): Current iteration value.
            n_iterations (int): Maximum number of iterations.
            motion (np.array): Array of motions.

        Returns:
            The krill's neighbour motion.

        """

        # Calculates the sensing distance (eq. 7)
        sensing_distance, eucl_distance = self._sensing_distance(agents, idx)

        # Gathers the neighbours
        neighbours = self._get_neighbours(agents, idx, sensing_distance,
                                          eucl_distance)

        # Calculates the local alpha (eq. 4)
        alpha_l = self._local_alpha(agents[idx], agents[-1], agents[0],
                                    neighbours)

        # Calculates the effective coefficient (eq. 9)
        C_best = 2 * (r.generate_uniform_random_number() +
                      iteration / n_iterations)

        # Calculates the target alpha (eq. 8)
        alpha_t = self._target_alpha(agents[idx], agents[-1], agents[0],
                                     C_best)

        # Calculates the neighbour motion (eq. 2)
        neighbour_motion = self.N_max * (alpha_l + alpha_t) + self.w_n * motion

        return neighbour_motion
Example #18
    def update(self, space):
        """Wraps Butterfly Optimization Algorithm over all agents and variables.

        Args:
            space (Space): Space containing agents and update-related information.

        """

        # Iterates through all agents
        for i, agent in enumerate(space.agents):
            # Calculates fragrance for current agent (eq. 1)
            self.fragrance[i] = self.c * agent.fit**self.a

        # Iterates through all agents
        for i, agent in enumerate(space.agents):
            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # If random number is smaller than switch probability
            if r1 < self.p:
                # Moves current agent towards the best one (eq. 2)
                agent.position = self._best_movement(agent.position,
                                                     space.best_agent.position,
                                                     self.fragrance[i], r1)

            # If random number is bigger than switch probability
            else:
                # Generates `j` and `k` indexes
                j = r.generate_integer_random_number(0, len(space.agents))
                k = r.generate_integer_random_number(0,
                                                     len(space.agents),
                                                     exclude_value=j)

                # Moves current agent using a local movement (eq. 3)
                agent.position = self._local_movement(agent.position,
                                                      space.agents[j].position,
                                                      space.agents[k].position,
                                                      self.fragrance[i], r1)
Example #19
    def _update_velocity(self, position, best_position, local_position, selected_position, velocity):
        """Updates a single particle velocity (eq. 8).

        Args:
            position (np.array): Agent's current position.
            best_position (np.array): Global best position.
            local_position (np.array): Agent's local best position.
            selected_position (np.array): Selected agent's position.
            velocity (np.array): Agent's current velocity.

        Returns:
            A new velocity based on self-adaptive proposal.

        """

        # Generates a random number
        r1 = r.generate_uniform_random_number()

        # Calculates new velocity
        new_velocity = self.w * np.fabs(selected_position - local_position) * np.sign(velocity) + r1 * (
            local_position - position) + (1 - r1) * (best_position - position)

        return new_velocity
Example #20
def weighted_wheel_selection(weights: List[float]) -> int:
    """Selects an individual from a weight-based roulette.

    Args:
        weights: List of individuals weights.

    Returns:
        (int): Weight-based roulette individual.

    """

    # Computes the cumulative sum
    cumulative_sum = np.cumsum(weights)

    # Defines the selection probability
    prob = r.generate_uniform_random_number() * cumulative_sum[-1]

    for i, c_sum in enumerate(cumulative_sum):
        # If individual's cumulative sum is bigger than selection probability
        if c_sum > prob:
            return i

    return None
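
A usage sketch with toy weights; np.searchsorted reproduces the explicit loop, returning the first index whose cumulative sum exceeds the draw:

import numpy as np

weights = [1.0, 3.0, 6.0]                  # index 2 is selected ~60% of the time
cumulative_sum = np.cumsum(weights)        # [1., 4., 10.]
prob = np.random.uniform() * cumulative_sum[-1]

# First index whose cumulative sum is strictly greater than prob
selected = int(np.searchsorted(cumulative_sum, prob, side='right'))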
Example #21
    def _procreating(self, x1, x2):
        """Procreates a pair of parents into offsprings (eq. 1).

        Args:
            x1 (Agent): Father to produce the offspring.
            x2 (Agent): Mother to produce the offspring.

        Returns:
            Two offspring generated from the parents.

        """

        # Makes a deep copy of father and mother
        y1, y2 = copy.deepcopy(x1), copy.deepcopy(x2)

        # Generates a uniform random number
        alpha = r.generate_uniform_random_number()

        # Calculates first and second crossovers
        y1.position = alpha * x1.position + (1 - alpha) * x2.position
        y2.position = alpha * x2.position + (1 - alpha) * x1.position

        return y1, y2
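
The update is a standard arithmetic (blend) crossover; a standalone sketch on raw NumPy vectors:

import numpy as np

x1, x2 = np.array([1.0, 2.0]), np.array([3.0, 0.0])
alpha = np.random.uniform()

# The two children are complementary convex combinations of the parents
y1 = alpha * x1 + (1 - alpha) * x2
y2 = alpha * x2 + (1 - alpha) * x1

assert np.allclose(y1 + y2, x1 + x2)       # the crossover preserves the parents' sum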
Example #22
    def _update_river(
        self, agents: List[Agent], best_agent: Agent, function: Function
    ) -> None:
        """Updates every river position (eq. 9).

        Args:
            agents: List of agents.
            best_agent: Global best agent.
            function: A Function object that will be used as the objective function.

        """

        # For every river, ignoring the sea
        for i in range(1, self.nsr):
            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # Updates river position
            agents[i].position += r1 * 2 * (best_agent.position - agents[i].position)

            # Clips its limits and recalculates its fitness
            agents[i].clip_by_bound()
            agents[i].fit = function(agents[i].position)
Example #23
def weighted_wheel_selection(weights):
    """Selects an individual from a weight-based roulette.

    Args:
        weights (list): List of individuals weights.

    Returns:
        A roulette selected individual.

    """

    # Computes the cumulative sum
    cumulative_sum = np.cumsum(weights)

    # Defines the selection probability
    prob = r.generate_uniform_random_number() * cumulative_sum[-1]

    # For every individual
    for i, c_sum in enumerate(cumulative_sum):
        # If individual's cumulative sum is bigger than selection probability
        if c_sum > prob:
            # Returns the individual
            return i
Example #24
    def _initialize_chaotic_map(self, agents):
        """Initializes a set of agents using a logistic chaotic map.

        Args:
            agents (list): List of agents.

        """

        # Iterates through all agents
        for i, agent in enumerate(agents):
            # If it is the first agent
            if i == 0:
                # Iterates through all decision variables
                for j in range(agent.n_variables):
                    # Calculates its position with a random uniform number
                    agent.position[j] = r.generate_uniform_random_number(size=agent.n_dimensions)

            # If it is not the first agent
            else:
                # Iterates through all decision variables
                for j in range(agent.n_variables):
                    # Calculates its position using logistic chaotic map (eq. 18)
                    agent.position[j] = self.eta * agents[i-1].position[j] * (1 - agents[i-1].position[j])
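
Eq. 18 is the classic logistic map; a sketch of the chained initialization for a single variable, assuming eta = 4 (a common fully-chaotic choice, not necessarily the library's default):

import numpy as np

eta, n_agents = 4.0, 5
x = np.empty(n_agents)
x[0] = np.random.uniform()                 # the first agent is seeded at random

for i in range(1, n_agents):
    # Each agent follows the logistic map of its predecessor (eq. 18)
    x[i] = eta * x[i - 1] * (1 - x[i - 1])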
Example #25
    def _update(self, agents, best_agent, fragrance):
        """Method that wraps global and local pollination updates over all agents and variables.

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            fragrance (np.array): Array of fragrances.

        """

        # Iterates through all agents
        for i, agent in enumerate(agents):
            # Calculates fragrance for current agent (eq. 1)
            fragrance[i] = self.c * agent.fit ** self.a

        # Iterates through all agents
        for i, agent in enumerate(agents):
            # Generates a uniform random number
            r1 = r.generate_uniform_random_number()

            # If random number is smaller than switch probability
            if r1 < self.p:
                # Moves current agent towards the best one (eq. 2)
                agent.position = self._best_movement(
                    agent.position, best_agent.position, fragrance[i], r1)

            # If random number is bigger than switch probability
            else:
                # Generates a `j` index
                j = r.generate_integer_random_number(0, len(agents))

                # Generates a `k` index
                k = r.generate_integer_random_number(0, len(agents), exclude_value=j)

                # Moves current agent using a local movement (eq. 3)
                agent.position = self._local_movement(
                    agent.position, agents[j].position, agents[k].position, fragrance[i], r1)
Example #26
    def _physical_diffusion(self, n_variables, n_dimensions, iteration,
                            n_iterations):
        """Performs the physical diffusion of individual krills (eq. 16-17).

        Args:
            n_variables (int): Number of decision variables.
            n_dimensions (int): Number of dimensions.
            iteration (int): Current iteration value.
            n_iterations (int): Maximum number of iterations.

        Returns:
            The physical diffusion.

        """

        # Generates uniform random numbers
        r1 = r.generate_uniform_random_number(-1,
                                              1,
                                              size=(n_variables, n_dimensions))

        # Calculates the physical diffusion (eq. 17)
        physical_diffusion = self.D_max * (1 - iteration / n_iterations) * r1

        return physical_diffusion
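
A standalone rendering of eq. 16-17; D_max and the loop counters are assumed values:

import numpy as np

D_max = 0.005                              # maximum diffusion speed (assumed)
n_variables, n_dimensions = 4, 1
iteration, n_iterations = 10, 100

r1 = np.random.uniform(-1, 1, size=(n_variables, n_dimensions))

# The random walk shrinks linearly as the search progresses (eq. 17)
physical_diffusion = D_max * (1 - iteration / n_iterations) * r1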
Example #27
    def _calculate_force(self, agents, mass, gravity):
        """Calculates agents' force (eq. 7-9).

        Args:
            agents (list): List of agents.
            mass (np.array): An array of agents' mass.
            gravity (float): Current gravity value.

        Returns:
            The attraction force between all agents.

        """

        # Calculates the force
        force = [[gravity * (mass[i] * mass[j]) / (g.euclidean_distance(agents[i].position, agents[j].position) + c.EPSILON)
                  * (agents[j].position - agents[i].position) for j in range(len(agents))] for i in range(len(agents))]

        # Transforms the force into an array
        force = np.asarray(force)

        # Applies a stochastic trait to the force
        force = np.sum(r.generate_uniform_random_number() * force, axis=1)

        return force
Example #28
    def update(self, space, n_iterations):
        """Wraps Firefly Algorithm over all agents and variables (eq. 3-9).

        Args:
            space (Space): Space containing agents and update-related information.
            n_iterations (int): Maximum number of iterations.

        """

        # Calculates current iteration delta
        delta = 1 - ((10e-4) / 0.9)**(1 / n_iterations)

        # Applies update to alpha parameter
        self.alpha *= (1 - delta)

        # We copy a temporary list for iterating purposes
        temp_agents = copy.deepcopy(space.agents)

        # Iterates through 'i' agents
        for agent in space.agents:
            # Iterates through 'j' agents
            for temp in temp_agents:
                # Distance is calculated by the Euclidean distance between 'i' and 'j' (eq. 8)
                distance = g.euclidean_distance(agent.position, temp.position)

                # If 'i' fit is bigger than 'j' fit
                if agent.fit > temp.fit:
                    # Recalculates the attractiveness (eq. 6)
                    beta = self.beta * np.exp(-self.gamma * distance)

                    # Generates a uniform random number
                    r1 = r.generate_uniform_random_number()

                    # Updates agent's position (eq. 9)
                    agent.position = beta * (temp.position + agent.position
                                             ) + self.alpha * (r1 - 0.5)
Example #29
    def _event_horizon(self, agents, best_agent, cost):
        """It calculates the stars' crossing an event horizon (eq. 4).

        Args:
            agents (list): List of agents.
            best_agent (Agent): Global best agent.
            cost (float): The event's horizon cost.

        """

        # Calculates the radius of the event horizon
        radius = best_agent.fit / max(cost, constants.EPSILON)

        # Iterates through every agent
        for agent in agents:
            # Calculates distance between star and black hole
            distance = np.linalg.norm(best_agent.position - agent.position)

            # If distance is smaller than horizon's radius
            if distance < radius:
                # Generates a new random star
                for j, (lb, ub) in enumerate(zip(agent.lb, agent.ub)):
                    # For each decision variable, we generate uniform random numbers
                    agent.position[j] = r.generate_uniform_random_number(lb, ub, size=agent.n_dimensions)
Example #30
    def _omnivore_consumption(self, agent, producer, consumer, C):
        """Performs the consumption update by an omnivore (eq. 8)

        Args:
            agent (Agent): Current agent.
            producer (Agent): Producer agent.
            consumer (Agent): Consumer agent.
            C (float): Consumption factor.

        Returns:
            An updated consumption by an omnivore.

        """

        # Makes a deep copy of agent
        a = copy.deepcopy(agent)

        # Generates the second random number
        r2 = r.generate_uniform_random_number()

        # Updates its position
        a.position += C * r2 * (a.position - producer.position) + (1 - r2) * (a.position - consumer.position)

        return a
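
A self-contained sketch of the omnivore update on raw NumPy vectors; C and the positions are illustrative:

import numpy as np

C = 0.5                                    # consumption factor (assumed)
r2 = np.random.uniform()
agent = np.array([1.0, 1.0])
producer = np.array([0.0, 0.5])
consumer = np.array([2.0, 1.5])

# The omnivore feeds on both the producer and a consumer, weighted by r2 (eq. 8)
new_position = agent + C * r2 * (agent - producer) + (1 - r2) * (agent - consumer)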