Example #1
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Instantiating an array of local positions
        local_position = np.zeros(
            (space.n_agents, space.n_variables, space.n_dimensions))

        # And also an array of velocities
        velocity = np.zeros(
            (space.n_agents, space.n_variables, space.n_dimensions))

        # Initial search space evaluation
        self._evaluate(space, function, local_position, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Initializing a progress bar
        with tqdm(total=space.n_iterations) as b:
            # Iterates through all iterations
            for t in range(space.n_iterations):
                logger.file(f'Iteration {t+1}/{space.n_iterations}')

                # Updating agents
                self._update(space.agents, space.best_agent, local_position,
                             velocity)

                # Checking if agents meet the bounds limits
                space.clip_limits()

                # After the update, we need to re-evaluate the search space
                self._evaluate(space,
                               function,
                               local_position,
                               hook=pre_evaluation)

                # Every iteration, we need to dump agents, local positions and best agent
                history.dump(agents=space.agents,
                             local=local_position,
                             best_agent=space.best_agent)

                # Updates the `tqdm` status
                b.set_postfix(fitness=space.best_agent.fit)
                b.update()

                logger.file(f'Fitness: {space.best_agent.fit}')
                logger.file(f'Position: {space.best_agent.position}')

        return history
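
The snippet above never shows how `run` is invoked. Below is a minimal driver sketch written against the same module aliases the tests further down use (`search`, `function`, `pso` in Example #24); the keyword names, bounds, and sphere function are illustrative assumptions that merely match the attributes `run` reads (`n_agents`, `n_variables`, `n_iterations`, `lb`, `ub`):

# Keyword names and values below are assumptions for illustration only
space = search.SearchSpace(n_agents=10, n_variables=2, n_iterations=20,
                           lower_bound=[-5, -5], upper_bound=[5, 5])
func = function.Function(lambda x: (x ** 2).sum())  # illustrative sphere function
optimizer = pso.PSO()

# `run` performs the loop above and returns the dumped History
hist = optimizer.run(space, func, store_best_only=False, pre_evaluation=None)
print(hist.best_agent[-1])  # last iteration's best (assumes History keeps per-iteration lists)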
Example #2
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function, hook=pre_evaluation)

            # Every iteration, we need to dump agents and best agent
            history.dump(agents=space.agents, best_agent=space.best_agent)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
Example #3
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Instantiating an array of means
        mean = np.zeros(space.n_variables)

        # Instantiating an array of standard deviations
        std = np.zeros(space.n_variables)

        # Iterates through all decision variables
        for j, (lb, ub) in enumerate(zip(space.lb, space.ub)):
            # Calculates the initial mean
            mean[j] = r.generate_uniform_random_number(lb, ub)

            # Calculates the initial standard deviation
            std[j] = ub - lb

        # Initial search space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Initializing a progress bar
        with tqdm(total=space.n_iterations) as b:
            # Iterates through all iterations
            for t in range(space.n_iterations):
                logger.file(f'Iteration {t+1}/{space.n_iterations}')

                # Updating agents
                self._update(space.agents, function, mean, std)

                # Checking if agents meet the bounds limits
                space.clip_limits()

                # After the update, we need to re-evaluate the search space
                self._evaluate(space, function, hook=pre_evaluation)

                # Every iteration, we need to dump agents and best agent
                history.dump(agents=space.agents, best_agent=space.best_agent)

                # Updates the `tqdm` status
                b.set_postfix(fitness=space.best_agent.fit)
                b.update()

                logger.file(f'Fitness: {space.best_agent.fit}')
                logger.file(f'Position: {space.best_agent.position}')

        return history
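
The per-variable loop above draws one uniform mean inside each variable's bounds and uses the full range as the initial standard deviation. A vectorized equivalent follows, as a sketch that swaps the library's `r.generate_uniform_random_number` for plain NumPy (an assumed drop-in for this case):

import numpy as np

# Illustrative bounds standing in for space.lb / space.ub
lb = np.array([-5.0, 0.0, -1.0])
ub = np.array([5.0, 10.0, 1.0])

mean = np.random.uniform(lb, ub)  # one uniform draw per decision variable
std = ub - lb                     # initial spread covers the full range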
Example #4
    def run(self, space, function):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Instantiating an array of local positions
        local_position = np.zeros(
            (space.n_agents, space.n_variables, space.n_dimensions))

        # An array of velocities
        velocity = np.zeros(
            (space.n_agents, space.n_variables, space.n_dimensions))

        # And an array of each particle's best fitness
        fitness = np.zeros(space.n_agents)

        # Initial search space evaluation
        self._evaluate(space, function, local_position)

        # Before starting the optimization process,
        # we need to copy the fitness values to a temporary array
        for i, agent in enumerate(space.agents):
            # Copies the agent's fitness value
            fitness[i] = agent.fit

        # We will define a History object for further dumping
        history = h.History()

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # Updating agents
            self._update(space.agents, space.best_agent, local_position,
                         velocity)

            # Checking if agents meet the bounds limits
            space.check_bound_limits(space.agents, space.lb, space.ub)

            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function, local_position)

            # Computing particle's success and updating inertia weight
            self._compute_success(space.agents, fitness)

            # Every iteration, we need to dump the current space agents
            history.dump(space.agents, space.best_agent)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
Example #5
    def run(self,
            space,
            function,
            store_best_only=False,
            pre_evaluation_hook=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (boolean): If True, only the best agent of each iteration is stored in History.
            pre_evaluation_hook (function): A function that receives the optimizer, space and function
                and returns None. This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Creates an array of velocities
        velocity = np.zeros(
            (space.n_agents, space.n_variables, space.n_dimensions))

        # Check if there is a pre-evaluation hook
        if pre_evaluation_hook:
            # Applies the hook
            pre_evaluation_hook(self, space, function)

        # Initial search space evaluation
        self._evaluate(space, function)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # Updating agents
            self._update(space.agents, function, velocity, t)

            # Checking if agents meet the bounds limits
            space.check_limits()

            # Check if there is a pre-evaluation hook
            if pre_evaluation_hook:
                # Applies the hook
                pre_evaluation_hook(self, space, function)

            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function)

            # Every iteration, we need to dump agents and best agent
            history.dump(agents=space.agents, best_agent=space.best_agent)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
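
The docstring above pins down the hook contract: it receives the optimizer, the space and the function, and returns None. A minimal sketch of a conforming hook (the logging body is an illustrative assumption):

def logging_hook(optimizer, space, function):
    # Runs immediately before each evaluation pass; must return None
    print(f'Evaluating {len(space.agents)} agents, best so far: {space.best_agent.fit}')

# Passed through the keyword shown in the signature above:
# history = optimizer.run(space, func, pre_evaluation_hook=logging_hook)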
Example #6
def test_history_load():
    new_history = history.History()

    new_history.load('models/test.pkl')

    assert len(new_history.agents) > 0

    print(new_history)
Example #7
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Initial search space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # Calculating the flow's intensity (eq. 6)
        flows = self._flow_intensity(space.agents)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Initializing a progress bar
        with tqdm(total=space.n_iterations) as b:
            # Iterates through all iterations
            for t in range(space.n_iterations):
                logger.file(f'Iteration {t+1}/{space.n_iterations}')

                # Updating agents
                self._update(space.agents, space.best_agent, flows)

                # Checking if agents meet the bounds limits
                space.clip_limits()

                # After the update, we need to re-evaluate the search space
                self._evaluate(space, function, hook=pre_evaluation)

                # Sorting agents
                space.agents.sort(key=lambda x: x.fit)

                # Performs the raining process (eq. 12)
                self._raining_process(space.agents, space.best_agent)

                # Updates the evaporation condition
                self.d_max -= (self.d_max / space.n_iterations)

                # Every iteration, we need to dump agents and best agent
                history.dump(agents=space.agents, best_agent=space.best_agent)

                # Updates the `tqdm` status
                b.set_postfix(fitness=space.best_agent.fit)
                b.update()

                logger.file(f'Fitness: {space.best_agent.fit}')
                logger.file(f'Position: {space.best_agent.position}')

        return history
Example #8
def test_history_save_agents_setter():
    new_history = history.History()

    try:
        new_history.save_agents = "a"
    except:
        new_history.save_agents = True

    assert new_history.save_agents is True
Example #9
def test_history_store_best_only_setter():
    new_history = history.History()

    try:
        new_history.store_best_only = 'a'
    except:
        new_history.store_best_only = True

    assert new_history.store_best_only is True
Example #10
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Instantiating an array of frequencies
        frequency = r.generate_uniform_random_number(self.f_min, self.f_max,
                                                     space.n_agents)

        # Instantiating an array of velocities
        velocity = np.zeros(
            (space.n_agents, space.n_variables, space.n_dimensions))

        # And also an array of loudness values
        loudness = r.generate_uniform_random_number(0, self.A, space.n_agents)

        # Finally, an array of pulse rates
        pulse_rate = r.generate_uniform_random_number(0, self.r,
                                                      space.n_agents)

        # Initial search space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # Updating agents
            self._update(space.agents, space.best_agent, function, t,
                         frequency, velocity, loudness, pulse_rate)

            # Checking if agents meet the bounds limits
            space.clip_limits()

            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function, hook=pre_evaluation)

            # Every iteration, we need to dump agents and best agent
            history.dump(agents=space.agents, best_agent=space.best_agent)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
Example #11
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Calculates the number of possible children
        n_children = int(space.n_agents * self.child_ratio)

        # Instantiate an array of strategies
        strategy = np.zeros(
            (n_children, space.n_variables, space.n_dimensions))

        # Iterate through all possible children
        for i in range(n_children):
            # For every decision variable
            for j, (lb, ub) in enumerate(zip(space.lb, space.ub)):
                # Initializes the strategy array with the proposed ES distance
                strategy[i][j] = 0.05 * r.generate_uniform_random_number(
                    0, ub - lb, size=space.agents[i].n_dimensions)

        # Initial search space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # Updating agents
            space.agents = self._update(space.agents, space.n_agents, function,
                                        n_children, strategy)

            # Checking if agents meet the bounds limits
            space.clip_limits()

            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function, hook=pre_evaluation)

            # Every iteration, we need to dump agents and best agent
            history.dump(agents=space.agents, best_agent=space.best_agent)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
Example #12
def test_history_dump():
    new_history = history.History()

    agents = [agent.Agent(n_variables=2, n_dimensions=1) for _ in range(5)]

    new_history.dump(agents=agents, best_agent=agents[4], value=0)

    assert len(new_history.agents) > 0
    assert len(new_history.best_agent) > 0
    assert new_history.value[0] == 0
Example #13
def test_history_save():
    new_history = history.History()

    agents = [agent.Agent(n_variables=2, n_dimensions=1) for _ in range(5)]

    new_history.dump(agents=agents, best_agent=agents[0])

    new_history.save('models/test.pkl')

    assert os.path.isfile('./models/test.pkl')
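
Together with the load test in Example #6, this save test implies a simple persistence round trip; the sketch below is composed only of calls those two tests already make:

new_history = history.History()
agents = [agent.Agent(n_variables=2, n_dimensions=1) for _ in range(5)]
new_history.dump(agents=agents, best_agent=agents[0])
new_history.save('models/test.pkl')

# A fresh History can be rehydrated from the same file
restored = history.History()
restored.load('models/test.pkl')
assert len(restored.agents) > 0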
Example #14
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Initial search space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Initializing a progress bar
        with tqdm(total=space.n_iterations) as b:
            # Iterates through all iterations
            for t in range(space.n_iterations):
                logger.file(f'Iteration {t+1}/{space.n_iterations}')

                # Calculates the Wormhole Existence Probability
                WEP = self.WEP_min + (t + 1) * (
                    (self.WEP_max - self.WEP_min) / space.n_iterations)

                # Calculates the Travelling Distance Rate
                TDR = 1 - (
                    (t + 1)**(1 / self.p) / space.n_iterations**(1 / self.p))

                # Updating agents
                self._update(space.agents, space.best_agent, function, WEP,
                             TDR)

                # Checking if agents meet the bounds limits
                space.clip_limits()

                # After the update, we need to re-evaluate the search space
                self._evaluate(space, function, hook=pre_evaluation)

                # Every iteration, we need to dump agents and best agent
                history.dump(agents=space.agents, best_agent=space.best_agent)

                # Updates the `tqdm` status
                b.set_postfix(fitness=space.best_agent.fit)
                b.update()

                logger.file(f'Fitness: {space.best_agent.fit}')
                logger.file(f'Position: {space.best_agent.position}')

        return history
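
The two schedules inside the loop are pure functions of the iteration counter; pulled out as standalone functions below (a sketch mirroring the expressions above, with the optimizer's `WEP_min`, `WEP_max` and `p` hyperparameters passed explicitly):

def wep(t, n_iterations, WEP_min, WEP_max):
    # Wormhole Existence Probability: linear ramp from WEP_min to WEP_max
    return WEP_min + (t + 1) * ((WEP_max - WEP_min) / n_iterations)

def tdr(t, n_iterations, p):
    # Travelling Distance Rate: power-law decay towards zero
    return 1 - ((t + 1) ** (1 / p) / n_iterations ** (1 / p))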
Example #15
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Initial search space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Initializing a progress bar
        with tqdm(total=space.n_iterations) as b:
            # Iterates through all iterations
            for t in range(space.n_iterations):
                logger.file(f'Iteration {t+1}/{space.n_iterations}')

                # Updating pitch adjusting rate
                self.PAR = self.PAR_min + \
                    (((self.PAR_max - self.PAR_min) / space.n_iterations) * t)

                # Updating bandwidth parameter
                self.bw = self.bw_max * \
                    np.exp((np.log(self.bw_min / self.bw_max) /
                            space.n_iterations) * t)

                # Updating agents
                self._update(space.agents, function)

                # Checking if agents meet the bounds limits
                space.clip_limits()

                # After the update, we need to re-evaluate the search space
                self._evaluate(space, function, hook=pre_evaluation)

                # Every iteration, we need to dump agents and best agent
                history.dump(agents=space.agents, best_agent=space.best_agent)

                # Updates the `tqdm` status
                b.set_postfix(fitness=space.best_agent.fit)
                b.update()

                logger.file(f'Fitness: {space.best_agent.fit}')
                logger.file(f'Position: {space.best_agent.position}')

        return history
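
Likewise, the pitch adjusting rate and bandwidth updates depend only on the iteration counter; restated outside the loop (a sketch mirroring the expressions above, with `PAR_min`, `PAR_max`, `bw_min` and `bw_max` passed explicitly):

import numpy as np

def pitch_adjusting_rate(t, n_iterations, PAR_min, PAR_max):
    # Linear ramp from PAR_min towards PAR_max over the run
    return PAR_min + ((PAR_max - PAR_min) / n_iterations) * t

def bandwidth(t, n_iterations, bw_min, bw_max):
    # Exponential decay from bw_max towards bw_min over the run
    return bw_max * np.exp((np.log(bw_min / bw_max) / n_iterations) * t)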
Example #16
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Calculates the number of elephants per clan
        n_ci = space.n_agents // self.n_clans

        # If the number of elephants per clan equals zero
        if n_ci == 0:
            # Raises an error
            raise e.ValueError(
                'Number of agents should be divisible by number of clans')

        # Initial search space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Initializing a progress bar
        with tqdm(total=space.n_iterations) as b:
            # Iterates through all iterations
            for t in range(space.n_iterations):
                logger.file(f'Iteration {t+1}/{space.n_iterations}')

                # Updating agents
                self._update(space.agents, function, n_ci)

                # Checking if agents meet the bounds limits
                space.clip_limits()

                # After the update, we need to re-evaluate the search space
                self._evaluate(space, function, hook=pre_evaluation)

                # Every iteration, we need to dump agents and best agent
                history.dump(agents=space.agents, best_agent=space.best_agent)

                # Updates the `tqdm` status
                b.set_postfix(fitness=space.best_agent.fit)
                b.update()

                logger.file(f'Fitness: {space.best_agent.fit}')
                logger.file(f'Position: {space.best_agent.position}')

        return history
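
One subtlety in the guard above: floor division only yields zero when there are fewer agents than clans, so agent counts that are merely not divisible pass silently with a truncated clan size (plain integer arithmetic, shown for illustration):

assert 10 // 3 == 3  # 10 agents, 3 clans: passes the guard, one agent left over
assert 2 // 5 == 0   # fewer agents than clans: this is what raises above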
Example #17
    def run(self, space, function):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Instantiating an array of frequencies
        frequency = r.generate_uniform_random_number(self.f_min, self.f_max,
                                                     space.n_agents)

        # Instantiating an array of velocities
        velocity = np.zeros(
            (space.n_agents, space.n_variables, space.n_dimensions))

        # And also an array of loudness values
        loudness = r.generate_uniform_random_number(0, self.A, space.n_agents)

        # Finally, an array of pulse rates
        pulse_rate = r.generate_uniform_random_number(0, self.r,
                                                      space.n_agents)

        # Initial search space evaluation
        self._evaluate(space, function)

        # We will define a History object for further dumping
        history = h.History()

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # Updating agents
            self._update(space.agents, space.best_agent, space.lb, space.ub,
                         function, t, frequency, velocity, loudness,
                         pulse_rate)

            # Checking if agents meet the bounds limits
            space.check_bound_limits(space.agents, space.lb, space.ub)

            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function)

            # Every iteration, we need to dump the current space agents
            history.dump(space.agents, space.best_agent)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
Example #18
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Creates a height vector with `h_max` values
        height = r.generate_uniform_random_number(self.h_max, self.h_max,
                                                  space.n_agents)

        # Creates a length vector with 0.5 values
        length = r.generate_uniform_random_number(0.5, 0.5, space.n_agents)

        # Initial search space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Initializing a progress bar
        with tqdm(total=space.n_iterations) as b:
            # Iterates through all iterations
            for t in range(space.n_iterations):
                logger.file(f'Iteration {t+1}/{space.n_iterations}')

                # Updating agents
                self._update(space.agents, space.best_agent, function, height,
                             length)

                # Checking if agents meet the bounds limits
                space.clip_limits()

                # After the update, we need to re-evaluate the search space
                self._evaluate(space, function, hook=pre_evaluation)

                # Every iteration, we need to dump agents and best agent
                history.dump(agents=space.agents, best_agent=space.best_agent)

                # Updates the `tqdm` status
                b.set_postfix(fitness=space.best_agent.fit)
                b.update()

                logger.file(f'Fitness: {space.best_agent.fit}')
                logger.file(f'Position: {space.best_agent.position}')

        return history
Example #19
    def run(self,
            space,
            function,
            store_best_only=False,
            pre_evaluation_hook=None):
        """Runs the optimization pipeline.

        Args:
            space (TreeSpace): A TreeSpace object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (boolean): If True, only the best agent of each iteration is stored in History.
            pre_evaluation_hook (function): A function that receives the optimizer, space and function
                and returns None. This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Check if there is a pre-evaluation hook
        if pre_evaluation_hook:
            # Applies the hook
            pre_evaluation_hook(self, space, function)

        # Initial tree space evaluation
        self._evaluate(space, function)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # Updating trees with designed operators
            self._update(space)

            # Check if there is a pre-evaluation hook
            if pre_evaluation_hook:
                # Applies the hook
                pre_evaluation_hook(self, space, function)

            # After the update, we need to re-evaluate the tree space
            self._evaluate(space, function)

            # Every iteration, we need to dump agents and best agent
            history.dump(agents=space.agents,
                         best_agent=space.best_agent,
                         best_tree=space.best_tree)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
Example #20
def test_history_dump():
    new_history = history.History()

    agents = []
    for _ in range(5):
        agents.append(agent.Agent(n_variables=2, n_dimensions=1))

    new_history.dump(agents, agents[0])

    assert len(new_history.agents) > 0
    assert len(new_history.best_agent) > 0
Example #21
def test_history_show():
    new_history = history.History()

    agents = []
    for _ in range(5):
        agents.append(agent.Agent(n_variables=2, n_dimensions=1))

    new_history.dump(agents, agents[0])

    new_history.show()

    assert True == True
Example #22
def test_history_dump():
    new_history = history.History(save_agents=True)

    agents = [
        agent.Agent(
            n_variables=2, n_dimensions=1, lower_bound=[0, 0], upper_bound=[1, 1]
        )
        for _ in range(5)
    ]

    new_history.dump(agents=agents, best_agent=agents[4], value=0)

    assert len(new_history.agents) > 0
    assert len(new_history.best_agent) > 0
    assert new_history.value[0] == 0

    new_history = history.History(save_agents=False)

    new_history.dump(agents=agents)

    assert hasattr(new_history, "agents") is False
Example #23
    def run(self, space, function):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Initial search space evaluation
        self._evaluate(space, function)

        # Calculating the flow's intensity (Equation 6)
        flows = self._flow_intensity(space.agents)

        # We will define a History object for further dumping
        history = h.History()

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # Updating agents
            self._update(space.agents, space.best_agent, flows)

            # Checking if agents meet the bounds limits
            space.check_bound_limits(space.agents, space.lb, space.ub)

            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function)

            # Sorting agents
            space.agents.sort(key=lambda x: x.fit)

            # Performs the raining process (Equation 12)
            self._raining_process(space.agents, space.best_agent)

            # Updates the evaporation condition
            self.d_max -= (self.d_max / space.n_iterations)

            # Every iteration, we need to dump the current space agents
            history.dump(space.agents, space.best_agent)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
Example #24
def test_opytimizer_history_setter():
    space = search.SearchSpace(1, 1, 0, 1)
    func = function.Function(callable)
    optimizer = pso.PSO()
    hist = history.History()

    new_opytimizer = opytimizer.Opytimizer(space, optimizer, func)

    try:
        new_opytimizer.history = 1
    except:
        new_opytimizer.history = hist

    assert type(new_opytimizer.history).__name__ == 'History'
Example #25
def test_convergence_plot():
    new_history = history.History()

    new_history.load('models/test.pkl')

    agents = new_history.get(key='agents', index=(0, 0))

    try:
        convergence.plot(agents[0], agents[1], labels=1)
    except:
        convergence.plot(agents[0], agents[1], labels=['agent[0]', 'agent[1]'])

    try:
        convergence.plot(agents[0], agents[1], labels=['agent[0]'])
    except:
        convergence.plot(agents[0], agents[1])
Example #26
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Instantiating an array of lives
        life = r.generate_uniform_random_number(70, 70, space.n_agents)

        # Instantiating an array of counters
        counter = np.ones(space.n_agents)

        # Initial search space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # Updating agents
            self._update(space.agents, space.best_agent, function, life,
                         counter)

            # Checking if agents meet the bounds limits
            space.clip_limits()

            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function, hook=pre_evaluation)

            # Every iteration, we need to dump agents and best agent
            history.dump(agents=space.agents, best_agent=space.best_agent)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
Example #27
    def run(self, space, function):
        """Runs the optimization pipeline.

        Args:
            space (Space): A Space object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Initial search space evaluation
        self._evaluate(space, function)

        # We will define a History object for further dumping
        history = h.History()

        # Iterates through all iterations
        for t in range(space.n_iterations):
            logger.info(f'Iteration {t+1}/{space.n_iterations}')

            # Updating pitch adjusting rate
            self.PAR = self.PAR_min + \
                (((self.PAR_max - self.PAR_min) / space.n_iterations) * t)

            # Updating bandwidth parameter
            self.bw = self.bw_max * \
                np.exp((np.log(self.bw_min / self.bw_max) / space.n_iterations) * t)

            # Updating agents
            self._update(space.agents, space.lb, space.ub, function)

            # Checking if agents meet the bounds limits
            space.check_bound_limits(space.agents, space.lb, space.ub)

            # After the update, we need to re-evaluate the search space
            self._evaluate(space, function)

            # Every iteration, we need to dump the current space agents
            history.dump(space.agents, space.best_agent)

            logger.info(f'Fitness: {space.best_agent.fit}')
            logger.info(f'Position: {space.best_agent.position}')

        return history
Example #28
def test_history_get_convergence():
    new_history = history.History(save_agents=True)

    agents = [
        agent.Agent(n_variables=2,
                    n_dimensions=1,
                    lower_bound=[0, 0],
                    upper_bound=[1, 1]) for _ in range(5)
    ]

    new_history.dump(agents=agents,
                     best_agent=agents[4],
                     local_position=agents[0].position,
                     value=0)
    new_history.dump(agents=agents,
                     best_agent=agents[4],
                     local_position=agents[0].position,
                     value=0)

    try:
        agents_pos, agents_fit = new_history.get_convergence(key='agents',
                                                             index=5)
    except:
        agents_pos, agents_fit = new_history.get_convergence(key='agents',
                                                             index=0)

    assert agents_pos.shape == (2, 2)
    assert agents_fit.shape == (2, )

    best_agent_pos, best_agent_fit = new_history.get_convergence(
        key='best_agent')

    assert best_agent_pos.shape == (2, 2)
    assert best_agent_fit.shape == (2, )

    try:
        local_position = new_history.get_convergence(key='local_position',
                                                     index=5)
    except:
        local_position = new_history.get_convergence(key='local_position')

    assert local_position.shape == (2, )

    value = new_history.get_convergence(key='value')

    assert value.shape == (2, )
Example #29
    def run(self, space, function, store_best_only=False, pre_evaluation=None):
        """Runs the optimization pipeline.

        Args:
            space (TreeSpace): A TreeSpace object that will be evaluated.
            function (Function): A Function object that will be used as the objective function.
            store_best_only (bool): If True, only the best agent of each iteration is stored in History.
            pre_evaluation (callable): This function is executed before evaluating the function being optimized.

        Returns:
            A History object holding all agents' positions and fitness achieved during the task.

        """

        # Initial tree space evaluation
        self._evaluate(space, function, hook=pre_evaluation)

        # We will define a History object for further dumping
        history = h.History(store_best_only)

        # Initializing a progress bar
        with tqdm(total=space.n_iterations) as b:
            # Iterates through all iterations
            for t in range(space.n_iterations):
                logger.file(f'Iteration {t+1}/{space.n_iterations}')

                # Updating trees with designed operators
                self._update(space)

                # After the update, we need to re-evaluate the tree space
                self._evaluate(space, function, hook=pre_evaluation)

                # Every iteration, we need to dump agents and best agent
                history.dump(agents=space.agents,
                             best_agent=space.best_agent,
                             best_tree=space.best_tree)

                # Updates the `tqdm` status
                b.set_postfix(fitness=space.best_agent.fit)
                b.update()

                logger.file(f'Fitness: {space.best_agent.fit}')
                logger.file(f'Position: {space.best_agent.position}')

        return history
Example #30
def test_history_get():
    new_history = history.History()

    agents = [agent.Agent(n_variables=2, n_dimensions=1) for _ in range(5)]

    new_history.dump(agents=agents, best_agent=agents[4], value=0)

    try:
        agents = new_history.get(key='agents', index=0)
    except:
        agents = new_history.get(key='agents', index=(0, 0))

    try:
        agents = new_history.get(key='agents', index=(0, 0, 0))
    except:
        agents = new_history.get(key='agents', index=(0, 0))

    assert agents.shape == (2, 1)