Example #1
def test_compute_velocity_return_values(swarm, clamp, static):
    """Test if compute_velocity() gives the expected shape and range"""
    topology = Ring(static=static)
    v = topology.compute_velocity(swarm, clamp)
    assert v.shape == swarm.position.shape
    if clamp is not None:
        assert (clamp[0] <= v).all() and (clamp[1] >= v).all()
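The swarm, clamp, and static arguments above come from pytest fixtures that are not shown here. Below is a minimal sketch of what such fixtures could look like, assuming pyswarms' backend Swarm class; the concrete values are hypothetical.

import numpy as np
import pytest
from pyswarms.backend.swarms import Swarm

@pytest.fixture
def swarm():
    """Hypothetical 3-particle, 2-dimensional swarm carrying the attributes
    Ring.compute_velocity() reads (position, velocity, bests, options)."""
    position = np.random.uniform(-1.0, 1.0, (3, 2))
    return Swarm(
        position=position,
        velocity=np.zeros((3, 2)),
        pbest_pos=position.copy(),
        best_pos=position[0],
        pbest_cost=np.full(3, np.inf),
        options={"c1": 0.5, "c2": 0.7, "w": 0.9},
    )

@pytest.fixture(params=[None, (-0.5, 0.5)])
def clamp(request):
    """Velocity clamp: either disabled or a (v_min, v_max) tuple."""
    return request.param

@pytest.fixture(params=[True, False])
def static(request):
    """Whether the Ring neighborhood is fixed or recomputed each iteration."""
    return request.param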
Example #2
def test_compute_position_return_values(swarm, bounds, static):
    """Test if compute_position() gives the expected shape and range"""
    topology = Ring(static=static)
    p = topology.compute_position(swarm, bounds)
    assert p.shape == swarm.velocity.shape
    if bounds is not None:
        assert (bounds[0] <= p).all() and (bounds[1] >= p).all()
Example #3
    def __init__(
        self,
        n_particles,
        dimensions,
        options={},
        init_pos=None,
        velocity_clamp=None,
        ftol=-np.inf,
    ):

        self.logger = logging.getLogger(__name__)
        super().__init__(
            n_particles=n_particles,
            dimensions=dimensions,
            options=options,
            binary=True,
            init_pos=init_pos,
            velocity_clamp=velocity_clamp,
            ftol=ftol,
        )

        self.food_fitness = np.inf
        self.enemy_fitness = -np.inf

        self.food_pos = np.empty(0)
        self.enemy_pos = np.empty(0)

        self.assertions()
        # Initialize the resettable attributes
        self.reset()
        # Initialize the topology
        self.top = Ring(static=False)
Example #4
def test_update_gbest_neighborhood(swarm, p, k, static):
    """Test if update_gbest_neighborhood gives the expected return values"""
    topology = Ring(static=static)
    pos, cost = topology.compute_gbest(swarm, p=p, k=k)
    expected_pos = np.array([9.90438476e-01, 2.50379538e-03, 1.87405987e-05])
    expected_cost = 1.0002528364353296
    assert cost == pytest.approx(expected_cost)
    assert pos == pytest.approx(expected_pos)
Example #5
def test_update_gbest_neighborhood(swarm, p, k):
    """Test if update_gbest_neighborhood gives the expected return values"""
    topology = Ring()
    pos, cost = topology.compute_gbest(swarm, p=p, k=k)
    expected_pos = np.array([1,2,3])
    expected_cost = 1
    assert (pos == expected_pos).all()
    assert cost == expected_cost
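Both tests above exercise Ring.compute_gbest(), which finds each particle's k nearest neighbors under the Minkowski p-norm and takes the best personal best inside that neighborhood. A small standalone sketch, reusing the hypothetical Swarm construction from the earlier note:

import numpy as np
from pyswarms.backend.swarms import Swarm
from pyswarms.backend.topology import Ring

positions = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0]])
swarm = Swarm(
    position=positions,
    velocity=np.zeros_like(positions),
    pbest_pos=positions.copy(),
    pbest_cost=np.array([3.0, 1.0, 2.0]),
)
# k=2: each particle considers itself and its nearest neighbor (Euclidean, p=2).
best_pos, best_cost = Ring(static=True).compute_gbest(swarm, p=2, k=2)
print(best_cost)  # 1.0, the lowest personal-best cost found in any neighborhood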
Example #6
    def __init__(self, _vae: VaeWrapper, _classifier: LatentSpaceLEC,
                 _model_under_test: LECUnderTest, dataset: str,
                 output_dir: str, pso_options: dict, n_iter: int):
        """ Initialize a test generator that synthesizes new
        high-uncertainty image inputs for a given pair of VAE and a classifier.

        :param _vae: A VAE model
        :param _classifier: A classifier model attached to the latent layer
                            of the VAE
        :param _model_under_test: Model under test
        :param dataset: name of a dataset
        :param output_dir: (str) Output directory path
        :param pso_options: a dictionary containing PSO hyper-parameters,
        which are {c1, c2, w, k, p}.
        :param n_iter: PSO iteration
        """
        self.threshold = 1.0
        self.vae = _vae
        self.classifier = _classifier
        self.model_under_test = _model_under_test
        if not (os.path.exists(output_dir) and os.path.isdir(output_dir)):
            os.mkdir(output_dir)
        self.output_dir = output_dir
        self.total_cnt = 0  # total number of test generation attempted

        self.xs, self.dim = load_dataset(dataset, 'train', normalize=True)
        self.ys, self.n_classes = load_dataset(dataset, 'train', label=True)

        # self.n_particle = testgen_config["optimizer"]["n_particle"]
        self.n_iter = n_iter
        self.options = pso_options
        self.topology = Ring(static=False)
        min_bound = np.array([-1.0] * self.vae.latent_dim)
        max_bound = np.array([1.0] * self.vae.latent_dim)
        self.bounds = (min_bound, max_bound)
Example #7
    def __init__(self,
                 num_params,
                 c1=0.5 + np.log(2.0),
                 c2=0.5 + np.log(2.0),
                 w=0.5 / np.log(2.0),
                 popsize=256,
                 sigma_init=0.1,
                 weight_decay=0.01,
                 communication_topology='star'):
        self.num_params = num_params
        self.c1 = c1
        self.c2 = c2
        self.w = w
        self.popsize = popsize
        self.sigma_init = sigma_init
        self.weight_decay = weight_decay
        self.best_param = np.zeros(self.num_params)
        self.best_reward = 0
        self.pop_params = np.random.randn(self.popsize,
                                          self.num_params) * self.sigma_init
        self.pbest_params = self.pop_params
        self.pbest_rewards = np.zeros(self.popsize)
        self.pop_vel = np.zeros((self.popsize, self.num_params))
        self.pop_rewards = np.zeros(self.popsize)
        self.gbest_param = self.pop_params[np.argmax(self.pop_rewards)]
        self.gbest_reward = np.max(self.pop_rewards)
        self.first_iteration = True

        # Import backend modules
        l_lims = -np.ones(self.num_params)
        u_lims = np.ones(self.num_params)
        bounds = (l_lims, u_lims)
        import pyswarms.backend as P
        self.P = P
        # Global topology will always be used to compute gbest
        from pyswarms.backend.topology import Star
        self.global_topology = Star()
        self.communication_topology = communication_topology
        # Unless specified, use the star topology.
        if self.communication_topology == 'random':
            from pyswarms.backend.topology import Random
            self.topology = Random()  # The Topology Class
        elif self.communication_topology == 'local':
            from pyswarms.backend.topology import Ring
            self.topology = Ring()  # The Topology Class
        else:
            from pyswarms.backend.topology import Star
            self.topology = Star()  # The Topology Class
        self.options = {'c1': self.c1, 'c2': self.c2, 'w': self.w}
        self.swarm = self.P.create_swarm(n_particles=self.popsize,
                                         dimensions=self.num_params,
                                         options=self.options,
                                         center=self.sigma_init,
                                         bounds=bounds)
Example #8
    def __fitNBeta(self, dim, n_particles, itera, options, objetive_function,
                   BetaChange, bound):
        my_topology = Ring()
        my_swarm = P.create_swarm(n_particles=n_particles,
                                  dimensions=dim,
                                  options=options,
                                  bounds=bound)
        my_swarm.pbest_cost = np.full(n_particles, np.inf)
        my_swarm.best_cost = np.inf

        for i in range(itera):
            for a in range(n_particles):
                my_swarm.position[a][0:BetaChange] = sorted(
                    my_swarm.position[a][0:BetaChange])
                # Nudge a change point forward if it is within 5 of the previous one
                for c in range(1, BetaChange):
                    if my_swarm.position[a][c - 1] + 5 >= my_swarm.position[a][c]:
                        my_swarm.position[a][c] = my_swarm.position[a][c] + 5
            my_swarm.current_cost = objetive_function(my_swarm.position)
            my_swarm.pbest_pos, my_swarm.pbest_cost = P.operators.compute_pbest(
                my_swarm)
            #my_swarm.current_cost[np.isnan(my_swarm.current_cost)]=np.nanmax(my_swarm.current_cost)
            #my_swarm.pbest_cost = objetive_function(my_swarm.pbest_pos)

            my_swarm.best_pos, my_swarm.best_cost = my_topology.compute_gbest(
                my_swarm, options['p'], options['k'])
            if i % 20 == 0:
                print(
                    'Iteration: {} | my_swarm.best_cost: {:.4f} | days: {}'.
                    format(
                        i + 1, my_swarm.best_cost,
                        str(my_swarm.pbest_pos[my_swarm.pbest_cost.argmin()])))
            my_swarm.velocity = my_topology.compute_velocity(my_swarm,
                                                             bounds=bound)
            my_swarm.position = my_topology.compute_position(my_swarm,
                                                             bounds=bound)
        final_best_cost = my_swarm.best_cost.copy()
        final_best_pos = my_swarm.pbest_pos[
            my_swarm.pbest_cost.argmin()].copy()
        return final_best_pos, final_best_cost
Example #9
def test_keyword_exception_ring(options, static):
    """Tests if exceptions are thrown when keywords are missing and a Ring topology is chosen"""
    with pytest.raises(KeyError):
        GeneralOptimizerPSO(5, 2, options, Ring(static=static))
Example #10
def test_invalid_k_or_p_values(options, static):
    """Tests if exception is thrown when passing
    an invalid value for k or p when using a Ring topology"""
    with pytest.raises(ValueError):
        GeneralOptimizerPSO(5, 2, options, Ring(static=static))
Example #11
def test_neighbor_idx(swarm, static, p, k):
    """Test if the neighbor_idx attribute is assigned"""
    topology = Ring(static=static)
    topology.compute_gbest(swarm, p=p, k=k)
    assert topology.neighbor_idx is not None
Example #12
class DragonFlyOptimizer(DiscreteSwarmOptimizer):
    def assertions(self):

        if self.velocity_clamp is not None:
            if not isinstance(self.velocity_clamp, tuple):
                raise TypeError("Parameter `velocity_clamp` must be a tuple")
            if not len(self.velocity_clamp) == 2:
                raise IndexError("Parameter `velocity_clamp` must be of "
                                 "size 2")
            if not self.velocity_clamp[0] < self.velocity_clamp[1]:
                raise ValueError("Make sure that velocity_clamp is in the "
                                 "form (v_min, v_max)")

    def __init__(
        self,
        n_particles,
        dimensions,
        options={},
        init_pos=None,
        velocity_clamp=None,
        ftol=-np.inf,
    ):

        self.logger = logging.getLogger(__name__)
        super().__init__(
            n_particles=n_particles,
            dimensions=dimensions,
            options=options,
            binary=True,
            init_pos=init_pos,
            velocity_clamp=velocity_clamp,
            ftol=ftol,
        )

        self.food_fitness = np.inf
        self.enemy_fitness = -np.inf

        self.food_pos = np.empty(0)
        self.enemy_pos = np.empty(0)

        self.assertions()
        # Initialize the resettable attributes
        self.reset()
        # Initialize the topology
        self.top = Ring(static=False)

    def compute_pworst(self, swarm):  # Compute enemy position and cost
        try:
            # Infer dimensions from positions
            dimensions = swarm.dimensions
            # Create a 1-D and 2-D mask based from comparisons
            mask_cost = swarm.current_cost > swarm.pbest_cost
            mask_pos = np.repeat(mask_cost[:, np.newaxis], dimensions, axis=1)
            # Apply masks
            new_pworst_pos = np.where(~mask_pos, swarm.pbest_pos,
                                      swarm.position)
            new_pworst_cost = np.where(~mask_cost, swarm.pbest_cost,
                                       swarm.current_cost)
        except AttributeError:
            msg = "Please pass a Swarm class. You passed {}".format(
                type(swarm))
            self.logger.error(msg)
            raise
        else:
            return (new_pworst_pos, new_pworst_cost)

    def _transfer_function(self, v):
        """Helper method for the transfer function

        Parameters
        ----------
        v : numpy.ndarray
            Input velocity vector for the transfer-function computation

        Returns
        -------
        numpy.ndarray
            Transfer-function values in [0, 1), one per velocity component
        """
        return abs(v / np.sqrt(v**2 + 1))

    def compute_position(self, velocity):
        return np.random.random_sample(
            size=self.dimensions) < self._transfer_function(velocity)
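    # Illustrative note (not part of the original class): the V-shaped transfer
    # function above maps any velocity component to a value in [0, 1) that grows
    # with its magnitude, e.g. v = [0, 1, -3, 10] -> [0.0, 0.707, 0.949, 0.995].
    # compute_position() then draws one uniform random sample per dimension and
    # sets a bit to True wherever the sample falls below that value, so a larger
    # |velocity| makes a 1 more likely.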

    def optimize(self,
                 objective_func,
                 iters,
                 print_step=1,
                 verbose=1,
                 **kwargs):

        ub = 1
        lb = 0

        for i in range(iters):

            w = 0.9 - i * ((0.9 - 0.4) / iters)

            my_c = 0.1 - i * ((0.1 - 0) / (iters / 2))

            if my_c < 0:
                my_c = 0
            # print(my_c)
            s = 2 * random.random() * my_c  # Separation weight
            a = 2 * random.random() * my_c  # Alignment weight
            c = 2 * random.random() * my_c  # Cohesion weight
            f = 2 * random.random()  # Food attraction weight
            e = my_c  # Enemy distraction weight

            # Compute cost for current position and personal best
            self.swarm.current_cost = objective_func(self.swarm.position,
                                                     **kwargs)
            self.swarm.pbest_cost = objective_func(self.swarm.pbest_pos,
                                                   **kwargs)
            self.swarm.pbest_pos, self.swarm.pbest_cost = compute_pbest(
                self.swarm)
            self.swarm.pworst_pos, self.swarm.pworst_cost = self.compute_pworst(
                self.swarm)

            pmin_cost_idx = np.argmin(self.swarm.pbest_cost)
            pmax_cost_idx = np.argmax(self.swarm.pworst_cost)
            # pmax_cost_idx = np.argmax(self.swarm.pbest_cost)
            # Update gbest from neighborhood

            # self.swarm.best_cost = np.min(self.swarm.pbest_cost)
            # self.swarm.pbest_pos = self.swarm.pbest_pos[np.argmin(self.swarm.pbest_cost)]

            # best_cost_yet_found = np.min(self.swarm.best_cost)

            self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
                self.swarm, 2, self.n_particles)

            # Updating Food position
            if self.swarm.pbest_cost[pmin_cost_idx] < self.food_fitness:
                self.food_fitness = self.swarm.pbest_cost[pmin_cost_idx]
                self.food_pos = self.swarm.pbest_pos[pmin_cost_idx]

            # Updating Enemy position
            if self.swarm.pworst_cost[pmax_cost_idx] > self.enemy_fitness:
                self.enemy_fitness = self.swarm.pworst_cost[pmax_cost_idx]
                self.enemy_pos = self.swarm.pworst_pos[pmax_cost_idx]

            # best_cost_yet_found = np.min(self.swarm.best_cost)

            for j in range(self.n_particles):

                S = np.zeros(self.dimensions)
                A = np.zeros(self.dimensions)
                C = np.zeros(self.dimensions)
                F = np.zeros(self.dimensions)
                E = np.zeros(self.dimensions)

                # Calculating Separation(S)

                for k in range(self.n_particles):
                    S += (self.swarm.position[k] - self.swarm.position[j])

                S = -S

                # Calculating Alignment (A)

                for k in range(self.n_particles):
                    A += self.swarm.velocity[k]
                A = (A / self.n_particles)

                # Calculating Cohesion
                for k in range(self.n_particles):
                    C += self.swarm.position[k]
                C = (C / self.n_particles) - self.swarm.position[j]

                # Food attraction (F) and enemy distraction (E)
                F = self.food_pos - self.swarm.position[j]
                E = self.enemy_pos - self.swarm.position[j]

                self.swarm.velocity[j] = (s * S + a * A + c * C + f * F +
                                          e * E) + w * self.swarm.velocity[j]
                self.swarm.position[j] = self.compute_position(
                    self.swarm.velocity[j])

            # Print to console
            if i % print_step == 0:
                cli_print(
                    "Iteration {}/{}, cost: {}".format(
                        i + 1, iters, np.min(self.swarm.best_cost)),
                    verbose,
                    2,
                    logger=self.logger,
                )

        # Obtain the final best_cost and the final best_position
        # final_best_cost = np.min(self.swarm.pbest_cost)
        # final_best_pos = self.swarm.pbest_pos[np.argmin(self.swarm.pbest_cost)]

        final_best_cost = self.swarm.best_cost.copy()
        final_best_pos = self.swarm.best_pos.copy()

        print("==============================\nOptimization finished\n")
        print("Final Best Cost : ", final_best_cost, "\nBest Value : ",
              final_best_pos)

        # end_report(
        #     final_best_cost, final_best_pos, verbose, logger=self.logger
        # )
        return (final_best_cost, final_best_pos)
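A hypothetical usage sketch of the optimizer defined above; the objective and parameter values are made up for illustration, and the class's own module-level imports (numpy, random, logging, Ring, compute_pbest, cli_print, DiscreteSwarmOptimizer) are assumed to be present.

import numpy as np

def sphere(x):
    """Sum of squares, evaluated row-wise on the binary positions."""
    return (x ** 2).sum(axis=1)

optimizer = DragonFlyOptimizer(n_particles=20, dimensions=10)
best_cost, best_pos = optimizer.optimize(sphere, iters=50, print_step=10)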
Example #13

@pytest.fixture(scope="module")
def binary_reset():
    """Returns a BinaryPSO instance that has been run and reset to check
    default value"""
    pso = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
    pso.optimize(sphere_func, 10, verbose=0)
    pso.reset()
    return pso


@pytest.fixture
def options():
    """Default options dictionary for most PSO use-cases"""
    options_ = {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2, "r": 1}
    return options_


@pytest.fixture(params=[
                Star(),
                Ring(static=False), Ring(static=True),
                Pyramid(static=False), Pyramid(static=True),
                Random(static=False), Random(static=True),
                VonNeumann()
                ])
def topology(request):
    """Parametrized topology parameter"""
    topology_ = request.param
    return topology_
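A hypothetical test, not part of the original conftest, showing how the parametrized topology fixture above is typically consumed together with the options fixture; sphere_func is the same objective used in binary_reset and is assumed to be imported in the test module.

import numpy as np
from pyswarms.single import GeneralOptimizerPSO

def test_optimizer_accepts_all_topologies(options, topology):
    """Hypothetical check: the optimizer runs with every parametrized topology."""
    opt = GeneralOptimizerPSO(10, 2, options=options, topology=topology)
    cost, pos = opt.optimize(sphere_func, 5, verbose=0)
    assert np.isfinite(cost)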
Example #14
    def __init__(
        self,
        n_particles,
        dimensions_discrete,
        options,
        bounds,
        bh_strategy="periodic",
        init_pos=None,
        velocity_clamp=None,
        vh_strategy="unmodified",
        ftol=-np.inf,
        ftol_iter=1,
    ):
        """Initialize the swarm

        Attributes
        ----------
        n_particles : int
            number of particles in the swarm.
        dimensions_discrete : int
            number of discrete dimensions of the search space.
        options : dict with keys :code:`{'c1', 'c2', 'w', 'k', 'p'}`
            a dictionary containing the parameters for the specific
            optimization technique
                * c1 : float
                    cognitive parameter
                * c2 : float
                    social parameter
                * w : float
                    inertia parameter
                * k : int
                    number of neighbors to be considered. Must be a
                    positive integer less than :code:`n_particles`
                * p: int {1,2}
                    the Minkowski p-norm to use. 1 is the
                    sum-of-absolute values (or L1 distance) while 2 is
                    the Euclidean (or L2) distance.
        bounds : tuple of numpy.ndarray
            a tuple of size 2 where the first entry is the minimum bound while
            the second entry is the maximum bound. Each array must be of shape
            :code:`(dimensions,)`.
        init_pos : numpy.ndarray, optional
            option to explicitly set the particles' initial positions. Set to
            :code:`None` if you wish to generate the particles randomly.
        velocity_clamp : tuple, optional
            a tuple of size 2 where the first entry is the minimum velocity
            and the second entry is the maximum velocity. It
            sets the limits for velocity clamping.
        vh_strategy : String
            a strategy for the handling of the velocity of out-of-bounds particles.
            Only the "unmodified" and the "adjust" strategies are allowed.
        ftol : float
            relative error in objective_func(best_pos) acceptable for
            convergence
        ftol_iter : int
            number of iterations over which the relative error in
            objective_func(best_pos) is acceptable for convergence.
            Default is :code:`1`
        """
        # Initialize logger
        self.rep = Reporter(logger=logging.getLogger(__name__))
        # Assign k-neighbors and p-value as attributes
        self.k, self.p = options["k"], options["p"]

        self.dimensions_discrete = dimensions_discrete

        self.bits, self.bounds = self.discretePSO_to_binaryPSO(
            dimensions_discrete, bounds)

        # Initialize parent class
        super(BinaryPSO, self).__init__(
            n_particles=n_particles,
            dimensions=sum(self.bits),
            binary=True,
            options=options,
            init_pos=init_pos,
            velocity_clamp=velocity_clamp,
            ftol=ftol,
            ftol_iter=ftol_iter,
        )
        # self.bounds = bounds
        # Initialize the resettable attributes
        self.reset()
        # Initialize the topology
        self.top = Ring(static=False)
        self.vh = VelocityHandler(strategy=vh_strategy)
        self.bh = BoundaryHandler(strategy=bh_strategy)
        self.name = __name__
Example #15
class DiscreteBoundedPSO(BinaryPSO):
    """
    This class is based on the Binary PSO class. It extends the BinaryPSO class
    by a function which allows the conversion of discrete optimization variables
    into binary variables, so that discrete optimization problems can be solved 
    """
    def __init__(
        self,
        n_particles,
        dimensions_discrete,
        options,
        bounds,
        bh_strategy="periodic",
        init_pos=None,
        velocity_clamp=None,
        vh_strategy="unmodified",
        ftol=-np.inf,
        ftol_iter=1,
    ):
        """Initialize the swarm

        Attributes
        ----------
        n_particles : int
            number of particles in the swarm.
        dimensions_discrete : int
            number of discrete dimensions of the search space.
        options : dict with keys :code:`{'c1', 'c2', 'w', 'k', 'p'}`
            a dictionary containing the parameters for the specific
            optimization technique
                * c1 : float
                    cognitive parameter
                * c2 : float
                    social parameter
                * w : float
                    inertia parameter
                * k : int
                    number of neighbors to be considered. Must be a
                    positive integer less than :code:`n_particles`
                * p: int {1,2}
                    the Minkowski p-norm to use. 1 is the
                    sum-of-absolute values (or L1 distance) while 2 is
                    the Euclidean (or L2) distance.
        bounds : tuple of numpy.ndarray
            a tuple of size 2 where the first entry is the minimum bound while
            the second entry is the maximum bound. Each array must be of shape
            :code:`(dimensions,)`.
        init_pos : numpy.ndarray, optional
            option to explicitly set the particles' initial positions. Set to
            :code:`None` if you wish to generate the particles randomly.
        velocity_clamp : tuple, optional
            a tuple of size 2 where the first entry is the minimum velocity
            and the second entry is the maximum velocity. It
            sets the limits for velocity clamping.
        vh_strategy : String
            a strategy for the handling of the velocity of out-of-bounds particles.
            Only the "unmodified" and the "adjust" strategies are allowed.
        ftol : float
            relative error in objective_func(best_pos) acceptable for
            convergence
        ftol_iter : int
            number of iterations over which the relative error in
            objective_func(best_pos) is acceptable for convergence.
            Default is :code:`1`
        """
        # Initialize logger
        self.rep = Reporter(logger=logging.getLogger(__name__))
        # Assign k-neighbors and p-value as attributes
        self.k, self.p = options["k"], options["p"]

        self.dimensions_discrete = dimensions_discrete

        self.bits, self.bounds = self.discretePSO_to_binaryPSO(
            dimensions_discrete, bounds)

        # Initialize parent class
        super(BinaryPSO, self).__init__(
            n_particles=n_particles,
            dimensions=sum(self.bits),
            binary=True,
            options=options,
            init_pos=init_pos,
            velocity_clamp=velocity_clamp,
            ftol=ftol,
            ftol_iter=ftol_iter,
        )
        # self.bounds = bounds
        # Initialize the resettable attributes
        self.reset()
        # Initialize the topology
        self.top = Ring(static=False)
        self.vh = VelocityHandler(strategy=vh_strategy)
        self.bh = BoundaryHandler(strategy=bh_strategy)
        self.name = __name__

    def optimize(self,
                 objective_func,
                 iters,
                 n_processes=None,
                 verbose=True,
                 **kwargs):
        """Optimize the swarm for a number of iterations

        Performs the optimization to evaluate the objective
        function :code:`f` for a number of iterations :code:`iter.`

        Parameters
        ----------
        objective_func : function
            objective function to be evaluated
        iters : int
            number of iterations
        n_processes : int, optional
            number of processes to use for parallel particle evaluation.
            Default is None (no parallelization).
        verbose : bool
            enable or disable the logs and progress bar (default: True = enable logs)
        kwargs : dict
            arguments for objective function

        Returns
        -------
        tuple
            the local best cost and the local best position among the
            swarm.
        """
        # Apply verbosity
        if verbose:
            log_level = logging.INFO
        else:
            log_level = logging.NOTSET

        self.rep.log("Obj. func. args: {}".format(kwargs), lvl=logging.DEBUG)
        self.rep.log(
            "Optimize for {} iters with {}".format(iters, self.options),
            lvl=log_level,
        )
        # Populate memory of the handlers
        self.bh.memory = self.swarm.position
        self.vh.memory = self.swarm.position

        # Setup Pool of processes for parallel evaluation
        pool = None if n_processes is None else mp.Pool(n_processes)

        self.swarm.pbest_cost = np.full(self.swarm_size[0], np.inf)
        ftol_history = deque(maxlen=self.ftol_iter)
        for i in self.rep.pbar(iters, self.name) if verbose else range(iters):

            # Compute cost for current position and personal best
            '''Binary swarm positions need to be transformed to discrete swarm
            positions first, because the objective function expects discrete
            values (only the positions are transformed); the original binary
            position is saved in binary_swarm_position.'''
            binary_swarm_position = self.BinarySwarmPositions_to_DiscreteSwarmPositions(
            )

            # Evaluate Cost Function
            self.swarm.current_cost = compute_objective_function(
                self.swarm, objective_func, pool, **kwargs)
            '''Transform discrete swarm positions back to binary positions,
            because the PSO works on binary particles.'''
            self.swarm.position = binary_swarm_position

            self.swarm.pbest_pos, self.swarm.pbest_cost = compute_pbest(
                self.swarm)
            best_cost_yet_found = np.min(self.swarm.best_cost)
            # Update gbest from neighborhood
            self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
                self.swarm, p=self.p, k=self.k)
            if verbose:
                # Print to console
                self.rep.hook(best_cost=self.swarm.best_cost)
            # Save to history
            hist = self.ToHistory(
                best_cost=self.swarm.best_cost,
                mean_pbest_cost=np.mean(self.swarm.pbest_cost),
                mean_neighbor_cost=np.mean(self.swarm.best_cost),
                position=self.swarm.position,
                velocity=self.swarm.velocity,
            )
            self._populate_history(hist)
            # Verify stop criteria based on the relative acceptable cost ftol
            relative_measure = self.ftol * (1 + np.abs(best_cost_yet_found))
            delta = (np.abs(self.swarm.best_cost - best_cost_yet_found) <
                     relative_measure)
            if i < self.ftol_iter:
                ftol_history.append(delta)
            else:
                ftol_history.append(delta)
                if all(ftol_history):
                    break
            # Perform position velocity update
            self.swarm.velocity = self.top.compute_velocity(
                self.swarm, self.velocity_clamp, self.vh)
            self.swarm.position = self._compute_position(self.swarm)

        # Obtain the final best_cost and the final best_position
        final_best_cost = self.swarm.best_cost.copy()
        final_best_pos = self.swarm.pbest_pos[
            self.swarm.pbest_cost.argmin()].copy()
        self.rep.log(
            "Optimization finished | best cost: {}, best pos: {}".format(
                final_best_cost, final_best_pos),
            lvl=log_level,
        )
        # Close Pool of Processes
        if n_processes is not None:
            pool.close()

        return (final_best_cost, final_best_pos)

    def discretePSO_to_binaryPSO(self, dimensions_discrete, bounds):
        """
        Translate a discrete PSO-problem into a binary PSO-problem by
        calculating the number of bits necessary to represent the discrete
        optimization problem with "dimensions_discrete" number of discrete
        variables as a binary optimization problem. The bounds are encoded in 
        the binary representation and might be tightened.
        
        Parameters
        ----------  
        dimensions_discrete: integer
            dimension of the discrete search space.
        bounds : tuple of numpy.ndarray
            a tuple of size 2 where the first entry is the minimum bound while
            the second entry is the maximum bound. Each array must be of shape
            :code:`(dimensions,)`.
        """

        bits = []

        for n in range(0, dimensions_discrete):

            # Number of bits required (floor of log2 of the range size)
            bits.append(
                int(np.log10(bounds[1][n] - bounds[0][n] + 1) / np.log10(2)))

            # Adjust upper bound accordingly
            bounds[1][n] = bounds[0][n] + 2**bits[n] - 1

        return bits, bounds
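    # Worked example (illustrative values, not from the original code): a
    # variable bounded by [0, 10] spans 11 values, so int(log2(11)) = 3 bits
    # and its upper bound is tightened to 0 + 2**3 - 1 = 7; a variable bounded
    # by [0, 100] needs int(log2(101)) = 6 bits and its upper bound becomes 63.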

    def BinarySwarmPositions_to_DiscreteSwarmPositions(self):
        """
        Converts binary self.swarm.position to discrete values. Returns the 
        original binary position, so that it can be used to restore 
        self.swarm.position to the original binary values.
        """

        binary_position = self.swarm.position
        discrete_position = np.zeros(
            (self.n_particles, self.dimensions_discrete))

        cum_sum = 0

        for i in range(0, self.dimensions_discrete):

            bit = self.bits[i]
            lb = self.bounds[0][i]

            discrete_position[:, [i]] = (
                lb + self.bool2int(binary_position[:, cum_sum:cum_sum + bit]))

            cum_sum = cum_sum + bit

        # Set swarm position to discrete integer values
        self.swarm.position = discrete_position.astype(int)

        return binary_position

    def bool2int(self, x):
        """
        Converts a binary variable represented by an array x (row vector) into
        an integer value
        """

        x_int = np.zeros((x.shape[0], 1))

        for row in range(0, x.shape[0]):
            row_int = 0

            for i, j in enumerate(x[row, :]):
                row_int += j << i

            x_int[row] = row_int

        return x_int
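A hypothetical usage sketch of DiscreteBoundedPSO as defined above, with made-up bounds, options, and objective. Note that optimize() returns the bit-encoded best position, while the objective itself is always evaluated on the decoded integer positions.

import numpy as np

def cost(x):
    # x has shape (n_particles, dimensions_discrete) with integer entries
    return np.abs(x[:, 0] - 3) + np.abs(x[:, 1] - 12)

options = {"c1": 0.5, "c2": 0.5, "w": 0.9, "k": 5, "p": 2}
# Integer bounds per variable; the class tightens them to [0, 7] and [0, 15]
bounds = (np.array([0, 0]), np.array([10, 20]))
opt = DiscreteBoundedPSO(n_particles=20, dimensions_discrete=2,
                         options=options, bounds=bounds)
best_cost, best_pos = opt.optimize(cost, iters=50, verbose=False)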