Example #1
File: es.py Project: arturomf94/estool
    def __init__(self,
                 num_params,
                 c1=0.5 + np.log(2.0),
                 c2=0.5 + np.log(2.0),
                 w=0.5 / np.log(2.0),
                 popsize=256,
                 sigma_init=0.1,
                 weight_decay=0.01,
                 communication_topology='star'):
        self.num_params = num_params
        self.c1 = c1
        self.c2 = c2
        self.w = w
        self.popsize = popsize
        self.sigma_init = sigma_init
        self.weight_decay = weight_decay
        self.best_param = np.zeros(self.num_params)
        self.best_reward = 0
        self.pop_params = np.random.randn(self.popsize,
                                          self.num_params) * self.sigma_init
        self.pbest_params = self.pop_params.copy()  # copy so personal bests do not alias the population
        self.pbest_rewards = np.zeros(self.popsize)
        self.pop_vel = np.zeros((self.popsize, self.num_params))
        self.pop_rewards = np.zeros(self.popsize)
        self.gbest_param = self.pop_params[np.argmax(self.pop_rewards)]
        self.gbest_reward = np.max(self.pop_rewards)
        self.first_iteration = True

        # Import backend modules
        import pyswarms.backend as P
        self.P = P
        # Search bounds passed to create_swarm: every parameter lies in [-1, 1]
        l_lims = -np.ones(self.num_params)
        u_lims = np.ones(self.num_params)
        bounds = (l_lims, u_lims)
        # The Star topology is always used to compute gbest
        from pyswarms.backend.topology import Star
        self.global_topology = Star()
        self.communication_topology = communication_topology
        # Unless specified otherwise, communicate over the star topology.
        if self.communication_topology == 'random':
            from pyswarms.backend.topology import Random
            self.topology = Random()  # The Topology Class
        elif self.communication_topology == 'local':
            from pyswarms.backend.topology import Ring
            self.topology = Ring()  # The Topology Class
        else:
            self.topology = Star()  # Star is already imported above
        self.options = {'c1': self.c1, 'c2': self.c2, 'w': self.w}
        self.swarm = self.P.create_swarm(n_particles=self.popsize,
                                         dimensions=self.num_params,
                                         options=self.options,
                                         center=self.sigma_init,
                                         bounds=bounds)
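A note on the topology choice above: once the swarm is created, the best position comes from the selected topology, and the neighbor-based topologies take extra arguments that the star topology does not. A minimal sketch of the three calls, assuming pyswarms 1.x:

import numpy as np
import pyswarms.backend as P
from pyswarms.backend.topology import Star, Ring, Random

swarm = P.create_swarm(n_particles=10, dimensions=4,
                       options={'c1': 0.5, 'c2': 0.3, 'w': 0.9})
swarm.pbest_pos = swarm.position.copy()   # seed personal bests
swarm.pbest_cost = np.random.rand(10)     # toy personal-best costs

best_pos, best_cost = Star().compute_gbest(swarm)             # fully connected
best_pos, best_cost = Ring().compute_gbest(swarm, p=2, k=3)   # L2 distance, 3 neighbors
best_pos, best_cost = Random().compute_gbest(swarm, k=3)      # 3 random neighbors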
Example #2
def test_compute_position_return_values(swarm, bounds):
    """Test if compute_position() gives the expected shape and range"""
    topology = Star()
    p = topology.compute_position(swarm, bounds)
    assert p.shape == swarm.velocity.shape
    if bounds is not None:
        assert (bounds[0] <= p).all() and (bounds[1] >= p).all()
Example #3
def test_compute_velocity_return_values(swarm, clamp):
    """Test if compute_velocity() gives the expected shape and range"""
    topology = Star()
    v = topology.compute_velocity(swarm, clamp)
    assert v.shape == swarm.position.shape
    if clamp is not None:
        assert (clamp[0] <= v).all() and (clamp[1] >= v).all()
Example #4
def evolution_search(f, para_b):
    begin_time = datetime.now()
    Timestamps_list = []
    Target_list = []
    Parameters_list = []
    keys = list(para_b.keys())
    dim = len(keys)
    plog = PrintLog(keys)

    lower = np.ones(dim)
    upper = np.ones(dim)
    value_list = list(para_b.values())
    for i_v in range(dim):
        lower[i_v] = value_list[i_v][0]
        upper[i_v] = value_list[i_v][1]
    bounds = (lower, upper)
    plog.print_header(initialization=True)

    my_topology = Star()
    my_options ={'c1': 0.6, 'c2': 0.3, 'w': 0.4}
    my_swarm = P.create_swarm(n_particles=20, dimensions=dim, options=my_options, bounds=bounds)  # The Swarm Class

    iterations = 30  # number of PSO iterations
    for i in range(iterations):
        # Part 1: Update personal best

        my_swarm.current_cost = np.array(list(map(evaluate, my_swarm.position)))
        my_swarm.pbest_cost = np.array(list(map(evaluate, my_swarm.pbest_pos)))
        my_swarm.pbest_pos, my_swarm.pbest_cost = P.compute_pbest(my_swarm)  # Update and store

        # Part 2: Update global best
        # Note that gbest computation is dependent on your topology
        if np.min(my_swarm.pbest_cost) < my_swarm.best_cost:
            my_swarm.best_pos, my_swarm.best_cost = my_topology.compute_gbest(my_swarm)

        # Let's print our output
        #if i % 2 == 0:
        #    print('Iteration: {} | my_swarm.best_cost: {:.4f}'.format(i + 1, my_swarm.best_cost))

        # Part 3: Update position and velocity matrices
        # Note that position and velocity updates are dependent on your topology
        my_swarm.velocity = my_topology.compute_velocity(my_swarm)
        my_swarm.position = my_topology.compute_position(my_swarm)

        Parameters_list.append(my_swarm.best_pos.tolist())
        Target_list.append(1-my_swarm.best_cost)
        elapse_time = (datetime.now() - begin_time).total_seconds()
        Timestamps_list.append(elapse_time)
#        print("The best candidate: ", my_swarm.best_pos)
#        print("The best result: ", res[1])
        plog.print_step(my_swarm.best_pos, 1 - my_swarm.best_cost)
        if i == 0:
            plog.print_header(initialization=False)

    return Timestamps_list, Target_list, Parameters_list
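For reference, P.compute_pbest in the loop above does an elementwise comparison: a particle's personal best is replaced only where current_cost beats the stored pbest_cost. A minimal sketch of that semantics, assuming pyswarms 1.x:

import numpy as np
import pyswarms.backend as P

swarm = P.create_swarm(n_particles=5, dimensions=2,
                       options={'c1': 0.5, 'c2': 0.3, 'w': 0.9})
swarm.pbest_pos = swarm.position.copy()   # seed personal bests
swarm.pbest_cost = np.full(5, np.inf)     # nothing recorded yet
swarm.current_cost = np.random.rand(5)    # toy costs for this sketch
swarm.pbest_pos, swarm.pbest_cost = P.compute_pbest(swarm)
# every particle improved on inf, so pbest now mirrors the current state
assert (swarm.pbest_cost == swarm.current_cost).all()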
Example #5
def test_compute_gbest_return_values(swarm):
    """Test if compute_gbest() gives the expected return values"""
    topology = Star()
    expected_cost = 1
    expected_pos = np.array([1,2,3])
    pos, cost = topology.compute_gbest(swarm)
    assert cost == expected_cost
    assert (pos == expected_pos).all()
Example #6
def test_compute_gbest_return_values(swarm):
    """Test if compute_gbest() gives the expected return values"""
    topology = Star()
    expected_cost = 1.0002528364353296
    expected_pos = np.array([9.90438476e-01, 2.50379538e-03, 1.87405987e-05])
    pos, cost = topology.compute_gbest(swarm)
    assert cost == pytest.approx(expected_cost)
    assert pos == pytest.approx(expected_pos)
Example #7
    def __init__(self):
        self.swarm_size = 0
        self.my_topology = Star()  # star topology: every particle sees the swarm-wide best
        self.my_options = {'c1': 4, 'c2': 1, 'w': 0.3}
        self.phys_swarm_pos = []
        self.logical_swarm = 0  #self.logical_swarm = P.create_swarm(n_particles=0, dimensions=2, options=self.my_options, bounds=([0,0], [self.game_info.map_size[0], self.game_info.map_size[1]]) , init_pos=phys_swarm_pos, clamp=(0,10))
        self.iter_of_last_update = 0

        self.num_overlords = 0
Example #8
    def __init__(self):
        self.swarm_size = 0
        self.my_topology = Star()  # star topology; switch to Ring() for n-nearest-neighbor communication
        #options should be parameterized...
        self.my_options = {'c1': 4, 'c2': 1, 'w': 0.3}
        # might need dummy values for the following and wait until phys swarm exists to continue...
        self.phys_swarm_pos = []
        self.log_swarm = P.create_swarm(
            n_particles=0,
            dimensions=2,
            options=self.my_options,
            bounds=([0, 0],
                    [self.game_info.map_size[0], self.game_info.map_size[1]]),
            init_pos=self.phys_swarm_pos,
            clamp=(0, 10))

        self.iter_of_last_update = 0
        self.num_overlords = 0
        #load in cnn model
        self.brain = model.load("brain")
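As in the snippet above, create_swarm accepts explicit initial positions; the array must have shape (n_particles, dimensions) and fall inside bounds. A minimal sketch, assuming pyswarms 1.x, with hypothetical unit positions and map size:

import numpy as np
import pyswarms.backend as P

init_pos = np.array([[10.0, 20.0],   # hypothetical unit positions
                     [12.0, 22.0],
                     [15.0, 18.0]])
swarm = P.create_swarm(n_particles=3, dimensions=2,
                       options={'c1': 4, 'c2': 1, 'w': 0.3},
                       bounds=([0, 0], [64, 64]),  # hypothetical map size
                       init_pos=init_pos,
                       clamp=(0, 10))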
Example #9
                                              dimensions=dim,
                                              options=options,
                                              velocity_clamp=clamp,
                                              topology=my_topology)
elif (topoModel == "random"):
    from pyswarms.backend.topology import Random as PSOTopoRandom
    my_topology = PSOTopoRandom()
    options = {'c1': c1, 'c2': c2, 'w': w, 'k': k, 'p': p}
    optimizer = ps.single.GeneralOptimizerPSO(n_particles=swarm_size,
                                              dimensions=dim,
                                              options=options,
                                              velocity_clamp=clamp,
                                              topology=my_topology)
elif (topoModel == "star"):
    from pyswarms.backend.topology import Star
    my_topology = Star(static=False)
    options = {'c1': c1, 'c2': c2, 'w': w}  # Star needs no neighbor options
    optimizer = ps.single.GeneralOptimizerPSO(n_particles=swarm_size,
                                              dimensions=dim,
                                              options=options,
                                              velocity_clamp=clamp,
                                              topology=my_topology)
elif (topoModel == "local"):
    options = {'c1': c1, 'c2': c2, 'w': w, 'k': k, 'p': p}
    optimizer = ps.single.LocalBestPSO(n_particles=swarm_size,
                                       dimensions=dim,
                                       options=options,
                                       velocity_clamp=clamp)
else:
    print("Chose global best with ", swarm_size, " dims : ", dim,
          "options : ", options)
    print()
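For a self-contained version of the GeneralOptimizerPSO calls above, here is a minimal sketch, assuming pyswarms 1.x (where the bundled sphere benchmark lives in pyswarms.utils.functions.single_obj):

import pyswarms as ps
from pyswarms.backend.topology import Star
from pyswarms.utils.functions.single_obj import sphere

options = {'c1': 0.5, 'c2': 0.3, 'w': 0.9}
optimizer = ps.single.GeneralOptimizerPSO(n_particles=30,
                                          dimensions=5,
                                          options=options,
                                          topology=Star())
best_cost, best_pos = optimizer.optimize(sphere, iters=100)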
Example #10
 def optimizer(self, request):
     global parameters
     if request.param.__name__ == "GeneralOptimizerPSO":
         return request.param, {**parameters, **{"topology": Star()}}
     return request.param, parameters
Example #11
        return fAcc_test
#------------------------------------------------------------------------------
#############################  PSO Algorithm ##################################
#------------------------------------------------------------------------------
# ---------------------- Rjb
# Parameter setting
PSO_hyper = {
        'Max_iter'    :  50,  # number of PSO iterations
        'Pop_size'    :  30,  # swarm size (number of particles)
        'dimensions'  :  2,   # search-space dimensionality
        'c1'          :  0.5, # cognitive coefficient
        'c2'          :  0.3, # social coefficient
        'w'           :  0.9, # inertia weight
        }
# Optimization of FFNN1 --------------------------------> best pos = [498.98, 519.548]
my_topology = Star() # The Topology Class (GlobalBestPSO already uses a star topology internally)
options = {'c1': PSO_hyper['c1'], 'c2': PSO_hyper['c2'], 'w': PSO_hyper['w']} # Set-up hyperparameters
bounds = [(X_train1.shape[0], X_train1.shape[0]), (2*X_train1.shape[0]+1, 2*X_train1.shape[0]+1)]  # (lower_bounds, upper_bounds), one value per dimension
optimizer = ps.single.GlobalBestPSO(n_particles=PSO_hyper['Pop_size'], dimensions=PSO_hyper['dimensions'], 
                                    options=options, bounds=bounds)
cost1, pos1 = optimizer.optimize(F1_1, iters=PSO_hyper['Max_iter'])
# Parameter setting
PSO_hyper = {
        'Max_iter'    :  50,  # number of PSO iterations
        'Pop_size'    :  30,  # swarm size (number of particles)
        'dimensions'  :  2,   # search-space dimensionality
        'c1'          :  0.5, # cognitive coefficient
        'c2'          :  0.3, # social coefficient
        'w'           :  0.9, # inertia weight
        }
# Optimization of FFNN2 --------------------------------> best pos = [9.229, 11.102]
Example #12
    def __init__(
        self,
        n_particles,
        dimensions,
        options,
        bounds=None,
        bh_strategy="periodic",
        velocity_clamp=None,
        vh_strategy="unmodified",
        center=1.00,
        ftol=-np.inf,
        init_pos=None,
    ):
        """
        A custom optimizer modified from pyswarms.single.global_best
        https://github.com/ljvmiranda921/pyswarms/blob/master/pyswarms/single/global_best.py
        Attributes
        ----------
        n_particles : int
            number of particles in the swarm.
        dimensions : int
            number of dimensions in the space.
        options : dict with keys :code:`{'c1', 'c2', 'w'}`
            a dictionary containing the parameters for the specific
            optimization technique.
                * c1 : float
                    cognitive parameter
                * c2 : float
                    social parameter
                * w : float
                    inertia parameter
        bounds : tuple of numpy.ndarray, optional
            a tuple of size 2 where the first entry is the minimum bound while
            the second entry is the maximum bound. Each array must be of shape
            :code:`(dimensions,)`.
        bh_strategy : str
            a strategy for the handling of out-of-bounds particles.
        velocity_clamp : tuple, optional
            a tuple of size 2 where the first entry is the minimum velocity and
            the second entry is the maximum velocity. It sets the limits for
            velocity clamping.
        vh_strategy : str
            a strategy for the handling of the velocity of out-of-bounds particles.
        center : list or float (default is :code:`1.00`)
            an array of size :code:`dimensions`, or a scalar
        ftol : float
            relative error in objective_func(best_pos) acceptable for
            convergence. Default is :code:`-np.inf`
        init_pos : numpy.ndarray, optional
            option to explicitly set the particles' initial positions. Set to
            :code:`None` if you wish to generate the particles randomly.
        """
        super(PSOoptimizer, self).__init__(
            n_particles=n_particles,
            dimensions=dimensions,
            options=options,
            bounds=bounds,
            velocity_clamp=velocity_clamp,
            center=center,
            ftol=ftol,
            init_pos=init_pos,
        )

        # Initialize logger
        self.rep = Reporter(logger=logging.getLogger(__name__))
        # Initialize the resettable attributes
        self.reset()
        # Initialize the topology
        self.top = Star()
        self.bh = BoundaryHandler(strategy=bh_strategy)
        self.vh = VelocityHandler(strategy=vh_strategy)
        self.name = __name__

        # Populate memory of the handlers
        self.bh.memory = self.swarm.position
        self.vh.memory = self.swarm.position
        self.swarm.pbest_cost = np.full(self.swarm_size[0], np.inf)

        # Set reached requirement
        self.reached_requirement = 0
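Assuming PSOoptimizer subclasses pyswarms' SwarmOptimizer the way GlobalBestPSO does (which the docstring and the super().__init__ call suggest), constructing it would look like this sketch; the argument values are illustrative only:

import numpy as np

opt = PSOoptimizer(n_particles=20,
                   dimensions=3,
                   options={'c1': 0.5, 'c2': 0.3, 'w': 0.9},
                   bounds=(np.zeros(3), np.ones(3)),  # per-dimension limits
                   ftol=-np.inf)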
Example #13
def PSO_grad_optimize(n_particles, dimensions, bounds, init_pose, select_num,
                      iteration, lr, init_state, init_act):
    # Inputs:
    #   n_particles: number of particles
    #   dimensions:  number of variables
    #   bounds:      boundary of actions
    #   init_pose:   initial pose
    #   select_num:  number of particles kept
    #   iteration:   number of iterations
    #   lr:          learning rate for applying the gradient
    #   init_state:  initial state input for the gradient computation
    #   init_act:    initial action input for the gradient computation
    # Outputs:
    #   the best pose
    #   a list of selected positions

    # define the particle swarm
    my_topology = Star()  # The Topology Class
    my_options = {
        'c1': 0.1,
        'c2': 0.000,
        'w': 0.000
    }  # coefficients set arbitrarily (alternative values: 0.01, 0.01)
    my_swarm = P.create_swarm(n_particles=n_particles,
                              dimensions=dimensions,
                              options=my_options,
                              bounds=bounds,
                              init_pos=init_pose)  # The Swarm Class

    for i in range(iteration):
        # Part 1: Update personal best
        step_num = int(dimensions / actor_critic.act_dim)
        my_swarm.current_cost = f(my_swarm.position)
        cur_action = my_swarm.position.reshape(
            (n_particles, step_num, actor_critic.act_dim))
        gradients = actor_critic.compute_gradient(step_num, init_act,
                                                  init_state, cur_action)

        my_swarm.pbest_pos = cur_action.reshape(
            (-1, dimensions)) + gradients[0].reshape((-1, dimensions)) * lr

        my_swarm.pbest_pos = np.clip(my_swarm.pbest_pos, -0.015, 0.015)
        my_swarm.pbest_cost = f(my_swarm.pbest_pos)
        print(my_swarm.pbest_cost)

        # Part 2: Update global best
        # Note that gbest computation is dependent on your topology
        if np.min(my_swarm.pbest_cost) < my_swarm.best_cost:
            my_swarm.best_pos, my_swarm.best_cost = my_topology.compute_gbest(
                my_swarm)

        # Let's print our output

        print('Iteration: {} | my_swarm.best_cost: {:.4f}'.format(
            i + 1, my_swarm.best_cost))

        # Part 3: Update position and velocity matrices
        # Note that position and velocity updates are dependent on your topology
        my_swarm.velocity = my_topology.compute_velocity(my_swarm)
        my_swarm.position = my_topology.compute_position(my_swarm)
        my_swarm.position = np.clip(my_swarm.position, -0.015, 0.015)

    best_index = my_swarm.pbest_cost.argsort()[:select_num]
    print(best_index)
    good_actions = my_swarm.pbest_pos[best_index].tolist()
    good_actions.append(my_swarm.best_pos)
    return np.clip(my_swarm.best_pos, -0.015, 0.015), np.asarray(good_actions)
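The selection at the end keeps the select_num personal bests with the lowest cost. A quick illustration of the argsort idiom with toy numbers (not from the snippet):

import numpy as np

costs = np.array([0.9, 0.1, 0.5, 0.3])  # toy pbest costs
best_index = costs.argsort()[:2]        # indices of the two lowest costs
print(best_index)                       # [1 3]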
Example #14

@pytest.fixture(scope="module")
def binary_reset():
    """Returns a BinaryPSO instance that has been run and reset to check
    default value"""
    pso = BinaryPSO(10, 2, {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2})
    pso.optimize(sphere_func, 10, verbose=0)
    pso.reset()
    return pso


@pytest.fixture
def options():
    """Default options dictionary for most PSO use-cases"""
    options_ = {"c1": 0.5, "c2": 0.7, "w": 0.5, "k": 2, "p": 2, "r": 1}
    return options_


@pytest.fixture(params=[
                Star(),
                Ring(static=False), Ring(static=True),
                Pyramid(static=False), Pyramid(static=True),
                Random(static=False), Random(static=True),
                VonNeumann()
                ])
def topology(request):
    """Parametrized topology parameter"""
    topology_ = request.param
    return topology_
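A hypothetical test consuming this parametrized fixture could look like the sketch below; the swarm fixture is assumed to exist elsewhere in the suite, and the extra keyword arguments are passed uniformly because each topology's compute_gbest ignores the ones it does not need (pyswarms 1.x):

import numpy as np

def test_compute_gbest_shape(topology, swarm):
    """Sketch: every topology should return a single best position"""
    pos, cost = topology.compute_gbest(swarm, p=2, k=2, r=1)
    assert pos.shape == (swarm.dimensions,)
    assert np.ndim(cost) == 0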
Example #15
def test_neighbor_idx(swarm):
    """Test if the neighbor_idx attribute is assigned"""
    topology = Star()
    topology.compute_gbest(swarm)
    assert topology.neighbor_idx is not None