Code Example #1
def test_compute_velocity_return_values(swarm, clamp):
    """Test if compute_velocity() gives the expected shape and range"""
    topology = Star()
    v = topology.compute_velocity(swarm, clamp)
    assert v.shape == swarm.position.shape
    if clamp is not None:
        assert (clamp[0] <= v).all() and (clamp[1] >= v).all()
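The swarm and clamp arguments above are pytest fixtures supplied elsewhere in the test suite (typically a conftest.py). A minimal sketch of what they might look like, assuming pyswarms' Swarm container from pyswarms.backend.swarms; the particle count, dimensionality, and option values are placeholders only:

import numpy as np
import pytest
from pyswarms.backend.swarms import Swarm


@pytest.fixture(params=[None, (0, 1), (-1, 1)])
def clamp(request):
    """Velocity clamps to exercise: no clamp plus two (min, max) pairs."""
    return request.param


@pytest.fixture
def swarm():
    """A small hand-built swarm: 10 particles in 3 dimensions."""
    position = np.random.uniform(-1.0, 1.0, (10, 3))
    return Swarm(
        position=position,
        velocity=np.zeros((10, 3)),
        pbest_pos=position.copy(),    # personal bests start at the current positions
        best_pos=position[0].copy(),  # an arbitrary initial global best
        options={"c1": 0.5, "c2": 0.3, "w": 0.9},
    )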
Code Example #2
def evolution_search(f, para_b):
    begin_time = datetime.now()
    Timestamps_list = []
    Target_list = []
    Parameters_list = []
    keys = list(para_b.keys())
    dim = len(keys)
    plog = PrintLog(keys)

    lower = np.ones(dim)
    upper = np.ones(dim)
    value_list = list(para_b.values())
    for i_v in range(dim):
        lower[i_v] = value_list[i_v][0]
        upper[i_v] = value_list[i_v][1]
    bounds = (lower, upper)
    plog.print_header(initialization=True)

    my_topology = Star()
    my_options = {'c1': 0.6, 'c2': 0.3, 'w': 0.4}
    my_swarm = P.create_swarm(n_particles=20, dimensions=dim, options=my_options, bounds=bounds)  # The Swarm Class

    iterations = 30  # number of PSO iterations
    for i in range(iterations):
        # Part 1: Update personal best

        # for evaluated_result in map(evaluate, my_swarm.position):
        #     my_swarm.current_cost = np.append(evaluated_result)
        # for best_personal_result in map(evaluate, my_swarm.pbest_pos):  # Compute personal best pos
        #     my_swarm.pbest_cost = np.append(my_swarm.pbest_cost, best_personal_result)
        my_swarm.current_cost = np.array(list(map(evaluate, my_swarm.position)))
        #print(my_swarm.current_cost)
        my_swarm.pbest_cost = np.array(list(map(evaluate, my_swarm.pbest_pos)))
        my_swarm.pbest_pos, my_swarm.pbest_cost = P.compute_pbest(my_swarm)  # Update and store

        # Part 2: Update global best
        # Note that gbest computation is dependent on your topology
        if np.min(my_swarm.pbest_cost) < my_swarm.best_cost:
            my_swarm.best_pos, my_swarm.best_cost = my_topology.compute_gbest(my_swarm)

        # Let's print our output
        #if i % 2 == 0:
        #    print('Iteration: {} | my_swarm.best_cost: {:.4f}'.format(i + 1, my_swarm.best_cost))

        # Part 3: Update position and velocity matrices
        # Note that position and velocity updates are dependent on your topology
        my_swarm.velocity = my_topology.compute_velocity(my_swarm)
        my_swarm.position = my_topology.compute_position(my_swarm)

        Parameters_list.append(my_swarm.best_pos.tolist())
        Target_list.append(1-my_swarm.best_cost)
        elapse_time = (datetime.now() - begin_time).total_seconds()
        Timestamps_list.append(elapse_time)
#        print("The best candidate: ", my_swarm.best_pos)
#        print("The best result: ", res[1])
        plog.print_step(my_swarm.best_pos, 1 - my_swarm.best_cost)
        if i == 0:
            plog.print_header(initialization=False)

    return Timestamps_list, Target_list, Parameters_list
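A hedged sketch of how this search helper might be invoked. Note that the body calls a module-level evaluate function rather than its f argument, and it also relies on PrintLog, pyswarms.backend (imported as P), Star, numpy, and datetime being available in the same module; the parameter names and bounds below are placeholders only:

# hypothetical search space: each entry maps a name to (lower, upper) bounds
param_bounds = {
    "learning_rate": (1e-4, 1e-1),
    "dropout": (0.0, 0.5),
}

timestamps, targets, best_params = evolution_search(evaluate, param_bounds)
print("best observed target:", max(targets))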
Code Example #3
class Legion(sc2.BotAI):
    def __init__(self):
        self.swarm_size = 0
        self.my_topology = Star()  # connect to n nearest neighbors
        self.my_options = {'c1': 4, 'c2': 1, 'w': 0.3}
        self.phys_swarm_pos = []
        self.logical_swarm = 0  #self.logical_swarm = P.create_swarm(n_particles=0, dimensions=2, options=self.my_options, bounds=([0,0], [self.game_info.map_size[0], self.game_info.map_size[1]]) , init_pos=phys_swarm_pos, clamp=(0,10))
        self.iter_of_last_update = 0

        self.num_overlords = 0

    # stagger calls per iteration to enhance speed maybe?
    async def on_step(self, iteration):
        await self.distribute_workers()
        await self.build_overlords()
        await self.scout()
        await self.build_workers()
        await self.build_queens()
        await self.inject_larva()
        await self.build_extractor()
        await self.build_offensive_buildings()
        await self.expand()
        await self.upgrade()
        await self.build_swarm()
        if self.units(MUTALISK).exists and iteration % 5 == 0:
            await self.attack(self.units(MUTALISK), iteration)
#        if self.units(ZERGLING).amount > 35 and iteration % 10 == 0:
#           await self.attack(self.units(ZERGLING))
        if self.units(ROACH).amount > 5 and iteration % 4 == 0:
            await self.attack(self.units(ROACH), iteration)

    # figure out a way for the overlords to not send themselves to their death...
    async def scout(self):
        if self.num_overlords != self.units(OVERLORD).amount:
            self.num_overlords = self.units(OVERLORD).amount
            orders = []
            for unit, pos in zip(self.units(OVERLORD),
                                 self.expansion_locations):
                orders.append(unit.move(pos))
            await self.do_actions(orders)

    # reverse these if statements to check existence before cost...
    async def build_offensive_buildings(self):
        hatcheries = self.townhalls.ready
        if self.can_afford(SPAWNINGPOOL) and not self.units(
                SPAWNINGPOOL).exists and not self.already_pending(
                    SPAWNINGPOOL):
            await self.build(SPAWNINGPOOL, near=hatcheries.first)
            return
        # I want to build this near the main ramp...
#        if self.can_afford(SPINECRAWLER) and self.units(SPINECRAWLER).amount < 4:
#            await self.build(SPINECRAWLER, near=hatcheries.first, max_distance=8)
#            return
        if self.can_afford(ROACHWARREN) and self.units(
                SPAWNINGPOOL
        ).ready and not self.units(
                ROACHWARREN).exists and not self.already_pending(ROACHWARREN):
            await self.build(ROACHWARREN, near=hatcheries.first)
            return
        if self.can_afford(EVOLUTIONCHAMBER) and len(
                self.units(EVOLUTIONCHAMBER)) < 1:
            await self.build(EVOLUTIONCHAMBER, near=hatcheries.first)
        if self.can_afford(LAIR) and not self.units(
                LAIR).exists and not self.already_pending(LAIR):
            await self.do(self.townhalls.first.build(LAIR))
            return
        if self.can_afford(SPIRE) and self.units(
                SPIRE).amount < 2 and not self.already_pending(SPIRE):
            await self.build(SPIRE, near=hatcheries.first)
            return
        if self.can_afford(INFESTATIONPIT) and self.units(
                LAIR).ready and not self.units(
                    INFESTATIONPIT).exists and not self.already_pending(
                        INFESTATIONPIT):
            await self.build(INFESTATIONPIT, near=hatcheries.first)
            return
        if self.can_afford(HIVE) and not self.units(
                HIVE).exists and not self.already_pending(HIVE):
            await self.do(self.townhalls.first.build(HIVE))
            return
        if self.can_afford(GREATERSPIRE) and self.units(HIVE).ready \
                and self.units(SPIRE).ready and not self.already_pending(GREATERSPIRE) and not self.units(GREATERSPIRE).exists:
            await self.do(self.units(SPIRE).first.build(GREATERSPIRE))

    # more succinct way of writing this... refactor this!!!
    # check timing to ensure spire upgrades don't delay the others...
    # incorporate the army-specific upgrades for roaches and hydras
    async def upgrade(self):
        if self.units(EVOLUTIONCHAMBER).ready.idle.exists:
            for evo in self.units(EVOLUTIONCHAMBER).ready.idle:
                abilities = await self.get_available_abilities(evo)
                targetAbilities = [
                    AbilityId.RESEARCH_ZERGMISSILEWEAPONSLEVEL1,
                    AbilityId.RESEARCH_ZERGMISSILEWEAPONSLEVEL2,
                    AbilityId.RESEARCH_ZERGMISSILEWEAPONSLEVEL3,
                    AbilityId.RESEARCH_ZERGGROUNDARMORLEVEL1,
                    AbilityId.RESEARCH_ZERGGROUNDARMORLEVEL2,
                    AbilityId.RESEARCH_ZERGGROUNDARMORLEVEL3
                ]
                if self.units(GREATERSPIRE).exists:
                    targetAbilities.extend([
                        AbilityId.RESEARCH_ZERGMELEEWEAPONSLEVEL1,
                        AbilityId.RESEARCH_ZERGMELEEWEAPONSLEVEL2,
                        AbilityId.RESEARCH_ZERGMELEEWEAPONSLEVEL3
                    ])
                for ability in targetAbilities:
                    if ability in abilities:
                        if self.can_afford(ability):
                            err = await self.do(evo(ability))
                            if not err:
                                break

        if self.units(GREATERSPIRE).ready.idle.exists:
            gs = self.units(GREATERSPIRE).ready.idle.random
            abilities = await self.get_available_abilities(gs)
            targetAbilities = [
                AbilityId.RESEARCH_ZERGFLYERATTACKLEVEL1,
                AbilityId.RESEARCH_ZERGFLYERATTACKLEVEL2,
                AbilityId.RESEARCH_ZERGFLYERATTACKLEVEL3,
                AbilityId.RESEARCH_ZERGFLYERARMORLEVEL1,
                AbilityId.RESEARCH_ZERGFLYERARMORLEVEL2,
                AbilityId.RESEARCH_ZERGFLYERARMORLEVEL3
            ]
            for ability in targetAbilities:
                if self.can_afford(ability) and ability in abilities:
                    err = await self.do(gs(ability))
                    if not err:
                        break

    # FUTURE: incorporate dynamic composition by either tech level or AI/ML
    async def build_swarm(self):
        if self.units(SPAWNINGPOOL).ready:
            larvae = self.units(LARVA).ready.noqueue
            for larva in larvae:
                if self.supply_left > 0:
                    if self.units(SPIRE).ready:
                        #                        if self.units(CORRUPTOR).amount < 5 and self.can_afford(CORRUPTOR):
                        #                           await self.do(larva.train(CORRUPTOR))
                        #                           continue
                        #                        if self.units(HIVE).ready and self.units(BROODLORD).amount < 5 and self.can_afford(BROODLORD):
                        #                          await self.do(self.units(CORRUPTOR).random.train(BROODLORD))
                        if self.can_afford(
                                MUTALISK
                        ) and self.supply_left > 2:  # 30 muta = 60 c, possibly set to max cap...
                            await self.do(larva.train(MUTALISK))
                            continue
                    if self.units(
                            ROACHWARREN
                    ).ready and self.supply_left > 2 and self.units(
                            ROACH).amount < 30 and self.can_afford(
                                ROACH):  # 30 roach = 60 c
                        await self.do(larva.train(ROACH))
                        continue


#                    if self.units(ZERGLING).amount < 60 and self.can_afford(ZERGLING):  # 60 lings = 30 c
#                       await self.do(larva.train(ZERGLING))
#                       continue

# This could be more succinct...

#need to consider a separate swarm list in order to allow damaged units to RTB
# investigate multithreading and running each unit in own thread to avoid a double for loop...
# might need to pursue subswarms or multiple swarms to avoid reinitializing the swarm so often
# there is a parameter for multiprocessing...
# This algo might only work for flying units.
#might have to build a multiswarm architecture for overlords to pass data to mutalisks...
# investigate periodic reinitialization to break convergence after defeating the enemy army...

    async def attack(self, phys_swarm, iteration):
        if phys_swarm.amount > 3:
            orders = []
            # reinitialize swarm if needed
            if phys_swarm.amount > self.swarm_size + 3 or iteration > self.iter_of_last_update + 75:
                self.swarm_size = phys_swarm.amount
                self.phys_swarm_pos = []
                for unit in phys_swarm:
                    self.phys_swarm_pos.append(
                        [unit.position.x, unit.position.y])
                self.phys_swarm_pos = np.array(self.phys_swarm_pos)
                self.logical_swarm = P.create_swarm(
                    n_particles=phys_swarm.amount,
                    dimensions=2,
                    options=self.my_options,
                    bounds=([0, 0], [
                        self.game_info.map_size[0], self.game_info.map_size[1]
                    ]),
                    init_pos=self.phys_swarm_pos,
                    clamp=(0, 4.0))
                self.iter_of_last_update = iteration

            self.logical_swarm.current_cost = self.fitness(
                self.logical_swarm.position, phys_swarm)
            self.logical_swarm.pbest_cost = self.fitness(
                self.logical_swarm.pbest_pos, phys_swarm)
            self.logical_swarm.pbest_pos, self.logical_swarm.pbest_cost = P.compute_pbest(
                self.logical_swarm)

            if np.min(self.logical_swarm.pbest_cost
                      ) < self.logical_swarm.best_cost:
                self.logical_swarm.best_pos, self.logical_swarm.best_cost = self.my_topology.compute_gbest(
                    self.logical_swarm)

            self.logical_swarm.velocity = self.my_topology.compute_velocity(
                self.logical_swarm)
            self.logical_swarm.position = self.my_topology.compute_position(
                self.logical_swarm)
            # Extract positions from above and issue movement/attack orders
            # loop through np array compiling positions and appending them to orders list.

            # I haven't seen this behavior....
            #            wounded_units = phys_swarm.filter(lambda u: u.health_percentage <= .7)
            #            phys_swarm = phys_swarm.filter(lambda u: u.health_percentage > .7)

            #            for unit in wounded_units:
            #                unit.move(self.townhalls.first.position)
            #                print("Retreating wounded unit")

            # The mutas are still ignoring nearby enemies.
            for row, unit in zip(self.logical_swarm.position, phys_swarm):
                if self.known_enemy_units.closer_than(unit.radar_range,
                                                      unit.position).exists:
                    orders.append(unit.stop())
                    orders.append(
                        unit.attack(
                            self.known_enemy_units.closest_to(
                                unit.position).position))
                elif self.known_enemy_units.closer_than(
                        20, Point2(Pointlike((row[0], row[1])))).exists:
                    orders.append(
                        unit.attack(
                            self.known_enemy_units.closest_to(
                                Point2(Pointlike((row[0], row[1]))))))
                else:
                    orders.append(
                        unit.move(Point2(Pointlike((row[0], row[1])))))

            await self.do_actions(orders)

    def fitness(self, logical_swarm_pos, phys_swarm):
        #account for own health, enemy health/splash&anti air damage/
        # upgrade this function...
        # revert this back to a regular one unit function and apply the pyswarm evaluate method
        cost = []

        # the coefficients within can be optimized...
        if self.known_enemy_units.exists:
            for logical_pos in logical_swarm_pos:
                target_point = Point2(
                    Pointlike((logical_pos[0], logical_pos[1])))
                cost.append( \
                        (1-(10/phys_swarm.amount))*self.known_enemy_units.closest_distance_to(target_point) + \
                        (10/phys_swarm.amount)*self.townhalls.first.distance_to(target_point) + \
                        (2 * self.known_enemy_units.closer_than(10, target_point).amount - phys_swarm.closer_than(10, target_point).amount) \
                        )
        else:
            for logical_pos in logical_swarm_pos:
                cost.append(40 / phys_swarm.center.distance2_to(
                    Point2(Pointlike((logical_pos[0], logical_pos[1])))))
        return np.array(cost)

    # Economic Functions

    async def build_workers(self):
        for larva in self.units(LARVA).ready.noqueue:
            if self.can_afford(
                    DRONE
            ) and self.workers.amount < self.townhalls.amount * 16 and self.workers.amount < 80:
                await self.do(larva.train(DRONE))

    async def build_queens(self):
        if self.units(SPAWNINGPOOL).ready.exists:
            for hatchery in self.units(HATCHERY).ready.noqueue:
                if self.can_afford(QUEEN) and self.units(
                        QUEEN).amount < self.townhalls.amount:
                    await self.do(hatchery.train(QUEEN))

    async def inject_larva(self):
        for queen, hatchery in zip(
                self.units(QUEEN).idle, self.townhalls.ready):
            abilities = await self.get_available_abilities(queen)
            if AbilityId.EFFECT_INJECTLARVA in abilities:
                await self.do(queen(EFFECT_INJECTLARVA, hatchery))

    async def build_overlords(self):
        if self.supply_left < 9 and self.units(OVERLORD).amount < 26:
            larvae = self.units(LARVA).ready.noqueue
            for larva in larvae:
                if self.can_afford(
                        OVERLORD) and not self.already_pending(OVERLORD):
                    await self.do(larva.train(OVERLORD))

    async def build_extractor(self):
        for hatchery in self.units(HATCHERY).ready:
            vespene = self.state.vespene_geyser.closer_than(15.0, hatchery)
            for v in vespene:
                if not self.can_afford(EXTRACTOR):
                    break
                worker = self.select_build_worker(v.position)
                if worker is None:
                    break
                if not self.units(EXTRACTOR).closer_than(1.0, v).exists:
                    await self.do(worker.build(EXTRACTOR, v))

    # upgrade the logic of this to expand when a base becomes exhausted...
    async def expand(self):
        if self.townhalls.amount < 3 and self.can_afford(HATCHERY):
            await self.expand_now()
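A minimal launch sketch for this bot, assuming the legacy python-sc2 API that the class is written against; the map name and the opponent settings are placeholders:

import sc2
from sc2 import Race, Difficulty
from sc2.player import Bot, Computer

sc2.run_game(
    sc2.maps.get("AbyssalReefLE"),
    [Bot(Race.Zerg, Legion()), Computer(Race.Terran, Difficulty.Medium)],
    realtime=False,
)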
Code Example #4
File: es.py Project: arturomf94/estool
class Pyswarms:
    ''' Pyswarms Wrapper '''
    def __init__(self,
                 num_params,
                 c1=0.5 + np.log(2.0),
                 c2=0.5 + np.log(2.0),
                 w=0.5 / np.log(2.0),
                 popsize=256,
                 sigma_init=0.1,
                 weight_decay=0.01,
                 communication_topology='star'):
        self.num_params = num_params
        self.c1 = c1
        self.c2 = c2
        self.w = w
        self.popsize = popsize
        self.sigma_init = sigma_init
        self.weight_decay = weight_decay
        self.best_param = np.zeros(self.num_params)
        self.best_reward = 0
        self.pop_params = np.random.randn(self.popsize,
                                          self.num_params) * self.sigma_init
        self.pbest_params = self.pop_params
        self.pbest_rewards = np.zeros(self.popsize)
        self.pop_vel = np.zeros((self.popsize, self.num_params))
        self.pop_rewards = np.zeros(self.popsize)
        self.gbest_param = self.pop_params[np.argmax(self.pop_rewards)]
        self.gbest_reward = np.max(self.pop_rewards)
        self.first_iteration = True

        # Import backend modules
        l_lims = -np.ones(self.num_params)
        u_lims = np.ones(self.num_params)
        bounds = (l_lims, u_lims)
        import pyswarms.backend as P
        self.P = P
        # Global topology will always be used to compute gbest
        from pyswarms.backend.topology import Star
        self.global_topology = Star()
        self.communication_topology = communication_topology
        # Unless specified, use the star topology.
        if self.communication_topology == 'random':
            from pyswarms.backend.topology import Random
            self.topology = Random()  # The Topology Class
        elif self.communication_topology == 'local':
            from pyswarms.backend.topology import Ring
            self.topology = Ring()  # The Topology Class
        else:
            from pyswarms.backend.topology import Star
            self.topology = Star()  # The Topology Class
        self.options = {'c1': self.c1, 'c2': self.c2, 'w': self.w}
        self.swarm = self.P.create_swarm(n_particles=self.popsize,
                                         dimensions=self.num_params,
                                         options=self.options,
                                         center=self.sigma_init,
                                         bounds=bounds)

    def ask(self):
        '''returns a list of parameters'''
        if self.first_iteration:
            self.solutions = np.copy(self.swarm.position)
            return self.pop_params

        self.swarm.velocity = self.topology.compute_velocity(self.swarm)
        self.swarm.position = self.topology.compute_position(self.swarm)
        self.solutions = np.copy(self.swarm.position)
        return self.solutions

    def tell(self, reward_table_result):
        # input must be a numpy float array
        assert (len(reward_table_result) == self.popsize
                ), "Inconsistent reward_table size reported."

        reward_table = -np.array(reward_table_result)  # Maximize!

        if self.weight_decay > 0:
            l2_decay = compute_weight_decay(self.weight_decay, self.solutions)
            reward_table += l2_decay

        self.swarm.current_cost = reward_table  #pyswarm

        # If it's the first iteration, initialize pbest_cost, best_reward and
        # best_param
        if self.first_iteration:
            self.first_iteration = False
            self.swarm.pbest_cost = np.copy(self.swarm.current_cost)
            self.best_reward = np.min(self.swarm.current_cost)
            self.best_param = self.solutions[np.argmin(
                self.swarm.current_cost)]
        else:
            self.swarm.pbest_pos, self.swarm.pbest_cost = self.P.compute_pbest(
                self.swarm)  # Update and store

        # Update gbest.
        if self.communication_topology == 'random':
            self.swarm.best_pos, self.swarm.best_cost = self.topology.compute_gbest(
                self.swarm, k=(self.popsize * 3) // 4)  # k = 4 typically
        elif self.communication_topology == 'local':
            self.swarm.best_pos, self.swarm.best_cost = self.topology.compute_gbest(
                self.swarm, p=2, k=2)
        else:  # star
            self.swarm.best_pos, self.swarm.best_cost = self.topology.compute_gbest(
                self.swarm)

        # Update best_reward and best_param if pertinent.
        if np.min(self.swarm.pbest_cost) < self.best_reward:
            self.best_reward = np.min(self.swarm.pbest_cost)
            self.best_param = self.solutions[np.argmin(self.swarm.pbest_cost)]

    def rms_stdev(self):
        return np.std(self.solutions)

    def best_param(self):
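        # Note: the self.best_param array assigned in __init__ shadows this
        # method on instances, so calling it would fail; use result() instead.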
        return self.best_param

    def current_param(self):
        return self.solutions[np.argmin(self.swarm.current_cost)]

    def result(
        self
    ):  # return best params so far, along with historically best reward, curr reward, sigma_init
        return (self.best_param, -self.best_reward,
                -np.min(self.swarm.current_cost), self.sigma_init)
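A hedged sketch of the ask/tell loop this wrapper is built for (the estool solver interface). The objective below is a stand-in, and weight_decay=0 sidesteps the external compute_weight_decay helper the class otherwise calls:

import numpy as np

solver = Pyswarms(num_params=10, popsize=64, weight_decay=0)
for generation in range(200):
    solutions = solver.ask()                    # candidate parameter vectors
    rewards = -np.sum(solutions ** 2, axis=1)   # toy reward: maximize -||x||^2
    solver.tell(rewards)
best_params, best_reward, current_reward, _ = solver.result()
print(best_reward)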
Code Example #5
File: optimize.py Project: lixinyuu/sklearn-pso
class PSOoptimizer(SwarmOptimizer):
    def __init__(
        self,
        n_particles,
        dimensions,
        options,
        bounds=None,
        bh_strategy="periodic",
        velocity_clamp=None,
        vh_strategy="unmodified",
        center=1.00,
        ftol=-np.inf,
        init_pos=None,
    ):
        """
        A custom optimizer modified from pyswarms.single.global_best
        https://github.com/ljvmiranda921/pyswarms/blob/master/pyswarms/single/global_best.py
        Attributes
        ----------
        n_particles : int
            number of particles in the swarm.
        dimensions : int
            number of dimensions in the space.
        options : dict with keys :code:`{'c1', 'c2', 'w'}`
            a dictionary containing the parameters for the specific
            optimization technique.
                * c1 : float
                    cognitive parameter
                * c2 : float
                    social parameter
                * w : float
                    inertia parameter
        bounds : tuple of numpy.ndarray, optional
            a tuple of size 2 where the first entry is the minimum bound while
            the second entry is the maximum bound. Each array must be of shape
            :code:`(dimensions,)`.
        bh_strategy : str
            a strategy for the handling of out-of-bounds particles.
        velocity_clamp : tuple, optional
            a tuple of size 2 where the first entry is the minimum velocity and
            the second entry is the maximum velocity. It sets the limits for
            velocity clamping.
        vh_strategy : str
            a strategy for the handling of the velocity of out-of-bounds particles.
        center : float or list (default is :code:`1.00`)
            a scalar or an array of size :code:`dimensions`
        ftol : float
            relative error in objective_func(best_pos) acceptable for
            convergence. Default is :code:`-np.inf`
        init_pos : numpy.ndarray, optional
            option to explicitly set the particles' initial positions. Set to
            :code:`None` if you wish to generate the particles randomly.
        """
        super(PSOoptimizer, self).__init__(
            n_particles=n_particles,
            dimensions=dimensions,
            options=options,
            bounds=bounds,
            velocity_clamp=velocity_clamp,
            center=center,
            ftol=ftol,
            init_pos=init_pos,
        )

        # Initialize logger
        self.rep = Reporter(logger=logging.getLogger(__name__))
        # Initialize the resettable attributes
        self.reset()
        # Initialize the topology
        self.top = Star()
        self.bh = BoundaryHandler(strategy=bh_strategy)
        self.vh = VelocityHandler(strategy=vh_strategy)
        self.name = __name__

        # Populate memory of the handlers
        self.bh.memory = self.swarm.position
        self.vh.memory = self.swarm.position
        self.swarm.pbest_cost = np.full(self.swarm_size[0], np.inf)

        # Set reached requirement
        self.reached_requirement = 0

    def get_current_pos(self):
        return self.swarm.position

    def update(self, iters, current_cost, **kwargs):
        """
        Optimize the swarm for one iteration by providing its cost
        manually.
        Parameters
        ----------
        iters : int
            the current iteration (used only for logging)
        current_cost : ndarray
            the current cost which should be provided
        """

        self.rep.log("Obj. func. args: {}".format(kwargs), lvl=logging.DEBUG)
        self.rep.log(
            "Optimize for {} iters with {}".format(iters, self.options),
            lvl=logging.DEBUG,
        )

        self.swarm.current_cost = current_cost
        self.swarm.pbest_pos, self.swarm.pbest_cost = compute_pbest(self.swarm)
        # Set best_cost_yet_found for ftol
        best_cost_yet_found = self.swarm.best_cost
        self.swarm.best_pos, self.swarm.best_cost = self.top.compute_gbest(
            self.swarm)
        # fmt: on
        # self.rep.hook(best_cost=self.swarm.best_cost)
        # Save to history
        hist = self.ToHistory(
            best_cost=self.swarm.best_cost,
            mean_pbest_cost=np.mean(self.swarm.pbest_cost),
            mean_neighbor_cost=self.swarm.best_cost,
            position=self.swarm.position,
            velocity=self.swarm.velocity,
        )
        self._populate_history(hist)

        # Verify stop criteria based on the relative acceptable cost ftol
        relative_measure = self.ftol * (1 + np.abs(best_cost_yet_found))
        if (np.abs(self.swarm.best_cost - best_cost_yet_found) <
                relative_measure):
            self.reached_requirement = 1

        # Perform velocity and position updates
        self.swarm.velocity = self.top.compute_velocity(
            self.swarm, self.velocity_clamp, self.vh, self.bounds)
        self.swarm.position = self.top.compute_position(
            self.swarm, self.bounds, self.bh)

    def finalize(self):
        """
        Obtain the final best_cost and the final best_position
        """
        final_best_cost = self.swarm.best_cost.copy()
        final_best_pos = self.swarm.pbest_pos[
            self.swarm.pbest_cost.argmin()].copy()
        # Write report in log and return final cost and position
        self.rep.log(
            "Optimization finished | best cost: {}, best pos: {}".format(
                final_best_cost, final_best_pos),
            lvl=logging.INFO,
        )
        return (final_best_cost, final_best_pos)

    def optimize(self, objective_func, iters, **kwargs):
        #
        for _iter in range(iters):
            swarm_pos = self.get_current_pos()
            current_cost = objective_func(swarm_pos, **kwargs)
            self.update(_iter, current_cost)
        return self.finalize()
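A hedged usage sketch with a toy objective (the sphere function, one cost per particle). It assumes the surrounding optimize.py module already imports SwarmOptimizer, Reporter, compute_pbest, Star, and the boundary/velocity handlers that the constructor references:

import numpy as np

def sphere(x):
    # x has shape (n_particles, dimensions); return one cost per particle
    return np.sum(x ** 2, axis=1)

opt = PSOoptimizer(
    n_particles=30,
    dimensions=5,
    options={"c1": 0.5, "c2": 0.3, "w": 0.9},
    bounds=(-np.ones(5), np.ones(5)),
)
best_cost, best_pos = opt.optimize(sphere, iters=100)
print(best_cost, best_pos)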
Code Example #6
File: legion_v1-3.py Project: kmheckel/Starcraft2AI
class Legion(sc2.BotAI):

    def __init__(self):
        self.swarm_size = 0
        self.my_topology = Star() # connect to n nearest neighbors
        self.my_options = {'c1': 4, 'c2': 1, 'w': 0.3}
        self.phys_swarm_pos = []
        self.logical_swarm = 0 #self.logical_swarm = P.create_swarm(n_particles=0, dimensions=2, options=self.my_options, bounds=([0,0], [self.game_info.map_size[0], self.game_info.map_size[1]]) , init_pos=phys_swarm_pos, clamp=(0,10))

    # stagger calls per iteration to enhance speed maybe?
    async def on_step(self, iteration):
        await self.distribute_workers()
        await self.build_overlords()
        await self.build_workers()
        await self.build_queens()
        await self.inject_larva()
        await self.build_extractor()
        await self.build_offensive_buildings()
        await self.expand()
        await self.build_swarm()
        if self.units(MUTALISK).exists and iteration % 5 == 0:
            await self.attack(self.units(MUTALISK))

    # reverse these if statements to check existence before cost...
    async def build_offensive_buildings(self):
        hatcheries = self.townhalls.ready
        if self.can_afford(SPAWNINGPOOL) and not self.units(SPAWNINGPOOL).exists:
            await self.build(SPAWNINGPOOL, near=hatcheries.first)
            return
        # this needs to be adjusted to check for the spawning pool...
        if self.can_afford(ROACHWARREN) and self.units(SPAWNINGPOOL).ready and not self.units(ROACHWARREN).exists:
            await self.build(ROACHWARREN, near=hatcheries.first)
            return
        if self.can_afford(LAIR) and not self.units(LAIR).exists:
            await self.do(self.townhalls.first.build(LAIR))
            return
        if self.can_afford(SPIRE) and not self.units(SPIRE).exists:
            await self.build(SPIRE, near=hatcheries.first)
            return
        if self.can_afford(INFESTATIONPIT) and self.units(LAIR).ready and not self.units(INFESTATIONPIT).exists:
            await self.build(INFESTATIONPIT, near=hatcheries.first)
            return
        if self.can_afford(HIVE) and not self.units(HIVE).exists:
            await self.do(self.townhalls.first.build(HIVE))
            return
        # This probably can be reduced to a 2 argument statement... VVVV
        if self.can_afford(GREATERSPIRE) and self.units(HIVE).ready \
                and self.units(SPIRE).ready and not self.units(GREATERSPIRE).exists:
            await self.do(self.units(SPIRE).first.build(GREATERSPIRE))


    # FUTURE: incorporate dynamic composition by either tech level or AI/ML
    async def build_swarm(self):
        if self.units(SPAWNINGPOOL).ready:
            larvae = self.units(LARVA).ready.noqueue
            for larva in larvae:
                if self.supply_left > 0:
                    if self.units(SPIRE).ready:
#                        if self.units(CORRUPTOR).amount < 5 and self.can_afford(CORRUPTOR):
#                           await self.do(larva.train(CORRUPTOR))
#                           continue
#                        elif self.units(HIVE).ready and self.units(BROODLORD).amount < 5 and self.can_afford(BROODLORD):
#                           await self.do(self.units(CORRUPTOR).random.train(BROODLORD))
                        if self.can_afford(MUTALISK): # 30 muta = 60 c, possibly set to max cap...
                            await self.do(larva.train(MUTALISK))
                            continue
#                    if self.units(ROACHWARREN).ready and self.units(ROACH).amount < 30 and self.can_afford(ROACH): # 30 roach = 60 c
#                       await self.do(larva.train(ROACH))
#                       continue
#                   if self.units(ZERGLING).amount < 60 and self.can_afford(ZERGLING):  # 60 lings = 30 c
#                       await self.do(larva.train(ZERGLING))
#                       continue
 

    #need to consider a separate swarm list in order to allow damaged units to RTB
    # investigate multithreading and running each unit in own thread to avoid a double for loop...
    # might need to pursue subswarms or multiple swarms to avoid reinitializing the swarm so often
    # there is a parameter for multiprocessing...
    # This algo might only work for flying units.
    #might have to build a multiswarm architecture for overlords to pass data to mutalisks...
    async def attack(self, phys_swarm):
        if phys_swarm.amount > 10:
            orders = []
            # reinitialize swarm if needed
            if phys_swarm.amount > self.swarm_size + 3:
                self.swarm_size = phys_swarm.amount
                self.phys_swarm_pos = []
                for unit in phys_swarm:
                    self.phys_swarm_pos.append([unit.position.x, unit.position.y])
                self.phys_swarm_pos = np.array(self.phys_swarm_pos)

                self.logical_swarm = P.create_swarm(n_particles=phys_swarm.amount, dimensions=2, options=self.my_options, bounds=([0,0], [self.game_info.map_size[0], self.game_info.map_size[1]]) , init_pos=self.phys_swarm_pos, clamp=(0,4.0))
                
            self.logical_swarm.current_cost = self.fitness(self.logical_swarm.position, phys_swarm)
            self.logical_swarm.pbest_cost = self.fitness(self.logical_swarm.pbest_pos, phys_swarm)
            self.logical_swarm.pbest_pos, self.logical_swarm.pbest_cost = P.compute_pbest(self.logical_swarm)

            if np.min(self.logical_swarm.pbest_cost) < self.logical_swarm.best_cost:
                self.logical_swarm.best_pos, self.logical_swarm.best_cost = self.my_topology.compute_gbest(self.logical_swarm)

            self.logical_swarm.velocity = self.my_topology.compute_velocity(self.logical_swarm)
            self.logical_swarm.position = self.my_topology.compute_position(self.logical_swarm)
            # Extract positions from above and issue movement/attack orders
            # loop through np array compiling positions and appending them to orders list.
           
            # The mutas are still ignoring nearby enemies.
            for row, unit in zip(self.logical_swarm.position, phys_swarm):
                if self.known_enemy_units.closer_than(unit.radar_range, unit.position).exists:
                    orders.append(unit.stop())
                    orders.append(unit.attack(self.known_enemy_units.closest_to(unit.position).position))
                elif self.known_enemy_units.exists:
                    orders.append(unit.attack(self.known_enemy_units.closest_to(Point2(Pointlike((row[0], row[1]))))))
                else:
                    orders.append(unit.move(Point2(Pointlike((row[0], row[1])))))
#                    if self.known_enemy_units.in_attack_range_of(unit).exists:
 #                       orders.append(unit.attack(self.known_enemy_units.closest_to(unit.position).position))

            await self.do_actions(orders)


    def fitness(self, logical_swarm_pos, phys_swarm):
        #account for own health, enemy health/splash&anti air damage/
        # upgrade this function...
        # revert this back to a regular one unit function and apply the pyswarm evaluate method
        cost = []
   
        if self.known_enemy_units.exists:
            for logical_pos in logical_swarm_pos:
                cost.append(self.known_enemy_units.closest_distance_to(Point2(Pointlike((logical_pos[0], logical_pos[1])))))
        else:
            for logical_pos in logical_swarm_pos:
                cost.append(1 / phys_swarm.center.distance2_to(Point2(Pointlike((logical_pos[0], logical_pos[1])))))
        return np.array(cost)
#scouting...        else:
#           for logical_pos in logical_swarm_pos:
#               cost.append(self.known_enemy_units.closest_distance_to(Point2(Pointlike(logical_pos[0], logical_pos[1]))))

    # Economic Functions
    # There's a bug resulting in overproduction of overlords and workers as well as 2 extra expansions....

    async def build_workers(self):
        for larva in self.units(LARVA).ready.noqueue:    
            if self.can_afford(DRONE) and self.workers.amount < self.townhalls.amount * 12:
                await self.do(larva.train(DRONE))

    async def build_queens(self):    
        if self.units(SPAWNINGPOOL).ready.exists:
            for hatchery in self.units(HATCHERY).ready.noqueue:
                if self.can_afford(QUEEN) and self.units(QUEEN).amount < self.townhalls.amount:
                    await self.do(hatchery.train(QUEEN))

    async def inject_larva(self):
        for queen, hatchery in zip(self.units(QUEEN).idle, self.townhalls.ready):
            abilities = await self.get_available_abilities(queen)
            if AbilityId.EFFECT_INJECTLARVA in abilities:
                await self.do(queen(EFFECT_INJECTLARVA, hatchery))

    async def build_overlords(self):
        if self.supply_left < 9 and self.units(OVERLORD).amount < 26:
            larvae = self.units(LARVA).ready.noqueue
            for larva in larvae:
                if self.can_afford(OVERLORD) and not self.already_pending(OVERLORD):
                    await self.do(larva.train(OVERLORD))

    async def build_extractor(self):
        for hatchery in self.units(HATCHERY).ready:
            vespene = self.state.vespene_geyser.closer_than(15.0, hatchery)
            for v in vespene:
                if not self.can_afford(EXTRACTOR):
                    break
                worker = self.select_build_worker(v.position)
                if worker is None:
                    break
                if not self.units(EXTRACTOR).closer_than(1.0, v).exists:
                    await self.do(worker.build(EXTRACTOR, v))

    # upgrade the logic of this to expand when a base becomes exhausted...
    async def expand(self):
        if self.townhalls.amount < 5 and self.can_afford(HATCHERY):
            await self.expand_now()
Code Example #7
def PSO_grad_optimize(n_particles, dimensions, bounds, init_pose, select_num,
                      iteration, lr, init_state, init_act):
    # Inputs:
    #   n_particles: number of particles
    #   dimensions: dimensionality of the variables
    #   bounds: boundary of the actions
    #   init_pose: initial pose
    #   select_num: number of particles kept
    #   iteration: number of iterations
    #   lr: learning rate for applying the gradient
    #   init_state: initial state input for the gradient computation
    #   init_act: initial action input for the gradient computation
    #
    # Outputs:
    #   the best pose
    #   a list of selected positions

    # define the particle topology
    my_topology = Star()  # The Topology Class
    my_options = {
        'c1': 0.1,
        'c2': 0.000,
        'w': 0.000
    }  # arbitrarily set #0.01,0.01
    my_swarm = P.create_swarm(n_particles=n_particles,
                              dimensions=dimensions,
                              options=my_options,
                              bounds=bounds,
                              init_pos=init_pose)  # The Swarm Class

    for i in range(iteration):
        # Part 1: Update personal best
        step_num = int(dimensions / actor_critic.act_dim)
        my_swarm.current_cost = f(my_swarm.position)
        cur_action = my_swarm.position.reshape(
            (n_particles, step_num, actor_critic.act_dim))
        gradients = actor_critic.compute_gradient(step_num, init_act,
                                                  init_state, cur_action)

        my_swarm.pbest_pos = cur_action.reshape(
            (-1, dimensions)) + gradients[0].reshape((-1, dimensions)) * lr

        my_swarm.pbest_pos = np.clip(my_swarm.pbest_pos, -0.015, 0.015)
        my_swarm.pbest_cost = f(my_swarm.pbest_pos)
        print(my_swarm.pbest_cost)

        # Part 2: Update global best
        # Note that gbest computation is dependent on your topology
        if np.min(my_swarm.pbest_cost) < my_swarm.best_cost:
            my_swarm.best_pos, my_swarm.best_cost = my_topology.compute_gbest(
                my_swarm)

        # Let's print our output

        print('Iteration: {} | my_swarm.best_cost: {:.4f}'.format(
            i + 1, my_swarm.best_cost))

        # Part 3: Update position and velocity matrices
        # Note that position and velocity updates are dependent on your topology
        my_swarm.velocity = my_topology.compute_velocity(my_swarm)
        my_swarm.position = my_topology.compute_position(my_swarm)
        my_swarm.position = np.clip(my_swarm.position, -0.015, 0.015)

    best_index = my_swarm.pbest_cost.argsort()[:select_num]
    print(best_index)
    good_actions = my_swarm.pbest_pos[best_index].tolist()
    good_actions.append(my_swarm.best_pos)
    return np.clip(my_swarm.best_pos, -0.015, 0.015), np.asarray(good_actions)