예제 #1
0
def transform_state(timesteps):
    """Extract the reduced state representation for the first timestep.

    Args:
        timesteps: sequence of environment timesteps; only ``timesteps[0]``
            is consumed (assumes the PySC2 timestep convention — TODO confirm).

    Returns:
        The first element of the tuple produced by
        ``state_modifier.modified_state_space``.
    """
    # The helper returns a tuple; index 0 is the state tensor we feed onward.
    return state_modifier.modified_state_space(timesteps[0])[0]
예제 #2
0
    def step(self, obs):
        """Alternate between selecting units and attacking the closest enemy.

        Decides one of NO_OP / SELECT / ATTACK_CLOSEST per frame and delegates
        the actual action construction to the actuator.
        """
        super().step(obs)

        features, _, _ = state_modifier.modified_state_space(obs)
        selected = features[0]
        friendly_unit_density = features[3]
        enemy_unit_density = features[4]
        enemy_hit_points = features[2]

        if np.all(friendly_unit_density == 0):
            # No friendly units remain: nothing useful can be issued.
            chosen = Action.NO_OP
        elif self.state == 0 or np.all(selected == 0):
            # Either we are in the "select" phase or the selection was lost.
            self.state = 1
            chosen = Action.SELECT
        else:
            # Selection is in place: attack, then flip back to the select phase.
            self.state = 0
            chosen = Action.ATTACK_CLOSEST

        return self.actuator.compute_action(chosen, selected,
                                            friendly_unit_density,
                                            enemy_unit_density,
                                            enemy_hit_points)
예제 #3
0
    def step(self, obs):
        """Advance the environment one step under the current policy.

        Triggers a training pass once the game is reported finished.
        """
        reduced = state_modifier.modified_state_space(obs)
        sarsd = env_handler.step(reduced, self.policy)

        # Episode boundary: update the policy from the collected experience.
        if game_finished(sarsd):
            self.train()
def transform_state(timesteps):
    """Extract the reduced state for the first timestep, logging raw inputs.

    Args:
        timesteps: sequence of environment timesteps; only ``timesteps[0]``
            is consumed (assumes the PySC2 timestep convention — TODO confirm).

    Returns:
        The first element of the tuple produced by
        ``state_modifier.modified_state_space``.
    """
    state = state_modifier.modified_state_space(timesteps[0])[0]
    # Diagnostic output describing the raw observation the state was built from.
    print("feature screen: ", timesteps[0].observation.feature_screen.shape)
    print("feature mini map: ", timesteps[0].observation.feature_minimap.shape)
    print("non-spatial features: ", timesteps[0].observation.player)
    return state
 def step(self, obs):
     """Select marines, then act every 20th frame.

     Returns the chosen PySC2 action for this frame.
     """
     # NOTE(review): the reduced observation is computed but never read here;
     # kept in case modified_state_space has side effects — confirm and drop
     # if it is pure.
     simp_obs = state_modifier.modified_state_space(obs)

     self.iteration += 1
     super(terran_agent, self).step(obs)  # crashes with simple obs

     # Only issue a real action every 20th frame, and only with marines
     # already selected; otherwise (re)select the marines.
     if self.unit_type_is_selected(obs, units.Terran.Marine) and self.iteration % 20 == 0:
         return self.handle_action(obs)
     else:
         return self.select_units_by_type(obs, units.Terran.Marine)
     # Removed: unreachable `return actions.FUNCTIONS.no_op()` — both branches
     # above already return.
예제 #6
0
    def step(self, obs):
        """Select friendly units when nothing is selected, otherwise attack.

        Chooses one of NO_OP / SELECT / ATTACK per frame and hands it to the
        actuator to build the concrete action.
        """
        super().step(obs)

        features, _ = state_modifier.modified_state_space(obs)
        selected = features[0]
        friendly_unit_density = features[2]
        enemy_unit_density = features[4]

        if np.all(friendly_unit_density == 0):
            # Every friendly unit is gone: nothing useful to do.
            chosen = Action.NO_OP
        elif not self.actuator.units_selected or np.all(selected == 0):
            # No active selection (or it was lost): select first.
            chosen = Action.SELECT
        else:
            chosen = Action.ATTACK

        return self.actuator.compute_action(chosen, selected,
                                            friendly_unit_density,
                                            enemy_unit_density)
예제 #7
0
    def __init__(self):
        """Initialize agent bookkeeping, the actuator and the PPO learner."""
        # Modernized from `super(terran_agent, self).__init__()` — the
        # zero-argument form matches the `super().step(obs)` style used
        # elsewhere in this code.
        super().__init__()
        self.attack_coordinates = None  # set later once a target area is known
        self.iteration = 0              # frame counter, incremented each step()

        self.actuator = Actuator()
        self.PPO = PPOAgent()
    def step(self, obs):
<<<<<<< HEAD
=======
    
        simp_obs = state_modifier.modified_state_space(obs)
    
        self.iteration += 1
>>>>>>> 50eda19452d03e2520e35a9c714c6e0efadc1a30
        super(terran_agent, self).step(obs)
        self.iteration += 1
        ###################################################
        
        
        
        simp_obs = state_modifier.modified_state_space(obs)
        selected = simp_obs[0]
        friendly_unit_density = simp_obs[2]
        enemy_unit_density = simp_obs[4]