def test(train_ep: int, num_episodes: int, game_interface: HFOAttackingPlayer,
         features: DiscreteHighLevelFeatures, agent: QLearningAgent,
         actions: ActionManager, reward_funct):
    # Run test episodes using the greedy Q-Learning policy
    score = 0
    agent.test_episodes.append(train_ep)
    for ep in range(num_episodes):
        print('<Test> {}/{}:'.format(ep, num_episodes))
        while game_interface.in_game():
            # Update environment features:
            observation = game_interface.get_state()
            curr_state_id = features.get_state_index(observation)
            has_ball = features.has_ball(observation)
            # Act:
            action_idx = agent.exploit_actions(curr_state_id)
            hfo_action = actions.map_action(action_idx)
            # Step:
            status, observation = game_interface.step(hfo_action, has_ball)
            # Save metrics:
            agent.save_visited_state(curr_state_id, action_idx)
            agent.cum_reward += reward_funct(status)
        print(':: Episode: {}; reward: {}'.format(ep, agent.cum_reward))
        score += 1 if game_interface.status == GOAL else 0
        # Reset player:
        agent.reset(training=False)
        # Game reset:
        game_interface.reset()
    agent.scores.append(score)
    actions_name = [actions.map_action_to_str(i)
                    for i in range(agent.num_actions)]
    agent.export_metrics(training=False, actions_name=actions_name)
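# The train/test variants in this file receive a reward_funct callback
# rather than hard-coding rewards. Below is a minimal sketch of one
# plausible reward_funct (an assumption, not necessarily the project's
# basic_reward): it returns 1 exactly on a goal, which matches the
# win-counting `num_goals += 1 if reward == 1 else 0` used further down.
def simple_reward(status: int) -> int:
    return 1 if status == GOAL else 0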
def test(num_episodes: int, game_interface: HFOAttackingPlayer,
         features: DiscreteFeatures, agent: QLearningAgentTest,
         actions: DiscreteActionsV5, reward_funct) -> float:
    """
    @param num_episodes: number of episodes to run
    @param game_interface: game interface that manages the interaction
        between the agent and the HFO server;
    @param features: features interface; extracts the main features for
        the agent from the observation array;
    @param agent: learning agent;
    @param actions: actions interface;
    @param reward_funct: reward function used
    @return: (float) the average reward
    """
    # Evaluate the greedy Q-Learning policy
    sum_score = 0
    for ep in range(num_episodes):
        # Check if the server is still up:
        if game_interface.hfo.step() == SERVER_DOWN:
            raise ServerDownError("testing; episode={}".format(ep))
        # Go to origin position:
        features.update_features(game_interface.get_state())
        go_to_origin_position(game_interface=game_interface,
                              features=features, actions=actions)
        # Test loop:
        debug_counter = 0  # TODO remove
        while game_interface.in_game():
            # Update environment features:
            curr_state_id = features.get_state_index()
            has_ball = features.has_ball()
            if not has_ball:
                hfo_action_params = GO_TO_BALL
                num_rep = 5
            else:
                # Act:
                debug_counter += 1
                action_idx = agent.exploit_actions(curr_state_id)
                hfo_action_params, num_rep = \
                    actions.map_action_idx_to_hfo_action(
                        agent_pos=features.get_pos_tuple(),
                        has_ball=has_ball,
                        action_idx=action_idx)
            # Step:
            status, observation = execute_action(
                action_params=hfo_action_params,
                repetitions=num_rep,
                has_ball=has_ball,
                game_interface=game_interface)
            # Update features:
            reward = reward_funct(status)
            features.update_features(observation)
            sum_score += reward
        # Game reset:
        game_interface.reset()
    print("## AVR Test reward = ", sum_score / num_episodes)
    return sum_score / num_episodes
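# ServerDownError and NoActionPlayedError are raised throughout these
# listings but defined elsewhere; a minimal plausible definition is
# sketched here (an assumption - the real module may attach more context):
class ServerDownError(Exception):
    """Raised when the HFO server stops responding mid-run."""


class NoActionPlayedError(Exception):
    """Raised when an episode ends before the agent could act enough."""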
def test(num_episodes: int, game_interface: HFOAttackingPlayer,
         features: DiscreteFeatures1TeammateV1, agent: QLearningAgent,
         actions: DiscreteActions1TeammateV1, reward_funct) -> float:
    """
    @param num_episodes: number of episodes to run
    @param game_interface: game interface that manages the interaction
        between the agent and the HFO server;
    @param features: features interface; extracts the main features for
        the agent from the observation array;
    @param agent: learning agent;
    @param actions: actions interface;
    @param reward_funct: reward function used
    @return: (float) the win rate
    """
    # Evaluate the greedy Q-Learning policy
    num_goals = 0
    for ep in range(num_episodes):
        # Check if the server is still up:
        if game_interface.hfo.step() == SERVER_DOWN:
            print("Server is down while testing; episode={}".format(ep))
            break
        # Go to origin position:
        features.update_features(game_interface.get_state())
        go_to_origin_position(game_interface=game_interface,
                              features=features, actions=actions)
        # Test loop:
        debug_counter = 0  # TODO remove
        while game_interface.in_game():
            # Update environment features:
            curr_state_id = features.get_state_index()
            has_ball = features.has_ball()
            # Act:
            debug_counter += 1
            action_idx = agent.act(curr_state_id)
            action_name = actions.map_action_to_str(action_idx, has_ball)
            print("Agent playing {}".format(action_name))
            # Step:
            status = execute_action(action_name=action_name,
                                    features=features,
                                    game_interface=game_interface)
            # Update metrics:
            reward = reward_funct(status)
            num_goals += 1 if reward == 1 else 0
        if status == OUT_OF_TIME and debug_counter < 5:
            raise NoActionPlayedError(
                "agent was only able to choose {}".format(debug_counter))
        # Game reset:
        game_interface.reset()
    print("<<TEST>> NUM Goals = ", num_goals)
    print("<<TEST>> NUM episodes = ", (ep + 1))
    print("<<TEST>> AVR win rate = ", num_goals / (ep + 1))
    # Divide by the episodes actually played, so an early server
    # shutdown does not deflate the reported win rate:
    return num_goals / (ep + 1)
def test(num_episodes: int, game_interface: HFOAttackingPlayer,
         features: discrete_features_v2.DiscreteFeaturesV2,
         agent: QLearningAgentV5, actions: DiscreteActionsV5, reward_funct):
    """
    @param num_episodes: number of episodes to run
    @param game_interface: game interface that manages the interaction
        between the agent and the HFO server;
    @param features: features interface; extracts the main features for
        the agent from the observation array;
    @param agent: learning agent;
    @param actions: actions interface;
    @param reward_funct: reward function used
    @return: (float) the average reward
    """
    # Evaluate the greedy Q-Learning policy
    sum_score = 0
    for ep in range(num_episodes):
        print('<Test> {}/{}:'.format(ep, num_episodes))
        # Go to origin position:
        features.update_features(game_interface.get_state())
        go_to_origin_position(game_interface=game_interface,
                              features=features, actions=actions)
        # Test loop:
        while game_interface.in_game():
            # Update environment features:
            curr_state_id = features.get_state_index()
            has_ball = features.has_ball()
            # Act:
            action_idx = agent.exploit_actions(curr_state_id)
            hfo_action_params, num_rep = \
                actions.map_action_idx_to_hfo_action(
                    agent_pos=features.get_pos_tuple(),
                    has_ball=has_ball,
                    action_idx=action_idx)
            action_name = actions.map_action_to_str(action_idx, has_ball)
            # Step: repeat the low-level action num_rep times
            rep_counter_aux = 0
            while game_interface.in_game() and rep_counter_aux < num_rep:
                status, observation = game_interface.step(hfo_action_params,
                                                          has_ball)
                rep_counter_aux += 1
            reward = reward_funct(status)
            # Update features:
            features.update_features(observation)
            # Save metrics:
            agent.save_visited_state(curr_state_id, action_idx)
            sum_score += reward
        # Reset player:
        agent.reset(training=False)
        # Game reset:
        game_interface.reset()
    return sum_score / num_episodes
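# Several listings call an execute_action helper with
# (action_params, repetitions, has_ball, game_interface) instead of
# inlining the repetition loop shown above. Here is a sketch of what
# that helper plausibly wraps, assuming only the game_interface.step /
# in_game interface already used in this file:
def execute_action(action_params, repetitions: int, has_ball: bool,
                   game_interface: HFOAttackingPlayer):
    status = IN_GAME
    observation = game_interface.get_state()
    rep = 0
    # Repeat the same low-level HFO action until the episode ends or
    # the repetition budget is spent:
    while game_interface.in_game() and rep < repetitions:
        status, observation = game_interface.step(action_params, has_ball)
        rep += 1
    return status, observation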
def train(num_episodes: int, game_interface: HFOAttackingPlayer,
          features: discrete_features_v2.DiscreteFeaturesV2,
          agent: QLearningAgentV4, actions: DiscreteActionsV2, reward_funct):
    for ep in range(num_episodes):
        # print('<Training> Episode {}/{}:'.format(ep, num_episodes))
        aux_positions_names = set()
        aux_actions_played = set()
        while game_interface.in_game():
            # Update environment features:
            features.update_features(game_interface.get_state())
            curr_state_id = features.get_state_index()
            has_ball = features.has_ball()
            # Act:
            action_idx = agent.act(curr_state_id)
            hfo_action: tuple = actions.map_action_idx_to_hfo_action(
                agent_pos=features.get_pos_tuple(),
                has_ball=has_ball,
                action_idx=action_idx)
            # Step:
            status, observation = game_interface.step(hfo_action, has_ball)
            reward = reward_funct(status)
            # Save metrics:
            agent.save_visited_state(curr_state_id, action_idx)
            agent.cum_reward += reward
            aux_positions_names.add(features.get_position_name())
            action_name = actions.map_action_to_str(action_idx, has_ball)
            aux_actions_played.add(action_name)
            # Update environment features:
            prev_state_id = curr_state_id
            features.update_features(observation)
            curr_state_id = features.get_state_index()
            agent.store_ep(state_idx=prev_state_id, action_idx=action_idx,
                           reward=reward, next_state_idx=curr_state_id,
                           has_ball=has_ball,
                           done=not game_interface.in_game())
        agent.learn()
        # print(':: Episode: {}; reward: {}; epsilon: {}; positions: {}; '
        #       'actions: {}'.format(ep, agent.cum_reward, agent.epsilon,
        #                            aux_positions_names, aux_actions_played))
        agent.save_metrics(agent.old_q_table, agent.q_table)
        # Reset player:
        agent.reset()
        agent.update_hyper_parameters(episode=ep,
                                      num_total_episodes=num_episodes)
        # Game reset:
        game_interface.reset()
    agent.save_model()
    actions_name = [actions.map_action_to_str(i, has_ball=True)
                    for i in range(agent.num_actions)]
    agent.export_metrics(training=True, actions_name=actions_name)
def train(num_episodes: int, game_interface: HFOAttackingPlayer,
          features: DiscreteHighLevelFeatures, agent: QLearningAgent,
          actions: ActionManager, reward_funct):
    for ep in range(num_episodes):
        print('<Training> Episode {}/{}:'.format(ep, num_episodes))
        while game_interface.in_game():
            # Update environment features:
            observation = game_interface.get_state()
            curr_state_id = features.get_state_index(observation)
            has_ball = features.has_ball(observation)
            # Act:
            action_idx = agent.act(curr_state_id)
            hfo_action = actions.map_action(action_idx)
            # Step:
            status, observation = game_interface.step(hfo_action, has_ball)
            reward = reward_funct(status)
            # Save metrics:
            agent.save_visited_state(curr_state_id, action_idx)
            agent.cum_reward += reward
            # Update environment features:
            prev_state_id = curr_state_id
            curr_state_id = features.get_state_index(observation)
            # Update agent:
            agent.learn(prev_state_id, action_idx, reward, status,
                        curr_state_id)
        print(':: Episode: {}; reward: {}'.format(ep, agent.cum_reward))
        agent.save_metrics(agent.old_q_table, agent.q_table)
        # Reset player:
        agent.reset()
        agent.update_hyper_parameters()
        # Game reset:
        game_interface.reset()
    agent.save_model()
    actions_name = [actions.map_action_to_str(i)
                    for i in range(agent.num_actions)]
    agent.export_metrics(training=True, actions_name=actions_name)
def test(train_ep: int, num_episodes: int, game_interface: HFOAttackingPlayer,
         features: discrete_features_v2.DiscreteFeaturesV2,
         agent: QLearningAgentV4, actions: DiscreteActionsV2, reward_funct):
    # Run test episodes using the greedy Q-Learning policy
    score = 0
    agent.test_episodes.append(train_ep)
    for ep in range(num_episodes):
        print('<Test> {}/{}:'.format(ep, num_episodes))
        prev_state_id = -1
        while game_interface.in_game():
            # Update environment features:
            features.update_features(game_interface.get_state())
            curr_state_id = features.get_state_index()
            has_ball = features.has_ball()
            # Act:
            if prev_state_id != curr_state_id:
                print([round(val, 2) for val in agent.q_table[curr_state_id]])
            action_idx = agent.exploit_actions(curr_state_id)
            hfo_action: tuple = actions.map_action_idx_to_hfo_action(
                agent_pos=features.get_pos_tuple(),
                has_ball=has_ball,
                action_idx=action_idx)
            # Step:
            status, observation = game_interface.step(hfo_action, has_ball)
            prev_state_id = curr_state_id
            # Save metrics:
            agent.save_visited_state(curr_state_id, action_idx)
            agent.cum_reward += reward_funct(status)
        print(':: Episode: {}; reward: {}'.format(ep, agent.cum_reward))
        score += 1 if game_interface.status == GOAL else 0
        # Reset player:
        agent.reset(training=False)
        # Game reset:
        game_interface.reset()
    agent.scores.append(score)
    actions_name = [actions.map_action_to_str(i, has_ball=True)
                    for i in range(agent.num_actions)]
    agent.export_metrics(training=False, actions_name=actions_name)
def train(num_train_episodes: int, num_total_train_ep: int,
          game_interface: HFOAttackingPlayer,
          features: DiscreteFeatures1TeammateV1, agent: QLearningAgent,
          actions: DiscreteActions1TeammateV1, reward_funct):
    """
    @param num_train_episodes: number of episodes to train in this iteration
    @param num_total_train_ep: total number of episodes to train
    @param game_interface: game interface that manages the interaction
        between the agent and the HFO server;
    @param features: features interface; extracts the main features for
        the agent from the observation array;
    @param agent: learning agent;
    @param actions: actions interface;
    @param reward_funct: reward function used
    @return: None
    """
    sum_score = 0
    sum_epsilons = 0
    agent.counter_explorations = 0
    agent.counter_exploitations = 0
    for ep in range(num_train_episodes):
        # Check if the server is still up:
        if game_interface.hfo.step() == SERVER_DOWN:
            raise ServerDownError("training; episode={}".format(ep))
        # Go to origin position:
        features.update_features(game_interface.get_state())
        go_to_origin_position(game_interface=game_interface,
                              features=features, actions=actions)
        # Start learning loop:
        debug_counter = 0  # TODO remove
        while game_interface.in_game():
            # Update environment features:
            curr_state_id = features.get_state_index()
            has_ball = features.has_ball()
            # Act:
            debug_counter += 1
            action_idx = agent.act(curr_state_id)
            action_name = actions.map_action_to_str(action_idx, has_ball)
            # print("Agent playing {} for {}".format(action_name, num_rep))
            # Step:
            status = execute_action(action_name=action_name,
                                    features=features,
                                    game_interface=game_interface)
            # Update environment features:
            reward = reward_funct(status)
            sum_score += reward
            new_state_id = features.get_state_index()
            agent.store_ep(state_idx=curr_state_id, action_idx=action_idx,
                           reward=reward, next_state_idx=new_state_id,
                           has_ball=has_ball,
                           done=not game_interface.in_game())
        if game_interface.get_game_status() == OUT_OF_TIME \
                and debug_counter < 5:
            raise NoActionPlayedError(
                "agent was only able to choose {}".format(debug_counter))
        agent.learn_buffer()
        agent.update_hyper_parameters(num_total_episodes=num_total_train_ep)
        sum_epsilons += agent.epsilon
        # Game reset:
        game_interface.reset()
    print("<<TRAIN>> AVR reward = ", sum_score / num_train_episodes)
    print("<<TRAIN>> %Explorations={}%".format(
        round(agent.counter_explorations * 100 /
              (agent.counter_exploitations + agent.counter_explorations), 2)))
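# update_hyper_parameters is called above with only the total episode
# budget, and the agents are built with epsilon/final_epsilon pairs
# (e.g. 1 and 0.3). A purely hypothetical linear-decay implementation is
# sketched below as a method of the agent class, assuming it tracks
# self.epsilon, self.final_epsilon and a self.train_eps episode counter:
def update_hyper_parameters(self, num_total_episodes: int):
    # Decay epsilon linearly toward final_epsilon over the full budget:
    self.train_eps += 1
    step = (1.0 - self.final_epsilon) / num_total_episodes
    self.epsilon = max(self.final_epsilon, self.epsilon - step)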
class Player:
    def __init__(self, num_opponents: int, num_teammates: int):
        # Game Interface:
        self.game_interface = HFOAttackingPlayer(num_opponents=num_opponents,
                                                 num_teammates=num_teammates)
        self.game_interface.connect_to_server()
        # Features Interface:
        self.features = DiscreteFeatures1Teammate(num_op=num_opponents,
                                                  num_team=num_teammates)
        # Actions Interface:
        self.actions = DiscreteActionsModule()
        # Agent instance:
        self.agent = QAgent(num_features=self.features.num_features,
                            num_actions=self.actions.get_num_actions(),
                            learning_rate=0.1, discount_factor=0.9,
                            epsilon=1, final_epsilon=0.3)

    def get_reward(self, status: int) -> int:
        return basic_reward(status)

    def set_starting_game_conditions(self, game_interface: HFOAttackingPlayer,
                                     features: DiscreteFeatures1Teammate,
                                     start_with_ball: bool = True,
                                     start_pos: tuple = None):
        """
        Set starting game conditions. Move to the initial position, for
        example.
        """
        if not start_pos:
            pos_name, start_pos = random.choice(
                list(STARTING_POSITIONS.items()))
        if start_with_ball:
            # Move to starting position:
            self.actions.dribble_to_pos(start_pos, features, game_interface)
        else:
            if self.features.has_ball():
                self.actions.kick_to_pos((0, 0), features, game_interface)
            # Move to starting position:
            self.actions.move_to_pos(start_pos, features, game_interface)
        # Inform the other players that it is ready to start:
        game_interface.hfo.say(settings.PLAYER_READY_MSG)

    def train(self, num_train_episodes: int, num_total_train_ep: int,
              start_with_ball: bool = True):
        """
        @param num_train_episodes: number of episodes to train in this
            iteration
        @param num_total_train_ep: total number of episodes to train
        @param start_with_ball: bool
        @raise ServerDownError
        @return: None
        """
        # Metrics variables:
        _num_wins = 0
        _sum_epsilons = 0
        for ep in range(num_train_episodes):
            # Check if the server is still running:
            self.game_interface.check_server_is_up()
            # Update features:
            self.features.update_features(self.game_interface.get_state())
            # Go to origin position:
            self.set_starting_game_conditions(
                game_interface=self.game_interface, features=self.features,
                start_with_ball=start_with_ball)
            # Start learning loop:
            goal = False  # bool flag
            while self.game_interface.in_game():
                # Update environment features:
                features_array = self.features.get_features().copy()
                # Act:
                action_idx = self.agent.act(features_array)
                status = self.actions.execute_action(
                    action_idx=action_idx, features=self.features,
                    game_interface=self.game_interface)
                # Every step we update the replay memory and train the
                # main network:
                done = not self.game_interface.in_game()
                goal = self.game_interface.scored_goal()
                self.agent.store_transition(
                    curr_st=features_array, action_idx=action_idx,
                    reward=self.get_reward(status),
                    new_st=self.features.get_features(), done=done)
                # Train:
                self.agent.train(goal)
            # Update auxiliary variables:
            _sum_epsilons += self.agent.epsilon
            _num_wins += 1 if self.game_interface.scored_goal() else 0
            # Update agent:
            self.agent.restart(num_total_train_ep)
            # Game reset:
            self.game_interface.reset()
        print("[TRAIN: Summary] WIN rate = {}; AVR epsilon = {}".format(
            _num_wins / num_train_episodes,
            _sum_epsilons / num_train_episodes))

    def test(self, num_episodes: int, start_with_ball: bool = True,
             training: bool = False) -> float:
        """
        @param num_episodes: number of episodes to run
        @param start_with_ball: flag
        @param training: flag
        @return: (float) the win rate
        """
        starting_pos_list = list(STARTING_POSITIONS.values())
        # Metrics variables:
        _num_wins = 0
        for ep in range(num_episodes):
            # Check if the server is still running:
            self.game_interface.check_server_is_up()
            # Update features:
            self.features.update_features(self.game_interface.get_state())
            # Set up gaming conditions:
            self.set_starting_game_conditions(
                game_interface=self.game_interface, features=self.features,
                start_pos=starting_pos_list[ep % len(starting_pos_list)],
                start_with_ball=start_with_ball)
            # Start test loop:
            prev_action_idx = None
            while self.game_interface.in_game():
                # Update environment features:
                features_array = self.features.get_features().copy()
                # Act:
                action_idx = self.agent.exploit_actions(features_array)
                if prev_action_idx != action_idx and not training:
                    print("ACTION:: {}".format(
                        self.actions.map_action_to_str(
                            action_idx, self.features.has_ball())))
                prev_action_idx = action_idx
                self.actions.execute_action(
                    action_idx=action_idx, features=self.features,
                    game_interface=self.game_interface)
            # Update auxiliary variables:
            _num_wins += 1 if self.game_interface.scored_goal() else 0
            # Game reset:
            self.game_interface.reset()
        avr_win_rate = _num_wins / num_episodes
        print("[TEST: Summary] WIN rate = {};".format(avr_win_rate))
        return avr_win_rate
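# How this Player class is driven is not shown in the listings; below is
# a hypothetical runner alternating training slices with greedy
# evaluations (the episode counts are illustrative assumptions):
if __name__ == '__main__':
    player = Player(num_opponents=1, num_teammates=1)
    total_train_ep = 1000
    for iteration in range(10):
        player.train(num_train_episodes=100,
                     num_total_train_ep=total_train_ep)
        win_rate = player.test(num_episodes=20)
        print("Iteration {}: win rate = {}".format(iteration, win_rate))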
class Player:
    def __init__(self, num_opponents: int, num_teammates: int,
                 port: int = 6000, online: bool = True):
        # Game Interface:
        self.game_interface = HFOAttackingPlayer(num_opponents=num_opponents,
                                                 num_teammates=num_teammates,
                                                 port=port)
        if online:
            self.game_interface.connect_to_server()
        # Features Interface:
        self.features = PlasticFeatures(num_op=num_opponents,
                                        num_team=num_teammates)
        # Actions Interface:
        self.actions = Actions()
        # Agent instance:
        self.agent = DQNAgent(num_features=self.features.num_features,
                              num_actions=self.actions.get_num_actions(),
                              learning_rate=0.005, discount_factor=0.99,
                              epsilon=1, final_epsilon=0.001,
                              epsilon_decay=0.99995, tau=0.125)

    def get_reward(self, game_status: int) -> int:
        if game_status == GOAL:
            return 1000
        elif game_status in [CAPTURED_BY_DEFENSE, OUT_OF_BOUNDS, OUT_OF_TIME]:
            return -1000
        else:
            return -1

    def set_starting_game_conditions(self, game_interface: HFOAttackingPlayer,
                                     features: PlasticFeatures,
                                     start_with_ball: bool = True,
                                     start_pos: tuple = None):
        """
        Set starting game conditions. Move to the initial position, for
        example.
        """
        if not start_pos:
            pos_name, start_pos = random.choice(
                list(STARTING_POSITIONS.items()))
        if start_with_ball:
            # Move to starting position:
            self.actions.dribble_to_pos(start_pos, features, game_interface)
        else:
            if self.features.has_ball():
                self.actions.kick_to_pos((0, 0), features, game_interface)
            # Move to starting position:
            self.actions.move_to_pos(start_pos, features, game_interface)
        # Inform the other players that it is ready to start:
        game_interface.hfo.say(settings.PLAYER_READY_MSG)

    def test(self, num_episodes: int, start_with_ball: bool = True) -> float:
        """
        @param num_episodes: number of episodes to run
        @param start_with_ball: flag
        @return: (float) the win rate
        """
        starting_pos_list = list(STARTING_POSITIONS.values())
        # Metrics variables:
        _num_wins = 0
        for ep in range(num_episodes):
            # Check if the server is still running:
            try:
                self.game_interface.check_server_is_up()
            except ServerDownError:
                print("!!SERVER DOWN!! TEST {}/{}".format(ep, num_episodes))
                avr_win_rate = round(_num_wins / (ep + 1), 2)
                print("[TEST: Summary] WIN rate = {};".format(avr_win_rate))
                return avr_win_rate
            # Update features:
            self.features.update_features(self.game_interface.get_state())
            # Set up gaming conditions:
            self.set_starting_game_conditions(
                game_interface=self.game_interface, features=self.features,
                start_pos=starting_pos_list[ep % len(starting_pos_list)],
                start_with_ball=start_with_ball)
            print("\nNEW TEST [{}]".format(
                starting_pos_list[ep % len(starting_pos_list)]))
            # print("FEATURES: ", self.features.get_features())
            # Start test loop:
            status = IN_GAME
            prev_action_idx = None
            while self.game_interface.in_game():
                if self.features.has_ball():
                    # Update environment features:
                    features_array = self.features.get_features().copy()
                    # Act:
                    action_idx = self.agent.exploit_actions(features_array)
                    if prev_action_idx != action_idx:
                        print("ACTION:: {}".format(
                            self.actions.map_action_to_str(action_idx)))
                    prev_action_idx = action_idx
                    self.actions.execute_action(
                        action_idx=action_idx, features=self.features,
                        game_interface=self.game_interface)
                else:
                    if prev_action_idx != -1:
                        print("ACTION:: MOVE!!")
                        prev_action_idx = -1
                    status = self.actions.no_ball_action(
                        features=self.features,
                        game_interface=self.game_interface)
            # Update auxiliary variables:
            if self.game_interface.scored_goal() or status == GOAL:
                print("[GOAL]")
                _num_wins += 1
            else:
                print("[FAIL]")
            # Game reset:
            self.game_interface.reset()
        avr_win_rate = round(_num_wins / num_episodes, 2)
        print("[TEST: Summary] WIN rate = {};".format(avr_win_rate))
        return avr_win_rate

    def train(self, num_train_episodes: int, num_total_train_ep: int,
              start_with_ball: bool = True):
        """
        @param num_train_episodes: number of episodes to train in this
            iteration
        @param num_total_train_ep: total number of episodes to train
        @param start_with_ball: bool
        @raise ServerDownError
        @return: (float) the average epsilon
        """
        starting_pos_list = list(STARTING_POSITIONS.values())
        # Metrics variables:
        _num_wins = 0
        _sum_epsilons = 0
        for ep in range(num_train_episodes):
            # Check if the server is still running:
            try:
                self.game_interface.check_server_is_up()
            except ServerDownError:
                print("!!SERVER DOWN!! TRAIN {}/{}".format(
                    ep, num_train_episodes))
                return
            # Update features:
            self.features.update_features(self.game_interface.get_state())
            # Go to origin position:
            self.set_starting_game_conditions(
                game_interface=self.game_interface, features=self.features,
                start_pos=starting_pos_list[ep % len(starting_pos_list)],
                start_with_ball=start_with_ball)
            # Start learning loop:
            status = IN_GAME
            episode_buffer = list()
            while self.game_interface.in_game():
                # Has ball:
                if self.features.has_ball():
                    # Update environment features:
                    features_array = self.features.get_features().copy()
                    # Act:
                    action_idx = self.agent.act(features_array)
                    status = self.actions.execute_action(
                        action_idx=action_idx, features=self.features,
                        game_interface=self.game_interface)
                    # Every step we update the replay memory and train
                    # the main network:
                    done = not self.game_interface.in_game()
                    # Store transition:
                    # (obs, action, reward, new obs, done?)
                    transition = np.array(
                        [features_array, action_idx,
                         self.get_reward(status),
                         self.features.get_features(), done],
                        dtype=object)  # ragged entries need dtype=object
                    episode_buffer.append(transition)
                    # Train:
                    self.agent.train(terminal_state=done)
                # No ball:
                else:
                    status = self.actions.no_ball_action(
                        features=self.features,
                        game_interface=self.game_interface)
            if self.game_interface.scored_goal() or status == GOAL:
                _num_wins += 1
                reward = self.get_reward(GOAL)
            else:
                reward = self.get_reward(status)
            # Add episode:
            self.agent.store_episode(episode_buffer, reward=reward)
            # Update auxiliary variables:
            _sum_epsilons += self.agent.epsilon
            # Update agent:
            self.agent.restart(num_total_train_ep)
            # Game reset:
            self.game_interface.reset()
        avr_epsilon = round(_sum_epsilons / num_train_episodes, 3)
        print("[TRAIN: Summary] WIN rate = {}; AVR epsilon = {}".format(
            _num_wins / num_train_episodes, avr_epsilon))
        return avr_epsilon

    def train_offline(self, game_buffer: np.ndarray):
        for _ in range(5):
            buffer = game_buffer.copy()
            self.agent.train_from_batch(buffer)
        print("MODEL TRAINED")
        # Probe the trained policy on simple feature vectors:
        aux = [-1] * 6
        features_base = np.array(aux)
        for idx in range(6):
            features_array = features_base.copy()
            features_array[idx] = 0
            print("[TEST] {}".format(features_array.tolist()))
            action_idx = self.agent.exploit_actions(features_array,
                                                    verbose=True)
            print("-> {}".format(self.actions.map_action_to_str(action_idx)))
NUM_OPPONENTS = 1
NUM_TEAMMATES = 1  # assumed: the features interface expects one teammate
port = 6000  # assumed: the default HFO port used elsewhere in this repo
num_games = 10  # assumed: number of games to play

# Game Interface:
game_interface = HFOAttackingPlayer(num_opponents=NUM_OPPONENTS,
                                    num_teammates=NUM_TEAMMATES,
                                    port=port)
game_interface.connect_to_server()
# Features Interface:
features = DiscFeatures1Teammate(num_op=NUM_OPPONENTS,
                                 num_team=NUM_TEAMMATES)
# Actions Interface:
actions = Actions()

for ep in range(num_games):
    # Update features:
    features.update_features(game_interface.get_state())
    # Set up gaming conditions:
    actions.dribble_to_pos((-0.5, -0.7), features, game_interface)
    # Scripted loop: shoot whenever the agent holds the ball
    status = IN_GAME
    prev_action_idx = None
    while game_interface.in_game():
        if features.has_ball():
            actions.shoot_ball(game_interface, features)
        else:
            actions.do_nothing(game_interface, features)
    # Update auxiliary variables:
    if game_interface.scored_goal() or status == GOAL:
        print("[GOAL]")
    # Game reset:
    game_interface.reset()
def train(num_train_episodes: int, num_total_train_ep: int,
          game_interface: HFOAttackingPlayer, features: DiscreteFeatures,
          agent: QLearningAgentTest, actions: DiscreteActionsV5,
          reward_funct):
    """
    @param num_train_episodes: number of episodes to train in this iteration
    @param num_total_train_ep: total number of episodes to train
    @param game_interface: game interface that manages the interaction
        between the agent and the HFO server;
    @param features: features interface; extracts the main features for
        the agent from the observation array;
    @param agent: learning agent;
    @param actions: actions interface;
    @param reward_funct: reward function used
    @return: None
    """
    sum_score = 0
    for ep in range(num_train_episodes):
        # Check if the server is still up:
        if game_interface.hfo.step() == SERVER_DOWN:
            raise ServerDownError("training; episode={}".format(ep))
        # Go to origin position:
        features.update_features(game_interface.get_state())
        go_to_origin_position(game_interface=game_interface,
                              features=features, actions=actions)
        # Start learning loop:
        while game_interface.in_game():
            # Update environment features:
            curr_state_id = features.get_state_index()
            has_ball = features.has_ball()
            if not has_ball:
                hfo_action_params = GO_TO_BALL
                num_rep = 5
                status, observation = execute_action(
                    action_params=hfo_action_params,
                    repetitions=num_rep,
                    has_ball=has_ball,
                    game_interface=game_interface)
                features.update_features(observation)
                reward = reward_funct(status)
            else:
                # Act:
                action_idx = agent.act(curr_state_id)
                hfo_action_params, num_rep = \
                    actions.map_action_idx_to_hfo_action(
                        agent_pos=features.get_pos_tuple(),
                        has_ball=has_ball,
                        action_idx=action_idx)
                # Step:
                status, observation = execute_action(
                    action_params=hfo_action_params,
                    repetitions=num_rep,
                    has_ball=has_ball,
                    game_interface=game_interface)
                # Update environment features:
                reward = reward_funct(status)
                sum_score += reward
                features.update_features(observation)
                new_state_id = features.get_state_index()
                agent.store_ep(state_idx=curr_state_id,
                               action_idx=action_idx, reward=reward,
                               next_state_idx=new_state_id,
                               has_ball=has_ball,
                               done=not game_interface.in_game())
        agent.learn_buffer(reward)
        agent.update_hyper_parameters(num_total_episodes=num_total_train_ep)
        # Game reset:
        game_interface.reset()
    print("## AVR Train reward = ", sum_score / num_train_episodes)
class Player:
    def __init__(self, num_opponents: int, num_teammates: int,
                 port: int = 6000):
        # Game Interface:
        self.game_interface = HFOAttackingPlayer(num_opponents=num_opponents,
                                                 num_teammates=num_teammates,
                                                 port=port)
        self.game_interface.connect_to_server()
        # Features Interface:
        self.features = DiscFeatures1Teammate(num_op=num_opponents,
                                              num_team=num_teammates)
        # Actions Interface:
        self.actions = Actions()
        # Agent instance:
        self.agent = QAgent(num_features=self.features.num_features,
                            num_actions=self.actions.get_num_actions(),
                            learning_rate=0.1, discount_factor=0.9,
                            epsilon=0.8)

    def get_reward(self, status: int) -> int:
        return basic_reward(status)

    def set_starting_game_conditions(self, game_interface: HFOAttackingPlayer,
                                     features: DiscFeatures1Teammate,
                                     start_with_ball: bool = True,
                                     start_pos: tuple = None):
        """
        Set starting game conditions. Move to the initial position, for
        example.
        """
        if not start_pos:
            pos_name, start_pos = random.choice(
                list(STARTING_POSITIONS.items()))
        if start_with_ball:
            # Move to starting position:
            self.actions.dribble_to_pos(start_pos, features, game_interface)
        else:
            if self.features.has_ball():
                self.actions.kick_to_pos((0, 0), features, game_interface)
            # Move to starting position:
            self.actions.move_to_pos(start_pos, features, game_interface)
        # Inform the other players that it is ready to start:
        game_interface.hfo.say(settings.PLAYER_READY_MSG)

    def test(self, num_episodes: int, start_with_ball: bool = True) -> float:
        """
        @param num_episodes: number of episodes to run
        @param start_with_ball: flag
        @return: (float) the win rate
        """
        starting_pos_list = list(STARTING_POSITIONS.values())
        # Metrics variables:
        _num_wins = 0
        for ep in range(num_episodes):
            # Check if the server is still running:
            self.game_interface.check_server_is_up()
            # Update features:
            self.features.update_features(self.game_interface.get_state())
            # Set up gaming conditions:
            self.set_starting_game_conditions(
                game_interface=self.game_interface, features=self.features,
                start_pos=starting_pos_list[ep % len(starting_pos_list)],
                start_with_ball=start_with_ball)
            print("\nNEW TEST [{}]".format(
                starting_pos_list[ep % len(starting_pos_list)]))
            # Start test loop:
            status = IN_GAME
            prev_action_idx = None
            while self.game_interface.in_game():
                if self.features.has_ball():
                    # Update environment features:
                    features_array = self.features.get_features().copy()
                    # Act:
                    action_idx = self.agent.exploit_actions(features_array)
                    if prev_action_idx != action_idx:
                        print("ACTION:: {}".format(
                            self.actions.map_action_to_str(action_idx)))
                    prev_action_idx = action_idx
                    self.actions.execute_action(
                        action_idx=action_idx, features=self.features,
                        game_interface=self.game_interface)
                else:
                    if prev_action_idx != -1:
                        print("ACTION:: MOVE!!")
                        prev_action_idx = -1
                    status = self.actions.no_ball_action(
                        features=self.features,
                        game_interface=self.game_interface)
            # Update auxiliary variables:
            if self.game_interface.scored_goal() or status == GOAL:
                print("[GOAL]")
                _num_wins += 1
            else:
                print("[FAIL]")
            # Game reset:
            self.game_interface.reset()
        avr_win_rate = _num_wins / num_episodes
        print("[TEST: Summary] WIN rate = {};".format(avr_win_rate))
        return avr_win_rate

    def train(self, num_train_episodes: int, num_total_train_ep: int,
              start_with_ball: bool = True):
        """
        @param num_train_episodes: number of episodes to train in this
            iteration
        @param num_total_train_ep: total number of episodes to train
        @param start_with_ball: bool
        @raise ServerDownError
        @return: None
        """
        # Metrics variables:
        _num_wins = 0
        _sum_epsilons = 0
        for ep in range(num_train_episodes):
            # Check if the server is still running:
            self.game_interface.check_server_is_up()
            # Update features:
            self.features.update_features(self.game_interface.get_state())
            # Go to origin position:
            self.set_starting_game_conditions(
                game_interface=self.game_interface, features=self.features,
                start_with_ball=start_with_ball)
            # Start learning loop:
            status = IN_GAME
            episode_buffer = list()
            while self.game_interface.in_game():
                # Has ball:
                if self.features.has_ball():
                    # Update environment features:
                    features_array = self.features.get_features().copy()
                    # Act:
                    action_idx = self.agent.act(features_array)
                    status = self.actions.execute_action(
                        action_idx=action_idx, features=self.features,
                        game_interface=self.game_interface)
                    # Every step we update the replay memory and train
                    # the main network:
                    done = not self.game_interface.in_game()
                    # Store transition:
                    # (obs, action, reward, new obs, done?)
                    transition = np.array(
                        [features_array, action_idx,
                         self.get_reward(status),
                         self.features.get_features(), done],
                        dtype=object)  # ragged entries need dtype=object
                    episode_buffer.append(transition)
                # No ball:
                else:
                    status = self.actions.no_ball_action(
                        features=self.features,
                        game_interface=self.game_interface)
            if self.game_interface.scored_goal() or status == GOAL:
                _num_wins += 1
                reward = self.get_reward(GOAL)
            else:
                reward = self.get_reward(status)
            self.agent.store_episode(episode_buffer, reward=reward)
            # Train:
            self.agent.train(terminal_state=True)
            # Update auxiliary variables:
            _sum_epsilons += self.agent.epsilon
            # Update agent:
            self.agent.restart(num_total_train_ep)
            # Game reset:
            self.game_interface.reset()
        print("[TRAIN: Summary] WIN rate = {}; AVR epsilon = {}".format(
            _num_wins / num_train_episodes,
            _sum_epsilons / num_train_episodes))
def train(num_train_episodes: int, num_total_train_ep: int,
          game_interface: HFOAttackingPlayer,
          features: discrete_features_v2.DiscreteFeaturesV2,
          agent: QLearningAgentV5, actions: DiscreteActionsV5,
          save_metrics: bool, reward_funct):
    """
    @param num_train_episodes: number of episodes to train in this iteration
    @param num_total_train_ep: total number of episodes to train
    @param game_interface: game interface that manages the interaction
        between the agent and the HFO server;
    @param features: features interface; extracts the main features for
        the agent from the observation array;
    @param agent: learning agent;
    @param actions: actions interface;
    @param save_metrics: flag; if true, save the metrics;
    @param reward_funct: reward function used
    @return: (QLearningAgentV5) the agent
    """
    for ep in range(num_train_episodes):
        # Go to origin position:
        features.update_features(game_interface.get_state())
        go_to_origin_position(game_interface=game_interface,
                              features=features, actions=actions)
        # Start learning loop:
        aux_positions_names = set()
        aux_actions_played = set()
        while game_interface.in_game():
            # Update environment features:
            curr_state_id = features.get_state_index()
            has_ball = features.has_ball()
            # Act:
            action_idx = agent.act(curr_state_id)
            hfo_action_params, num_rep = \
                actions.map_action_idx_to_hfo_action(
                    agent_pos=features.get_pos_tuple(),
                    has_ball=has_ball,
                    action_idx=action_idx)
            # Step: repeat the low-level action num_rep times
            rep_counter_aux = 0
            while game_interface.in_game() and rep_counter_aux < num_rep:
                status, observation = game_interface.step(hfo_action_params,
                                                          has_ball)
                rep_counter_aux += 1
            reward = reward_funct(status)
            # Save metrics:
            if save_metrics:
                agent.save_visited_state(curr_state_id, action_idx)
                agent.cum_reward += reward
                aux_positions_names.add(features.get_position_name())
                action_name = actions.map_action_to_str(action_idx, has_ball)
                aux_actions_played.add(action_name)
            # Update environment features:
            prev_state_id = curr_state_id
            features.update_features(observation)
            curr_state_id = features.get_state_index()
            agent.store_ep(state_idx=prev_state_id, action_idx=action_idx,
                           reward=reward, next_state_idx=curr_state_id,
                           has_ball=has_ball,
                           done=not game_interface.in_game())
        agent.learn()
        # print(':: Episode: {}; reward: {}; epsilon: {}; positions: {}; '
        #       'actions: {}'.format(ep, agent.cum_reward, agent.epsilon,
        #                            aux_positions_names, aux_actions_played))
        if save_metrics:
            agent.save_metrics(agent.old_q_table, agent.q_table)
        # Reset player:
        agent.reset()
        agent.update_hyper_parameters(episode=agent.train_eps,
                                      num_total_episodes=num_total_train_ep)
        # Game reset:
        game_interface.reset()
    agent.save_model()
    if save_metrics:
        actions_name = [actions.map_action_to_str(i, has_ball=True)
                        for i in range(agent.num_actions)]
        agent.export_metrics(training=True, actions_name=actions_name)
    return agent
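# A hypothetical driver tying this trainer to the matching V5 test
# function above; the function names, episode counts, and the already
# constructed interfaces are assumptions, not part of the original
# listings:
def run_experiment(game_interface, features, agent, actions, reward_funct,
                   num_iterations: int = 10, train_ep_per_iter: int = 100,
                   test_ep_per_iter: int = 20):
    total_train_ep = num_iterations * train_ep_per_iter
    for it in range(num_iterations):
        # Alternate training slices with greedy evaluations:
        train(num_train_episodes=train_ep_per_iter,
              num_total_train_ep=total_train_ep,
              game_interface=game_interface, features=features,
              agent=agent, actions=actions, save_metrics=True,
              reward_funct=reward_funct)
        avg_reward = test(num_episodes=test_ep_per_iter,
                          game_interface=game_interface, features=features,
                          agent=agent, actions=actions,
                          reward_funct=reward_funct)
        print("Iteration {}: average test reward {}".format(it, avg_reward))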