def act(self, gs: GameState) -> int:
    available_actions = gs.get_available_actions(gs.get_active_player())
    state_vec = gs.get_vectorized_state()
    predicted_Q_values = self.Q_action.predict(state_vec)

    # Epsilon-greedy selection restricted to the legal actions.
    if np.random.random() <= self.epsilon:
        chosen_action = np.random.choice(available_actions)
    else:
        chosen_action = available_actions[int(
            np.argmax(predicted_Q_values[available_actions]))]

    # Online update on the previous transition: the evaluation (target)
    # network picks the greedy action index and the online network's values
    # on the current state provide the bootstrap estimate.
    if self.s is not None:
        target = self.r + self.gamma * predicted_Q_values[int(
            np.argmax(self.Q_evaluation.predict(self.s)))]
        self.Q_action.train(self.s, self.a, target)

    # Soft update of the evaluation (target) network towards the online one:
    # theta_eval <- tau * theta_online + (1 - tau) * theta_eval.
    if self.s is not None:
        update_Q_evaluation = (
            self.tau * np.array(self.Q_action.model.get_weights())
            + (1 - self.tau) * np.array(self.Q_evaluation.model.get_weights()))
        self.Q_evaluation.model.set_weights(update_Q_evaluation)

    # Remember the current transition for the next call.
    self.s = state_vec
    self.a = to_categorical(chosen_action, self.action_space_size)
    self.r = 0.0
    self.count_state += 1
    return chosen_action

def act(self, gs: GameState) -> int:
    gs_unique_id = gs.get_unique_id()
    available_actions = gs.get_available_actions(gs.get_active_player())
    state_vec = gs.get_vectorized_state()

    # Binary mask over the full action space: 1.0 for legal actions.
    mask_vec = np.zeros((self.action_space_size,))
    mask_vec[available_actions] = 1.0

    v = self.critic.predict(state_vec)
    p = self.actor.predict(state_vec, mask_vec)

    # Sample an action from the (masked) policy distribution.
    indexes = np.arange(self.action_space_size)
    chosen_action = np.random.choice(indexes, p=p)

    # Alternative: renormalize the probabilities over the legal actions only.
    # valid_actions_probability = p[available_actions]
    # valid_actions_probability_sum = np.sum(valid_actions_probability)
    # normalized_valid_action_probability = (
    #     valid_actions_probability / valid_actions_probability_sum)
    # chosen_action = np.random.choice(available_actions,
    #                                  p=normalized_valid_action_probability)

    # Buffer the step for the end-of-episode actor-critic update.
    self.v.append(v)
    self.s.append(state_vec)
    self.m.append(mask_vec)
    self.a.append(to_categorical(chosen_action, self.action_space_size))
    if not self.is_last_episode_terminal:
        self.r.append(self.r_temp)
    self.r_temp = 0.0
    self.is_last_episode_terminal = False
    return chosen_action

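# self.actor.predict(state_vec, mask_vec) is assumed to return a probability
# distribution over the full action space in which illegal actions have zero
# probability. The helper below is only a sketch of such a masked softmax over
# raw network logits; masked_softmax is an illustrative name, not part of the
# original code.
import numpy as np


def masked_softmax(logits: np.ndarray, mask_vec: np.ndarray) -> np.ndarray:
    # Shift for numerical stability, zero out illegal actions, renormalize.
    exp_logits = np.exp(logits - np.max(logits)) * mask_vec
    return exp_logits / np.sum(exp_logits)
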
def act(self, gs: GameState) -> int:
    gs_unique_id = gs.get_unique_id()
    available_actions = gs.get_available_actions(gs.get_active_player())
    state_vec = gs.get_vectorized_state()
    predicted_Q_values = self.Q.predict(state_vec)

    # Epsilon-greedy selection restricted to the legal actions.
    if np.random.random() <= self.epsilon:
        chosen_action = np.random.choice(available_actions)
    else:
        chosen_action = available_actions[int(
            np.argmax(predicted_Q_values[available_actions]))]

    if self.s is not None:
        # Online Q-learning update on the previous transition.
        target = self.r + self.gamma * max(
            predicted_Q_values[available_actions])
        self.Q.train(self.s, self.a, target)

        # Store the transition (s, a, r, s') for replay.
        self.experience.append(
            (self.s.copy(), self.a.copy(), self.r, state_vec.copy()))
        print("experience", len(self.experience))

        # Every 10 stored transitions, replay the whole buffer.
        if len(self.experience) % 10 == 0:
            for el in self.experience:
                # Bootstrap target from the stored next state el[3].
                target = el[2] + self.gamma * np.max(self.Q.predict(el[3]))
                self.Q.train(el[0], el[1], target)

    self.s = state_vec
    self.a = to_categorical(chosen_action, self.action_space_size)
    self.r = 0.0
    return chosen_action

def act(self, gs: GameState) -> int:
    # gs_unique_id = gs.get_unique_id()
    available_actions = gs.get_available_actions(gs.get_active_player())
    state_vec = gs.get_vectorized_state()
    predicted_Q_values = self.Q.predict(state_vec)

    # Epsilon-greedy selection restricted to the legal actions.
    if np.random.random() <= self.epsilon:
        chosen_action = np.random.choice(available_actions)
    else:
        chosen_action = available_actions[int(
            np.argmax(predicted_Q_values[available_actions]))]

    if self.s is not None:
        # Double Q-learning target: the online network (Q) selects the greedy
        # legal action and the alternate network evaluates it.
        target = self.r + self.gamma * self.alternate_Q.predict(
            state_vec)[available_actions][np.argmax(
                self.Q.predict(state_vec)[available_actions])]
        # final_target = self.model.predict(state)
        # final_target[0][action] = target
        # self.model.fit(state, final_target, verbose=0)
        self.Q.train(self.s, self.a, target)

    self.s = state_vec
    self.a = to_categorical(chosen_action, self.action_space_size)
    self.r = 0.0
    return chosen_action

def act(self, gs: GameState) -> int:
    available_actions = gs.get_available_actions(gs.get_active_player())
    state_vec = gs.get_vectorized_state()
    predicted_Q_values = self.Q_action.predict(state_vec)

    # Epsilon-greedy selection restricted to the legal actions.
    if np.random.random() <= self.epsilon:
        chosen_action = np.random.choice(available_actions)
    else:
        chosen_action = available_actions[int(
            np.argmax(predicted_Q_values[available_actions]))]

    if self.s is not None:
        # Online double-DQN update on the previous transition.
        target = self.r + self.gamma * predicted_Q_values[int(
            np.argmax(self.Q_evaluation.predict(self.s)))]
        self.Q_action.train(self.s, self.a, target)
        # Store the transition (s, a, r, s') for replay.
        self.experience.append(
            (self.s.copy(), self.a.copy(), self.r, state_vec.copy()))

    # Every 10 stored transitions, replay a minibatch of at most 30 samples.
    if (len(self.experience) % 10 == 0 and len(self.experience) > 0
            and self.epsilon > 0):
        batch = sample(self.experience, min(len(self.experience), 30))
        el_state = [x[0] for x in batch]
        el_a = [x[1] for x in batch]
        el_r = [x[2] for x in batch]
        el_state_plus_1 = [x[3] for x in batch]

        # Online network values on the next states; the evaluation (target)
        # network selects which action index to bootstrap from.
        predicted_Q_values_list = self.Q_action.model.predict(
            np.array(el_state_plus_1))
        Q_star = [
            x[int(np.argmax(self.Q_evaluation.predict(el_state[i])))]
            for i, x in enumerate(predicted_Q_values_list)
        ]
        target = np.array(el_r) + self.gamma * np.array(Q_star)
        self.Q_action.retrain(np.array(el_state), np.array(el_a), target)

    # Soft update of the evaluation (target) network towards the online one.
    if self.s is not None:
        update_Q_evaluation = (
            self.tau * np.array(self.Q_action.model.get_weights())
            + (1 - self.tau) * np.array(self.Q_evaluation.model.get_weights()))
        self.Q_evaluation.model.set_weights(update_Q_evaluation)

    self.s = state_vec
    self.a = to_categorical(chosen_action, self.action_space_size)
    self.r = 0.0
    self.count_state += 1
    return chosen_action

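# The minibatch update above goes through self.Q_action.retrain(states,
# actions, targets), which is not shown here. The method below is only a
# sketch of what it could look like, assuming a Keras model with one output
# per action: the target is written into the slot of the taken action and the
# other outputs are left untouched. It is an illustration, not the original
# implementation.
import numpy as np


def retrain(self, states: np.ndarray, actions_one_hot: np.ndarray,
            targets: np.ndarray) -> None:
    # Current predictions for the whole batch.
    q_values = self.model.predict(states, verbose=0)
    # Overwrite only the entries of the actions that were actually taken.
    action_indexes = np.argmax(actions_one_hot, axis=1)
    q_values[np.arange(len(states)), action_indexes] = targets
    # One gradient step towards the corrected targets.
    self.model.fit(states, q_values, epochs=1, verbose=0)
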
def act(self, gs: GameState) -> int:
    # Store the previous state (as both s and s') as a terminal transition
    # with a small default priority.
    self.priority.append(0.001)
    self.memory.append((self.s, self.s, self.a, self.r, True))

    available_actions = gs.get_available_actions(gs.get_active_player())
    state_vec = gs.get_vectorized_state()
    predicted_Q_values = self.Q.predict(state_vec)

    # Epsilon-greedy selection restricted to the legal actions.
    if np.random.random() <= self.epsilon:
        chosen_action = np.random.choice(available_actions)
    else:
        chosen_action = available_actions[int(
            np.argmax(predicted_Q_values[available_actions]))]

    # Replay a batch sampled proportionally to priority, together with the
    # importance-sampling weights.
    batch, importance = self.get_priority_experience_batch()
    for b, i in zip(batch, importance):
        state, next_state, action, reward, done = b
        target = reward
        if not done:
            if self.s is not None:
                # target = target + self.gamma * self.alternate_Q.predict(state_vec)[available_actions][
                #     np.argmax(self.Q.predict(state_vec)[available_actions])]
                # self.Q.train(self.s, self.a, target)

                # Double-DQN style bootstrap from the stored next state.
                q_next = reward + self.gamma * self.alternate_Q.predict(
                    next_state)[available_actions][np.argmax(
                        self.Q.predict(next_state)[available_actions])]
                target = q_next
                q = self.alternate_Q.predict(
                    next_state)[available_actions][np.argmax(
                        self.Q.predict(next_state)[available_actions])]
                # New priority proportional to the TD error plus a small constant.
                p = (np.abs(q_next - q) + (np.e ** -10)) ** self.alpha
                self.priority.append(p)
                self.memory.append((state, next_state, action, reward, done))
        self.Q.train(self.s, self.a, target)
        # Importance-sampling weight (computed but not used further here).
        imp = i ** (1 - self.epsilon)
        imp = np.reshape(imp, 1)

    # Previous version with uniform replay, kept commented out:
    # self.remember(self.s, state_vec, self.a, self.r, True)
    # batch = random.choices(self.memory, k=self.batch_size)
    # for state, next_state, action, reward, done in batch:
    #     target = reward
    #     if not done:
    #         if self.s is not None:
    #             target = target + self.gamma * self.alternate_Q.predict(state_vec)[available_actions][
    #                 np.argmax(self.Q.predict(state_vec)[available_actions])]
    #     self.Q.train(self.s, self.a, target)
    # self.remember(self.s, state_vec, self.a, self.r, True)

    self.s = state_vec
    self.a = to_categorical(chosen_action, self.action_space_size)
    self.r = 0.0
    return chosen_action

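# get_priority_experience_batch() is not shown in this listing. The method
# below sketches a proportional prioritization consistent with how it is used
# above: sampling probability proportional to the stored priority and
# importance weights 1 / (N * P(i)). The buffer layout and the batch_size
# attribute are assumptions taken from the surrounding code, not the original
# implementation.
import numpy as np


def get_priority_experience_batch(self):
    priorities = np.array(self.priority, dtype=np.float64)
    probabilities = priorities / np.sum(priorities)
    # Sample transition indexes proportionally to their priority.
    indexes = np.random.choice(len(self.memory), size=self.batch_size,
                               p=probabilities)
    batch = [self.memory[idx] for idx in indexes]
    # Importance-sampling weights correct for the non-uniform sampling.
    importance = 1.0 / (len(self.memory) * probabilities[indexes])
    return batch, importance
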
def act(self, gs: GameState) -> int:
    available_actions = gs.get_available_actions(gs.get_active_player())
    state_vec = gs.get_vectorized_state()
    predicted_Q_values = self.Q.predict(state_vec)

    # Epsilon-greedy selection restricted to the legal actions.
    if np.random.random() <= self.epsilon:
        chosen_action = np.random.choice(available_actions)
    else:
        chosen_action = available_actions[int(
            np.argmax(predicted_Q_values[available_actions]))]

    # Online Q-learning update on the previous transition.
    if self.s is not None:
        target = self.r + self.gamma * max(
            predicted_Q_values[available_actions])
        self.Q.train(self.s, self.a, target)

    self.s = state_vec
    self.a = to_categorical(chosen_action, self.action_space_size)
    self.r = 0.0
    return chosen_action

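# Every value-based agent in this file goes through a small wrapper exposing
# predict(state) and train(state, action_one_hot, target). The class below is
# a minimal sketch of such a wrapper, assuming a Keras model; the class name
# QNetworkSketch and the layer sizes are illustrative, not the original
# implementation.
import numpy as np
from tensorflow import keras


class QNetworkSketch:
    def __init__(self, state_dim: int, action_space_size: int):
        self.model = keras.Sequential([
            keras.layers.Dense(64, activation='relu', input_dim=state_dim),
            keras.layers.Dense(action_space_size, activation='linear'),
        ])
        self.model.compile(optimizer='adam', loss='mse')

    def predict(self, state_vec: np.ndarray) -> np.ndarray:
        # One Q-value per action for a single state.
        return self.model.predict(state_vec[np.newaxis, :], verbose=0)[0]

    def train(self, state_vec: np.ndarray, action_one_hot: np.ndarray,
              target: float) -> None:
        # Move only the taken action's Q-value towards the scalar target.
        q_values = self.predict(state_vec)
        q_values[int(np.argmax(action_one_hot))] = target
        self.model.fit(state_vec[np.newaxis, :], q_values[np.newaxis, :],
                       epochs=1, verbose=0)
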
def act(self, gs: GameState) -> int:
    available_actions = gs.get_available_actions(gs.get_active_player())
    state_vec = gs.get_vectorized_state()

    # Sample an action from the current policy distribution.
    action_probs = self.Q_policy_function.predict(state_vec)
    chosen_action = np.random.choice(available_actions,
                                     p=action_probs,
                                     replace=True)

    # Buffer the trajectory for the end-of-episode policy-gradient update.
    self.state.append(state_vec)
    self.rewards.append(self.r)
    self.log_probs.append(np.log(action_probs))
    self.probs.append(action_probs)
    self.action.append(chosen_action)
    self.a.append(to_categorical(chosen_action, self.action_space_size))
    self.r = 0.0
    return chosen_action

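# The buffers filled above (states, rewards, log-probabilities, one-hot
# actions) are meant to be consumed by a REINFORCE-style update at the end of
# the episode. The helper below sketches the discounted-return computation
# such an update relies on; it is an assumption about the training step, not
# the original code.
import numpy as np


def discounted_returns(rewards, gamma: float) -> np.ndarray:
    # G_t = r_t + gamma * r_{t+1} + gamma^2 * r_{t+2} + ...
    returns = np.zeros(len(rewards))
    running = 0.0
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns
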
def act(self, gs: GameState) -> int:
    available_actions = gs.get_available_actions(gs.get_active_player())
    state_vec = gs.get_vectorized_state()
    predicted_Q_values = self.Q.predict(state_vec)

    # Epsilon-greedy selection restricted to the legal actions.
    if np.random.random() <= self.epsilon:
        chosen_action = np.random.choice(available_actions)
    else:
        chosen_action = available_actions[int(
            np.argmax(predicted_Q_values[available_actions]))]

    # Replay a uniformly sampled batch from memory.
    batch = random.choices(self.memory, k=self.batch_size)
    for state, next_state, action, reward, done in batch:
        target = reward
        if not done:
            if self.s is not None:
                # Double-DQN style bootstrap evaluated on the current state.
                target = target + self.gamma * self.alternate_Q.predict(
                    state_vec)[available_actions][np.argmax(
                        self.Q.predict(state_vec)[available_actions])]
        self.Q.train(self.s, self.a, target)
    self.remember(self.s, state_vec, self.a, self.r, True)

    self.s = state_vec
    self.a = to_categorical(chosen_action, self.action_space_size)
    self.r = 0.0
    return chosen_action

def act(self, gs: GameState) -> int:
    # Once the apprentice network has been trained enough, let it play
    # directly (greedy over the legal actions).
    if self.apprentice_training_count > self.apprentice_training_before_takeover:
        return gs.get_available_actions(gs.get_active_player())[np.argmax(
            self.brain.predict(np.array([gs.get_vectorized_state()]))[0][
                gs.get_available_actions(gs.get_active_player())])]

    root_hash = gs.get_unique_id()
    memory = self.memory if self.keep_memory else dict()

    if root_hash not in memory:
        ExpertApprenticeAgent.create_node_in_memory(
            memory, root_hash,
            gs.get_available_actions(gs.get_active_player()),
            gs.get_active_player())

    for i in range(self.max_iteration):
        gs_copy = gs.clone()
        s = gs_copy.get_unique_id()
        history = []

        # SELECTION: follow UCB-1 while every edge of the node has been tried.
        while not gs_copy.is_game_over() and all(
                (edge['n'] > 0 for edge in memory[s])):
            chosen_edge = max(((edge, ExpertApprenticeAgent.ucb_1(edge))
                               for edge in memory[s]),
                              key=lambda kv: kv[1])[0]
            history.append((s, chosen_edge))
            gs_copy.step(gs_copy.get_active_player(), chosen_edge['a'])
            s = gs_copy.get_unique_id()
            if s not in memory:
                ExpertApprenticeAgent.create_node_in_memory(
                    memory, s,
                    gs_copy.get_available_actions(gs_copy.get_active_player()),
                    gs_copy.get_active_player())

        # EXPANSION: play one of the untried edges.
        if not gs_copy.is_game_over():
            chosen_edge = choice(
                list(filter(lambda e: e['n'] == 0,
                            (edge for edge in memory[s]))))
            history.append((s, chosen_edge))
            gs_copy.step(gs_copy.get_active_player(), chosen_edge['a'])
            s = gs_copy.get_unique_id()
            if s not in memory:
                ExpertApprenticeAgent.create_node_in_memory(
                    memory, s,
                    gs_copy.get_available_actions(gs_copy.get_active_player()),
                    gs_copy.get_active_player())

        # SIMULATION: random rollout until the game is over.
        while not gs_copy.is_game_over():
            gs_copy.step(
                gs_copy.get_active_player(),
                choice(gs_copy.get_available_actions(
                    gs_copy.get_active_player())))

        scores = gs_copy.get_scores()

        # BACKPROPAGATION of the final scores along the visited edges.
        for (s, edge) in history:
            edge['n'] += 1
            edge['r'] += scores[edge['p']]
            for neighbour_edge in memory[s]:
                neighbour_edge['np'] += 1

    # Apprentice target: root visit counts normalized into a distribution.
    target = np.zeros(gs.get_action_space_size())
    for edge in memory[root_hash]:
        target[edge['a']] = edge['n']
    target /= np.sum(target)

    self.states_buffer.append(gs.get_vectorized_state())
    self.actions_buffer.append(target)

    # Train the apprentice network once enough examples are buffered.
    if len(self.states_buffer) > 200:
        self.apprentice_training_count += 1
        self.brain.fit(np.array(self.states_buffer),
                       np.array(self.actions_buffer))
        self.states_buffer.clear()
        self.actions_buffer.clear()
        if self.apprentice_training_count > self.apprentice_training_before_takeover:
            print('Apprentice is playing next round')

    # Play the most visited action at the root.
    return max((edge for edge in memory[root_hash]), key=lambda e: e['n'])['a']

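# ucb_1 and create_node_in_memory are referenced above but not shown (they are
# static methods of the agent classes in the original). The standalone
# sketches below match the edge fields used in the tree search ('a' action,
# 'p' player, 'n' visits, 'np' parent visits, 'r' cumulated reward); the
# exploration constant sqrt(2) is an assumption. The HalfAlphaZeroAgent
# variant additionally stores a 'q' entry taken from the network's prediction
# for each action.
import math


def ucb_1(edge) -> float:
    # Mean reward plus an exploration bonus that grows with the parent's
    # visit count and shrinks with the edge's own visit count.
    return edge['r'] / edge['n'] + math.sqrt(
        2.0 * math.log(edge['np']) / edge['n'])


def create_node_in_memory(memory, node_hash, available_actions, player) -> None:
    # One edge per legal action, with zeroed statistics.
    memory[node_hash] = [
        {'a': a, 'p': player, 'n': 0, 'np': 0, 'r': 0.0}
        for a in available_actions
    ]
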
def act(self, gs: GameState) -> int:
    root_hash = gs.get_unique_id()
    memory = self.memory if self.keep_memory else dict()

    if root_hash not in memory:
        q_values = self.brain.predict(gs.get_vectorized_state())
        HalfAlphaZeroAgent.create_node_in_memory(
            memory, root_hash,
            gs.get_available_actions(gs.get_active_player()),
            gs.get_active_player(), q_values)

    for i in range(self.max_iteration):
        gs_copy = gs.clone()
        s = gs_copy.get_unique_id()
        history = []

        # SELECTION: follow UCB-1 while every edge of the node has been tried.
        while not gs_copy.is_game_over() and all(
                (edge['n'] > 0 for edge in memory[s])):
            chosen_edge = max(((edge, HalfAlphaZeroAgent.ucb_1(edge))
                               for edge in memory[s]),
                              key=lambda kv: kv[1])[0]
            history.append((s, chosen_edge))
            gs_copy.step(gs_copy.get_active_player(), chosen_edge['a'])
            s = gs_copy.get_unique_id()
            if s not in memory:
                q_values = self.brain.predict(gs_copy.get_vectorized_state())
                HalfAlphaZeroAgent.create_node_in_memory(
                    memory, s,
                    gs_copy.get_available_actions(gs_copy.get_active_player()),
                    gs_copy.get_active_player(), q_values)

        # EXPANSION: play one of the untried edges.
        if not gs_copy.is_game_over():
            chosen_edge = choice(
                list(filter(lambda e: e['n'] == 0,
                            (edge for edge in memory[s]))))
            history.append((s, chosen_edge))
            gs_copy.step(gs_copy.get_active_player(), chosen_edge['a'])
            s = gs_copy.get_unique_id()
            if s not in memory:
                q_values = self.brain.predict(gs_copy.get_vectorized_state())
                HalfAlphaZeroAgent.create_node_in_memory(
                    memory, s,
                    gs_copy.get_available_actions(gs_copy.get_active_player()),
                    gs_copy.get_active_player(), q_values)

        # No rollout: the network's value estimate stored on the edges
        # replaces the simulation result.
        scores = np.zeros(gs_copy.player_count())
        scores_set = np.zeros(gs_copy.player_count())

        # BACKPROPAGATION of the estimated scores along the visited edges.
        for (s, edge) in history:
            if scores_set[edge['p']] == 0:
                scores_set[edge['p']] = 1.0
                scores[edge['p']] = edge['q']
            edge['n'] += 1
            edge['r'] += scores[edge['p']]
            for neighbour_edge in memory[s]:
                neighbour_edge['np'] += 1

    # Play the most visited action at the root and buffer the example for
    # training the network.
    chosen_action = max((edge for edge in memory[root_hash]),
                        key=lambda e: e['n'])['a']
    if len(self.states_buffer) > 0:
        self.rewards_buffer.append(self.intermediate_reward)
    self.states_buffer.append(gs.get_vectorized_state())
    self.actions_buffer.append(
        to_categorical(chosen_action, gs.get_action_space_size()))
    self.intermediate_reward = 0.0
    return chosen_action