def uct_search(self, game_state):
    """Perform monte carlo tree search with uct policy.

    Returns list of tuples of possible actions and corresponding
    statistics (action, visit_count, average_reward)."""
    root_node = MCNode(game_state=game_state)
    mc_tree = MCTree(root_node=root_node)
    for sim_num in range(1, self.num_simulations + 1):
        selected_node = self.selection(mc_tree)
        rewards = self.simulation(selected_node)
        mc_tree.backup_rewards(leaf_node=selected_node, rewards=rewards)
    results = []
    for child in mc_tree.root_node.children:
        results.append((child.previous_action,
                        child.visits,
                        child.get_average_reward(root_node.current_player)))
    return results
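# A minimal usage sketch (not part of the original code): uct_search returns
# (action, visit_count, average_reward) tuples for the root's children, so a
# caller can choose the move to play from those statistics.  Picking the most
# visited action is one common convention; the helper name is hypothetical.
def pick_action_from_uct_results(results):
    """Return the action whose root child was visited most often."""
    best_action, _, _ = max(results, key=lambda entry: entry[1])
    return best_action
# e.g. action = pick_action_from_uct_results(player.uct_search(game_state))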
# Pillow is needed for the GIF-reversal step below.
from PIL import Image, ImageSequence


def main():
    game_state = sample_game_states[18]
    num_simulations = 100
    ucb_constant = 0.1
    player = UCTPlayer(ucb_const=ucb_constant, num_samples=1,
                       num_simulations=num_simulations)
    root_node = MCNode(game_state=game_state)
    mc_tree = MCTree(root_node=root_node)

    # Run the search and write a tree visualization after every simulation.
    for sim_num in range(1, player.num_simulations + 1):
        selected_node = player.selection(mc_tree)
        rewards = player.simulation(selected_node)
        mc_tree.backup_rewards(leaf_node=selected_node, rewards=rewards)
        mc_tree.visualize_tree(ucb=player.ucb_const,
                               filename="img_{}".format(sim_num))

    # Combine the snapshots into an animated GIF.  The frame list runs from the
    # last snapshot to the first, so Gif_test.gif plays the tree shrinking; the
    # reversed copy saved below plays the growth in chronological order.
    filenames = ["img_{}.png".format(i)
                 for i in range(player.num_simulations, 0, -1)]
    gif_name = "Gif_test.gif"
    duration = 0.2
    make_gif(filenames=filenames, outputname=gif_name, duration=duration)

    im = Image.open(gif_name)
    frames = [frame.copy() for frame in ImageSequence.Iterator(im)]
    frames.reverse()
    frames[0].save('reversed.gif', save_all=True, append_images=frames[1:])
def isuct_search(self, public_info):
    """Perform information set monte carlo tree search with uct policy.

    Returns list of tuples of possible actions and corresponding
    statistics (action, visit_count, average_reward)."""
    infoset = self.get_infoset(public_info=public_info, hand=self.hand[:])
    root_node = ISMCNode(infoset)
    mc_tree = MCTree(root_node=root_node)
    for sim_num in range(1, self.num_simulations + 1):
        sampled_state = self.sample_game_state(public_info=public_info)
        selected_node, selected_game_state = self.selection(mc_tree,
                                                            sampled_state)
        rewards = self.simulation(selected_game_state)
        mc_tree.backup_rewards(leaf_node=selected_node, rewards=rewards)
    results = []
    for child in mc_tree.root_node.children:
        results.append((child.previous_action,
                        child.visits,
                        child.get_average_reward(root_node.current_player)))
    return results
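# A hedged sketch (an assumption, not part of the original code): since every
# isuct_search iteration samples the hidden hands anew, a caller could also run
# several independent searches and merge the per-action statistics before
# choosing a move.  All names below are illustrative only.
def merge_isuct_results(result_lists):
    """Combine (action, visits, avg_reward) tuples from several searches by
    summing visit counts and visit-weighting the average rewards."""
    totals = {}  # action -> (total_visits, total_weighted_reward)
    for results in result_lists:
        for action, visits, avg_reward in results:
            prev_visits, prev_reward = totals.get(action, (0, 0.0))
            totals[action] = (prev_visits + visits,
                              prev_reward + visits * avg_reward)
    return [(action, visits, reward / visits if visits else 0.0)
            for action, (visits, reward) in totals.items()]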
def main():
    # Four identical NN players sharing the same trained models, one per seat.
    nn_model_paths = dict(
        game_mode_nn='../players/models/bigger_classifier200.hdf5',
        partner_nn='../players/models/partner_model_wider_data_2.hdf5',
        solo_nn='../players/models/solo_model_wider_data_10.hdf5',
        wenz_nn='../players/models/wenz_model_wider_data_10.hdf5')
    sim_player_list = [NNPlayer(**nn_model_paths) for _ in range(4)]

    game_state = sample_game_states[15]
    num_simulations = 100
    ucb_constant = 0.1
    # sim_player_list is prepared above but not passed here
    # (simulation_player_list=None), so the NN players are not used.
    player = UCTPlayer(ucb_const=ucb_constant, num_samples=1,
                       num_simulations=num_simulations,
                       simulation_player_list=None)
    root_node = MCNode(game_state=game_state)
    mc_tree = MCTree(root_node=root_node)

    for sim_num in range(1, player.num_simulations + 1):
        selected_node = player.selection(mc_tree)
        rewards = player.simulation(selected_node)
        mc_tree.backup_rewards(leaf_node=selected_node, rewards=rewards)

    mc_tree.visualize_tree(
        ucb=player.ucb_const,
        filename="Tree_{}nodes{}ucb_const{}game_mode".format(
            num_simulations, ucb_constant, game_state["game_mode"]))
def main():
    # Four identical NN players sharing the same trained models, one per seat.
    nn_model_paths = dict(
        game_mode_nn='../players/models/bigger_classifier200.hdf5',
        partner_nn='../players/models/partner_model_wider_data_2.hdf5',
        solo_nn='../players/models/solo_model_wider_data_10.hdf5',
        wenz_nn='../players/models/wenz_model_wider_data_10.hdf5')
    sim_player_list = [NNPlayer(**nn_model_paths) for _ in range(4)]

    num_simulations = 100
    ucb_constant = 0.1

    # Average maximum tree depth over all sample game states without the NN
    # players (simulation_player_list=None).
    cum_depth = 0
    for game_state in sample_game_states:
        player = UCTPlayer(ucb_const=ucb_constant, num_samples=1,
                           num_simulations=num_simulations,
                           simulation_player_list=None)
        root_node = MCNode(game_state=game_state)
        mc_tree = MCTree(root_node=root_node)
        for sim_num in range(1, player.num_simulations + 1):
            selected_node = player.selection(mc_tree)
            rewards = player.simulation(selected_node)
            mc_tree.backup_rewards(leaf_node=selected_node, rewards=rewards)
        cum_depth += mc_tree.max_depth()
    print('Average tree depth without NN:', cum_depth / len(sample_game_states))

    # Same experiment with the NN players supplied as simulation players.
    cum_depth_with_nn = 0
    for game_state in sample_game_states:
        player = UCTPlayer(ucb_const=ucb_constant, num_samples=1,
                           num_simulations=num_simulations,
                           simulation_player_list=sim_player_list)
        root_node = MCNode(game_state=game_state)
        mc_tree = MCTree(root_node=root_node)
        for sim_num in range(1, player.num_simulations + 1):
            selected_node = player.selection(mc_tree)
            rewards = player.simulation(selected_node)
            mc_tree.backup_rewards(leaf_node=selected_node, rewards=rewards)
        cum_depth_with_nn += mc_tree.max_depth()
    print('Average tree depth with NN:',
          cum_depth_with_nn / len(sample_game_states))