示例#1
0
def main():
    """Run 100 UCT simulations on sample game state 18, render the search
    tree after every simulation, assemble the renders into a GIF, and
    additionally write a reversed copy of that GIF.
    """
    game_state = sample_game_states[18]

    num_simulations = 100
    ucb_constant = 0.1

    player = UCTPlayer(ucb_const=ucb_constant, num_samples=1, num_simulations=num_simulations)

    root_node = MCNode(game_state=game_state)
    mc_tree = MCTree(root_node=root_node)

    for sim_num in range(1, player.num_simulations + 1):
        selected_node = player.selection(mc_tree)
        rewards = player.simulation(selected_node)
        mc_tree.backup_rewards(leaf_node=selected_node, rewards=rewards)

        # One frame per simulation so the GIF shows the tree growing.
        mc_tree.visualize_tree(ucb=player.ucb_const,
                               filename="img_{}".format(sim_num))

    # Frames are listed newest-first; the GIF is reversed again below so
    # 'reversed.gif' plays the tree growth in chronological order.
    filenames = ["img_{}.png".format(i) for i in range(player.num_simulations, 0, -1)]
    gif_name = "Gif_test.gif"
    duration = 0.2
    make_gif(filenames=filenames, outputname=gif_name, duration=duration)

    # Fix: the original leaked the open image file handle; use a context
    # manager so it is closed once all frames are copied out.
    with Image.open(gif_name) as im:
        frames = [frame.copy() for frame in ImageSequence.Iterator(im)]
    frames.reverse()
    # NOTE(review): no duration/loop arguments are passed here, so the
    # reversed GIF may not keep the 0.2 s frame timing — confirm.
    frames[0].save('reversed.gif', save_all=True, append_images=frames[1:])
示例#2
0
 def expand(self, mc_tree, node):
     """Attach one previously untried child of *node* to *mc_tree*.

     Picks a uniformly random action among those of the node's possible
     actions that no existing child corresponds to, derives the resulting
     state, wraps it in a new MCNode and registers it with the tree.
     Returns the newly created node.
     """
     untried = set(node.game_state["possible_actions"])
     # Drop every action already represented by a child (raises KeyError
     # if a child's action is not listed as possible — same as before).
     for existing_child in node.children:
         untried.remove(existing_child.previous_action)

     action = random.choice(tuple(untried))
     child_state = self.get_new_state(game_state=node.game_state, action=action)
     child_node = MCNode(parent=node, game_state=child_state, previous_action=action)
     mc_tree.add_node(node=child_node, parent_node=node)
     return child_node
示例#3
0
    def uct_search(self, game_state):
        """Run the configured number of UCT simulations from *game_state*.

        Builds a fresh Monte-Carlo tree rooted at the given state, runs
        selection / simulation / backup ``self.num_simulations`` times,
        and returns a list of ``(action, visit_count, average_reward)``
        tuples, one per root child, with the average reward taken from
        the root player's perspective.
        """
        root = MCNode(game_state=game_state)
        tree = MCTree(root_node=root)

        for _ in range(self.num_simulations):
            leaf = self.selection(tree)
            tree.backup_rewards(leaf_node=leaf, rewards=self.simulation(leaf))

        return [
            (child.previous_action,
             child.visits,
             child.get_average_reward(root.current_player))
            for child in tree.root_node.children
        ]
示例#4
0
def main():
    """Build a 100-simulation UCT search tree for sample game state 15
    and write a visualization of the finished tree to disk.

    NOTE(review): the original code constructed a list of four NNPlayer
    instances (eagerly loading four model files each) but then passed
    ``simulation_player_list=None``, so the players were never used.
    That dead code is removed here; pass a player list explicitly if
    NN-guided simulations are wanted.
    """
    game_state = sample_game_states[15]

    num_simulations = 100
    ucb_constant = 0.1

    player = UCTPlayer(ucb_const=ucb_constant,
                       num_samples=1,
                       num_simulations=num_simulations,
                       simulation_player_list=None)

    root_node = MCNode(game_state=game_state)
    mc_tree = MCTree(root_node=root_node)

    # Standard MCTS loop: select a leaf, roll out, back the rewards up.
    for _ in range(player.num_simulations):
        selected_node = player.selection(mc_tree)
        rewards = player.simulation(selected_node)
        mc_tree.backup_rewards(leaf_node=selected_node, rewards=rewards)

    mc_tree.visualize_tree(
        ucb=player.ucb_const,
        filename="Tree_{}nodes{}ucb_const{}game_mode".format(
            num_simulations, ucb_constant, game_state["game_mode"]))
示例#5
0
def test_add_node(game_state_partner, next_state, different_next_state):
    """Exercise child bookkeeping: add_child, reward/visit updates,
    average-reward computation, fully_expanded and best_child.
    """
    root = MCNode(game_state=game_state_partner, parent=None, previous_action=None)
    first = MCNode(game_state=next_state, parent=root, previous_action=(NO_GAME, None))
    root.add_child(first)
    assert len(root.children) == 1

    # First backup through the new child.
    first.update_rewards([30, 30, -30, -30])
    first.update_visits()
    root.update_visits()
    assert first.current_player == 1
    assert first.get_average_reward(first.current_player) == 30

    # Second backup: average of 30 and 20 is 25.
    first.update_rewards([20, 20, -20, -20])
    first.update_visits()
    root.update_visits()
    assert first.get_average_reward(first.current_player) == 25

    # A second child exists (linked via parent= only, not add_child),
    # so the root must not count as fully expanded.
    second = MCNode(game_state=different_next_state, parent=root,
                    previous_action=(PARTNER_MODE, LEAVES))
    assert not root.fully_expanded()

    second.update_rewards([-20, 20, 20, -20])
    second.update_visits()
    root.update_visits()
    assert root.best_child(ucb_const=1).previous_action == (NO_GAME, None)
示例#6
0
def _average_tree_depth(simulation_player_list):
    """Run a 100-simulation UCT search on every sample game state and
    return the mean maximum tree depth.

    simulation_player_list is forwarded to UCTPlayer: ``None`` for the
    default rollout policy, or a list of players (e.g. NNPlayers) for
    guided rollouts.
    """
    cumulative_depth = 0
    for game_state in sample_game_states:
        player = UCTPlayer(ucb_const=0.1,
                           num_samples=1,
                           num_simulations=100,
                           simulation_player_list=simulation_player_list)

        mc_tree = MCTree(root_node=MCNode(game_state=game_state))

        for _ in range(player.num_simulations):
            selected_node = player.selection(mc_tree)
            rewards = player.simulation(selected_node)
            mc_tree.backup_rewards(leaf_node=selected_node, rewards=rewards)

        cumulative_depth += mc_tree.max_depth()

    return cumulative_depth / len(sample_game_states)


def main():
    """Compare average UCT tree depth across the sample game states with
    and without NN-guided simulation players.

    The original duplicated the whole measurement loop twice, differing
    only in the ``simulation_player_list`` argument; both runs now share
    the ``_average_tree_depth`` helper.
    """
    # All four seats use the same set of model files.
    model_paths = dict(
        game_mode_nn='../players/models/bigger_classifier200.hdf5',
        partner_nn='../players/models/partner_model_wider_data_2.hdf5',
        solo_nn='../players/models/solo_model_wider_data_10.hdf5',
        wenz_nn='../players/models/wenz_model_wider_data_10.hdf5')
    sim_player_list = [NNPlayer(**model_paths) for _ in range(4)]

    print('Average tree depth without NN:', _average_tree_depth(None))
    print('Average tree depth with NN:', _average_tree_depth(sim_player_list))