Example #1
# Flask-style request handler: reads a 16x16 Gomoku board from JSON,
# replays the opponent's latest move, and answers with the engine's move.
# Assumed context (not shown in the original snippet): `request` from Flask,
# `pickle`, and the project's GameState, predict, and MCTSPolicy.
def f():
    if request.method == 'POST':
        data = request.get_json()
        print(data)
        state = []
        me_moves = []
        you_moves = []
        your_move = None

        # Cells arrive keyed '1'..'256'; 1 = our stone, -1 = opponent's.
        for i in range(256):
            value = int(data['Inputs']['input1'][0][str(i + 1)])
            state.append(value)
            if value == 1:
                me_moves.append((i // 16, i % 16))
            elif value == -1:
                you_moves.append((i // 16, i % 16))

        if len(me_moves) == 0:
            # First move of a new game.
            game = GameState()
            origin = None
            if len(you_moves) > 0:
                your_move = you_moves[0]
            if len(you_moves) > 1:
                return 'Error: you_moves > 1'
        else:
            # Resume the game persisted by the previous request.
            with open('game.pkl', 'rb') as fp:
                game = pickle.load(fp)
            with open('origin.pkl', 'rb') as fp:
                origin = pickle.load(fp)
            with open('state.pkl', 'rb') as fp:
                pre_state = pickle.load(fp)
            your_move = None
            # The opponent's new move is the single cell that changed to -1.
            for i in range(16):
                for j in range(16):
                    if (state[i * 16 + j] != pre_state[i * 16 + j]
                            and state[i * 16 + j] == -1):
                        if your_move is not None:
                            return 'Error: your_move not none'
                        your_move = (i, j)

        if your_move is not None:
            print('Moved', your_move)
            game.move(*your_move)

        player_icon = None
        if len(me_moves) == len(you_moves):
            player_icon = 'X'
        elif len(me_moves) == (len(you_moves) - 1):
            player_icon = 'O'
        else:
            return 'Error: no player icon'

        recom_moves = predict(state, model_name='gomoku_nas.hdf5')
        print(recom_moves)
        print(origin)
        print(game)

        tar, tree = MCTSPolicy(player=player_icon).move(
            game, recom_moves, 100, origin)
        game.move(*tar)
        print(game)
        with open('game.pkl', 'wb') as fp:
            pickle.dump(game, fp)
        with open('origin.pkl', 'wb') as fp:
            pickle.dump(tree, fp)
        state[tar[0] * 16 + tar[1]] = 1
        with open('state.pkl', 'wb') as fp:
            pickle.dump(state, fp)
        return str(tar[0] * 16 + tar[1])
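The handler encodes the cell at (row, col) as the JSON key str(row * 16 + col + 1) and replies with the flat index row * 16 + col of its own move. A minimal client sketch under those assumptions -- the URL, port, and use of the requests library are illustrative, not part of the original example:

import requests  # assumed HTTP client

def board_to_payload(board):
    # board: 16x16 grid of 1 (our stone), -1 (opponent's stone), 0 (empty)
    cells = {str(r * 16 + c + 1): str(board[r][c])
             for r in range(16) for c in range(16)}
    return {'Inputs': {'input1': [cells]}}

board = [[0] * 16 for _ in range(16)]
board[7][7] = -1  # suppose the opponent opened at (7, 7)
resp = requests.post('http://localhost:5000/move', json=board_to_payload(board))
idx = int(resp.text)
print('engine plays', (idx // 16, idx % 16))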
Example #2
from gameplay import play_game
from policies import RandomPolicy, MCTSPolicy
import numpy as np
import networkx as nx

player_policies = [MCTSPolicy(), RandomPolicy()]

# For reproducibility
np.random.seed(0)

# Play 100 games; each result's first element is the game's search graph.
games = [play_game(player_policies) for _ in range(100)]

graphs = [game[0] for game in games]
dot_graph_combined = nx.compose_all(graphs)
dot_graph = nx.nx_pydot.to_pydot(dot_graph_combined)  # nx.to_pydot() in very old networkx
dot_graph.set_graph_defaults(fontname='Courier')
dot_graph.write_png('multiple_game_graph.png')
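nx.compose_all() merges graphs by node identity: nodes and edges shared across the per-game MCTS trees appear only once in the combined graph, which is what lets 100 searches be overlaid into a single picture. A small illustration:

import networkx as nx

g1 = nx.DiGraph([('root', 'a'), ('a', 'b')])
g2 = nx.DiGraph([('root', 'a'), ('a', 'c')])
merged = nx.compose_all([g1, g2])  # shared nodes/edges are unified
print(sorted(merged.edges()))      # [('a', 'b'), ('a', 'c'), ('root', 'a')]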
Example #3
    def __init__(self, players, turn_id, board):
        # (Identical to the Game.__init__ shown in full in Example #4 below;
        # the verbatim duplicate is omitted here.)
        ...
Example #4
class Game():
    GAME_STATUS = ["Playing", "End", "Draw"]

    def __init__(self, players, turn_id, board):

        self.board = board
        ## to keep track of whose turn it is
        self.players = players
        self.turn_id = turn_id
        self.status = Game.GAME_STATUS[0]
        self.turn = self.players[self.turn_id]
        self.flag_for_drawing_canvas = False

        # in case a move needs to be made through Random
        self.random_policy = RandomPolicy()
        ## MCTSPolicy(a, b) -- a is the player, b is the opponent

        self.mctsObj_X = MCTSPolicy(self.players[0],
                                    self.players[1],
                                    board=self.board)
        self.mctsObj_O = MCTSPolicy(self.players[1],
                                    self.players[0],
                                    board=self.board)

        self.mctsObjs = [self.mctsObj_X, self.mctsObj_O]
        """
        model_dir = "./analysis-tools/models_ex/"
        model_w_file = "model_2020-01-09-15-15-06_BEST_SO_FAR_WITH_Early_Stop-0.90-upto2-0.924weights.h5"
        model_json_file = "model_2020-01-09-15-15-06_BEST_SO_FAR_WITH_Early_Stop-0.90-upto2-0.924in_json.json"
        """
        self.model_based_policy = None  # ModelPolicy(model_dir, model_w_file, model_json_file)
        for each_player in self.players:
            print(each_player.get_policy_mode())
            if each_player.get_policy_mode() == "MODEL":
                model_dir = "./analysis-tools/models_ex/"

                # Checkpoint history from training runs. Each later assignment
                # overrides the previous one, so only the final uncommented
                # pair takes effect; superseded pairs are kept commented out
                # for reference.
                #model_w_file = "model_2020-01-09-17-23-04_BEST_SO_FAR_WITH_Early_Stop-0.730-upto2-0.925weights.h5"
                #model_json_file = "model_2020-01-09-17-23-04_BEST_SO_FAR_WITH_Early_Stop-0.730-upto2-0.925in_json.json"

                # Second best
                #model_w_file = "model_2020-01-09-15-15-06_BEST_SO_FAR_WITH_Early_Stop-0.90-upto2-0.924weights.h5"
                #model_json_file = "model_2020-01-09-15-15-06_BEST_SO_FAR_WITH_Early_Stop-0.90-upto2-0.924in_json.json"

                # Very good
                #model_w_file = "model_2020-01-11-11-16-48_win_sample_focus_0.875_weights.h5"
                #model_json_file = "model_2020-01-11-11-16-48_win_sample_focus_0.875_in_json.json"

                # Third best
                #model_json_file = "model_2020-01-11-20-11-26_win_sample_focus_0.91_in_json.json"
                #model_w_file = "model_2020-01-11-20-11-26_win_sample_focus_0.91_weights.h5"

                ## Looked best so far; evaluation pending
                #model_json_file = "model_2020-01-11-20-39-46_winAndLoss_sample_focus_0.71_in_json.json"
                #model_w_file = "model_2020-01-11-20-39-46_winAndLoss_sample_focus_0.71_weights.h5"

                # Done
                #model_json_file = "model_2020-01-11-21-07-12_winAndLoss_sample_focus_0.75_in_json.json"
                #model_w_file = "model_2020-01-11-21-07-12_winAndLoss_sample_focus_0.75_weights.h5"

                # Done
                #model_json_file = "model_2020-01-12-08-47-16_winAndLoss_sample_focus_0.749_in_json.json"
                #model_w_file = "model_2020-01-12-08-47-16_winAndLoss_sample_focus_0.749_weights.h5"

                #model_w_file = "model_2020-01-12-18-59-53_winAndLoss_sample_focus_0.71_weights.h5"
                #model_json_file = "model_2020-01-12-18-59-53_winAndLoss_sample_focus_0.71_in_json.json"

                # Done -- 1K loss weighting for preventing losses
                #model_w_file = "model_2020-01-12-19-29-34_winAndLoss_Loss1KWeights_sample_focus_0.70_weights.h5"
                #model_json_file = "model_2020-01-12-19-29-34_winAndLoss_Loss1KWeights_sample_focus_0.70_in_json.json"

                # Done
                #model_w_file = "model_2020-01-12-21-02-40_winAndLoss_sample_focus_0.733_weights.h5"
                #model_json_file = "model_2020-01-12-21-02-40_winAndLoss_sample_focus_0.733_in_json.json"

                # Done -- from a buggy weighting scheme
                #model_json_file = "model_2020-01-12-21-40-17_winAndLoss_combinedWithUniq_sample_focus_0.649_in_json.json"
                #model_w_file = "model_2020-01-12-21-40-17_winAndLoss_combinedWithUniq_sample_focus_0.649_weights.h5"

                # Worst so far
                #model_w_file = "model_2020-01-13-07-27-36_winAndLoss_combinedWithUniq_sample_focus_0.41_weights.h5"
                #model_json_file = "model_2020-01-13-07-27-36_winAndLoss_combinedWithUniq_sample_focus_0.41_in_json.json"

                # Best so far
                #model_json_file = "model_2020-01-13-21-02-40_winAndLoss_Loss1KWeights_sample_focus_0.718_in_json.json"
                #model_w_file = "model_2020-01-13-21-02-40_winAndLoss_Loss1KWeights_sample_focus_0.718_weights.h5"

                # Done -- the checkpoint currently in effect
                model_json_file = "model_2020-01-14-00-19-22_winAndLoss_combinedWithUniq_sample_focus_0.65_in_json.json"
                model_w_file = "model_2020-01-14-00-19-22_winAndLoss_combinedWithUniq_sample_focus_0.65_weights.h5"

                # Done
                #model_json_file = "model_2020-01-14-21-34-33_winAndLoss_withOneHotEncodeForLabel_sample_focus_0.7108_in_json.json"
                #model_w_file = "model_2020-01-14-21-34-33_winAndLoss_withOneHotEncodeForLabel_sample_focus_0.7108_weights.h5"
                model_obj = each_player.get_model_obj()
                # ModelPolicy construction is disabled here; model_obj is
                # fetched from the player but not used further in this snippet.
                #self.model_based_policy = ModelPolicy(model_obj)  # or ModelPolicy(model_dir, model_w_file, model_json_file)
                break
        self.game_id = uuid.uuid1()

    def show_progress_on_canvas(self, a_boolean_flag):
        self.flag_for_drawing_canvas = a_boolean_flag

    def set_to_next_player(self):
        next_turn = (self.turn_id + 1) % len(self.players)
        self.turn_id = next_turn
        self.turn = self.players[self.turn_id]

    def is_end(self):
        if self.is_draw():
            return True

        for each_player in self.players:
            if self.check_end_status(each_player):
                return True
        return False

    def check_end_status(self, a_player):
        return self.board.is_win(a_player)

    def get_input(self):
        # Expects input of the form "<row letter>_<column index>",
        # e.g. "b_2" -> (1, 2).
        prompt = "%s's Turn\n" % (self.turn)
        input_from_user = input(prompt)
        r, c = input_from_user.split("_")
        r_int = ord(r) - ord('a')
        c_int = int(c)
        return r_int, c_int

    def validate_input(self):
        available_pos = self.board.get_available_positions()
        while True:
            r, c = self.get_input()
            if available_pos.get((r, c), 0) == 1:
                break
            print("Try again. Your input is not an available position.")
        return r, c

    # def convert_sequence_moves_to_vector(self):
    #     individual_sequence = [0] * 9
    #     for item in self.board.sequences_of_movements:
    #         turn_for_this_move = item.get("turn")
    #         move_made_for_this_move = item.get("position")
    #         individual_sequence[move_made_for_this_move - 1] = 1 if turn_for_this_move == "X" else 2
    #
    #     return np.array([individual_sequence])

    def play_game(self):
        turn_id = 0
        game_log = {
            'game_uuid': self.get_game_id(),
            'play_modes': {
                'X': self.players[0].get_policy_mode(),
                'O': self.players[1].get_policy_mode()
            },
            'board_size': self.board.row,
            'winner': "",
            'sequence': {}
        }
        canvas_for_drawing = None

        if self.flag_for_drawing_canvas:
            #turtle.setup(500, 500)
            canvas_for_drawing = Draw()
        is_draw_gametie = False
        """
        # Below block must be gone
        # from model_loader import ModelBasedAgent
        # model_dir = "./analysis-tools/models_ex/"
        # model_w_file = model_dir + "current_best.h5" #"model_2020-01-09-15-15-06_BEST_SO_FAR_WITH_Early_Stop-0.90-upto2-0.924weights.h5"
        # model_json_file = model_dir + "current_best.json" #model_2020-01-09-15-15-06_BEST_SO_FAR_WITH_Early_Stop-0.90-upto2-0.924in_json.json"
        # model_agent_obj = ModelBasedAgent(model_w_file, model_json_file)
        # mlModel = model_agent_obj.get_model()
        """
        while not self.check_end_status(self.turn):
            print(self.board)
            test_instance = self.board.convert_sequence_moves_to_vector()
            #print(test_instance)
            if self.turn.get_player_type() == Player.PTYPE_HUMAN:
                # TODO -- this part is just to make a simplified interface of modelbased movement
                # later, this will be the part of Policy as a ModelPolicy class
                # for now, we assume player O would be model.. as X is always starting first

                #test_instance = np.array([an_instance])

                #prediction_move = mlModel.predict_proba(test_instance)[0]
                #pp = model_agent_obj.predict_proba(test_instance)[0]
                #UT.print_three_arrays(test_instance[0], pp, prediction_move)
                #move_by_prediction = np.argmax(pp) + 1
                #r_e, c_e = self.board.indices_to_coordinate(move_by_prediction)
                #print("R:%d C:%d \t i_e:%d R_e:%d C_e:%d" % (r_v, c_v, move_by_prediction, r_e, c_e))
                r_v, c_v = self.validate_input()

            else:  # when the player is an agent
                if self.turn.get_policy_mode() == "MODEL":
                    model_structure = 3  # 0: regular, 1: two-tower, 2: conv2d, 3: conv2d + two towers
                    r_v, c_v = self.turn.model_based_policy.move(
                        self.board, model_structure)
                elif self.turn.get_policy_mode() == "MCTS":
                    # TODO -- a simplified model-based movement interface
                    # could also hook in here.
                    if self.turn.get_marker() == "O":
                        r_v, c_v = self.mctsObj_O.move(self.board)
                    elif self.turn.get_marker() == "X":
                        r_v, c_v = self.mctsObj_X.move(self.board)
                elif self.turn.get_policy_mode() == "RANDOM":
                    self.random_policy = RandomPolicy()
                    r_v, c_v = self.random_policy.move(self.board)

            self.board.set_a_move(r_v, c_v, self.turn)
            UT.print_as_log(self.board.get_available_positions())
            ## Drawing on canvas
            if self.flag_for_drawing_canvas:
                canvas_for_drawing.move_and_draw(r_v, c_v,
                                                 self.turn.get_marker())

            if self.check_end_status(self.turn):
                print("FinalResult: %s" % (self.turn.get_marker()))
                print(self.board)
                print(self.board.convert_sequence_moves_to_vector())
                #UT.print_as_log("Winning and so ending this game")
                UT.print_as_log(self.board.sequences_of_movements)
                game_log['winner'] = self.turn.get_marker()
                game_log['sequence'] = self.board.sequences_of_movements

                break

            elif self.is_draw():
                is_draw_gametie = True
                print("FinalResult: Draw")
                #UT.print_as_log("Draw.... so, exiting the game")
                print(self.board)
                print(self.board.convert_sequence_moves_to_vector())
                game_log['winner'] = "D"
                game_log['sequence'] = self.board.sequences_of_movements
                break
            else:
                self.set_to_next_player()

        ## for writing a message to the canvas
        if self.flag_for_drawing_canvas:
            result_message = "Game result -- Winner is %s" % (
                game_log.get("winner"))
            if is_draw_gametie:
                result_message = "Game result: Draw"
            canvas_for_drawing.write_text(result_message)
            canvas_for_drawing.exit_on_click()

            #canvas_for_drawing.reset_canvas()
            #turtle.TurtleScreen._RUNNING = True
        return game_log  # a dict; serialize with json.dumps() if needed

    def a_move_for_agent(self):
        r, c = self.a_move_for_agent_helper()
        return r, c

    ## picks a uniformly random available position for the agent
    def a_move_for_agent_helper(self):
        all_available_positions_dict = self.board.get_available_positions()
        random_move_index = np.random.randint(len(all_available_positions_dict))
        r, c = list(all_available_positions_dict.keys())[random_move_index]
        return r, c

    def is_draw(self):
        return len(self.board.get_available_positions()) < 1

    @staticmethod
    def load_a_game(afile):
        move_sequences = UT.read_a_game(afile)
        if move_sequences:
            Game.parse_history(move_sequences)

    @staticmethod
    def parse_history(adict, message_str=None):

        winner = adict.get("winner", None)
        if winner is None:
            print("Error: game history has no winner")
            sys.exit(1)
        move_sequences = adict.get("sequence", None)
        turtle.hideturtle()
        board_obj_from_history = Board(3, 3, 3)
        # The Draw object renders the replayed moves on a canvas;
        # comment it out if you don't want the drawing.
        draw_board_obj = Draw()
        for each_move in move_sequences:
            player_marker = each_move.get("turn")
            r_index, c_index = each_move.get("xy")
            p = Player("test", player_marker, 1)
            board_obj_from_history.set_a_move(r_index, c_index, p)
            draw_board_obj.move_and_draw(r_index, c_index, player_marker)
            print(board_obj_from_history)

        draw_board_obj.write_text(
            ("Winner is:  %s -- sampled %s" % (winner, str(message_str))))
        time.sleep(3)
        draw_board_obj.turtle_obj.getpen().clear()
        draw_board_obj.turtle_obj.getscreen().clearscreen()

        # draw_board_obj.exit_on_click()
        # or

    def get_game_id(self):
        return str(self.game_id)
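A minimal driver for the class above, as a sketch only: the Player and Board constructor arguments are inferred from parse_history() and Game.__init__ and may differ in the actual project.

players = [Player("p1", "X", 1), Player("p2", "O", 1)]  # args as in parse_history()
game = Game(players, turn_id=0, board=Board(3, 3, 3))   # assumed rows, cols, win length
game.show_progress_on_canvas(False)                     # skip the turtle canvas
game_log = game.play_game()                             # returns the game_log dict
print(game_log["winner"], game_log["game_uuid"])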
"""
Plays many games and then plots the cumulative win rates of the players.

The player policies can be chosen from MCTS and Random.
"""

from gameplay import play_game
from policies import RandomPolicy, MCTSPolicy
from visualization import visualize_mcts_tree
import networkx as nx
import numpy as np

# Choose the player policies here:
MCTS_vs_Random = [MCTSPolicy(player='X'), RandomPolicy()]
Random_vs_MCTS = [RandomPolicy(), MCTSPolicy(player='O')]
MCTS_vs_MCTS = [MCTSPolicy(player='X'), MCTSPolicy(player='O')]
Random_vs_Random = [RandomPolicy(), RandomPolicy()]

experiments = [
    [MCTSPolicy(player='X'), RandomPolicy()],
    [MCTSPolicy(player='X'), RandomPolicy()],
    [RandomPolicy(), MCTSPolicy(player='O')],
    [RandomPolicy(), MCTSPolicy(player='O')],
    [MCTSPolicy(player='X'), MCTSPolicy(player='O')],
    [MCTSPolicy(player='X'), MCTSPolicy(player='O')],
    [RandomPolicy(), RandomPolicy()],
    [RandomPolicy(), RandomPolicy()],
]

names = [
    'x_mcts_vs_o_random_1', 'x_mcts_vs_o_random_2', 'x_random_vs_o_mcts_1',
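The snippet is cut off before the plotting step promised in the docstring. A sketch of how the cumulative win rates could be computed and plotted, assuming matplotlib and a `winners` list holding one 'X'/'O'/None entry per game:

import numpy as np
import matplotlib.pyplot as plt

winners = ['X', 'O', 'X', None, 'X']  # illustrative results only
games_played = np.arange(1, len(winners) + 1)
x_rate = np.cumsum([w == 'X' for w in winners]) / games_played
plt.plot(games_played, x_rate, label="X cumulative win rate")
plt.xlabel("games played")
plt.ylabel("win rate")
plt.legend()
plt.show()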
Example #6
# Assumed context for this fragment (not shown in the original snippet):
# GameState and MCTSPolicy come from the surrounding project, and the
# stdlib `copy` module is imported for the deepcopy below.
def play_game(player_policies):
    game = GameState()
    origin = None

    # Inform the player policies that a new game is starting (so they can reset their current move pointers)
    for player_policy in player_policies:
        if isinstance(player_policy, MCTSPolicy):
            player_policy.reset_game()

    while game.winner() is None:
        for player_policy in player_policies:
            # print("\n================ ( It is {}'s move. ) ================".format(game.turn()))

            # Player 1 : AI
            if game.turn() == "X":  # `is` on str literals is unreliable; compare with ==

                # player_policy.__init__('X')

                # Recommended moves: every cell of the 16x16 board,
                # i.e. (0, 0), (0, 1), ..., (15, 15).
                recom_moves = [(r, c) for r in range(16) for c in range(16)]
                """
                recom_moves = []
                while True:
                    try:
                        m, n = input("recommend : ").split()
                        m = int(m)
                        n = int(n)
                        tar = (m, n)
                        recom_moves.append(tar)
                    except:
                        break
                print(recom_moves)
                """

                # A fresh MCTSPolicy is built every turn; the previous search
                # tree is threaded back in through `origin`.
                # tar, tree = player_policy.move(game, recom_moves, 100, origin)
                tar, tree = MCTSPolicy(player='X').move(game, recom_moves, 100, origin)
                origin = copy.deepcopy(tree)

                print("MCTS: " + str(tar))

            # Player 2 : Human
            else:
                m, n = input("which Do you want? : ").split()
                m = int(m)
                n = int(n)
                tar = (m, n)

            game.move(*tar)
            print(game)

            if game.winner() is not None:
                break

    return game.winner()
Example #7
# (This snippet repeats the tail of play_game() from Example #6 verbatim;
# only the entry point below is new.)


# main
winner = play_game([MCTSPolicy(player='X'), MCTSPolicy(player='O')])
print('Game over. Winner is Player ' + str(winner))