Example #1
0
 def test_performance(self):
     """Round-robin performance comparison of the custom heuristic.

     Plays ``total`` games of Custom Score vs. each control heuristic and
     prints per-heuristic win rates.  Fixes: removed the dead commented-out
     heuristic dictionaries, removed the pointless ``for j in range(1)``
     wrapper loop, and renamed the loop variable that shadowed the builtin
     ``round``.
     """
     control_group = {"Improved Score" : sample_players.improved_score}
     test_group = {"Custom Score": game_agent.custom_score}
     total = 10
     # scores[test_name][i] counts wins against the i-th control heuristic.
     scores = {name: [0] * len(control_group) for name in test_group}
     for round_num in range(total):
         for k in test_group:
             for i, c in enumerate(control_group):
                 player1 = game_agent.AlphaBetaPlayer(score_fn=test_group[k])
                 player2 = game_agent.AlphaBetaPlayer(score_fn=control_group[c])
                 game = isolation.Board(player1, player2, 7, 7)
                 # Two random opening moves (one per player) so each
                 # round starts from a different position.
                 for _ in range(2):
                     move = random.choice(game.get_legal_moves())
                     game.apply_move(move)
                 winner, history, outcome = game.play()
                 winner_str = "Player 1" if winner == player1 else "Player 2"
                 print("Player 1:",k,"| Player 2:",c)
                 print("Winner: {}\nOutcome: {}".format(winner_str, outcome))
                 print("Percentage of blanks spaces left:", len(game.get_blank_spaces())/(game.width*game.height))
                 print("Move history:\n{!s}\n".format(history))
                 if winner == player1:
                     scores[k][i] += 1
     print(list(control_group))
     for k in scores:
         print("{0} | {1}".format(k, [x/total for x in scores[k]]))
Example #2
0
 def setUp(self):
     """Fresh improved_score-vs-custom_score_3 alpha-beta matchup per test."""
     reload(game_agent)
     make_player = game_agent.AlphaBetaPlayer
     # Baseline agent scored by the sample improved_score heuristic.
     self.player1 = make_player(score_fn=sample_players.improved_score,
                                timeout=20)
     # Agent under evaluation, scored by the third custom heuristic.
     self.player2 = make_player(score_fn=game_agent.custom_score_3,
                                timeout=20)
     self.game = isolation.Board(self.player1, self.player2)
Example #3
0
    def test_get_distances_center(self):

        """
        get_distances from the board center matches the hand-computed grid.

        |  |  |  |  |  |  |  |              | 2| 3| 2| 3| 2| 3| 2|
        |  |  |  |  |  |  |  |              | 3| 4| 1| 2| 1| 4| 3|
        |  |  |  |  |  |  |  |              | 2| 1| 2| 3| 2| 1| 2|
        |  |  |  | 1|  |  |  |    =====>    | 3| 2| 3| 0| 3| 2| 3|
        |  |  |  |  |  |  |  |              | 2| 1| 2| 3| 2| 1| 2|
        |  |  |  |  |  |  |  |              | 3| 4| 1| 2| 1| 4| 3|
        |  |  |  |  |  |  |  |              | 2| 3| 2| 3| 2| 3| 2|

              Game State                    Distances to Player 1
        """

        reload(game_agent)
        self.player1 = game_agent.AlphaBetaPlayer()
        self.player2 = game_agent.AlphaBetaPlayer()
        self.game = isolation.Board(self.player1, self.player2)
        # Place player 1 on the center square of the default 7x7 board.
        self.game.apply_move((3, 3))
        distances = game_agent.get_distances(
            self.game, self.game.get_player_location(self.player1))
        # Bug fix: a bare `assert` is stripped under `python -O`; the
        # unittest assertion also reports a readable diff on failure.
        self.assertEqual(distances,
                         [2, 3, 2, 3, 2, 3, 2,
                          3, 4, 1, 2, 1, 4, 3,
                          2, 1, 2, 3, 2, 1, 2,
                          3, 2, 3, 0, 3, 2, 3,
                          2, 1, 2, 3, 2, 1, 2,
                          3, 4, 1, 2, 1, 4, 3,
                          2, 3, 2, 3, 2, 3, 2])
Example #4
0
    def test_optimalMove_1(self):
        """A depth-1 alpha-beta search on a fixed position picks the one optimal move."""
        width, height, depth = 9, 9, 1

        self.player1 = game_agent.AlphaBetaPlayer(search_depth=depth,
                                                  score_fn=improved_score)
        self.player2 = game_agent.AlphaBetaPlayer(search_depth=depth,
                                                  score_fn=improved_score)
        self.game = isolation.Board(self.player1, self.player2, width, height)

        test_game = self.game.copy()
        # Fixed mid-game position; the trailing entries encode player cells.
        test_game._board_state = [0, 0, 0, 0, 0, 0, 0, 0, 0,
                                  0, 0, 0, 0, 0, 0, 0, 0, 0,
                                  0, 0, 0, 1, 1, 1, 0, 0, 0,
                                  0, 0, 1, 1, 0, 1, 1, 0, 0,
                                  0, 0, 1, 1, 0, 0, 1, 0, 0,
                                  0, 0, 1, 1, 0, 1, 1, 0, 0,
                                  0, 1, 0, 1, 1, 1, 0, 0, 0,
                                  0, 0, 0, 1, 0, 0, 0, 0, 0,
                                  0, 0, 1, 0, 0, 0, 0, 0, 0,
                                  0, 74, 22]
        optimal_move_list = [(2, 1), ]
        print(test_game.to_string())
        # Clock stub: always reports plenty of time remaining.
        time_left = lambda: 150
        selection_move = test_game._active_player.get_move(test_game,
                                                           time_left=time_left)
        print("Selection move:\n", selection_move)
        self.assertTrue(selection_move in optimal_move_list)
    def test_alphabeta2(self):
        """Play out a fixed mid-game 9x9 position between two named agents.

        Fixes: the bare ``assert`` (stripped under ``python -O``) is replaced
        with a unittest assertion, and the dead commented-out GreedyPlayer
        line is removed.
        """
        player1 = game_agent.AlphaBetaPlayer(name='Player 1')
        player2 = game_agent.AlphaBetaPlayer(name='Player 2')
        game = isolation.Board(player1, player2, width=9, height=9)
        # Fixed mid-game position (last two entries locate the players).
        game._board_state = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 0, 1, 0,
            0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0,
            0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 30, 67
        ]
        print('Game from start')
        print('Legal moves >{}<'.format(game.get_legal_moves()))
        print(game.to_string())

        # players take turns moving on the board, so player1 should be next to move
        self.assertEqual(player1, game.active_player)

        # play the remainder of the game automatically -- outcome can be "illegal
        # move", "timeout", or "forfeit"
        winner, history, outcome = game.play()
        print("\nWinner: {}\nOutcome: {}".format(winner, outcome))
        print(game.to_string())
        print("Move history:\n{!s}".format(history))
        print(
            'Game tree evaluation order:\n[(2, 6), (2, 8), (6, 6)]\n[(1, 2)]')
    def test_alphabeta(self):
        """Two depth-2 agents finish a game from a fixed 9x9 position.

        Fix: the bare ``assert`` (stripped under ``python -O``) is replaced
        with a unittest assertion.
        """
        p1 = game_agent.AlphaBetaPlayer()
        p1.search_depth = 2

        p2 = game_agent.AlphaBetaPlayer()
        p2.search_depth = 2

        # Create a smaller board for testing
        self.game = isolation.Board(p1, p2, 9, 9)

        # Fixed mid-game position (last two entries locate the players).
        self.game._board_state = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1,
            1, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0,
            0, 0, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 0, 0, 0,
            0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 51, 69
        ]

        logging.info(self.game.to_string())

        # players take turns moving on the board, so player1 should be next to move
        self.assertEqual(p1, self.game.active_player)

        winner, history, outcome = self.game.play()
        logging.info("\nWinner: {}\nOutcome: {}".format(winner, outcome))
        logging.info(self.game.to_string())
        logging.info("Move history:\n{!s}".format(history))
Example #7
0
    def test_get_max_depth_case1(self):
        """
        | -| -|  | -| -| -|  |              | -| -| 2| -| -| -| 4|
        |  | -| -| -|  | -| -|              | 1| -| -| -| 3| -| -|
        | -| -| -| -| -|  | -|              | -| -| -| -| -| 5| -|
        | -| 1| -| -| -| -| -|    =====>    | -| 0| -| -| -| -| -|   ===> Max height = 6
        | -| -| -| -| -| -|  |              | -| -| -| -| -| -| 6|
        | -| -| -| -| -| -| -|              | -| -| -| -| -| -| -|
        | -| -| -| -| -|  | -|              | -| -| -| -| -| 7| -|

             Game State                      Max height from Player 1, cut off at 6 
        """

        self.player1 = game_agent.AlphaBetaPlayer()
        self.player2 = game_agent.AlphaBetaPlayer()
        self.game = isolation.Board(self.player1, self.player2)
        # Mark every cell blocked, then reopen only the cells drawn blank in
        # the diagram above.  NOTE(review): nonzero board entries appear to
        # mean "occupied" here — confirm against the Board implementation.
        self.game._board_state[0: self.game.height * self.game.width] = [float("inf") for _ in range(self.game.height * self.game.width)]
        self.game._board_state[1] = 0 #1
        self.game._board_state[10] = 0 #0
        self.game._board_state[14] = 0 #2
        self.game._board_state[29] = 0 #3
        self.game._board_state[37] = 0 #5
        self.game._board_state[41] = 0 #7
        self.game._board_state[42] = 0 #4
        self.game._board_state[46] = 0 #6

        self.game.apply_move((3,1))

        max_depth = game_agent.get_max_depth(self.game, self.game.get_player_location(self.player1))
        # Bug fix: bare `assert` is stripped under `python -O`; use the
        # unittest assertion for a diagnostic failure message as well.
        self.assertEqual(max_depth, 6)
Example #8
0
 def setUp(self):
     """Mirror match: both alpha-beta agents scored by next_moves_score."""
     reload(game_agent)
     score = game_agent.next_moves_score
     self.player1 = game_agent.AlphaBetaPlayer(score_fn=score)
     self.player2 = game_agent.AlphaBetaPlayer(score_fn=score)
     self.game = isolation.Board(self.player1, self.player2)
Example #9
0
    def setUp(self):
        """Pit custom_score_3 against the improved_score baseline on 9x9.

        Fix: removed the commented-out MinimaxPlayer variants (dead code).
        """
        reload(game_agent)
        self.player1 = game_agent.AlphaBetaPlayer(score_fn=game_agent.custom_score_3)
        self.player2 = game_agent.AlphaBetaPlayer(score_fn=improved_score)
        self.game = isolation.Board(self.player1, self.player2, 9, 9)
Example #10
0
 def setUp(self):
     """Instantiate the full roster of agents; each test builds its own board."""
     reload(game_agent)
     minimax = game_agent.MinimaxPlayer
     alphabeta = game_agent.AlphaBetaPlayer
     self.player1 = minimax()
     self.player2 = sample_players.GreedyPlayer()
     self.player3 = sample_players.RandomPlayer()
     self.player4 = alphabeta()
     self.player5 = alphabeta()
     self.player6 = minimax()
     # No shared board: tests construct the game they need.
     self.game = None
 def setUp(self):
     """Mirror-match two depth-5 custom_score_2 agents, iterative deepening off."""
     reload(game_agent)
     for attr in ("player1", "player2"):
         agent = game_agent.AlphaBetaPlayer(
             search_depth=5, score_fn=game_agent.custom_score_2)
         agent.itrdeep = 0  # disable iterative deepening
         setattr(self, attr, agent)
     self.game = isolation.Board(self.player1, self.player2)
        def play_match(first, second):
            """Play one full game between two AlphaBetaPlayer configurations.

            ``first``/``second`` are forwarded as the first positional
            argument of AlphaBetaPlayer.  Relies on closure variables defined
            outside this view: ``num_matches``, ``height``, ``width``, the
            ``Verbose``/``Boxes`` print flags, ``TIME_LIMIT_MILLIS``, the
            shared ``lost`` win/loss tally, and ``player_lost`` —
            NOTE(review): all assumed to exist in the enclosing scope.
            """
            print(
                "*************************** *************************** *************************** ",
                "\n")
            print(" --------------- ", first, " versus ", second,
                  " ---------------- ", "\n")
            print("\n")
            print("---------------- testing alpha beta player(", num_matches,
                  ") -------------------------", "\n")
            print(
                "*************************** *************************** *************************** ",
                "\n")
            self.player1 = game_agent.AlphaBetaPlayer(first)
            self.player2 = game_agent.AlphaBetaPlayer(second)
            self.game = isolation.Board(self.player1, self.player2, height,
                                        width)

            # Wall-clock in milliseconds, used to build per-move deadlines.
            time_millis = lambda: 1000 * timeit.default_timer()

            if Verbose or Boxes:
                print(self.game.print_board())

            # 800 iterations comfortably exceeds any possible game length,
            # so the loop always ends via the is_loser break below.
            for i in range(800):
                move_start = time_millis()
                time_left = lambda: TIME_LIMIT_MILLIS - (time_millis() -
                                                         move_start)

                move = self.game.active_player.get_move(self.game, time_left)
                if self.game.is_loser(self.game.active_player):
                    # Tally the loss for whichever seat the loser occupies.
                    if self.game.active_player == self.player1:
                        lost[0] += 1
                    else:
                        lost[1] += 1
                    print("test_game.. lost:", lost)
                    # if move == (-1, -1): must be the same as "is_loser"
                    print(
                        "*************************** *************************** ",
                        "\n")
                    print("                   dead-end reached ", "\n")
                    print(
                        "*************************** *************************** ",
                        "\n")
                    break
                else:
                    self.game.apply_move(move)
                    if Verbose or Boxes:
                        print("i:", i, "_", move, "\n")
                        print(self.game.print_board(), "\n")
            for x in range(2):
                print("lost:", lost[x], "\n")
            print(
                "*************************** *************************** *************************** "
            )
            print("player_lost:", player_lost)
            print(
                "*************************** *************************** *************************** ",
                "\n")
 def test_alphabeta_7(self):
     """Two default alpha-beta agents play a complete game to its outcome."""
     agent_a = game_agent.AlphaBetaPlayer()
     agent_b = game_agent.AlphaBetaPlayer()
     game = Board(agent_a, agent_b)
     # Seed the game with the first available opening move.
     opening = game.get_legal_moves(agent_a)[0]
     game.apply_move(opening)
     winner, history, outcome = game.play()
     print("\nWinner: {}\nOutcome: {}".format(winner, outcome))
     print(game.to_string())
     print("Move history:\n{!s}".format(history))
Example #14
0
 def test_empty(self):
     """Players start off-board; after play() both must have a location."""
     reload(game_agent)
     self.player1 = game_agent.AlphaBetaPlayer()
     self.player2 = game_agent.AlphaBetaPlayer()
     game = isolation.Board(self.player1, self.player2)
     for player in (self.player1, self.player2):
         self.assertEqual(game.get_player_location(player), None)
     game.play(time_limit=100)
     for player in (self.player1, self.player2):
         self.assertNotEqual(game.get_player_location(player), None)
 def setUp(self):
     """Two named, depth-1 alpha-beta agents on a 9x9 board."""
     reload(game_agent)
     self.player1, self.player2 = (
         game_agent.AlphaBetaPlayer(name=label, search_depth=1)
         for label in ("Player 1", "Player 2"))
     self.game = isolation.Board(self.player1, self.player2,
                                 width=9, height=9)
Example #16
0
    def test_play_game(self):
        """Run play_game with the tested player in each seat order."""
        tested_player = game_agent.AlphaBetaPlayer(score_fn=game_agent.custom_score)
        tested_player.Name = "Tested Player"
        reference_player = game_agent.AlphaBetaPlayer(score_fn=sample_players.improved_score)
        reference_player.Name = "Reference Player"

        # Same pairing with both seat orders to cancel first-move advantage.
        boards = (isolation.Board(tested_player, reference_player),
                  isolation.Board(reference_player, tested_player))
        for board in boards:
            self.play_game(board, tested_player)
Example #17
0
 def testAlphaBeta(self):
     """Depth-2 alphabeta on a fixed mid-game position returns (6, 3)."""
     agent = ga.AlphaBetaPlayer(search_depth=2, score_fn=sp.open_move_score)
     opponent = ga.AlphaBetaPlayer(search_depth=2, score_fn=sp.open_move_score)
     game = isolation.Board(agent, opponent, 9, 9)
     # Fixed 9x9 position (last two entries locate the players).
     game._board_state = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 0, 0, 0, 0, 1, 1, 0, 1, 1, 0, 1, 0, 0, 0, 1, 0, 1, 1, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 42, 14]
     print(game.to_string())
     # One-second clock for the fixed-depth search.
     time_millis = lambda: 1000 * timeit.default_timer()
     move_start = time_millis()
     agent.time_left = lambda: 1000 - (time_millis() - move_start)
     chosen = agent.alphabeta(game, 2)
     self.assertEqual(chosen, (6, 3))
def test_minimax():
    """Play a few complete self-play games with randomized openings.

    Bug fix: the original looped ``while True`` and therefore never
    terminated; the loop is now bounded to a small number of games.
    """
    for _ in range(3):
        reload(game_agent)
        player1 = game_agent.AlphaBetaPlayer(score_fn=improved_score)
        player2 = game_agent.AlphaBetaPlayer(score_fn=improved_score)
        game = isolation.Board(player1, player2)
        # Two random opening moves (one per player) to vary the start state.
        for _ in range(2):
            move = random.choice(game.get_legal_moves())
            game.apply_move(move)
        winner, history, outcome = game.play()
        logging.info(history)
Example #19
0
    def setUp(self):
        """Build one minimax board and one alpha-beta board for the tests."""
        reload(game_agent)

        self.minimax_player1, self.minimax_player2 = (
            game_agent.MinimaxPlayer() for _ in range(2))
        self.minimax_game = isolation.Board(self.minimax_player1,
                                            self.minimax_player2)

        self.alphabeta_player1, self.alphabeta_player2 = (
            game_agent.AlphaBetaPlayer() for _ in range(2))
        self.alphabeta_game = isolation.Board(self.alphabeta_player1,
                                              self.alphabeta_player2)
Example #20
0
    def test_measure_board(self):
        """Print, for every opening square, how many legal replies it leaves.

        Improvement: iterate the two coordinate ranges directly instead of
        first materializing a throwaway list of (row, column) pairs.
        """
        player = game_agent.AlphaBetaPlayer()
        game = isolation.Board(player, game_agent.AlphaBetaPlayer(),height=7, width=7)

        valueList = []
        for row in range(game.height):
            for column in range(game.width):
                # Forecast opening on (row, column) and count the moves the
                # player would have from there.
                new_state = game.forecast_move((row,column))
                nr_moves = len(new_state.get_legal_moves(player))
                valueList.append(f'({row},{column}):{nr_moves}')

        print(f'{{{",".join(valueList)}}}')
 def test_alphabeta(self):
     """Full game between two deep-searching open_move_score agents."""
     self.player1, self.player2 = (
         game_agent.AlphaBetaPlayer(score_fn=open_move_score, search_depth=30)
         for _ in range(2))
     self.game = isolation.Board(self.player1, self.player2, 9, 9)
     print("Minimax test")
     print(self.game.to_string())
     player, __, outcome = self.game.play()
     print("player 1", self.player1)
     print("player 2", self.player2)
     print("Outcome", outcome, "player ", player)
Example #22
0
 def reset_boards(self):
     """Rebuild the six test boards against a greedy opponent.

     Boards 1 and 2 share one agent (seat order swapped); boards 3-6 each
     get a fresh AlphaBetaPlayer with a different depth/heuristic combo.
     Improvement: the repeated player/board construction is driven by one
     configuration table instead of copy-pasted statements.
     """
     self.my_player = game_agent.AlphaBetaPlayer(3,game_agent.custom_score,10)
     self.opponent = sample_players.GreedyPlayer(sample_players.open_move_score)
     self.game_board_1 = TestBoard(self.my_player, self.opponent)
     self.game_board_2 = TestBoard(self.opponent,self.my_player)
     # (board index, search depth, score function) for the remaining boards.
     configs = [(3, 5, game_agent.custom_score),
                (4, 10, game_agent.custom_score),
                (5, 3, game_agent.custom_score_2),
                (6, 3, game_agent.custom_score_3)]
     for index, depth, score_fn in configs:
         self.my_player = game_agent.AlphaBetaPlayer(depth, score_fn, 10)
         setattr(self, "game_board_{}".format(index),
                 TestBoard(self.my_player, self.opponent))
Example #23
0
class AlphaBetaTest(unittest.TestCase):
    """Smoke test for AlphaBetaPlayer.alphabeta after two opening moves.

    NOTE(review): ``reload`` and the two players run at class-definition
    time, so every test in this class shares the same player instances.
    """
    reload(game_agent)
    player1 = game_agent.AlphaBetaPlayer()
    player2 = game_agent.AlphaBetaPlayer()

    def setUp(self):
        # Unusually, setUp doubles as a board factory and returns the game;
        # unittest discards the return value when it calls setUp itself.
        game = isolation.Board(self.player1, self.player2)
        game.apply_move((0, 1))
        game.apply_move((1, 0))
        return game

    def testAB(self):
        game = self.setUp()
        # ``tl`` is a time-left callable defined elsewhere in the module —
        # presumably returns a positive millisecond budget; confirm there.
        self.player1.time_left = tl
        print(self.player1.alphabeta(game, 2))
Example #24
0
 def test_alphabeta_5(self):
     """A depth-5 alpha-beta search must pick one of the eight best squares."""
     player = game_agent.AlphaBetaPlayer()
     player.time_left = self.create_clock()
     best_move = player.alphabeta(self.board, 5)
     expected = [(2, 3), (3, 2), (2, 5), (3, 6), (5, 2), (5, 6),
                 (6, 3), (6, 5)]
     self.assertTrue(best_move in expected, 'best move: ' + str(best_move))
Example #25
0
 def setUp(self):
     """Alpha-beta agent with a generous clock versus a random opponent."""
     import sample_players
     reload(game_agent)
     self.player1 = game_agent.AlphaBetaPlayer()
     # Effectively unlimited time budget for the search.
     self.player1.time_left = lambda: 10000
     self.player2 = sample_players.RandomPlayer()
     self.game = isolation.Board(self.player1, self.player2)
    def test_alphabeta_interface(self):
        """Test CustomPlayer.alphabeta interface with simple input.

        Every branch score returned by the zero heuristic must be a float.
        Fix: removed the unused ``iterative_search`` and ``search_method``
        locals left over from an earlier project template.
        """
        h, w = 9, 9  # board size
        test_depth = 1
        starting_location = (2, 7)
        adversary_location = (0, 0)  # top left corner
        heuristic = lambda g, p: 0.  # return 0 everywhere

        # create a player agent & a game board
        agentUT = game_agent.AlphaBetaPlayer(test_depth, heuristic)
        agentUT.time_left = lambda: 99  # ignore timeout for fixed-depth search
        board = isolation.Board(agentUT, 'null_agent', w, h)

        # place two "players" on the board at arbitrary (but fixed) locations
        board.apply_move(starting_location)
        board.apply_move(adversary_location)

        for move in board.get_legal_moves():
            next_state = board.forecast_move(move)
            # Score each branch with alpha-beta pruning enabled.
            v, _ = agentUT.minimax_with_score(next_state,
                                              test_depth,
                                              True,
                                              apply_alphabeta=True)

            self.assertTrue(
                type(v) == float,
                ("Alpha Beta function should return a floating " +
                 "point value approximating the score for the " +
                 "branch being searched."))
Example #27
0
    def test_alfabeta(self):
        """Alpha-beta player proposes a move after two fixed opening moves.

        Fixes: the original's ``self.game.active_player == self.player1``
        lines were bare comparisons whose results were discarded (no-ops);
        they are removed.  The redundant pre-initialization of ``best_move``
        and the local variable shadowing the ``AlphaBetaPlayer`` class name
        are also gone.
        """
        #Set up board
        print("Setting up inital board for alphabeta player test....")
        self.setUp()

        #Put some moves on board to create different start states

        # Player 1 move 1
        self.game.apply_move((0, 0))
        # Player 2 move 1
        self.game.apply_move((0, 2))

        #Print inital board
        print("\nInital state:\n{}".format(self.game.to_string()))

        #Calculate player 1 best next move using MiniMax with AlphaBeta pruning
        ab_player = game_agent.AlphaBetaPlayer()
        best_move = ab_player.get_move(self.game, self.timeLimit)
        print('Best move for Player 1 is', best_move)
    def test_ab_get_move(self):
        """alphabeta() at depth 2 on a fixed late-game 9x9 position.

        Fixes: removed the commented-out alternative board states (dead
        code) and the garbled "gggg" in the progress message.
        """
        self.setUp(9, 9)
        game = self.game
        # Fixed late-game position (last two entries locate the players).
        game._board_state = [
            0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
            1, 0, 1, 0, 0, 0, 0, 1, 0, 1, 0, 1, 0, 0, 0, 0, 1, 1, 1, 0, 1, 0,
            0, 0, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0,
            0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 46, 42
        ]
        print(game.to_string())

        test_depth = 2

        alphaBetaPlayer = game_agent.AlphaBetaPlayer(
            test_depth, sample_players.improved_score)
        alphaBetaPlayer.time_left = self.timeLeft
        move = alphaBetaPlayer.alphabeta(game, test_depth)
        print()
        print("best move from calling alphabeta() with depth", test_depth,
              ":")
        self.print_move(move)
        print()
        """
    def xtest_alphabeta(self):
        """Disabled (x-prefixed) soak test: the agent plays itself on 5x5.

        NOTE(review): the ``x`` prefix keeps unittest from collecting this
        method; it drives ``get_move`` for up to 25 plies, printing the
        board between moves.
        """
        self.setUp(5, 5)
        game = self.game
        alphaBetaPlayer = game_agent.AlphaBetaPlayer(1,
                                                     game_agent.custom_score)

        #self.game.apply_move((0,2))
        #self.game.apply_move((0,4))

        print('##################################################')
        print(game.to_string())
        print('##################################################')

        # 25 plies is an upper bound on a 5x5 game; the break below fires
        # as soon as the active player has no legal moves.
        for i in range(25):

            moves = game.get_legal_moves()
            if len(moves) == 0:
                break

            self.print_moves(moves)
            print('active player', game.active_player)
            print('alphaBetaPlayer number of legal moves', len(moves))

            # The same agent instance chooses moves for both seats.
            move = alphaBetaPlayer.get_move(game, self.timeLeft)
            self.print_move(move)
            game.apply_move((move))

            print('##################################################')
            print(game.to_string())
            print('##################################################', i)

        if game.is_loser(game.active_player):
            print(game.active_player, "has lost the game")
            print(game.inactive_player, "has won the game")
    def test_alphabeta_interface(self):
        """ Test AlphaBetaPlayer.alphabeta interface with simple input.

        Fixes: the agent was constructed with the ``IsolationPlayer`` class
        itself as its first positional argument (the search depth); it now
        receives ``(test_depth, heuristic)``, matching the sibling
        alpha-beta interface test in this suite.  The unused
        ``iterative_search`` and ``search_method`` locals are removed.
        """
        h, w = 7, 7  # board size
        test_depth = 1
        starting_location = (5, 3)
        adversary_location = (0, 0)  # top left corner
        heuristic = lambda g, p: 0.  # return 0 everywhere

        # create a player agent & a game board
        agentUT = game_agent.AlphaBetaPlayer(test_depth, heuristic)
        agentUT.time_left = lambda: 99  # ignore timeout for fixed-depth search
        board = isolation.Board(agentUT, 'null_agent', w, h)

        # place two "players" on the board at arbitrary (but fixed) locations
        board.apply_move(starting_location)
        board.apply_move(adversary_location)

        for move in board.get_legal_moves():
            next_state = board.forecast_move(move)
            op_move = agentUT.alphabeta(next_state, test_depth)
            print("op_move = ")
            print(op_move)
            self.assertTrue(
                type(op_move) == tuple,
                ("Minimax function should return a tuple " +
                 "point value approximating the score for the " +
                 "branch being searched."))