def test_select_node():
    """select_node stops at a node that still has unexpanded moves."""
    root_state = cm.initialize_game_state()
    expanded_state = cm.initialize_game_state()
    expanded_state[0, 0] = cm.PLAYER1
    root = node.mcts_node(state=root_state)
    expanded_child = node.mcts_node(state=expanded_state, parent=root)
    root.open_moves = [0, 3, 4]
    root.children = [expanded_child]
    # The root still has open (unexpanded) moves, so selection must return it
    # rather than descending into the existing child.
    assert agent.select_node(root) == root
def human_vs_agent(
        generate_move_1: GenMove,
        generate_move_2: GenMove = user_move,
        player_1: str = "Player 1",
        player_2: str = "Player 2",
        args_1: tuple = (),
        args_2: tuple = (),
        init_1: Callable = lambda board, player: None,
        init_2: Callable = lambda board, player: None,
):
    """Play two complete games between two move generators, printing each turn.

    Two rounds are played; in the second round the order of the generators is
    reversed so each side gets one game moving first.

    :param generate_move_1: move generator for the first participant
    :param generate_move_2: move generator for the second participant
        (defaults to interactive ``user_move``)
    :param player_1: display name for the first participant
    :param player_2: display name for the second participant
    :param args_1: extra positional args forwarded to ``generate_move_1``
    :param args_2: extra positional args forwarded to ``generate_move_2``
    :param init_1: one-time setup hook called with (board, player)
    :param init_2: one-time setup hook called with (board, player)
    """
    players = (PLAYER1, PLAYER2)
    # play_first == 1 keeps the given order; -1 reverses every per-player
    # tuple below so the second generator moves first in round two.
    for play_first in (1, -1):
        # Run each participant's init hook once per round on a fresh board.
        for init, player in zip((init_1, init_2)[::play_first], players):
            init(initialize_game_state(), player)
        # Per-player opaque state threaded through successive gen_move calls.
        saved_state = {PLAYER1: None, PLAYER2: None}
        board = initialize_game_state()
        gen_moves = (generate_move_1, generate_move_2)[::play_first]
        player_names = (player_1, player_2)[::play_first]
        gen_args = (args_1, args_2)[::play_first]
        playing = True
        end_state = GameState.STILL_PLAYING
        while playing:
            for player, player_name, gen_move, args in zip(
                players, player_names, gen_moves, gen_args,
            ):
                t0 = time.time()
                print(pretty_print_board(board))
                # A copy of the board is passed so the generator cannot
                # mutate the authoritative game state.
                action, saved_state[player] = gen_move(board.copy(), player, saved_state[player], *args)
                print('{} \'s action is {}'.format(player_name, action))
                print(f"Move time: {time.time() - t0:.3f}s")
                # NOTE(review): the FIRST return value is kept as the ongoing
                # board here, while test_apply_player_action treats the SECOND
                # return value as the updated board — confirm the return order
                # of apply_player_action when called with copy=True.
                board, r_board = apply_player_action(board, action, player, True)
                end_state = check_end_state(board, player)
                if end_state != GameState.STILL_PLAYING:
                    print(pretty_print_board(board))
                    if end_state == GameState.IS_DRAW:
                        print("Game ended in draw")
                    else:
                        print(f'{player_name} won playing \
{"X" if player == PLAYER1 else "O"}')
                    playing = False
                    break
def test_connected_four_right_diagonal():
    """Four PLAYER1 pieces on the main (rising) diagonal count as a win."""
    board = cm.initialize_game_state()
    for offset in range(4):
        board[offset, offset] = cm.PLAYER1
    assert cm.connected_four(board, cm.PLAYER1)
def test_connected4_column_false():
    """Three PLAYER1 pieces over an opponent piece are not a win.

    Bug fix: the original used ``assert ~cm.connected_four(...)``.  If the
    function returns a plain Python ``bool``, bitwise ``~`` yields -1 or -2 —
    both truthy — so the assertion could never fail.  ``not`` is the correct
    logical negation regardless of the return type.
    """
    board = cm.initialize_game_state()
    board[0, 1] = cm.PLAYER2
    board[1, 1] = cm.PLAYER1
    board[2, 1] = cm.PLAYER1
    board[3, 1] = cm.PLAYER1
    assert not cm.connected_four(board, cm.PLAYER1)
def test_apply_player_action():
    """Dropping a piece in column 3 of an empty board lands it at row 0.

    Improvement: ``np.array_equal`` replaces ``np.all(a.__eq__(b))`` — it is
    the idiomatic comparison and additionally verifies the shapes match.
    """
    c_board = cm.initialize_game_state()
    copied_board, board = cm.apply_player_action(c_board, 3, cm.PLAYER1)
    test_board = np.zeros((6, 7), dtype=BoardPiece)
    test_board[0, 3] = cm.PLAYER1
    assert np.array_equal(test_board, board)
def test_connected_four():
    """Four vertically stacked PLAYER1 pieces in column 0 are a win."""
    board = cm.initialize_game_state()
    # Broadcast-assign the bottom four rows of column 0 in one statement.
    board[:4, 0] = cm.PLAYER1
    assert cm.connected_four(board, cm.PLAYER1)
def test_initialize_game_state():
    """A fresh board is a 6x7 all-zero ndarray with dtype BoardPiece."""
    board = cm.initialize_game_state()
    assert isinstance(board, np.ndarray)
    assert board.dtype == BoardPiece
    assert board.shape == (6, 7)
    # No cell may be occupied on a newly initialized board.
    assert not board.any()
def test_pretty_print_board():
    """pretty_print_board of a board with one PLAYER1 piece matches the
    row-by-row string rendering (top row first).

    Bug fix: the original asserted ``pp_board.__eq__(str_test_board)``.  When
    the operand types differ, ``__eq__`` returns ``NotImplemented`` — which is
    truthy — so the assertion could pass vacuously.  Plain ``==`` runs the full
    equality protocol and fails correctly on mismatch.
    """
    board = cm.initialize_game_state()
    board[0, 0] = cm.PLAYER1
    pp_board = cm.pretty_print_board(board)
    test_board = np.zeros((6, 7), dtype=BoardPiece)
    test_board[0, 0] = cm.PLAYER1
    # Rows are reversed so row 0 (the bottom of the board) prints last.
    str_test_board = '\n'.join(str(row) for row in test_board[::-1])
    assert pp_board == str_test_board
def test_check_end_state_column():
    """A vertical four-in-a-row in column 0 is a win for PLAYER1 only."""
    board = cm.initialize_game_state()
    board[:4, 0] = cm.PLAYER1
    assert cm.check_end_state(board, cm.PLAYER1) == cm.GameState.IS_WIN
    assert cm.check_end_state(board, cm.PLAYER2) != cm.GameState.IS_WIN
def test_check_end_state_row():
    """A horizontal four-in-a-row in row 0 is a win for PLAYER1 only."""
    board = cm.initialize_game_state()
    board[0, :4] = cm.PLAYER1
    assert cm.check_end_state(board, cm.PLAYER1) == cm.GameState.IS_WIN
    assert cm.check_end_state(board, cm.PLAYER2) != cm.GameState.IS_WIN
def test_explore_node():
    """explore_node consumes one open move and returns a fresh child node."""
    root = node.mcts_node(state=cm.initialize_game_state())
    root.open_moves = [0, 3, 4]
    child = agent.explore_node(root)
    # One of the three open moves was expanded, so two remain and the
    # returned node is not the root itself.
    assert len(root.open_moves) == 2
    assert child != root
def test_get_open_moves_connected4():
    """A board already won by PLAYER1 offers no further open moves."""
    board = cm.initialize_game_state()
    board[0, :4] = cm.PLAYER1
    won_node = node.mcts_node(state=board, player=cm.PLAYER1)
    assert won_node.get_open_moves().shape[0] == 0
def test_score_right_diagonal_opponent():
    """Three human pieces on the rising diagonal score -4 for the agent."""
    board = cm.initialize_game_state()
    for i in range(3):
        board[i, i] = cm.PLAYER1
    # Configure the agent module so PLAYER1 is the human opponent.
    agent.AGENT = cm.PLAYER2
    agent.HUMAN = cm.PLAYER1
    assert agent.score_right_diagonal(board, cm.PLAYER2) == -4
def test_score_row_opponent():
    """Three human pieces in a row score -4 for the agent."""
    board = cm.initialize_game_state()
    board[0, :3] = cm.PLAYER1
    # Configure the agent module so PLAYER1 is the human opponent.
    agent.AGENT = cm.PLAYER2
    agent.HUMAN = cm.PLAYER1
    assert agent.score_row(board, cm.PLAYER2) == -4
def test_score_column_opponent():
    """A completed human column scores -104 for the agent."""
    board = cm.initialize_game_state()
    board[:4, 6] = cm.PLAYER1
    # Configure the agent module so PLAYER1 is the human opponent.
    agent.AGENT = cm.PLAYER2
    agent.HUMAN = cm.PLAYER1
    assert agent.score_column(board, cm.PLAYER2) == -104
def test_check_board_full_true():
    """A completely occupied top row makes check_board_full report True."""
    board = cm.initialize_game_state()
    board[5, :] = cm.PLAYER1
    assert cm.check_board_full(board)
def test_score_row_full():
    """Four agent pieces in a row score 105."""
    board = cm.initialize_game_state()
    board[0, :4] = cm.PLAYER1
    # Configure the agent module so PLAYER1 is the agent itself.
    agent.AGENT = cm.PLAYER1
    agent.HUMAN = cm.PLAYER2
    assert agent.score_row(board, cm.PLAYER1) == 105
def test_score_column_full():
    """Four agent pieces stacked in a column score 105."""
    board = cm.initialize_game_state()
    board[:4, 6] = cm.PLAYER1
    # Configure the agent module so PLAYER1 is the agent itself.
    agent.AGENT = cm.PLAYER1
    agent.HUMAN = cm.PLAYER2
    assert agent.score_column(board, cm.PLAYER1) == 105
def test_score_left_diagonal_full():
    """Four agent pieces on the falling (left) diagonal score 105."""
    board = cm.initialize_game_state()
    for i in range(4):
        board[i, 6 - i] = cm.PLAYER1
    # Configure the agent module so PLAYER1 is the agent itself.
    agent.AGENT = cm.PLAYER1
    agent.HUMAN = cm.PLAYER2
    assert agent.score_left_diagonal(board, cm.PLAYER1) == 105
def test_generate_move_winning():
    """
    Check that generate_move returns a properly typed action when the board
    holds a three-in-a-row that must be won or blocked.

    Fix: ``isinstance(action, cm.PlayerAction)`` replaces the non-idiomatic
    ``type(action) == cm.PlayerAction`` type-identity comparison (which also
    rejects subclasses).
    """
    board = cm.initialize_game_state()
    board[0, 0] = agent.PLAYER
    board[0, 1] = agent.PLAYER
    board[0, 2] = agent.PLAYER
    action, _ = agent.generate_move(board, cm.PLAYER2, {})
    assert isinstance(action, cm.PlayerAction)
def test_minimax_loss():
    """From an already-lost position, minimax returns no column and -10000.

    Fix: per PEP 8, comparison against ``None`` uses ``is``, not ``==``; the
    expected values are also moved to the conventional right-hand side.
    """
    board = cm.initialize_game_state()
    board[0, :4] = cm.PLAYER1
    agent.AGENT = cm.PLAYER2
    agent.HUMAN = cm.PLAYER1
    col, score = agent.minimax(board, 0, -100000000, 100000000, True)
    assert col is None
    assert score == -10000
def test_expansion():
    """expand_node moves one open move into a newly created child.

    Bug fix: the original asserted ``child_node.__eq__(current_node.children[0])``;
    without a custom ``__eq__`` on mcts_node the default implementation returns
    ``NotImplemented`` — which is truthy — for distinct objects, making the
    assertion vacuous.  Plain ``==`` falls back to identity and fails correctly.
    """
    board = cm.initialize_game_state()
    current_node = node.mcts_node(state=board, player=cm.PLAYER1)
    current_node.total_visits = 100
    current_node.open_moves = [0, 5]
    assert len(current_node.open_moves) == 2
    assert len(current_node.children) == 0
    child_node = current_node.expand_node(5)
    # Move 5 was expanded: it left open_moves and produced one child.
    assert len(current_node.open_moves) == 1
    assert len(current_node.children) == 1
    assert current_node.open_moves[0] == 0
    assert child_node == current_node.children[0]
def test_check_end_state_left_diagonal():
    """A PLAYER2 four-in-a-row on the falling diagonal is detected as a win."""
    board = cm.initialize_game_state()
    # Column 1: three PLAYER1 pieces capped by a PLAYER2 piece at row 3.
    board[0, 1] = cm.PLAYER1
    board[1, 1] = cm.PLAYER1
    board[2, 1] = cm.PLAYER1
    board[3, 1] = cm.PLAYER2
    # Column 2: three PLAYER2 pieces.
    board[0, 2] = cm.PLAYER2
    board[1, 2] = cm.PLAYER2
    board[2, 2] = cm.PLAYER2
    # Columns 3 and 4 complete the descending PLAYER2 diagonal
    # (3,1)-(2,2)-(1,3)-(0,4).
    board[0, 3] = cm.PLAYER1
    board[1, 3] = cm.PLAYER2
    board[0, 4] = cm.PLAYER2
    print(cm.pretty_print_board(board))
    assert cm.check_end_state(board, cm.PLAYER2) == cm.GameState.IS_WIN
    assert cm.check_end_state(board, cm.PLAYER1) != cm.GameState.IS_WIN
def test_child_selection():
    """select_next_node picks the child with the better win/visit statistics.

    Bug fix: the original ``child_node01.__eq__(...)`` assertion is vacuous
    for distinct objects — the default ``__eq__`` returns ``NotImplemented``,
    which is truthy.  Plain ``==`` (identity fallback) makes the check real.
    """
    board = cm.initialize_game_state()
    child_board01 = board.copy()
    child_board01[0, 0] = cm.PLAYER1
    child_board02 = board.copy()
    child_board02[0, 3] = cm.PLAYER1
    child_node01 = node.mcts_node(state=child_board01, player=cm.PLAYER1)
    child_node02 = node.mcts_node(state=child_board02, player=cm.PLAYER1)
    current_node = node.mcts_node(state=board, player=cm.PLAYER1)
    current_node.total_visits = 100
    # child 1: 35/50 wins beats child 2: 25/40 wins under UCT selection.
    child_node01.total_visits = 50
    child_node02.total_visits = 40
    child_node01.num_wins = 35
    child_node02.num_wins = 25
    current_node.children = [child_node01, child_node02]
    assert child_node01 == current_node.select_next_node()
def test_set_visit_win():
    """set_visit_and_win adds the result to num_wins and counts one visit."""
    root = node.mcts_node(state=cm.initialize_game_state(), player=cm.PLAYER1)
    root.set_visit_and_win(result=10)
    assert root.num_wins == 10
    assert root.total_visits == 1
def test_generate_move():
    """generate_move on an empty board returns a PlayerAction-typed column.

    Fix: ``isinstance`` replaces the non-idiomatic ``type(...) == ...``
    type-identity comparison (which also rejects subclasses).
    """
    board = cm.initialize_game_state()
    action, _ = agent.generate_move(board, cm.PLAYER1, {})
    assert isinstance(action, cm.PlayerAction)
def test_simulate_game():
    """A playout from the empty board runs until a win is encountered."""
    root = node.mcts_node(state=cm.initialize_game_state(), player=cm.PLAYER1)
    win, player = agent.simulate_game(root)
    # The simulation only terminates once a winning result is produced.
    assert win
def test_check_board_full_false():
    """An empty board is not full.

    Bug fix: the original used ``assert ~cm.check_board_full(board)``.  If the
    function returns a plain Python ``bool``, bitwise ``~`` yields -1 or -2 —
    both truthy — so the assertion could never fail.  ``not`` is the correct
    logical negation regardless of the return type.
    """
    board = cm.initialize_game_state()
    assert not cm.check_board_full(board)
def test_back_propagation():
    """back_propagation records exactly one visit on the starting node."""
    root = node.mcts_node(state=cm.initialize_game_state())
    agent.back_propagation(root, cm.PLAYER1, False)
    assert root.total_visits == 1
def test_get_open_moves():
    """All seven columns are open moves on an empty board."""
    fresh_node = node.mcts_node(state=cm.initialize_game_state(), player=cm.PLAYER1)
    assert fresh_node.get_open_moves().shape[0] == 7