def play(self):
    """Play an interactive game of Human vs AI on the console.

    The human enters moves as "row,column"; the AI side picks moves via
    Monte Carlo Tree Search guided by ``self.net``.  Loops until the game
    reaches a terminal state, then prints the outcome from the human's
    perspective.
    """
    print("Start Human vs AI\n")
    mcts = MonteCarloTreeSearch(self.net)
    game = self.game.clone()  # Fresh clone so each game starts from scratch.
    game_over = False
    value = 0
    node = TreeNode()

    print("Enter your move in the form: row, column. Eg: 1,1")
    go_first = input("Do you want to go first: y/n?")
    if go_first.lower().strip() == 'y':
        print("You play as X")
        human_value = 1
        game.print_board()
    else:
        print("You play as O")
        human_value = -1

    # Keep playing until the game is in a terminal state.
    while not game_over:
        if game.current_player == human_value:
            # BUG FIX: the original guarded the parse with
            # `isinstance(action, str)`, but input() always returns str,
            # so the check was dead code; parse unconditionally.
            raw = input("Enter your move: ")
            row, col = (int(n, 10) for n in raw.split(","))
            best_child = TreeNode()
            best_child.action = (1, row, col)
        else:
            # Run MCTS simulations to pick the AI's best child node.
            best_child = mcts.search(game, node, CFG.temp_final)

        action = best_child.action
        game.play_action(action)  # Play the chosen action.
        game.print_board()
        game_over, value = game.check_game_over(game.current_player)
        best_child.parent = None
        node = best_child  # Reuse the chosen child as the new search root.

    # `value` is relative to current_player; re-sign it to the human's side.
    if value == human_value * game.current_player:
        print("You won!")
    elif value == -human_value * game.current_player:
        print("You lost.")
    else:
        print("Draw Match")
    print("\n")
def go(self):
    """Run an interactive console game: Human (BLACK, 'O') vs MCTS AI.

    The human enters moves as "x,y", which are flattened to
    ``x * board.size + y``.  Loops until a draw or a win is detected.
    """
    # BUG FIX: user-facing typo "form" -> "from" in the instructions.
    print("One rule:\r\n Move piece from 'x,y' \r\n eg 1,3\r\n")
    print("-" * 60)
    print("Ready Go")
    mc = MonteCarloTreeSearch(self.net, 1000)
    node = TreeNode()
    board = Board()
    while True:
        if board.c_player == BLACK:
            # The f-string prefix in the original had no placeholders.
            move = input("Your piece is 'O' and move: ")
            x, y = (int(n, 10) for n in move.split(","))
            action = x * board.size + y  # Flatten (x, y) to a board index.
            next_node = TreeNode(action=action)
        else:
            _, next_node = mc.search(board, node)

        board.move(next_node.action)
        board.show()
        next_node.parent = None
        node = next_node  # Reuse the chosen child as the new search root.

        if board.is_draw():
            print("-" * 28 + "Draw" + "-" * 28)
            return
        if board.is_game_over():
            # NOTE(review): assumes c_player still identifies the side that
            # just moved at this point (trigger() below presumably switches
            # turns) — confirm against Board's implementation.
            if board.c_player == BLACK:
                print("-" * 28 + "Win" + "-" * 28)
            else:
                print("-" * 28 + "Loss" + "-" * 28)
            return
        board.trigger()
def play(self):
    """Play an interactive Blokus-style game of Human vs AI.

    The human selects a piece, reference point, rotation and flip via
    ``self.get_input``; illegal placements are rejected and re-prompted.
    Valid moves are encoded into a single integer action id.  The AI plays
    via Monte Carlo Tree Search guided by ``self.net``.
    """
    mcts = MonteCarloTreeSearch(self.net)
    game = deepcopy(self.game)
    game_over = False
    value = 0
    node = TreeNode()

    game.print_board()
    while not game_over:
        if game.current_player == self.human_player:
            valid = False
            while not valid:
                piece, refpt, rot, flip = self.get_input(game)
                piece.create(0, (refpt[0], refpt[1]))
                # BUG FIX: original wrote `f == 'None'` (a no-op comparison)
                # where an assignment was intended; it only worked because
                # `f` was pre-initialised to 'None'.
                f = 'None' if flip == 0 else 'h'
                piece.flip(f)
                piece.rotate(90 * rot)
                valid = game.valid_move(piece.points, self.human_player)
                if not valid:
                    print('You selected an illegal move, please reselect')

            # Encode (position, piece, rotation, flip) into one action id.
            # The 'I' pieces have fewer distinct rotations, hence the
            # smaller rotation multiplier.
            if piece.ID not in ['I5', 'I4', 'I3', 'I2']:
                encoding = (refpt[0] * 14 + refpt[1]) * 91 + piece.shift + (
                    rot // 90) * 2 + flip
            else:
                encoding = (refpt[0] * 14 + refpt[1]) * 91 + piece.shift + (
                    rot // 90) * 1 + flip
            best_child = TreeNode()
            best_child.action = encoding
            print('CHOICE WAS MADE BY A HUMAN TO PLAY', piece.ID, '@', refpt)
        else:
            best_child = mcts.search(game, node, CFG.temp_final)

        action = best_child.action
        game.play_action(action)
        game.print_board()
        game_over, value = game.check_game_over(game.current_player)
        best_child.parent = None
        node = best_child  # Reuse the chosen child as the new search root.

    # `value` is relative to current_player; re-sign it to the human's side.
    if value == self.human_player * game.current_player:
        print("You won!")
    elif value == -self.human_player * game.current_player:
        print("You lost.")
    else:
        print("Draw Match")