Example #1
    def get_move(self, board: Board) -> int:
        """
        Return the next move for the given `board`, based on the current values of the possible next states
        :param board: The current board state
        :return: The next move, chosen according to the values of the states reachable from the input state
        """
        if self.move_strategy == MoveStrategy.EXPLORATION:
            # Exploratory move: pick a random empty square.
            m = board.random_empty_spot()
            _ = self.get_v(board)  # called only to make sure the value table has an entry for this state
            return m
        else:
            # Greedy move: exploit current knowledge.
            vals = self.get_v(board)  # type: np.ndarray
            while True:
                # Break ties randomly among all positions holding the maximum value.
                maxv_idxs = np.argwhere(vals == np.amax(vals))
                m = np.random.choice(maxv_idxs.flatten().tolist())  # type: int
                # np.argmax(vals) would instead always return the first occurrence of the maximum.
                if board.is_legal(m):
                    return m
                else:
                    vals[m] = -1.0  # mark this illegal position so the loop never picks it again
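    # Aside (not part of the original method): the np.argwhere / np.random.choice pair above
    # implements a uniform random tie-break among equally valued moves. A minimal standalone
    # sketch, assuming only NumPy and a made-up value array:
    #
    #     vals = np.array([0.5, 0.9, 0.1, 0.9, 0.5, 0.9, 0.0, 0.2, 0.3])
    #     maxv_idxs = np.argwhere(vals == np.amax(vals))       # indices 1, 3 and 5
    #     m = np.random.choice(maxv_idxs.flatten().tolist())   # uniform pick among the ties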
    def move(self, board: Board) -> (GameResult, bool):
        """
        Makes a move on the given input state
        :param board: The current state of the game
        :return: The GameResult after this move, and a flag indicating whether the move finished the game
        """
        self.board_position_log.append(board.state.copy())
        nn_input = self.board_state_to_nn_input(board.state)

        probs = self.get_valid_probs([nn_input], [board])
        probs = probs[0]

        # Most of the time our next move is the one with the highest probability after removing all illegal ones.
        # Occasionally, however, we choose a random move to encourage exploration.
        if self.training and (self.game_counter < self.pre_training_games):
            move = board.random_empty_spot()
        else:
            if np.isnan(probs).any():
                # This can happen when all probabilities degenerate to 0. The best we can do
                # is make a random legal move.
                move = board.random_empty_spot()
            else:
                move = np.random.choice(np.arange(len(probs)), p=probs)
            if not board.is_legal(move):  # Debug case only, I hope
                print("Illegal move!")

        # We record the action we selected for later use when adjusting the network weights.
        self.action_log.append(move)

        _, res, finished = board.move(move, self.side)

        return res, finished
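The helper `get_valid_probs` is called above but not included in this excerpt. As a rough illustration only (a sketch under assumptions, not the repository's implementation), masking a network's raw move probabilities amounts to zeroing the entries of occupied squares and renormalizing; if every legal entry has already collapsed to 0, the renormalization degenerates to NaN, which is exactly the case the `np.isnan` guard above falls back from:

import numpy as np

def valid_probs_sketch(probs: np.ndarray, board) -> np.ndarray:
    # Hypothetical helper: keep probabilities only for legal squares, then renormalize.
    mask = np.array([1.0 if board.is_legal(i) else 0.0 for i in range(len(probs))])
    masked = probs * mask
    # If all legal moves ended up with probability 0, the division below is 0/0 and
    # yields NaN; the caller then falls back to a random legal move.
    return masked / masked.sum()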
Example #3
    def get_v(self, board: Board) -> np.ndarray:
        """
        Returns the values of all moves available from the current state of `board`
        :param board: The current board state
        :return: Array of values, one for each possible next board state
        """
        # We build the value dictionary lazily, adding a state only when it is actually
        # used for the first time.
        board_hash = board.hash_value()  # the value dictionary maps *hashed* states to values
        if board_hash in self.v:
            vals = self.v[board_hash]
        else:
            vals = np.full(9, self.v_init)  # default initial value
            # set values for winning states to WIN_VALUE
            # (player cannot end up in a losing state after a move
            # so losing states need not be considered):
            for pos in range(vals.size):  # vals.size = BOARD_SIZE
                if board.is_legal(pos):
                    b = Board(board.state)
                    b.move(pos, self.side)
                    if b.check_win():
                        vals[pos] = self.v_win
                    elif b.num_empty() == 0:
                        # if it is not a win, and there are no other positions
                        # available, then it is a draw
                        vals[pos] = self.v_draw
            # Update the dictionary:
            self.v[board_hash] = vals
        return vals
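To make the lazy caching concrete, a short usage sketch (`player` and `board` are hypothetical instances of the surrounding class and of `Board`):

vals_first = player.get_v(board)    # first call: values are computed and stored in player.v
vals_second = player.get_v(board)   # second call: the cached array is returned from player.v
assert vals_first is vals_second    # same object, keyed by board.hash_value()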
Example #4
    def get_move(self, board: Board) -> int:
        """
        Return the next move for the given `board`, based on the current Q values
        :param board: The current board state
        :return: The next move based on the current Q values for the input state
        """
        board_hash = board.hash_value()  # type: int
        qvals = self.get_q(board_hash)  # type: np.ndarray
        while True:
            m = np.argmax(qvals)  # type: int
            if board.is_legal(m):
                return m
            else:
                qvals[m] = -1.0  # mark this illegal position so the loop never picks it again
    def move(self, board: Board) -> (GameResult, bool):
        """
        Implements the Player interface and makes a move on Board `board`
        :param board: The Board to make a move on
        :return: A tuple of the GameResult and a flag indicating if the game is over after this move.
        """

        # We record all game positions to feed them into the NN for training with the corresponding updated Q
        # values.
        self.board_position_log.append(board.state.copy())

        nn_input = self.board_state_to_nn_input(board.state)
        probs, qvalues = self.get_probs(nn_input)
        qvalues = np.copy(qvalues)

        # We filter out all illegal moves by setting their probability to -1, which guarantees
        # the argmax below never selects them. We don't change the Q values, as we don't want
        # the NN to waste any effort learning different Q values for moves that are illegal anyway.
        for index in range(len(qvalues)):
            if not board.is_legal(index):
                probs[index] = -1
            elif probs[index] < 0:
                probs[index] = 0.0

        # Most of the time our next move is the one with the highest probability after removing all illegal ones.
        # Occasionally, however, we choose a random move to encourage exploration.
        if self.training and (np.random.rand(1) < self.random_move_prob):
            move = board.random_empty_spot()
        else:
            move = np.argmax(probs)

        # Unless this is the very first move, the max Q value of this state is recorded as the
        # "next max" for the previous move, i.e. for the action that brought the game from the
        # previous state to this one.
        if len(self.action_log) > 0:
            self.next_max_log.append(qvalues[np.argmax(probs)])

        # We record the action we selected as well as the Q values of the current state for later use when
        # adjusting NN weights.
        self.action_log.append(move)
        self.values_log.append(qvalues)

        # We execute the move and return the result
        _, res, finished = board.move(move, self.side)
        return res, finished
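The logs collected here (`board_position_log`, `action_log`, `values_log`, `next_max_log`) are consumed after the game, when the network weights are adjusted. That training code is not part of this excerpt; the following is only a sketch of how such logs are typically turned into Q-learning targets, with `final_reward` and `discount` as assumed, illustrative names rather than identifiers from the repository:

import numpy as np

def q_targets_sketch(values_log, action_log, next_max_log, final_reward, discount=0.95):
    # For each recorded position, start from the Q values the network produced and
    # overwrite the entry of the action actually taken with the Q-learning target:
    #   terminal move:      the final reward of the game
    #   non-terminal move:  discount * max_a Q(next state, a), i.e. the next_max_log entry
    targets = []
    for t, qvals in enumerate(values_log):
        target = np.copy(qvals)
        if t == len(values_log) - 1:
            target[action_log[t]] = final_reward
        else:
            target[action_log[t]] = discount * next_max_log[t]
        targets.append(target)
    return targets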