def getNextMove(self, board):
        """TODO PyDoc"""
        print board

        SEARCH_DEPTH = 2  # Number of plies to search before quiescence

        # How long we want to allow the search to run before it starts
        # terminating - most tournaments allow 3 minutes per turn.
        # Experience shows that 0.5 seconds is more than enough buffer time
        SEARCH_TIME_SECONDS = (MaverickAI.CALCULATION_TIMEOUT * 60) - 0.5

        # Figure out our color
        if self.isWhite:
            color = ChessBoard.WHITE
        else:
            color = ChessBoard.BLACK

        QLAI._logger.info("Calculating next move")
        (nextMv, _, nodesVisited) = self._boardSearch(board, color,
                                                      SEARCH_DEPTH, -1, 1,
                                                      True, time() +
                                                      SEARCH_TIME_SECONDS)

        # Make sure we found a move
        if nextMv is None:
            possMoves = enumMoves(board, color)
            nextMv = random.choice(possMoves)

        logStrF = "Best found move was {0} -> {1}".format(nextMv[0], nextMv[1])
        QLAI._logger.info(logStrF)
        (fromPosn, toPosn) = nextMv

        return (fromPosn, toPosn)
Example #2
def heuristicPcsUnderAttack(color, board):
    """Return the value of the given color's pieces that are under attack

    @param color: The color of the pieces to test -
                one of maverick.data.ChessBoard.WHITE or
                maverick.data.ChessBoard.BLACK
    @param board: a ChessBoard object

    @return: A number representing the value of the given color's
            pieces that are under attack, weighted by piece value"""

    otherColor = ChessBoard.getOtherColor(color)

    # Get posns the enemy can move to
    enemyMoveDstPosns = [m[1] for m in enumMoves(board, otherColor)]

    # Record which friendly pieces are under attack
    # A piece is under attack if its posn (pcPsn) is in enemyMoveDstPosns
    # TODO: use filter and lambda
    piecesUnderAttack = [pcPsn for pcPsn in board.getPiecesOfColor(color)
                         if pcPsn in enemyMoveDstPosns]

    # Sum weighted values of under-attack pieces
    weightedTotal = 0
    for piecePosn in piecesUnderAttack:

        piece = board[piecePosn]
        # Check if there is a value for this piece in the mappings
        if piece.pieceType in PIECE_VALUES:
            weightedTotal += PIECE_VALUES[piece.pieceType]

    # Compress return value into range [-1..1]
    # (float() guards against Python 2 integer division truncating to 0)
    return 1 - 2 * (float(weightedTotal) / MAX_TOTAL_PIECE_VALUE)
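
The TODO above asks for a filter/lambda form of the piecesUnderAttack test. A minimal sketch against the same enumMoves/getPiecesOfColor API (the helper name is made up here, and building a set assumes the position objects are hashable):

def _posnsUnderAttack(board, color):
    """Hypothetical filter/lambda form of the comprehension above"""
    otherColor = ChessBoard.getOtherColor(color)
    # Hash the enemy destination squares for fast membership checks
    enemyDstPosns = set(m[1] for m in enumMoves(board, otherColor))
    return filter(lambda pcPsn: pcPsn in enemyDstPosns,
                  board.getPiecesOfColor(color))
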
 def test_bCmplx_allLegalMoves_black(self):
     b = getBoardComplex()
     
     print "considering a board"
     print b
     eMoves = enumMoves(b, _b)
     self._allLegalMoves(b, _b, eMoves)
Example #6
 def getNextMove(self, board):
     """Pick a random move from an enumeration of legal moves"""
     color = ChessBoard.WHITE if self.isWhite else ChessBoard.BLACK
     moveChoices = enumMoves(board, color)
     if moveChoices:
         move = random.choice(moveChoices)
         return move
     else:
         # NOTE: This code should be unreachable. If it gets here,
         #       either stale-mate detection is broken or a race condition
         #       is leading to the AI being asked for a move guess before
         #       stale-mate is properly updated
         raise MaverickAIException("No possible moves... SEE CODE COMMENT")
Example #8
 def test_bWD4_correctValues(self):
     self._assertListsEq([(_Posn(6, 0), _Posn(5, 0)),
                          (_Posn(6, 0), _Posn(4, 0)),
                          (_Posn(6, 1), _Posn(5, 1)),
                          (_Posn(6, 1), _Posn(4, 1)),
                          (_Posn(6, 2), _Posn(5, 2)),
                          (_Posn(6, 2), _Posn(4, 2)),
                          (_Posn(6, 3), _Posn(5, 3)),
                          (_Posn(6, 3), _Posn(4, 3)),
                          (_Posn(6, 4), _Posn(5, 4)),
                          (_Posn(6, 4), _Posn(4, 4)),
                          (_Posn(6, 5), _Posn(5, 5)),
                          (_Posn(6, 5), _Posn(4, 5)),
                          (_Posn(6, 6), _Posn(5, 6)),
                          (_Posn(6, 6), _Posn(4, 6)),
                          (_Posn(6, 7), _Posn(5, 7)),
                          (_Posn(6, 7), _Posn(4, 7)),
                          (_Posn(7, 1), _Posn(5, 0)),
                          (_Posn(7, 1), _Posn(5, 2)),
                          (_Posn(7, 6), _Posn(5, 5)),
                          (_Posn(7, 6), _Posn(5, 7))],
                         enumMoves(getBoardWD4(), _b))
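
_Posn is not defined in this listing; the heuristic code further down builds positions as ChessPosn(rank, file), so _Posn is presumably a short alias for that class. A namedtuple sketch of the assumed (rank, file) shape, purely to make the test data above easier to read (field names are guesses):

from collections import namedtuple

# Hypothetical stand-in for the real ChessPosn class
_Posn = namedtuple("_Posn", ["rankN", "fileN"])

# Reading the test data above: (_Posn(6, 0), _Posn(5, 0)) is the black pawn
# on rank index 6, file index 0 stepping forward one rank
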
 def test_newB_correctValues(self):
     # "Should be 20 possible moves at start")
     self._assertListsEq([(_Posn(0, 1), _Posn(2, 0)),
                          (_Posn(0, 1), _Posn(2, 2)),
                          (_Posn(0, 6), _Posn(2, 5)),
                          (_Posn(0, 6), _Posn(2, 7)),
                          (_Posn(1, 0), _Posn(2, 0)),
                          (_Posn(1, 0), _Posn(3, 0)),
                          (_Posn(1, 1), _Posn(2, 1)),
                          (_Posn(1, 1), _Posn(3, 1)),
                          (_Posn(1, 2), _Posn(2, 2)),
                          (_Posn(1, 2), _Posn(3, 2)),
                          (_Posn(1, 3), _Posn(2, 3)),
                          (_Posn(1, 3), _Posn(3, 3)),
                          (_Posn(1, 4), _Posn(2, 4)),
                          (_Posn(1, 4), _Posn(3, 4)),
                          (_Posn(1, 5), _Posn(2, 5)),
                          (_Posn(1, 5), _Posn(3, 5)),
                          (_Posn(1, 6), _Posn(2, 6)),
                          (_Posn(1, 6), _Posn(3, 6)),
                          (_Posn(1, 7), _Posn(2, 7)),
                          (_Posn(1, 7), _Posn(3, 7))],
                         enumMoves(getBoardNew(), _w))
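
_assertListsEq and _allLegalMoves are helpers on the test class that do not appear in this listing. A sketch of what _assertListsEq presumably checks - the same moves regardless of enumeration order - though the real helper may be implemented differently:

    def _assertListsEq(self, expected, actual):
        """Hypothetical helper: assert both lists hold the same items,
        ignoring order (move enumeration order is not guaranteed)"""
        self.assertEqual(len(expected), len(actual))
        for item in expected:
            self.assertIn(item, actual)
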
 def test_newB_allLegalMoves(self):
     self._allLegalMoves(getBoardNew(), _w, enumMoves(getBoardNew(), _w))
 def test_newB_correctLen(self):
     self.assertEqual(20, len(enumMoves(getBoardNew(), _w)))
Example #14
 def test_bWD4_correctLen(self):
     self.assertEqual(28, len(enumMoves(getBoardWD4(), _w)))
 def test_bCmplx_allLegalMoves_white(self):
     b = getBoardComplex()
     self._allLegalMoves(b, _w, enumMoves(b, _w))
 def test_bWD4_allLegalMoves(self):
     self._allLegalMoves(getBoardWD4(), _b, enumMoves(getBoardWD4(), _b))
Example #18
def heuristicPiecesCovered(color, board):
    """Return a number representing how many of color's pieces are covered

    @param color: one of maverick.data.ChessBoard.WHITE or
                maverick.data.ChessBoard.BLACK
    @param board: a ChessBoard object

    @return: a value representing the number of color's non-pawn pieces
            whose positions could be immediately re-taken if captured,
            weighted by piece value"""

    ## TODO (James): speed this UP. It's a huge performance bottleneck

    # Construct list of friendly pieces
    friendPiecePosns = board.getPiecesOfColor(color)

    # Accumulator for return value
    weightedReturn = 0

    # For each piece, test whether a friendly piece could move to its
    # position if it were not present
    for lostPiecePosn in friendPiecePosns:

        # Don't test this for kings - it's meaningless
        if board[lostPiecePosn].pieceType != ChessBoard.KING:
            # Build hypothetical board with the lost piece removed

            # Store the supposedly lost piece for later replacement
            # (this avoids having to make a slow deep copy)
            lostPiece = board[lostPiecePosn]

            # Eliminate the supposedly lost piece
            board[lostPiecePosn] = None

            # Build list of possible friendly moves
            # TODO (mattsh): board was hypoBoard. Did I break something?
            friendlyMoves = enumMoves(board, color)

            # The board square was just emptied above, so read the piece type
            # from the saved lostPiece rather than the (now None) board square
            lostPieceType = lostPiece.pieceType
            lostPieceValue = PIECE_VALUES[lostPieceType]

            # Test whether any move includes a move to the destination
            for move in friendlyMoves:
                moveDstPosn = move[1]
                if lostPiecePosn == moveDstPosn:
                    # Add piece value to accumulator
                    weightedReturn += lostPieceValue
                    # Only add once per piece being covered
                    break

            # VERY IMPORTANT - restore supposedly lost piece to original
            # location
            board[lostPiecePosn] = lostPiece

    # Sum the total possible piece value for all pieces of this color

    pcPosnValSumF = lambda a, b: a + PIECE_VALUES[board[b].pieceType]
    maxCoveredValue = reduce(pcPosnValSumF, friendPiecePosns, 0)

    # Compress return value into range [-1..1]
    # (float() guards against Python 2 integer division truncating to 0)
    return -1 + float(weightedReturn) / maxCoveredValue * 2
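
Every heuristic in this listing compresses its score into [-1..1], and the search code below consumes them through evaluateBoardLikability(color, board, self.heuristicWgts), which is not shown. A sketch of such a weighted blend, assuming heuristicWgts maps heuristic functions to numeric weights (the real implementation may be structured differently):

def evaluateBoardLikability_sketch(color, board, heuristicWgts):
    """Hypothetical weighted combination of the [-1..1] heuristic scores"""
    total = 0.0
    weightSum = 0.0
    for heuristicFn, weight in heuristicWgts.items():
        total += weight * heuristicFn(color, board)
        weightSum += weight
    # Dividing by the weight sum keeps the result in [-1..1], matching the
    # alpha/beta bounds of -1 and 1 that getNextMove passes to _boardSearch
    return total / weightSum if weightSum else 0.0
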
    def _boardSearch(self, board, color, depth, alpha, beta,
                     isMaxNode, stopSrchTime):
        """Performs a board via alpha-beta pruning/quiescence search

        NOTE: Not guaranteed to stop promptly at stopSrchTime - may take some
        time to terminate. Leave a time buffer.

        Selectively explores past the final depth if many pieces have
        been captured recently, by calling quiescent search

        @param board: The starting board state to evaluate
        @param color: The color of the player to generate a move for
        @param depth: The number of plies forward that should be explored
        @param alpha: Nodes with a likability below this will be ignored
        @param beta: Nodes with a likability above this will be ignored
        @param isMaxNode: Whether this node seeks to maximize the value of
                        its child nodes
        @param stopSrchTime: Time at which the search should begin to terminate

        @return: A tuple with the following elements:
                1. None, or a move of the form (fromChessPosn, toChessPosn)
                    representing the next move that should be made by the given
                    player
                2. The likability of this move's path in the tree, as followed
                    by the search and as determined by the likability of the
                    leaf node terminating the path
                3. The number of nodes visited in the search

        Implementation based on information found here: http://bit.ly/t1dHKA"""
        ## TODO (James): Check timeout less than once per iteration

        ## TODO (James): Make logging conditional - temporarily disabled

        ## TODO (James): Set this to True before handing in!
        # Whether to limit based on wall clock time or number of nodes seen
        USE_WALL_CLOCK = False

        # Number of nodes to visit (for when wall clock time is not used)
        NUM_NODES_TO_VISIT = 1200

        #logStrF = "Performing minimax search to depth {0}.".format(depth)
        #QLAI._logger.debug(logStrF)

        # Note that we've visited a node
        nodesVisited = 1

        otherColor = ChessBoard.getOtherColor(color)

        # Check if we are at a leaf node
        if (depth == 0):
            (a, b) = self._quiescentSearch(board, color, alpha, beta,
                                           isMaxNode)
            return (a, b, 1)

        # Check if we should otherwise terminate
        elif (time() > stopSrchTime or
              board.isKingCheckmated(color) or
              board.isKingCheckmated(otherColor)):
            return (None, evaluateBoardLikability(color, board,
                                                  self.heuristicWgts),
                    nodesVisited)

        else:
            moveChoices = enumMoves(board, color)
            #logStrF = "Considering {0} poss. moves".format(len(moveChoices))
            #QLAI._logger.debug(logStrF)

            # Check whether seeking to find minimum or maximum value
            if isMaxNode:
                newMin = alpha
                newMoveChoice = None
                for move in moveChoices:

                    # getPlyResult mutates THIS board in place - much faster
                    # than copying. REMEMBER TO UNDO THIS HYPOTHETICAL MOVE

                    # Save the old flag sets so they can be restored
                    boardMoveUndoDict = board.getPlyResult(move[0], move[1])

                    # Find the next move for this node, and how likable the
                    # enemy will consider this child node
                    (_, nodeEnemyLikability, nVisit) = self._boardSearch(board,
                                                                 otherColor,
                                                                 depth - 1,
                                                                 newMin, beta,
                                                                 not isMaxNode,
                                                                 stopSrchTime)
                    # RESTORE THE OLD BOARD STATE - VERY IMPORTANT
                    board.undoPlyResult(boardMoveUndoDict)

                    # Note how many more nodes we've visited
                    nodesVisited += nVisit

                    # Make note of the least likable branches that it still
                    # makes sense to pursue, given how likable this one is
                    if nodeEnemyLikability > newMin:
                        newMin = nodeEnemyLikability
                        newMoveChoice = move

                    # Don't search outside of the target range
                    elif nodeEnemyLikability > beta:
                        #QLAI._logger.debug("Pruning because new value > beta")
                        return (move, beta, nodesVisited)

                    # Check to see if we've evaluated the max number of nodes
                    if ((not USE_WALL_CLOCK) and
                        (nodesVisited > NUM_NODES_TO_VISIT)):
                        return (newMoveChoice, newMin, nodesVisited)

                return (newMoveChoice, newMin, nodesVisited)
            else:
                newMax = beta
                newMoveChoice = None
                for move in moveChoices:

                    # getPlyResult mutates THIS board in place - much faster
                    # than copying. REMEMBER TO UNDO THIS HYPOTHETICAL MOVE

                    # Save the old flag sets so they can be restored
                    boardMoveUndoDict = board.getPlyResult(move[0], move[1])

                    # Find how likable the enemy will consider this child node
                    (_, nodeEnemyLikability, nVisit) = self._boardSearch(board,
                                                                 otherColor,
                                                                 depth - 1,
                                                                 alpha, newMax,
                                                                 not isMaxNode,
                                                                 stopSrchTime)

                    # RESTORE THE OLD BOARD STATE - VERY IMPORTANT:
                    board.undoPlyResult(boardMoveUndoDict)

                    # Note how many more nodes we've visited
                    nodesVisited += nVisit

                    # Make note of the most likable branches that it still
                    # makes sense to pursue, given how likable this one is
                    if nodeEnemyLikability < newMax:
                        newMax = nodeEnemyLikability
                        newMoveChoice = move

                    # Don't bother searching outside of our target range
                    elif nodeEnemyLikability < alpha:
                        #QLAI._logger.debug("pruning because new val < alpha")
                        return (move, alpha, nodesVisited)

                    # Check to see if we've evaluated the max number of nodes
                    if ((not USE_WALL_CLOCK) and
                        (nodesVisited > NUM_NODES_TO_VISIT)):
                        return (newMoveChoice, newMax, nodesVisited)
                return (newMoveChoice, newMax, nodesVisited)
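
The search above depends on undoPlyResult always running after getPlyResult, which the comments flag as VERY IMPORTANT. A sketch of a context manager over the same getPlyResult/undoPlyResult API (the wrapper name is made up) that makes the restore unconditional even if the recursive call raises:

from contextlib import contextmanager

@contextmanager
def hypotheticalPly(board, fromPosn, toPosn):
    """Apply a hypothetical move and guarantee it is undone on exit"""
    boardMoveUndoDict = board.getPlyResult(fromPosn, toPosn)
    try:
        yield board
    finally:
        board.undoPlyResult(boardMoveUndoDict)

# Usage inside the move loop would look roughly like:
#     with hypotheticalPly(board, move[0], move[1]):
#         ... recurse into self._boardSearch(...) ...
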
    def _quiescentSearch(self, board, color, alpha, beta, isMaxNode):
        """Perform a quiescent search on the given board, examining captures

        Enumerates captures, and evaluates them to see if they alter results

        @param board: The starting board state to evaluate
        @param color: The color of the player to generate a move for
        @param alpha: Nodes with a likability below this will be ignored
        @param beta: Nodes with a likability above this will be ignored
        @param isMaxNode: Whether this node seeks to maximize the value of
                        its child nodes

        @return: A tuple with the following elements:
                1. None, or a move of the form (fromChessPosn, toChessPosn)
                    representing the next move that should be made by the given
                    player
                2. The likability of this move's path in the tree, as followed
                    by the search and as determined by the likability of the
                    leaf node terminating the path

        No timeout is applied here. This shouldn't take long anyway - and if
        it does, there are many captures worth exploring.

        Note: this was influenced by information here: http://bit.ly/VYlJVC """

        otherColor = ChessBoard.getOtherColor(color)

        # Note the appeal of this board, with no captures
        standPatVal = evaluateBoardLikability(color, board, self.heuristicWgts)

        # Build up a list of capture moves

        moveChoices = enumMoves(board, color)
        moveFilterFunct = lambda m: (
            (board[m[1]] is not None) and (board[m[1]].color == otherColor))
        captureMoves = filter(moveFilterFunct, moveChoices)

        # Determine whether captures are a good or a bad thing
        if isMaxNode:

            # Check whether it is even worth proceeding with evaluation
            if (standPatVal > beta):
                return (None, beta)
            elif (alpha < standPatVal):
                alpha = standPatVal

            # Evaluate all captures
            for capMv in captureMoves:
                boardMoveUndoDict = board.getPlyResult(capMv[0], capMv[1])
                moveResultScore = evaluateBoardLikability(
                    otherColor, board, self.heuristicWgts)
                board.undoPlyResult(boardMoveUndoDict)

                # Don't bother searching outside of target range
                if (moveResultScore > beta):
                    return (None, beta)

                # Check whether we've found something superior to our best
                elif (moveResultScore > alpha):
                    alpha = moveResultScore

            # All captures for this node have been evaluated - return best
            return (None, alpha)
        else:
            # Check whether it is even worth proceeding with evaluation
            if (standPatVal < alpha):
                return (None, alpha)
            elif (beta > standPatVal):
                beta = standPatVal

            # Evaluate all captures
            for capMv in captureMoves:
                boardMoveUndoDict = board.getPlyResult(capMv[0], capMv[1])
                moveResultScore = evaluateBoardLikability(
                    otherColor, board, self.heuristicWgts)
                board.undoPlyResult(boardMoveUndoDict)

                # Don't bother searching outside of target range
                if (moveResultScore < alpha):
                    return (None, alpha)

                # Check whether we've found something superior to our best
                elif (moveResultScore < beta):
                    beta = moveResultScore

            # All captures for this node have been evaluated - return best
            return (None, beta)
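
A side note on the capture filter above: it relies on Python 2's filter returning a list (this listing is Python 2 throughout - note the print statements). An equivalent list-comprehension sketch against the same board and enumMoves API behaves identically under Python 3 as well:

def enumCaptureMoves_sketch(board, color):
    """Hypothetical capture-only enumeration matching the filter above"""
    otherColor = ChessBoard.getOtherColor(color)
    return [m for m in enumMoves(board, color)
            if board[m[1]] is not None and board[m[1]].color == otherColor]
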
Example #26
def heuristicEmptySpaceCvrg(color, board):
    """Return a value representing the number of empty squares controlled

    @param color: one of maverick.data.ChessBoard.WHITE or
                maverick.data.ChessBoard.BLACK
    @param board: a ChessBoard object

    @return: a value representing the number of empty squares that the
            given color can attack on the given board, with weight for
            center squares"""

    # The value of regular and center squares
    ## TODO (James): research and tweak these.
    #                See http://tinyurl.com/cpjqnw4l
    centerSquareValue = 2
    squareValue = 1

    # Build up a list of all piece locations as tuples

    pieceLocations = []
    otherColor = ChessBoard.getOtherColor(color)

    # TODO (mattsh): Not sure, do we want to add both ours and theirs?

    # Find friendly piece locations and add to pieceLocations
    for piecePosn in board.getPiecesOfColor(color):
        pieceLocations.append(piecePosn)

    # Find enemy piece locations and add to pieceLocations
    for enemyPiecePosn in board.getPiecesOfColor(otherColor):
        pieceLocations.append(enemyPiecePosn)

    # Build list of empty squares

    emptyLocations = []

    # Check each location to see if it is occupied
    for r in range(0, ChessBoard.BOARD_LAYOUT_SIZE):
        for f in range(0, ChessBoard.BOARD_LAYOUT_SIZE):
            testPosn = ChessPosn(r, f)

            if testPosn not in pieceLocations:
                emptyLocations.append(testPosn)

    # Build list of possible friendly piece moves
    friendlyMoves = enumMoves(board, color)
    friendlyMoveDestPosns = map(lambda x: x[1], friendlyMoves)

    # Find possible moves to empty squares and build up return value

    # Accumulator for return value
    weightedReturn = 0

    for dest in emptyLocations:

        # Check if a move can be made to that Posn
        if dest in friendlyMoveDestPosns:

            # Check if it is a center square
            if __heuristicEmptySpaceCoverage_isCenterSquare(dest):
                weightedReturn += centerSquareValue
            else:
                weightedReturn += squareValue

    # Calculate total weight of empty squares on board
    totalEmptyPosnWeight = 0
    for posn in emptyLocations:
        if __heuristicEmptySpaceCoverage_isCenterSquare(posn):
            totalEmptyPosnWeight += centerSquareValue
        else:
            totalEmptyPosnWeight += squareValue

    # Compress return value into range [-1..1]
    # (float() guards against Python 2 integer division truncating to 0)
    return -1 + float(weightedReturn) / totalEmptyPosnWeight * 2
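
__heuristicEmptySpaceCoverage_isCenterSquare is referenced above but not included in this listing. A minimal reconstruction, assuming "center" means the four central squares of the 8x8 board and that positions expose rankN/fileN attributes (both are guesses - the real helper may weight a larger region):

def __heuristicEmptySpaceCoverage_isCenterSquare(posn):
    """Hypothetical sketch: ranks 3-4 and files 3-4 (0-indexed) are center"""
    return posn.rankN in (3, 4) and posn.fileN in (3, 4)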