# Example 1
    def getAction(self, board):
        """Search from a copy of *board* via MCTS and return the selected move.

        Runs Global.MAX_ITERS iterations of select/rollout/backpropagate,
        scoring each rollout as the heuristic gain relative to the starting
        position, then picks a child according to the configured strategy.
        """
        self.root = Node(chess.Board(board.fen()), None)

        for _ in range(Global.MAX_ITERS):
            leaf = self.treePolicy()
            rolloutBoard = leaf.rollout()
            # Back up the heuristic improvement over the starting position.
            gain = Global.HEURISTIC(rolloutBoard, board.turn) - Global.HEURISTIC(board, board.turn)
            leaf.backpropagate(gain)
            # Optionally alternate the "depressed" selection mode each iteration.
            if self.iterationalFlip:
                self.depressed = not self.depressed

        # Final move selection strategy.
        if Global.BEST_PICK:
            return self.getHighestValue()
        return self.getLeastVisited() if self.depressed else self.getMostVisited()
def playGame(agents):
    """Play a chess game between two agents and return a result string.

    agents: two-element sequence; agents[0] moves on even turns (white),
    agents[1] on odd turns. Each agent's getAction receives a fresh copy
    of the board so it cannot mutate the real game state.

    Returns "1 - 0", "0 - 1", or "0 - 0" (draw, or turn limit reached).
    """
    board = chess.Board()
    currentTurn = 0
    while not board.is_game_over():
        agent = agents[currentTurn % 2]
        # Hand the agent a copy so it cannot corrupt the live board.
        agentMove = agent.getAction(chess.Board(board.fen()))
        board.push(agentMove)

        # if VERBOSE:
        print("Turn: " + str(currentTurn))
        print("Current Move: " + str(agentMove))
        print("Current Board:")
        print(board)
        print(Global.HEURISTIC(board, (currentTurn % 2) == 0))
        print("")

        currentTurn += 1
        # Cap game length; treat an overlong game as a draw.
        if currentTurn > Global.MAX_TURNS:
            return "0 - 0"

    # board.result() is "1-0", "0-1", or "1/2-1/2".
    results = board.result().split("-")
    if results[0].strip() == "1":
        return "1 - 0"
    if results[1].strip() == "1":
        return "0 - 1"
    return "0 - 0"
def playGame(agents):
    """Play a checkers game between two agents and return the board's result.

    agents: two-element sequence; agents[0] moves on even turns,
    agents[1] on odd turns. Each agent receives a copy of the board.
    A game exceeding Global.MAX_TURNS is scored as a draw ("0 - 0").
    """
    board = checkers.CheckerBoard()
    currentTurn = 0
    while not board.is_over():
        # Alternate agents; give each a copy so the live board stays safe.
        agentMove = agents[currentTurn % 2].getAction(board.copy())
        board.make_move(agentMove)

        # if VERBOSE:
        print("Turn: " + str(currentTurn))
        print("Current Move: " + str(agentMove))
        print("Current Board:")
        print(board)
        print(Global.HEURISTIC(board, (currentTurn % 2) == 0))
        print("")

        currentTurn = currentTurn + 1
        if currentTurn > Global.MAX_TURNS:
            return "0 - 0"

    return board.result()