model.dictionary) policy = ActionToArray.moveEvaluations( ActionToArray.legalMovesForState( board.arrayBoard, board.board), board.arrayBoard, outputs) model.childrenPolicyEval.append(policy) model.childrenMoveNames.append( ActionToArray.legalMovesForState( board.arrayBoard, board.board)) directory = model.dictionary[board.boardToString()] if playouts > 0: index = np.argmax( MCTSCrazyhouse.PUCT_Algorithm( model.childrenStateWin[directory], model.childrenStateSeen[directory], 1, np.sum(model.childrenStateSeen[directory]), model.childrenValueEval[directory], MCTSCrazyhouse.noiseEvals( model.childrenPolicyEval[directory], noiseVal))) else: index = np.argmax( MCTSCrazyhouse.noiseEvals(model.childrenPolicyEval[directory], noiseVal)) move = model.childrenMoveNames[directory][index] if chess.Move.from_uci(move) not in board.board.legal_moves: move = ActionToArray.legalMovesForState(board.arrayBoard, board.board)[0] print("bestmove " + move) print( "info depth 1 score cp", str(
def _selectNetworkMove(net, playouts, sim, noiseVal):
    """Select the next move for *net* in position *sim*.

    With playouts > 0 the move is chosen by MCTS: the tree is (re)built from
    the current position and PUCT is applied to the accumulated win/visit
    statistics.  With playouts == 0 the move comes from a single raw
    policy-network evaluation perturbed by *noiseVal*.

    If the selected move string is rejected by the board (the policy head can
    propose encoder-legal moves the position does not allow), the first legal
    move is played instead.
    """
    if playouts > 0:
        # The search itself registers the position and fills the children*
        # statistics used by PUCT below.
        net.competitivePlayoutsFromPosition(playouts, sim)
    else:
        position = sim.boardToString()
        if position not in net.dictionary:
            state = torch.from_numpy(sim.boardToState())
            # Filler labels: DoubleHeadDataset requires targets, but they
            # are never read at inference time.
            nullAction = torch.from_numpy(np.zeros(1))
            testSet = DoubleHeadDataset(state, nullAction, nullAction)
            generatePredic = torch.utils.data.DataLoader(
                dataset=testSet, batch_size=len(state), shuffle=False)
            with torch.no_grad():
                for images, labels1, labels2 in generatePredic:
                    net.neuralNet.eval()
                    outputs = net.neuralNet(images)[0]
                    # Register the new position and its policy evaluation.
                    # (The original also carried an unreachable
                    # "if playouts > 0" branch here; this path only runs
                    # when playouts == 0, so it was dropped.)
                    net.dictionary[sim.boardToString()] = len(net.dictionary)
                    policy = ActionToArray.moveEvaluations(
                        ActionToArray.legalMovesForState(
                            sim.arrayBoard, sim.board),
                        sim.arrayBoard, outputs)
                    net.childrenMoveNames.append(
                        ActionToArray.legalMovesForState(
                            sim.arrayBoard, sim.board))
                    net.childrenPolicyEval.append(policy)
    directory = net.dictionary[sim.boardToString()]
    if playouts > 0:
        index = np.argmax(
            MCTSCrazyhouse.PUCT_Algorithm(
                net.childrenStateWin[directory],
                net.childrenStateSeen[directory], 1,
                np.sum(net.childrenStateSeen[directory]),
                net.childrenValueEval[directory],
                MCTSCrazyhouse.noiseEvals(
                    net.childrenPolicyEval[directory], noiseVal)))
    else:
        index = np.argmax(
            MCTSCrazyhouse.noiseEvals(
                net.childrenPolicyEval[directory], noiseVal))
    move = net.childrenMoveNames[directory][index]
    if chess.Move.from_uci(move) not in sim.board.legal_moves:
        move = ActionToArray.legalMovesForState(sim.arrayBoard, sim.board)[0]
    return move


def NetworkCompetitionWhite(bestNet, testingNet, playouts, round="1"):
    """Play one crazyhouse game with *bestNet* as White and *testingNet* as Black.

    Both sides pick moves via _selectNetworkMove (MCTS when playouts > 0,
    raw policy otherwise).  The finished game is printed as a PGN whose
    "Round" header is *round*.

    Returns the score from *testingNet*'s (Black's) point of view:
    1 for a Black win, 0.5 for a draw, 0 for a White win.
    """
    score = 0
    PGN = chess.pgn.Game()
    PGN.headers["Event"] = "Neural Network Comparison Test"
    PGN.headers["Site"] = "Cozy Computer Lounge"
    PGN.headers["Date"] = datetime.datetime.today().strftime('%Y-%m-%d %H:%M')
    PGN.headers["Round"] = round
    PGN.headers["White"] = "Network: " + bestNet.nameOfNetwork
    PGN.headers["Black"] = "Network: " + testingNet.nameOfNetwork
    PGN.headers["Variant"] = "crazyhouse"

    sim = ChessEnvironment()
    while sim.result == 2:  # 2 == game still in progress
        # Exploration noise shrinks as the game progresses.
        noiseVal = 1.0 / (2 * (sim.plies // 2 + 1))
        # Even plies: White (bestNet) to move; odd plies: Black (testingNet).
        mover = bestNet if sim.plies % 2 == 0 else testingNet
        move = _selectNetworkMove(mover, playouts, sim, noiseVal)
        sim.makeMove(move)
        sim.gameResult()
        # First ply starts the PGN variation; later plies extend it.
        if sim.plies == 1:
            node = PGN.add_variation(chess.Move.from_uci(move))
        else:
            node = node.add_variation(chess.Move.from_uci(move))

    if sim.result == 1:
        PGN.headers["Result"] = "1-0"
    if sim.result == 0:
        PGN.headers["Result"] = "1/2-1/2"
        score = 0.5
    if sim.result == -1:
        PGN.headers["Result"] = "0-1"
        score = 1

    print(PGN)
    return score
print("PLAYOUTS:", ENGINE_PLAYOUTS) # START SEARCHING FOR MCTS TREE if ENGINE_PLAYOUTS > 0: start = time.time() print("CHOSEN DEPTH:",model.DEPTH_VALUE) model.competitivePlayoutsFromPosition(ENGINE_PLAYOUTS, board) end = time.time() TIME_SPENT = end-start directory = model.dictionary[board.boardToString()] if board.plies > 10 or board.plies < 2: index = np.argmax(model.childrenStateSeen[directory]) else: index = np.argmax(MCTSCrazyhouse.noiseEvals(model.childrenPolicyEval[directory], noiseVal)) move = model.childrenMoveNames[directory][index] else: state = torch.from_numpy(board.boardToState()) # moves in a position moveNames = ActionToArray.legalMovesForState(board.arrayBoard, board.board) mate = isThereMate(board, moveNames, model.matefinder) if mate != None: index = mate print("I see mate!") else: model.neuralNet.eval() outputs = model.neuralNet(state)[0]
def NetworkCompetitionWhite(bestNet, playouts, round="1"):
    """Play one crazyhouse game: *bestNet* as White versus a human on stdin.

    White's moves come from MCTS when playouts > 0, otherwise from a single
    raw policy evaluation.  Black's moves are read with input() and
    re-prompted until a legal UCI move is entered.  After every ply the
    board, both drop pockets, and win-probability estimates are printed;
    the complete PGN (with the given *round* header) is printed at the end.
    """
    PGN = chess.pgn.Game()
    PGN.headers["Event"] = "Neural Network Comparison Test"
    PGN.headers["Site"] = "Cozy Computer Lounge"
    PGN.headers["Date"] = datetime.datetime.today().strftime('%Y-%m-%d %H:%M')
    PGN.headers["Round"] = round
    PGN.headers["White"] = "Network: " + bestNet.nameOfNetwork
    PGN.headers["Black"] = "You"
    PGN.headers["Variant"] = "crazyhouse"

    sim = ChessEnvironment()
    while sim.result == 2:  # 2 == game still in progress
        # NOTE(review): numerator is 0.0, so noiseVal is always 0 — the
        # engine plays deterministically against a human.  Presumably
        # intentional; confirm before changing.
        noiseVal = 0.0 / (10 * (sim.plies // 2 + 1))
        if sim.plies % 2 == 0:
            # --- White: network move ---
            if playouts > 0:
                start = time.time()
                bestNet.competitivePlayoutsFromPosition(playouts, sim)
                end = time.time()
                print(end - start)
            else:
                position = sim.boardToString()
                if position not in bestNet.dictionary:
                    image = torch.from_numpy(sim.boardToState())
                    # eval() + no_grad() match the other competition code in
                    # this file: inference mode, no autograd graph.
                    bestNet.neuralNet.eval()
                    with torch.no_grad():
                        outputs = bestNet.neuralNet(image)[0]
                    # Register the new position and its policy evaluation.
                    # (An unreachable "if playouts > 0" branch that lived
                    # here was removed: this path only runs when
                    # playouts == 0.)
                    bestNet.dictionary[sim.boardToString()] = len(
                        bestNet.dictionary)
                    policy = ActionToArray.moveEvaluations(
                        ActionToArray.legalMovesForState(
                            sim.arrayBoard, sim.board),
                        sim.arrayBoard, outputs)
                    bestNet.childrenMoveNames.append(
                        ActionToArray.legalMovesForState(
                            sim.arrayBoard, sim.board))
                    bestNet.childrenPolicyEval.append(policy)
            directory = bestNet.dictionary[sim.boardToString()]
            if playouts > 0:
                index = np.argmax(
                    MCTSCrazyhouse.PUCT_Algorithm(
                        bestNet.childrenStateWin[directory],
                        bestNet.childrenStateSeen[directory], 1,
                        np.sum(bestNet.childrenStateSeen[directory]),
                        bestNet.childrenValueEval[directory],
                        MCTSCrazyhouse.noiseEvals(
                            bestNet.childrenPolicyEval[directory],
                            noiseVal)))
            else:
                index = np.argmax(
                    MCTSCrazyhouse.noiseEvals(
                        bestNet.childrenPolicyEval[directory], noiseVal))
            move = bestNet.childrenMoveNames[directory][index]
            # The policy head can propose a move the board rejects; fall
            # back to the first legal move rather than crash.
            if chess.Move.from_uci(move) not in sim.board.legal_moves:
                move = ActionToArray.legalMovesForState(
                    sim.arrayBoard, sim.board)[0]
            # PRINT WIN PROBABILITY W/ MCTS?
            print("-----")
            print(move)
            print("Win Probability: {:.4f} %".format(
                100 * ValueEvaluation.positionEval(sim, bestNet.neuralNet)))
            if playouts > 0 and bestNet.childrenStateSeen[directory][index] > 0:
                mctsWinRate = 100 * bestNet.childrenStateWin[directory][
                    index] / bestNet.childrenStateSeen[directory][index]
                print("MCTS Win Probability: {:.4f} %".format(mctsWinRate))
                totalWinRate = (100 * ValueEvaluation.positionEval(
                    sim, bestNet.neuralNet) + mctsWinRate) / 2
                print("Total Win Probability: {:.4f} %".format(totalWinRate))
            print("-----")
            sim.makeMove(move)
            sim.gameResult()
        elif sim.plies % 2 == 1:
            # --- Black: human move from stdin ---
            legal = False
            while not legal:
                move = input("Enter move: ")
                if len(move) == 4 or len(move) == 5:
                    # from_uci raises ValueError on malformed strings
                    # (e.g. "zz9q5"); treat that like an illegal move and
                    # re-prompt instead of crashing.
                    try:
                        legal = chess.Move.from_uci(move) in sim.board.legal_moves
                    except ValueError:
                        legal = False
                    if not legal:
                        print("Illegal move! Try again:")
                else:
                    print("Illegal move! Try again:")
            print(move)
            sim.makeMove(move)
            sim.gameResult()
        # First ply starts the PGN variation; later plies extend it.
        if sim.plies == 1:
            node = PGN.add_variation(chess.Move.from_uci(move))
        else:
            node = node.add_variation(chess.Move.from_uci(move))
        print(sim.board)
        print("WHITE POCKET")
        print(sim.whiteCaptivePieces)
        print("BLACK POCKET")
        print(sim.blackCaptivePieces)

    if sim.result == 1:
        PGN.headers["Result"] = "1-0"
    if sim.result == 0:
        PGN.headers["Result"] = "1/2-1/2"
    if sim.result == -1:
        PGN.headers["Result"] = "0-1"

    print(PGN)