def playTak():
    """Play an interactive Tak game on stdin/stdout.

    The bot (turnIndicator == 1) chooses moves via minimax backed by the
    restored TakNet evaluation network; the human types moves at the
    "Player: " prompt. Typing "quit" exits. Ends when checkVictory()
    reports a result (anything other than 2).
    """
    state = GameState()
    print(state)
    with tf.Graph().as_default():
        # Rebuild the full training graph so the checkpoint's variables can
        # be restored by name; the train op itself is never run here.
        globalStep = tf.contrib.framework.get_or_create_global_step()
        boards, pieceCounts, realScores = TakNet.inputs(False)
        scores = TakNet.inference(boards, pieceCounts)
        totalLoss, meanLoss = TakNet.loss(realScores, scores)
        # noinspection PyUnusedLocal
        trainOp = TakNet.train(totalLoss, meanLoss, globalStep)
        with tf.Session() as session:
            # Restore the latest weights from the Network folder, then build
            # a placeholder-fed inference op that reuses those variables.
            tf.train.Saver().restore(session, tf.train.latest_checkpoint("Network"))
            tf.get_variable_scope().reuse_variables()
            boards2 = tf.placeholder(tf.float32)
            pieceCounts2 = tf.placeholder(tf.float32)
            inferenceOp = TakNet.inference(boards2, pieceCounts2)
            while True:
                try:
                    if state.turnIndicator == 1:
                        # Bot's turn: time the minimax search for display.
                        startTime = time.time()
                        move = miniMax(state, session, inferenceOp, boards2, pieceCounts2)
                        endTime = time.time()
                        print("TakticalBot: " + move)
                        print("Time: " + str(endTime - startTime))
                    else:
                        move = input("Player: ")
                        if move == "quit":
                            break
                    state = state.applyMove(move)
                    print(state)
                    winner = state.checkVictory()
                    if winner != 2:
                        # Game over: 1 = bot win, -1 = human win, else draw.
                        if winner == 1:
                            print("TakticalBot Won!")
                        elif winner == -1:
                            print("You Won!")
                        else:
                            print("It was a draw")
                        break
                except TakException as exception:
                    # Illegal/unparsable move: report and re-prompt.
                    print(exception)
                    continue
def parseGame(csvRow):
    """Expand one CSV game row into scored training positions.

    csvRow[0] is a comma-separated move list; csvRow[1] is the result code
    ("F-0"/"R-0" white win, "0-F"/"0-R" white loss, otherwise a draw).
    Each position after move i is paired with baseScore / (moves remaining),
    so positions closer to the end carry scores closer to the final result.

    Returns a list of TrainingGameState objects, one per move.
    """
    result = csvRow[1]
    if result in ("F-0", "R-0"):
        baseScore = 1  # white won (flat or road)
    elif result in ("0-F", "0-R"):
        baseScore = -1  # white lost
    else:
        baseScore = 0  # draw ("1/2-1/2")

    moves = csvRow[0].split(',')
    total = len(moves)
    snapshots = []
    game = GameState()
    for index, rawMove in enumerate(moves):
        game = game.applyMove(toPTN(rawMove))
        # Deep-copy so later applyMove calls cannot disturb stored states.
        snapshots.append(
            TrainingGameState(copy.deepcopy(game), baseScore / (total - index)))
    return snapshots
class Client:
    """Stdin/stdout protocol client that plays a board game.

    Reads the assigned color and opponent moves from stdin, answers with
    moves chosen by an Evaluator, and tracks the game in an internal
    GameState (rotated after every move so the evaluator always sees the
    board from the side to move).
    """

    def __init__(self, width, height, evaluator="default", name="JJF"):
        self.color = ""
        self.width = width
        self.height = height
        self.innerstate = GameState(width, height)
        self.evaluator = Evaluator(evaluator)
        self.name = name
        self.innerstate.populate_bauern()

    def find_best_move(self):
        """Return the evaluator's best move for the current position."""
        rating, move = self.evaluator.evaluate(self.innerstate, -100, 100)
        return move

    def connect(self):
        # Handshake: announce our name to the server.
        print(self.name)

    def start_game(self):
        """Read our color, acknowledge, and reset the board."""
        self.color = input()
        print("ok")
        self.innerstate = GameState(self.width, self.height)
        self.innerstate.populate_bauern()

    def end_game(self):
        # Consume the terminating message; anything other than "done" is a
        # protocol error that is currently ignored.
        movestring = input()
        if movestring == "done":
            return
        return  # error!

    def run(self):
        """Main loop: alternate our moves and the opponent's until done."""
        self.connect()
        while True:
            turn = "white"
            self.start_game()
            while True:
                if turn == self.color:
                    move = self.find_best_move()
                    print(Move.write_move(move, turn == "black", self.innerstate))
                else:
                    movestring = input()
                    if movestring == "done":
                        break
                    move = Move.parse_move(movestring, turn == "black", self.innerstate)
                    if not self.innerstate.checkIfLegal(move):
                        print("uups")  # error
                self.innerstate.applyMove(move)
                self.innerstate.rotateBoard()
                turn = self.invert_turn(turn)
                # game_is_finished() returns None while play continues.
                if self.innerstate.game_is_finished() != None:
                    self.end_game()
                    break

    @staticmethod
    def invert_turn(turn):
        """Return the opposing color; any non-"white" input maps to "white"."""
        if turn == "white":
            return "black"
        return "white"