def heuristic_value(self, board, move):
    data = [{'state': board.fen(), 'move': move}]
    prec = md.preprocessor()
    prec.fit(raw_data=data)
    data = prec.transform()
    theta = self.model.predict_proba(data)
    return theta * eval.evaluate_board(board), theta
def pvs(self, depth, alpha, beta, color):
    # Transposition-table lookup keyed on depth and side to move.
    key = self.hash_board(depth, color)
    if key in self.cache:
        return self.cache[key]
    # Leaf node or no legal moves: fall back to the static evaluation.
    if depth == 0 or not self.board.legal_moves:
        self.cache[key] = eval.evaluate_board(self.board)
        return self.cache[key]
    # Bail out with the current bound once the time budget is exhausted.
    if time.time() - self.start > self.total_time:
        return alpha
    moves = list(self.board.legal_moves)
    for i, move in enumerate(moves):
        self.board.push(move)
        if i == 0:
            # Search the first (principal variation) move with a full window.
            score = -self.pvs(depth - 1, -beta, -alpha, -color)
        else:
            # Search the remaining moves with a null window ...
            score = -self.pvs(depth - 1, -alpha - 1, -alpha, -color)
            if alpha < score < beta:
                # ... and re-search with a full window if the null window fails high.
                score = -self.pvs(depth - 1, -beta, -score, -color)
        self.board.pop()
        alpha = max(alpha, score)
        if alpha >= beta:
            # Beta cutoff.
            break
    return alpha
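# A minimal sketch of driving pvs from the root: search every root move with
# the negamax convention used above and keep the best one. select_move_pvs and
# its default depth are hypothetical additions for illustration; only
# self.board, self.start, self.total_time, the cache handling and pvs itself
# come from the code above, and `time` is assumed to be imported at module
# level since pvs already uses it.
def select_move_pvs(self, depth=4):
    best_move, best_score = None, float('-inf')
    self.start = time.time()  # reset the time budget for this search
    color = 1 if self.board.turn else -1
    for move in list(self.board.legal_moves):
        self.board.push(move)
        score = -self.pvs(depth - 1, float('-inf'), float('inf'), -color)
        self.board.pop()
        if score > best_score:
            best_move, best_score = move, score
    return best_move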
def record(self, board, score):
    self.visits["total"] = self.visits.get("total", 1) + 1
    self.visits[board.fen()] = self.visits.get(board.fen(), 0) + 1
    #dataset = {'input': np.asarray(list(board.fen().encode('utf8'))), 'target': score}
    #self.data.append(dataset)
    self.log('Visit Recorded', msg_type=2)
    #return self.model.fit(dataset = self.data)
    return eval.evaluate_board(board)
def play_value(board, movehistory=None):
    # Module-level greedy playout: repeatedly pick the move with the best
    # heuristic value and back the result up. Note that this variant has no
    # depth cap and only terminates at checkmate.
    print('play_value')
    if board.is_checkmate():
        record(board, eval.evaluate_board(board))
        return eval.evaluate_board(board)
    heuristic_vals = {}
    # Snapshot the moves before mutating the board.
    for move in list(board.pseudo_legal_moves):
        board.push(move)
        heuristic_vals[move] = -heuristic_value(board)
        board.pop()
    move = max(heuristic_vals, key=heuristic_vals.get)
    board.push(move)
    value = -play_value(board)
    board.pop()
    record(board, value)
    return value
def play_value(self, board, depth=5):
    # Depth-limited playout guided by the learned heuristic.
    if board.is_checkmate() or depth == 0:
        self.record(board, eval.evaluate_board(board))
        return eval.evaluate_board(board)
    heuristic_vals = {}
    # Snapshot the moves before mutating the board.
    for move in list(board.legal_moves):
        board.push(move)
        val = self.heuristic_value(board, move)
        if val is not None:
            heuristic_vals[move] = val[0]
        board.pop()
    if not heuristic_vals:
        # No scorable moves (e.g. stalemate): fall back to the static evaluation.
        self.record(board, eval.evaluate_board(board))
        return eval.evaluate_board(board)
    move = max(heuristic_vals, key=heuristic_vals.get)
    board.push(move)
    value = self.play_value(board, depth=depth - 1)
    #print("value" + str(value))
    board.pop()
    self.record(board, value)
    #self.log('Playout Complete')
    return value
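# A minimal sketch of driving the depth-limited playout above. `engine` is
# assumed to be an instance of the class these methods belong to (its name is
# not shown in this section), and run_playouts itself is a hypothetical helper,
# not part of the original code; python-chess is assumed to be installed.
def run_playouts(engine, n=10, depth=5):
    import chess
    values = [engine.play_value(chess.Board(), depth=depth) for _ in range(n)]
    # record() has been counting visits as a side effect of every playout step.
    return sum(values) / len(values), engine.visits.get("total", 0)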
def quiesce(alpha, beta, board):
    # need to import evaluate.py
    stand_pat = eval.evaluate_board(board)
    if stand_pat >= beta:
        return beta
    if alpha < stand_pat:
        alpha = stand_pat
    # Only search captures until the position is quiet;
    # snapshot the moves before mutating the board.
    for move in list(board.legal_moves):
        if board.is_capture(move):
            board.push(move)
            score = -quiesce(-beta, -alpha, board)
            board.pop()
            if score >= beta:
                return beta
            if score > alpha:
                alpha = score
    return alpha
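# A minimal, standalone way to exercise the quiescence routine above, assuming
# python-chess is installed and the evaluate module is importable as `eval`
# (per the "need to import evaluate.py" note). quiesce_demo is a hypothetical
# helper for illustration only; in a quiet position such as the initial one it
# simply returns the stand-pat (static) evaluation, since there are no
# captures to search.
def quiesce_demo():
    import chess
    board = chess.Board()  # starting position: no captures, so stand-pat is returned
    score = quiesce(float('-inf'), float('inf'), board)
    print("quiescence score:", score)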