def get_move(self, board):
    """Top-level alpha-beta search: probes each root move, scores it, and
    caches the score in the transposition table."""
    current_depth = 0
    # possible_moves = [move for move in board.legal_moves]
    # shuffle(possible_moves)
    possible_moves = get_possible_moves(board, self.color, [], 0,
                                        history=self.history,
                                        trans_table=self.trans_table)
    best_move = None
    best_score = float('-inf')
    for move in possible_moves:
        board.push_uci(move.uci())
        if board.is_checkmate() and board.turn != self.color:
            board.pop()  # restore the board before returning the mating move
            return move
        h = ttu.hash_(board)
        score = self.alpha_beta(board, self.heuristic, float('-inf'),
                                float('inf'), False, current_depth + 1,
                                self.maximum_depth)
        self.trans_table[h] = score
        board.pop()
        if score > best_score:
            best_score = score
            best_move = move
    # print("AlphaBeta:", best_score)
    print("Move made by AI:", best_move.uci())
    return best_move
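# The `ttu.hash_` helper used above is not shown in this section. As a minimal
# sketch (an assumption, not the project's actual implementation), the
# transposition table could be keyed on python-chess's built-in Zobrist hash:
import chess.polyglot


def hash_(board):
    # Identical positions reached through different move orders map to the
    # same 64-bit key, which is what makes the cached scores reusable.
    return chess.polyglot.zobrist_hash(board)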
def alpha_beta(self, board, heuristic, alpha, beta, max_turn, depth, best, pline):
    """Alpha-beta search that also rebuilds the principal variation in
    `pline` and updates the history-heuristic counters on cutoffs."""
    original_best = best[0]
    if depth == 0 or board.is_game_over():
        curr_score = heuristic(board, self.color, max_turn)
        if curr_score > best[0]:
            # New best leaf: restart the principal variation from here.
            pline.clear()
            best.clear()
            best.append(curr_score)
        return curr_score
    possible_moves = get_possible_moves(board, max_turn, self.pv_line,
                                        self.maximum_depth - depth,
                                        history=self.history)
    best_score = float('-inf') if max_turn else float('inf')
    for move in possible_moves:
        board.push_uci(move.uci())
        score = self.alpha_beta(board, heuristic, alpha, beta, not max_turn,
                                depth - 1, best, pline)
        if original_best != best[0]:
            # The best leaf changed below this node, so this move is part of
            # the new principal variation.
            original_best = best[0]
            pline.append(board.pop())
        else:
            board.pop()
        if max_turn and score > best_score:
            best_score = score
            if best_score >= beta:
                if not board.is_capture(move):
                    # History heuristic: reward the quiet move that caused the
                    # cutoff, weighted by remaining depth.
                    piece = board.piece_at(move.from_square)
                    self.history[max_turn][piece.piece_type][move.to_square] += pow(2, depth)
                return best_score
            alpha = max(alpha, best_score)
        if not max_turn and score < best_score:
            best_score = score
            if best_score <= alpha:
                if not board.is_capture(move):
                    piece = board.piece_at(move.from_square)
                    self.history[max_turn][piece.piece_type][move.to_square] += pow(2, depth)
                return best_score
            beta = min(beta, best_score)
    return best_score
def alpha_beta(self, board, heuristic, alpha, beta, max_turn, current_depth, maximum_depth):
    """Plain alpha-beta search with history-heuristic move ordering."""
    if current_depth == maximum_depth or board.is_game_over():
        return heuristic(board, self.color, max_turn)
    # captures = mvvlva(board, self.color)
    # moves = [move for move in board.legal_moves if move not in captures]
    # shuffle(moves)
    # possible_moves = captures + moves
    possible_moves = get_possible_moves(board, max_turn, [], 0, history=self.history)
    best_score = float('-inf') if max_turn else float('inf')
    for move in possible_moves:
        board.push_uci(move.uci())
        score = self.alpha_beta(board, heuristic, alpha, beta, not max_turn,
                                current_depth + 1, maximum_depth)
        board.pop()
        if max_turn and score > best_score:
            best_score = score
            if best_score >= beta:
                if not board.is_capture(move):
                    piece = board.piece_at(move.from_square)
                    self.history[max_turn][piece.piece_type][move.to_square] += pow(2, current_depth)
                return best_score
            alpha = max(alpha, best_score)
        if not max_turn and score < best_score:
            best_score = score
            if best_score <= alpha:
                if not board.is_capture(move):
                    piece = board.piece_at(move.from_square)
                    self.history[max_turn][piece.piece_type][move.to_square] += pow(2, current_depth)
                return best_score
            beta = min(beta, best_score)
    return best_score
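# `get_possible_moves` is the move-ordering helper shared by all of the
# searches here but is not shown in this section. The sketch below is an
# assumption about its shape (the name and signature are taken from the calls
# above; the ordering policy is a guess): try the principal-variation move at
# this ply first, then captures, then quiet moves ranked by the history
# counters that alpha_beta bumps on cutoffs.
def get_possible_moves(board, max_turn, pv_line, ply, history=None, trans_table=None):
    # trans_table is accepted for signature compatibility but unused here.
    pv_move = pv_line[ply] if ply < len(pv_line) else None

    def history_score(move):
        if history is None or board.is_capture(move):
            return 0
        piece = board.piece_at(move.from_square)
        return history[max_turn][piece.piece_type][move.to_square]

    def order_key(move):
        # Higher tuples sort first: PV move, then captures, then history score.
        return (move == pv_move, board.is_capture(move), history_score(move))

    return sorted(board.legal_moves, key=order_key, reverse=True)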
def get_move(self, board): """ Top level function for alpha_beta :param board: Board object :return: returns a Move object to be used in chess_game.py """ current_depth = 0 # possible_moves = [move for move in board.legal_moves] # shuffle(possible_moves) possible_moves = get_possible_moves(board, True, self.pv_line, current_depth, history=self.history) best_move = None best_score = float('-inf') score_array = [best_score] for move in possible_moves: board.push_uci(move.uci()) if board.is_checkmate() and board.turn != self.color: return move score = self.alpha_beta(board, self.heuristic, float('-inf'), float('inf'), False, self.maximum_depth - 1, score_array) board.pop() if score > best_score: best_score = score best_move = move # print("AlphaBeta:",best_score) #self.pv_line.reverse() print(self.pv_line) print("Combined: ", best_move) return best_move
def get_move(self, board):
    """Top-level search that combines the transposition table with the
    principal-variation line kept in self.pv_line."""
    current_depth = 0
    # possible_moves = [move for move in board.legal_moves]
    # shuffle(possible_moves)
    possible_moves = get_possible_moves(board, True, self.pv_line, current_depth,
                                        history=self.history)
    best_move = None
    best_score = float('-inf')
    score_array = [best_score]
    for move in possible_moves:
        board.push_uci(move.uci())
        if board.is_checkmate() and board.turn != self.color:
            board.pop()  # restore the board before returning the mating move
            return move
        h = ttu.hash_(board)
        score = self.trans_table.get(h)
        if score is None:
            # Position not in the table yet: search it and cache the result.
            score = self.alpha_beta(board, self.heuristic, float('-inf'),
                                    float('inf'), False, self.maximum_depth - 1,
                                    score_array, self.pv_line)
            self.trans_table[h] = score
        board.pop()
        if score > best_score:
            best_score = score
            best_move = move
    # print("AlphaBeta:", best_score)
    self.pv_line.reverse()
    # print(self.pv_line)
    return best_move
def alpha_beta(self, board, heuristic, alpha, beta, max_turn, depth, best):
    """
    Same as Alpha Beta from PV Agent; the principal variation is tracked in
    self.pv_line.
    :param board: chess board
    :param heuristic: heuristic function
    :param alpha: alpha value
    :param beta: beta value
    :param max_turn: True if it is the maximizing player's turn
    :param depth: remaining search depth
    :param best: single-element list holding the best leaf score found so far
    :return: best score for this node
    """
    original_best = best[0]
    if depth == 0 or board.is_game_over():
        curr_score = heuristic(board, self.color, max_turn)
        if curr_score > best[0]:
            # New best leaf: restart the principal variation from here.
            self.pv_line.clear()
            best.clear()
            best.append(curr_score)
        return curr_score
    possible_moves = get_possible_moves(board, max_turn, self.pv_line,
                                        self.maximum_depth - depth,
                                        history=self.history)
    best_score = float('-inf') if max_turn else float('inf')
    for move in possible_moves:
        board.push_uci(move.uci())
        score = self.alpha_beta(board, heuristic, alpha, beta, not max_turn,
                                depth - 1, best)
        if original_best != best[0]:
            # The best leaf changed below this node, so this move belongs to
            # the new principal variation.
            original_best = best[0]
            self.pv_line.insert(0, board.pop())
        else:
            board.pop()
        if max_turn and score > best_score:
            best_score = score
            if best_score >= beta:
                if not board.is_capture(move):
                    # History heuristic: reward the quiet move that caused the
                    # cutoff, weighted by remaining depth.
                    piece = board.piece_at(move.from_square)
                    self.history[max_turn][piece.piece_type][move.to_square] += pow(2, depth)
                return best_score
            alpha = max(alpha, best_score)
        if not max_turn and score < best_score:
            best_score = score
            if best_score <= alpha:
                if not board.is_capture(move):
                    piece = board.piece_at(move.from_square)
                    self.history[max_turn][piece.piece_type][move.to_square] += pow(2, depth)
                return best_score
            beta = min(beta, best_score)
    return best_score
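# All of the methods above assume the agent carries the following state. A
# minimal constructor sketch (the class name and the defaults are assumptions;
# the attribute names are the ones the methods reference):
import chess


class AlphaBetaAgentSketch:
    def __init__(self, color, heuristic, maximum_depth=3):
        self.color = color                  # chess.WHITE or chess.BLACK
        self.heuristic = heuristic          # callable(board, color, max_turn) -> score
        self.maximum_depth = maximum_depth  # search depth in plies
        self.pv_line = []                   # principal variation from the last search
        self.trans_table = {}               # position hash -> cached score
        # History-heuristic counters indexed as [side][piece_type][to_square].
        self.history = {side: [[0] * 64 for _ in range(7)] for side in (True, False)}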