import concurrent.futures

import numpy as np

# HexBoard, dijkstra_eval, _update_board, _INF, transposition_table and
# iterative_deepening are provided elsewhere in this module.


def alphabeta_move(board: HexBoard, depth: int, is_max: bool, show_AI=False):
    """
    Set is_max to True for the BLUE player and False for the RED player.
    You can set the depth to whatever you like, but don't go too deep or the
    search will take forever. Set show_AI to True to see its scoring process.
    """
    legal_moves = board.get_move_list()
    best_score = -np.inf
    best_move = None
    for move in legal_moves:
        sim_board = _update_board(board, move, is_max)
        if sim_board.check_win(sim_board.BLUE if is_max else sim_board.RED):
            # KILLER MOVE: if a simulated move wins outright, play it no matter what.
            if show_AI:
                print(f"KILLER MOVE FOUND: {move}")
            best_move = move
            best_score = np.inf
            break
        score = alphabeta(
            sim_board, depth=depth, alpha=-np.inf, beta=np.inf, is_max=is_max
        )  # For some reason this performs better if you use is_max=False
        if show_AI:
            print(f"CURRENT SCORE: {score} for MOVE: {move}")
        if score > best_score:
            best_score = score
            best_move = move
    if show_AI:
        print(f"BEST MOVE: {best_move} with BEST SCORE: {best_score}")
    return best_move
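# Example usage (a sketch): given an existing HexBoard instance `board` with BLUE
# to move, pick and apply a move. How HexBoard itself is constructed is not shown
# in this section, so the board setup is assumed.
#
#   move = alphabeta_move(board, depth=3, is_max=True, show_AI=True)
#   if move is not None:
#       board = _update_board(board, move, True)  # apply BLUE's chosen move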
def alphabeta_Id(board: HexBoard, depth: int, alpha: float, beta: float, is_max: bool) -> float:
    # Transposition-table probe: lookup returns (hit, value, best-move list).
    hit, g, ttbm = False, 0.0, []
    try:
        (hit, g, ttbm) = transposition_table.lookup(board, board.get_board_size(), depth)
    except Exception as e:
        # print('Exception in running lookup function: ' + str(e))
        pass
    if hit:
        return g

    if depth == 0 or board.is_game_over():
        g = dijkstra_eval(board)
        transposition_table.store(board, board.get_board_size(), g, depth, ())
        return g

    legals = board.get_move_list()
    if legals:
        bm = ()
        if is_max:
            g: float = -_INF
            # Try the stored best move (if any) before the remaining legal moves.
            for move in ttbm + legals:
                updated_board: HexBoard = _update_board(board, move, is_max)
                gc = alphabeta_Id(updated_board, depth - 1, alpha, beta, not is_max)
                if gc > g:
                    bm = move
                    g = gc
                alpha = max(alpha, g)
                if beta <= alpha:
                    break
        else:  # is_max is False
            g: float = _INF
            for move in ttbm + legals:
                updated_board: HexBoard = _update_board(board, move, is_max)
                gc = alphabeta_Id(updated_board, depth - 1, alpha, beta, not is_max)
                if gc < g:
                    bm = move
                    g = gc
                beta = min(beta, g)
                if beta <= alpha:
                    break
        # Record the value and best move for the position we just searched.
        transposition_table.store(board, board.get_board_size(), g, depth, bm)
        return g
    else:
        print("NO MORE LEGAL MOVES LEFT")
        return dijkstra_eval(board)
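# The `transposition_table` object used by alphabeta_Id is not defined in this
# section. From the lookup/store calls above it is assumed to expose roughly the
# interface sketched below: lookup returns (hit, value, best-move list) and store
# records a searched value together with the depth it was searched to. This is a
# minimal, hypothetical illustration; the real table presumably derives a hashable
# key from the board itself (e.g. a Zobrist hash), whereas this sketch expects the
# caller to pass an already-hashable position key.
class _ExampleTranspositionTable:
    def __init__(self):
        self._entries = {}

    def lookup(self, key, size, depth):
        entry = self._entries.get((key, size))
        if entry is not None and entry["depth"] >= depth:
            # Only reuse values that were searched at least as deeply as requested.
            return True, entry["value"], [entry["best_move"]] if entry["best_move"] else []
        if entry is not None:
            # Too shallow to reuse the value, but the stored move still helps ordering.
            return False, entry["value"], [entry["best_move"]] if entry["best_move"] else []
        return False, 0.0, []

    def store(self, key, size, value, depth, best_move):
        self._entries[(key, size)] = {"value": value, "depth": depth, "best_move": best_move}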
def alphabeta(board: HexBoard, depth: int, alpha: float, beta: float, is_max: bool) -> float:
    if depth == 0 or board.is_game_over():
        return dijkstra_eval(board)
    legals = board.get_move_list()
    if legals:
        if is_max:
            g: float = -_INF
            for move in legals:
                updated_board: HexBoard = _update_board(board, move, is_max)
                g = max(g, alphabeta(updated_board, depth - 1, alpha, beta, is_max=False))
                alpha = max(alpha, g)
                if beta <= alpha:
                    break
        else:
            g: float = _INF
            for move in legals:
                updated_board: HexBoard = _update_board(board, move, is_max)
                g = min(g, alphabeta(updated_board, depth - 1, alpha, beta, is_max=True))
                beta = min(beta, g)
                if beta <= alpha:
                    break
        return g
    else:
        print("NO MORE LEGAL MOVES LEFT")
        return dijkstra_eval(board)
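# Example (sketch): score a position directly with a fixed-depth pruned search,
# again assuming an existing HexBoard instance `board` with BLUE to move:
#
#   score = alphabeta(board, depth=3, alpha=-np.inf, beta=np.inf, is_max=True)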
def alphabeta_move_Id(board: HexBoard, is_max: bool, show_AI=False):
    """
    Iterative-deepening version of alphabeta_move. Set is_max to True for the
    BLUE player and False for the RED player; the search depth is chosen by
    iterative_deepening rather than passed in. Set show_AI to True to see its
    scoring process.
    """
    legal_moves = board.get_move_list()
    print("num of legal moves", len(legal_moves))
    best_score = -np.inf
    best_move = None
    for move in legal_moves:
        sim_board = _update_board(board, move, is_max)
        if sim_board.check_win(sim_board.BLUE if is_max else sim_board.RED):
            # KILLER MOVE: if a simulated move wins outright, play it no matter what.
            if show_AI:
                print(f"KILLER MOVE FOUND: {move}")
            best_move = move
            best_score = np.inf
            break
        # Run the iterative-deepening search in a worker thread and block on its result.
        with concurrent.futures.ThreadPoolExecutor() as executor:
            future = executor.submit(
                iterative_deepening, sim_board, alpha=-np.inf, beta=np.inf, is_max=is_max
            )  # For some reason this performs better if you use is_max=False
            score = future.result()  # wait here for the result before continuing
        print("score ", score)
        if show_AI:
            print(f"ID CURRENT SCORE: {score} for MOVE: {move}")
        if score > best_score:
            best_score = score
            best_move = move
    if show_AI:
        print(f"BEST MOVE_ID: {best_move} with BEST SCORE: {best_score}")
    return best_move
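# `iterative_deepening` is called above but not defined in this section. A typical
# implementation repeatedly deepens a fixed-depth search; the sketch below builds
# it on top of alphabeta_Id so each deeper pass can reuse the transposition table.
# The name and the max_depth/time_budget parameters are hypothetical; only the
# call signature (board, alpha=..., beta=..., is_max=...) is taken from the code above.
import time


def _iterative_deepening_sketch(board: HexBoard, alpha: float, beta: float,
                                is_max: bool, max_depth: int = 4,
                                time_budget: float = 5.0) -> float:
    start = time.time()
    g = dijkstra_eval(board)  # fall back to the static evaluation if depth 1 never finishes
    for depth in range(1, max_depth + 1):
        if time.time() - start > time_budget:
            break  # keep the result of the last fully completed depth
        g = alphabeta_Id(board, depth, alpha, beta, is_max)
    return g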
def minimax(board: HexBoard, depth: int, is_max: bool) -> float:
    if depth == 0 or board.is_game_over():
        # board.print()  # debug only: printing every leaf is very slow
        return dijkstra_eval(board)
    legals = board.get_move_list()
    if legals:
        if is_max:
            g: float = -_INF
            for move in legals:
                updated_board: HexBoard = _update_board(board, move, is_max)
                g = max(g, minimax(updated_board, depth - 1, not is_max))
        else:
            g: float = _INF
            for move in legals:
                updated_board: HexBoard = _update_board(board, move, is_max)
                g = min(g, minimax(updated_board, depth - 1, not is_max))
    return g
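# `minimax` is the unpruned baseline: it visits the full game tree to the given
# depth, so it is mainly useful for sanity-checking alphabeta at small depths,
# e.g. (assuming an existing HexBoard instance `board`):
#
#   assert minimax(board, depth=2, is_max=True) == alphabeta(
#       board, depth=2, alpha=-np.inf, beta=np.inf, is_max=True)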