Code example #1
def normal_search(board, current_piece, current_depth, target_depth,
                  prev_move_row, prev_move_column):
    """Plain minimax search with a quiescence extension at the depth limit.

    Even depths are maximizing nodes (the computer's turn), odd depths are
    minimizing nodes.  The heuristic is always evaluated from the
    computer's perspective (``computer_piece``).

    Args:
        board: current board state (numpy array; copied before each move).
        current_piece: piece of the side to move (1 or 2; ``3 - piece``
            flips to the opponent).
        current_depth: ply counted from the root (root is 0).
        target_depth: search horizon in plies.
        prev_move_row, prev_move_column: coordinates of the move that led
            to this position, used by the quietness test.

    Returns:
        (row, column, value) — the best move and its minimax value;
        row/column are None for leaf evaluations, and (-1, -1) if no
        examined move improved on the initial bound.
    """
    # Terminal position: no further search, just score the board.
    if is_terminal_state(board, current_piece):
        return (None, None,
                heuristic_evaluation_function(board, computer_piece))

    if current_depth == target_depth:
        # At the horizon, stop only if the position is quiet; otherwise
        # extend with a bounded quiescence search (max 3 extra plies) to
        # reduce the horizon effect.
        if is_board_quiet(board, current_piece, prev_move_row,
                          prev_move_column):
            return (None, None,
                    heuristic_evaluation_function(board, computer_piece))
        return quiescence_search_alg(board, current_piece, 0, 3,
                                     prev_move_row, prev_move_column)

    maximizing = current_depth % 2 == 0
    current_value = -math.inf if maximizing else math.inf
    current_row = -1
    current_column = -1

    valid_moves = ordering_moves(board, current_piece, current_depth)

    if len(valid_moves) == 0:
        # No legal move: treated as a neutral position.
        return (None, None, 0)

    for move in valid_moves:
        board_modified = np.copy(board)
        add_piece_to_board(board_modified, move[0], move[1], current_piece)
        _, _, new_value = normal_search(
            board_modified, 3 - current_piece, current_depth + 1,
            target_depth, move[0], move[1])

        # Strict comparison keeps the first move among equally-scored ones.
        if (new_value > current_value) if maximizing \
                else (new_value < current_value):
            current_value = new_value
            current_row = move[0]
            current_column = move[1]

    return (current_row, current_column, current_value)
Code example #2
def negamax_alg(board, current_piece, color, current_depth, target_depth):
    """Negamax search: minimax expressed from the side to move.

    Args:
        board: current board state (numpy array; copied before each move).
        current_piece: piece of the side to move (1 or 2).
        color: +1 when the computer is to move, -1 for the opponent; used
            to flip the computer-perspective heuristic to the mover's view.
        current_depth: ply counted from the root.
        target_depth: search horizon in plies.

    Returns:
        (row, column, value) with the value expressed from the side to
        move; row/column are None at leaves, (-1, -1) if no move was
        selected.
    """
    from Board_Representation import get_moves

    if current_depth == target_depth or is_terminal_state(board, current_piece):
        # The heuristic is always computed for the computer, so multiply
        # by `color` to express it from the current player's perspective.
        return (None, None,
                color * heuristic_evaluation_function(board, computer_piece))

    current_value = -math.inf
    current_row = -1
    current_column = -1

    valid_moves = get_moves(board, current_piece)

    if len(valid_moves) == 0:
        # No legal move: treated as a neutral position.
        return (None, None, 0)

    for move in valid_moves:
        board_modified = np.copy(board)
        add_piece_to_board(board_modified, move[0], move[1], current_piece)
        _, _, new_value = negamax_alg(board_modified, 3 - current_piece,
                                      -color, current_depth + 1, target_depth)

        # Negamax identity: this node's score is the negation of the
        # child's best score.
        if -new_value > current_value:
            current_value = -new_value
            current_row = move[0]
            current_column = move[1]

    return (current_row, current_column, current_value)
Code example #3
def alpha_beta_pruning_alg(board, current_piece, alpha, beta, current_depth,
                           target_depth):
    """Minimax search with alpha-beta pruning.

    Even depths are maximizing nodes (the computer's turn), odd depths are
    minimizing nodes.  The heuristic is always evaluated from the
    computer's perspective (``computer_piece``).

    Args:
        board: current board state (numpy array; copied before each move).
        current_piece: piece of the side to move (1 or 2).
        alpha: best value guaranteed to the maximizer so far.
        beta: best value guaranteed to the minimizer so far.
        current_depth: ply counted from the root (root is 0).
        target_depth: search horizon in plies.

    Returns:
        (row, column, value); row/column are None at leaves, (-1, -1) if
        no move improved on the initial bound.
    """
    if current_depth == target_depth or is_terminal_state(board,
                                                          current_piece):
        return (None, None,
                heuristic_evaluation_function(board, computer_piece))

    maximizing = current_depth % 2 == 0
    current_value = -math.inf if maximizing else math.inf
    current_row = -1
    current_column = -1

    # Heuristic move ordering makes cutoffs trigger earlier.
    valid_moves = ordering_moves(board, current_piece, current_depth)

    if len(valid_moves) == 0:
        # No legal move: treated as a neutral position.
        return (None, None, 0)

    for move in valid_moves:
        board_modified = np.copy(board)
        add_piece_to_board(board_modified, move[0], move[1], current_piece)
        _, _, new_value = alpha_beta_pruning_alg(
            board_modified, 3 - current_piece, alpha, beta,
            current_depth + 1, target_depth)

        if maximizing:
            if new_value > current_value:
                current_value = new_value
                current_row = move[0]
                current_column = move[1]
            alpha = max(alpha, new_value)
        else:
            if new_value < current_value:
                current_value = new_value
                current_row = move[0]
                current_column = move[1]
            beta = min(beta, new_value)

        if alpha >= beta:
            # Remaining siblings cannot change the result at this node.
            break

    return (current_row, current_column, current_value)
Code example #4
def local_maximization(board, piece):
    """Greedy one-ply move selection.

    Tries every legal move for *piece*, scores the resulting board with
    the heuristic, and returns the coordinates of the highest-scoring
    move.  Ties keep the first move encountered.

    Args:
        board: current board state (numpy array; never mutated).
        piece: the piece to place (1 or 2).

    Returns:
        (row, column) of the best move, or (-1, -1) when there are no
        legal moves.
    """
    from Board_Representation import get_moves

    def evaluate(candidate_move):
        # Score the board that results from playing this move.
        trial_board = np.copy(board)
        add_piece_to_board(trial_board, candidate_move[0],
                           candidate_move[1], piece)
        return heuristic_evaluation_function(trial_board, piece)

    best_move = None
    best_score = -math.inf
    for candidate_move in get_moves(board, piece):
        score = evaluate(candidate_move)
        # Strict comparison keeps the earliest of equally-scored moves.
        if score > best_score:
            best_score = score
            best_move = candidate_move

    if best_move is None:
        return (-1, -1)
    return (best_move[0], best_move[1])
Code example #5
def ordering_moves(board, current_piece, depth):
    """Return the legal moves ordered by the heuristic value of the
    board they produce.

    Even (maximizing) depths get best-first order, odd (minimizing)
    depths get worst-first order, which helps alpha-beta cutoffs fire
    early.

    Args:
        board: current board state (numpy array; never mutated).
        current_piece: the piece to place (1 or 2).
        depth: ply of the node requesting the ordering; its parity
            selects ascending vs. descending order.

    Returns:
        List of moves sorted by score; empty list when there are no
        legal moves.
    """
    from Board_Representation import get_moves

    def score(move):
        # Evaluate the position that results from playing `move`.
        board_modified = np.copy(board)
        add_piece_to_board(board_modified, move[0], move[1], current_piece)
        return heuristic_evaluation_function(board_modified, current_piece)

    # Bug fix: the original sorted (move, score) PAIRS, so moves were
    # ordered lexicographically by their coordinates with the score only
    # as a tie-breaker, and `zip(*sorted(zip(...)))` raised ValueError
    # when there were no legal moves.  Sorting with key=score orders by
    # score and returns [] for an empty move list.
    return sorted(get_moves(board, current_piece), key=score,
                  reverse=(depth % 2 == 0))