Example #1
def test_ir_builder_vector_argminmax(dense_input_tensor: np.ndarray,
                                     engine: MlirJitEngine, aliases: AliasMap):
    # Build Function
    ir_builder = MLIRFunctionBuilder(
        "vector_arg_min_and_max",
        input_types=["tensor<?xi32, #CV64>"],
        return_types=["i64", "i64"],
        aliases=aliases,
    )
    (vec, ) = ir_builder.inputs
    arg_min = ir_builder.graphblas.reduce_to_scalar(vec, "argmin")
    arg_max = ir_builder.graphblas.reduce_to_scalar(vec, "argmax")
    ir_builder.return_vars(arg_min, arg_max)
    vector_arg_min_and_max = ir_builder.compile(engine=engine,
                                                passes=GRAPHBLAS_PASSES)

    # Test Results
    input_tensor = sparsify_array(dense_input_tensor, [True])
    assert input_tensor.verify()
    res_min, res_max = vector_arg_min_and_max(input_tensor)

    minimum = np.min(dense_input_tensor)
    maximum = np.max(dense_input_tensor)

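    # The argmin/argmax reductions only consider stored values; emulate that in
    # NumPy by replacing the implicit zeros with values that can never be chosen
    # before calling np.argmin / np.argmax.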
    dwimmed_dense_input_tensor = np.copy(dense_input_tensor)
    dwimmed_dense_input_tensor[dwimmed_dense_input_tensor == 0] = maximum + 1
    assert res_min == np.argmin(dwimmed_dense_input_tensor)

    dwimmed_dense_input_tensor = np.copy(dense_input_tensor)
    dwimmed_dense_input_tensor[dwimmed_dense_input_tensor == 0] = minimum - 1
    assert res_max == np.argmax(dwimmed_dense_input_tensor)
Example #2
def test_haversine_distance():
    # Sanity check
    # https://www.igismap.com/haversine-formula-calculate-geographic-distance-earth/
    # Nebraska
    # v1 = Vector.from_values([0], [41.507483])
    # w1 = Vector.from_values([0], [-99.436554])
    # Kansas
    # v2 = Vector.from_values([0], [38.504048])
    # w2 = Vector.from_values([0], [-98.315949])

    # Build a function to call the haversine_distance utility
    from mlir_graphblas.mlir_builder import MLIRFunctionBuilder

    irb = MLIRFunctionBuilder(
        "haversine_distance",
        input_types=[
            "tensor<?xf64, #CV64>",
            "tensor<?xf64, #CV64>",
            "tensor<?xf64, #CV64>",
            "tensor<?xf64, #CV64>",
        ],
        return_types=["tensor<?xf64, #CV64>"],
        aliases=mlalgo._build_common_aliases(),
    )
    v1, w1, v2, w2 = irb.inputs
    result = algo_utils.haversine_distance(irb, v1, w1, v2, w2)
    irb.return_vars(result)
    compiled_func = irb.compile()

    # haversine_distance(v1, w1, v2, w2)[0].new().isclose(347.3, abs_tol=0.1)
    v1 = MLIRSparseTensor(
        np.array([[0]], dtype=np.uint64),
        np.array([41.507483], dtype=np.float64),
        np.array([1], dtype=np.uint64),
        np.array([True], dtype=np.bool8),
    )
    w1 = MLIRSparseTensor(
        np.array([[0]], dtype=np.uint64),
        np.array([-99.436554], dtype=np.float64),
        np.array([1], dtype=np.uint64),
        np.array([True], dtype=np.bool8),
    )
    v2 = MLIRSparseTensor(
        np.array([[0]], dtype=np.uint64),
        np.array([38.504048], dtype=np.float64),
        np.array([1], dtype=np.uint64),
        np.array([True], dtype=np.bool8),
    )
    w2 = MLIRSparseTensor(
        np.array([[0]], dtype=np.uint64),
        np.array([-98.315949], dtype=np.float64),
        np.array([1], dtype=np.uint64),
        np.array([True], dtype=np.bool8),
    )

    dist = compiled_func(v1, w1, v2, w2)
    assert math.isclose(dist.values[0], 347.3, abs_tol=0.1)
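
As a cross-check, the same distance can be computed with a plain-NumPy haversine formula (illustrative sketch only, assuming a mean Earth radius of about 6371 km; this is not the library's implementation):

import math
import numpy as np

def haversine_km(lat1, lon1, lat2, lon2, radius_km=6371.0):
    # Convert degrees to radians and apply the haversine formula.
    lat1, lon1, lat2, lon2 = map(np.radians, (lat1, lon1, lat2, lon2))
    a = (np.sin((lat2 - lat1) / 2) ** 2
         + np.cos(lat1) * np.cos(lat2) * np.sin((lon2 - lon1) / 2) ** 2)
    return 2 * radius_km * np.arcsin(np.sqrt(a))

# Nebraska -> Kansas, same coordinates as the test above.
assert math.isclose(haversine_km(41.507483, -99.436554, 38.504048, -98.315949),
                    347.3, abs_tol=0.5)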
Example #3
def test_builder_attribute(engine: MlirJitEngine, aliases: AliasMap):
    ir_builder = MLIRFunctionBuilder(
        "no_op",
        input_types=["tensor<?x?xf64, #CSR64>"],
        return_types=("tensor<?x?xf64, #CSR64>", ),
        aliases=aliases,
    )
    (input_var, ) = ir_builder.inputs
    ir_builder.return_vars(input_var)

    no_op = ir_builder.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    assert no_op.builder == ir_builder
Example #4
def test_matrix_to_from_coo(engine: MlirJitEngine, aliases: AliasMap):
    irb = MLIRFunctionBuilder(
        "matrix_to_coo",
        input_types=["tensor<?x?xf64, #CSR64>"],
        return_types=["tensor<?x?xindex>", "tensor<?xf64>"],
        aliases=aliases,
    )
    (tensor, ) = irb.inputs
    indices, values = irb.graphblas.to_coo(tensor)
    irb.return_vars(indices, values)
    matrix_to_coo = irb.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test results
    input_indices = np.array(
        [[0, 1], [0, 2], [1, 0], [1, 3], [2, 0], [2, 4], [6, 2]],
        dtype=np.uint64)
    input_values = np.array([-100, 200, 300, 400, 175, 222, 333.333],
                            dtype=np.float64)
    sizes = np.array([7, 5], dtype=np.uint64)
    sparsity = np.array([False, True], dtype=np.bool8)
    graph = MLIRSparseTensor(input_indices, input_values, sizes, sparsity)
    graph.verify()

    new_indices, new_values = matrix_to_coo(graph)

    np.testing.assert_equal(input_indices, new_indices)
    np.testing.assert_allclose(input_values, new_values)

    irb = MLIRFunctionBuilder(
        "matrix_from_coo",
        input_types=["tensor<?x?xindex>", "tensor<?xf64>", "index", "index"],
        return_types=["tensor<?x?xf64, #CSR64>"],
        aliases=aliases,
    )
    (indices, values, nrows, ncols) = irb.inputs
    tensor = irb.graphblas.from_coo(indices, values, (nrows, ncols))
    irb.return_vars(tensor)
    matrix_from_coo = irb.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test results
    g2 = matrix_from_coo(input_indices, input_values, 7, 5)
    g2.verify()

    g2_indices, g2_values = matrix_to_coo(g2)

    np.testing.assert_equal(input_indices, g2_indices)
    np.testing.assert_allclose(input_values, g2_values)
Example #5
def test_ir_select_random(engine: MlirJitEngine, aliases: AliasMap):
    # Build Function
    ir_builder = MLIRFunctionBuilder(
        "test_select_random",
        input_types=["tensor<?x?xf64, #CSR64>", "i64", "i64"],
        return_types=["tensor<?x?xf64, #CSR64>"],
        aliases=aliases,
    )
    M, n, context = ir_builder.inputs
    filtered = ir_builder.graphblas.matrix_select_random(
        M, n, context, choose_n="choose_first")
    ir_builder.return_vars(filtered)
    test_select_random = ir_builder.compile(engine=engine,
                                            passes=GRAPHBLAS_PASSES)

    # Test Results
    dense_input_tensor = np.array(
        [
            [1, 0, 0, 0, 0],
            [-9, 2, 3, 0, 0],
            [0, 0, 4, 1, 1],
            [0, 0, 5, 6, 0],
            [0, 0, 0, -9, 0],
        ],
        dtype=np.float64,
    )
    input_tensor = sparsify_array(dense_input_tensor, [False, True])
    assert input_tensor.verify()

    result = test_select_random(input_tensor, 2, 0xB00)
    assert result.verify()
    dense_result = result.toarray()

    # choose_first always keeps the first N stored elements in each row
    expected_output_tensor = np.array(
        [
            [1, 0, 0, 0, 0],
            [-9, 2, 0, 0, 0],
            [0, 0, 4, 1, 0],
            [0, 0, 5, 6, 0],
            [0, 0, 0, -9, 0],
        ],
        dtype=np.float64,
    )

    np.testing.assert_equal(expected_output_tensor, dense_result)
Example #6
def test_vector_to_from_coo(engine: MlirJitEngine, aliases: AliasMap):
    irb = MLIRFunctionBuilder(
        "vector_to_coo",
        input_types=["tensor<?xf64, #CV64>"],
        return_types=["tensor<?x?xindex>", "tensor<?xf64>"],
        aliases=aliases,
    )
    (tensor, ) = irb.inputs
    indices, values = irb.graphblas.to_coo(tensor)
    irb.return_vars(indices, values)
    vector_to_coo = irb.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test results
    input_indices = np.array([[1], [6], [8]], dtype=np.uint64)
    input_values = np.array([19.234, 1.1, 2.2], dtype=np.float64)
    sizes = np.array([20], dtype=np.uint64)
    sparsity = np.array([True], dtype=np.bool8)
    vec = MLIRSparseTensor(input_indices, input_values, sizes, sparsity)
    vec.verify()

    new_indices, new_values = vector_to_coo(vec)

    np.testing.assert_equal(input_indices, new_indices)
    np.testing.assert_allclose(input_values, new_values)

    irb = MLIRFunctionBuilder(
        "vector_from_coo",
        input_types=["tensor<?x?xindex>", "tensor<?xf64>", "index"],
        return_types=["tensor<?xf64, #CV64>"],
        aliases=aliases,
    )
    (indices, values, size) = irb.inputs
    tensor = irb.graphblas.from_coo(indices, values, (size, ))
    irb.return_vars(tensor)
    vector_from_coo = irb.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test results
    v2 = vector_from_coo(input_indices, input_values, 20)
    v2.verify()

    v2_indices, v2_values = vector_to_coo(v2)

    np.testing.assert_equal(input_indices, v2_indices)
    np.testing.assert_allclose(input_values, v2_values)
Example #7
def test_ir_gt_thunk(engine: MlirJitEngine, aliases: AliasMap):
    # Build Function
    ir_builder = MLIRFunctionBuilder(
        "gt_thunk",
        input_types=["tensor<?x?xf64, #CSR64>", "f64"],
        return_types=["tensor<?x?xf64, #CSR64>"],
        aliases=aliases,
    )
    M, threshold = ir_builder.inputs
    twelve_scalar = ir_builder.arith.constant(12, "f64")
    thirty_four_scalar = ir_builder.arith.constant(34, "f64")
    M2 = ir_builder.graphblas.apply(M, "div", left=twelve_scalar)
    M3 = ir_builder.graphblas.apply(M2, "div", right=thirty_four_scalar)
    filtered = ir_builder.graphblas.select(M3, "gt", threshold)
    ir_builder.return_vars(filtered)
    gt_thunk = ir_builder.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test Results
    dense_input_tensor = np.array(
        [
            [1, 0, 0, 0, 0],
            [-9, 2, 3, 0, 0],
            [0, 0, 4, 0, 0],
            [0, 0, 5, 6, 0],
            [0, 0, 0, -9, 0],
        ],
        dtype=np.float64,
    )
    dense_input_tensor_mask = dense_input_tensor.astype(bool)
    input_tensor = sparsify_array(dense_input_tensor, [False, True])
    assert input_tensor.verify()

    for threshold in np.unique(dense_input_tensor):
        result = gt_thunk(input_tensor, threshold)
        assert result.verify()
        dense_result = result.toarray()

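        # Mirror apply(M, "div", left=12) then apply(M2, "div", right=34) on the
        # stored values: x -> 12 / x -> 12 / (34 * x); the select then zeros out
        # entries <= threshold.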
        expected_dense_result = np.copy(dense_input_tensor)
        expected_dense_result[dense_input_tensor_mask] /= 12.0
        expected_dense_result[dense_input_tensor_mask] **= -1
        expected_dense_result[dense_input_tensor_mask] /= 34.0
        expected_dense_result[expected_dense_result <= threshold] = 0

        assert np.all(dense_result == expected_dense_result)
Example #8
def test_ir_select_random_uniform(engine: MlirJitEngine, aliases: AliasMap):
    # Build Function
    ir_builder = MLIRFunctionBuilder(
        "test_select_random_uniform",
        input_types=["tensor<?x?xf64, #CSR64>", "i64", "!llvm.ptr<i8>"],
        return_types=["tensor<?x?xf64, #CSR64>"],
        aliases=aliases,
    )
    M, n, context = ir_builder.inputs
    filtered = ir_builder.graphblas.matrix_select_random(
        M, n, context, choose_n="choose_uniform")
    ir_builder.return_vars(filtered)
    test_select_random_uniform = ir_builder.compile(engine=engine,
                                                    passes=GRAPHBLAS_PASSES)

    # Test Results
    dense_input_tensor = np.array(
        [
            [1, 0, 0, 0, 0],
            [-9, 2, 3, 0, 0],
            [0, 0, 4, 1, 1],
            [0, 0, 5, 6, 0],
            [0, 0, 0, -9, 0],
        ],
        dtype=np.float64,
    )
    input_tensor = sparsify_array(dense_input_tensor, [False, True])
    assert input_tensor.verify()

    rng = ChooseUniformContext(seed=2)
    result = test_select_random_uniform(input_tensor, 2, rng)
    assert result.verify()
    dense_result = result.toarray()

    expected_row_count = np.minimum((dense_input_tensor != 0).sum(axis=1), 2)
    actual_row_count = (dense_result != 0).sum(axis=1)
    np.testing.assert_equal(expected_row_count, actual_row_count)

    # check for correct truncation: pointers[1][-1] is the number of stored
    # entries, which must match the lengths of the index and value arrays
    assert len(result.indices[1]) == result.pointers[1][-1]
    assert len(result.values) == result.pointers[1][-1]
Example #9
def test_ir_transpose(
    engine: MlirJitEngine,
    aliases: AliasMap,
):
    # Build Functions
    ir_builder = MLIRFunctionBuilder(
        "transpose_wrapper",
        input_types=["tensor<?x?xf64, #CSR64>"],
        return_types=["tensor<?x?xf64, #CSC64>"],
        aliases=aliases,
    )
    (input_matrix, ) = ir_builder.inputs

    output_matrix = ir_builder.graphblas.transpose(input_matrix,
                                                   "tensor<?x?xf64, #CSC64>")
    ir_builder.return_vars(output_matrix)
    transpose_wrapper = ir_builder.compile(engine=engine,
                                           passes=GRAPHBLAS_PASSES)

    # Test Results
    dense_input_matrix = np.array(
        [
            [0, 7, 7, 0, 7],
            [0, 1, 7, 0, 0],
        ],
        dtype=np.float64,
    )
    input_matrix = sparsify_array(dense_input_matrix, [False, True])
    assert input_matrix.verify()

    output_matrix = transpose_wrapper(input_matrix)
    assert output_matrix.verify()

    output_matrix = output_matrix.toarray()

    expected_output_matrix = dense_input_matrix.T

    assert np.all(expected_output_matrix == output_matrix)
Example #10
def test_ir_builder_for_loop_float_iter(engine: MlirJitEngine,
                                        aliases: AliasMap):
    # Build Function

    ir_builder = MLIRFunctionBuilder("times_three",
                                     input_types=["f64"],
                                     return_types=["f64"],
                                     aliases=aliases)
    (input_var, ) = ir_builder.inputs
    zero_f64 = ir_builder.arith.constant(0.0, "f64")
    total = ir_builder.new_var("f64")

    with ir_builder.for_loop(0, 3, iter_vars=[(total, zero_f64)]) as for_vars:
        updated_sum = ir_builder.arith.addf(input_var, total)
        for_vars.yield_vars(updated_sum)

    result_var = for_vars.returned_variable[0]
    ir_builder.return_vars(result_var)

    assert ir_builder.get_mlir()

    # Test Compiled Function
    times_three = ir_builder.compile(engine=engine)
    assert np.isclose(times_three(1.3), 3.9)
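
For reference, the loop built above behaves like this plain-Python sketch (illustrative only; the builder emits MLIR, not Python):

def times_three_reference(x):
    total = 0.0
    for _ in range(3):  # mirrors for_loop(0, 3, ...) with iteration variable `total`
        total += x
    return total

assert abs(times_three_reference(1.3) - 3.9) < 1e-12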
Example #11
def test_ir_builder_convert_layout_wrapper(engine: MlirJitEngine,
                                           aliases: AliasMap):
    ir_builder = MLIRFunctionBuilder(
        "convert_layout_wrapper",
        input_types=["tensor<?x?xf64, #CSR64>"],
        return_types=("tensor<?x?xf64, #CSC64>", ),
        aliases=aliases,
    )
    (input_var, ) = ir_builder.inputs
    convert_layout_result = ir_builder.graphblas.convert_layout(
        input_var, "tensor<?x?xf64, #CSC64>")
    ir_builder.return_vars(convert_layout_result)

    assert ir_builder.get_mlir()

    # Test Compiled Function
    convert_layout_wrapper_callable = ir_builder.compile(
        engine=engine, passes=GRAPHBLAS_PASSES)

    indices = np.array(
        [
            [1, 2],
            [4, 3],
        ],
        dtype=np.uint64,
    )
    values = np.array([1.2, 4.3], dtype=np.float64)
    sizes = np.array([8, 8], dtype=np.uint64)
    sparsity = np.array([False, True], dtype=np.bool8)

    input_tensor = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert input_tensor.verify()

    dense_input_tensor = np.zeros([8, 8], dtype=np.float64)
    dense_input_tensor[1, 2] = 1.2
    dense_input_tensor[4, 3] = 4.3
    assert np.isclose(dense_input_tensor, input_tensor.toarray()).all()
    output_tensor = convert_layout_wrapper_callable(input_tensor)
    assert output_tensor.verify()

    assert np.isclose(dense_input_tensor, output_tensor.toarray()).all()
Example #12
def test_ir_builder_bad_input_multi_value_mlir_variable():
    ir_builder = MLIRFunctionBuilder("some_func",
                                     input_types=[],
                                     return_types=("i8", ))

    iter_i8_var = ir_builder.new_var("i8")
    lower_i8_var = ir_builder.arith.constant(1, "i8")
    iter_i64_var = ir_builder.new_var("i64")
    lower_i64_var = ir_builder.arith.constant(1, "i64")
    with ir_builder.for_loop(0,
                             1,
                             1,
                             iter_vars=[(iter_i8_var, lower_i8_var),
                                        (iter_i64_var, lower_i64_var)
                                        ]) as for_vars:
        constant_i8_var = ir_builder.arith.constant(8, "i8")
        constant_i64_var = ir_builder.arith.constant(64, "i64")

        # Raise when yielding too few values
        with pytest.raises(ValueError,
                           match="Expected 2 yielded values, but got 1."):
            for_vars.yield_vars(constant_i8_var)

        # Raise when yielding too many values
        with pytest.raises(ValueError,
                           match="Expected 2 yielded values, but got 3."):
            for_vars.yield_vars(constant_i8_var, constant_i64_var,
                                lower_i64_var)

        # Raise when yielding incorrect types
        with pytest.raises(TypeError, match=" have different types."):
            for_vars.yield_vars(constant_i64_var, constant_i8_var)

        for_vars.yield_vars(constant_i8_var, constant_i64_var)

    # Raise when returning a multi-valued variable
    with pytest.raises(TypeError, match=" is not a valid return value"):
        ir_builder.return_vars(for_vars.returned_variable)

    # Raise when using a multi-valued variable as an operand
    assigned_to_i8_var = ir_builder.new_var("i8")
    c1_i8_var = ir_builder.arith.constant(1, "i8")
    with pytest.raises(
            TypeError,
            match=
            "Cannot access MLIRTuple .+ directly. Use index notation to access an element.",
    ):
        ir_builder.add_statement(
            f"{assigned_to_i8_var.assign} = arith.addi {c1_i8_var}, {for_vars.returned_variable} : i8"
        )

    with pytest.raises(
            TypeError,
            match=
            "Cannot access MLIRTuple .+ directly. Use index notation to access an element.",
    ):
        ir_builder.arith.addi(c1_i8_var, for_vars.returned_variable)

    with pytest.raises(
            TypeError,
            match=
            "Cannot access MLIRTuple .+ directly. Use index notation to access an element.",
    ):
        ir_builder.arith.addi(for_vars.returned_variable, c1_i8_var)

    # Raise when indexing a multi-valued variable with an out-of-bounds integer index
    with pytest.raises(IndexError):
        for_vars.returned_variable[999]

    # Raise when indexing into a multi-valued variable with a slice
    with pytest.raises(TypeError, match="Expects int, not"):
        ir_builder.return_vars(for_vars.returned_variable[:])

    # Raise when returning a non-MLIRVar
    with pytest.raises(
            TypeError,
            match="10 is not a valid return value, expected MLIRVar."):
        ir_builder.return_vars(10)

    # Raise when returning a value incompatible with the declared return type.
    c1_i64_var = ir_builder.arith.constant(1, "i64")
    with pytest.raises(
            TypeError,
            match=
            r"Return type of MLIRVar\(name=.+, type=i64\) does not match i8",
    ):
        ir_builder.return_vars(c1_i64_var)

    # Raise when iterator variables have incompatible types
    with pytest.raises(TypeError, match=" have different types."):
        with ir_builder.for_loop(
                0,
                1,
                1,
                iter_vars=[(iter_i8_var, lower_i64_var),
                           (iter_i64_var, lower_i8_var)],
        ) as bad_for_vars:
            pass

    ir_builder.return_vars(for_vars.returned_variable[0])
Example #13
def test_ir_select_random_weighted(engine: MlirJitEngine, aliases: AliasMap):
    # Build Function
    ir_builder = MLIRFunctionBuilder(
        "test_select_random_weighted",
        input_types=["tensor<?x?xf64, #CSR64>", "i64", "!llvm.ptr<i8>"],
        return_types=["tensor<?x?xf64, #CSR64>"],
        aliases=aliases,
    )
    M, n, context = ir_builder.inputs
    filtered = ir_builder.graphblas.matrix_select_random(
        M, n, context, choose_n="choose_weighted")
    ir_builder.return_vars(filtered)
    test_select_random_weighted = ir_builder.compile(engine=engine,
                                                     passes=GRAPHBLAS_PASSES)

    # Test Results
    # for weighted sampling to make sense, weights must all be >= 0
    dense_input_tensor = np.array(
        [
            [1, 0, 0, 0, 0],
            [1, 2, 4, 0, 0],  # using this row for stats check below
            [0, 0, 1, 100, 1],
            [0, 0, 5, 6, 0],
            [0, 0, 0, 1, 0],
        ],
        dtype=np.float64,
    )
    input_tensor = sparsify_array(dense_input_tensor, [False, True])
    assert input_tensor.verify()

    # basic checks
    rng = ChooseWeightedContext(seed=2)
    result = test_select_random_weighted(input_tensor, 2, rng)
    assert result.verify()
    dense_result = result.toarray()

    expected_row_count = np.minimum((dense_input_tensor != 0).sum(axis=1), 2)
    actual_row_count = (dense_result != 0).sum(axis=1)
    np.testing.assert_equal(expected_row_count, actual_row_count)

    # rough statistical check of row 1
    counts = defaultdict(lambda: 0)
    n = 100
    for i in range(n):
        result = test_select_random_weighted(input_tensor, 1, rng)
        assert result.verify()
        dense_result = result.toarray()
        choice = np.argmax(dense_result[1])
        counts[choice] += 1

    assert sorted(counts.keys()) == [0, 1, 2]
    row_1 = dense_input_tensor[1]
    row_1_sum = row_1.sum()
    print(counts)
    for key, actual_count in counts.items():
        prob = row_1[key] / row_1_sum
        expected_count = prob * n
        # binomial standard deviation
        stddev = (n * prob * (1 - prob))**0.5
        assert abs(expected_count - actual_count) < (
            2 * stddev
        ), f"key: {key}, expected: {expected_count}, actual: {actual_count}, stdev: {stddev}"
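
The tolerance in the assertion above works out to the following concrete numbers for row 1 (stored weights 1, 2, 4 and n = 100), derived directly from the test's own formula:

import math

weights = [1.0, 2.0, 4.0]  # stored values of row 1 in dense_input_tensor
n = 100
for key, w in enumerate(weights):
    prob = w / sum(weights)                           # 1/7, 2/7, 4/7
    expected = prob * n                               # ~14.3, ~28.6, ~57.1
    tolerance = 2 * math.sqrt(n * prob * (1 - prob))  # two binomial std devs
    print(key, round(expected, 1), round(tolerance, 1))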
Example #14
def test_ir_diag(
    matrix_type_template: str,
    vector_type_template: str,
    mlir_type: str,
    engine: MlirJitEngine,
    aliases: AliasMap,
):
    matrix_type = matrix_type_template.format(scalar_type=mlir_type)
    vector_type = vector_type_template.format(scalar_type=mlir_type)
    np_type = MLIR_TYPE_TO_NP_TYPE[mlir_type]

    # Build Functions
    ir_builder = MLIRFunctionBuilder(
        f"diag_func_{mlir_type}",
        input_types=[vector_type, matrix_type],
        return_types=[
            matrix_type,
            vector_type,
        ],
        aliases=aliases,
    )
    (input_vector, input_matrix) = ir_builder.inputs

    output_matrix = ir_builder.graphblas.diag(input_vector, matrix_type)
    output_vector = ir_builder.graphblas.diag(input_matrix, vector_type)
    ir_builder.return_vars(output_matrix, output_vector)
    diag_func = ir_builder.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test Results
    dense_input_vector = np.array(
        [0, 0, 0, 1, 0, -2, 0, 0],
        dtype=np_type,
    )
    input_vector = sparsify_array(dense_input_vector, [True])
    assert input_vector.verify()
    dense_input_matrix = np.array(
        [
            [0, 7, 7, 0, 7],
            [0, 1, 7, 0, 0],
            [0, 1, 0, 7, 0],
            [0, 7, 0, 2, 0],
            [7, 7, 0, 0, 0],
        ],
        dtype=np_type,
    )
    input_matrix = sparsify_array(dense_input_matrix, [False, True])
    assert input_matrix.verify()
    matrix_type_is_csc = [1, 0] == SparseTensorType.parse(
        matrix_type, aliases).encoding.ordering
    if matrix_type_is_csc:
        input_matrix = engine.csr_to_csc(input_matrix)

    output_matrix, output_vector = diag_func(input_vector, input_matrix)
    assert output_matrix.verify()
    assert output_vector.verify()

    output_matrix = output_matrix.toarray()
    output_vector = output_vector.toarray()

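    # graphblas.diag maps a vector to a diagonal matrix and a matrix to its main
    # diagonal, so np.diagflat / np.diag provide the dense reference results.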
    expected_output_matrix = np.diagflat(dense_input_vector)
    expected_output_vector = np.diag(dense_input_matrix)

    assert np.all(expected_output_matrix == output_matrix)
    assert np.all(expected_output_vector == output_vector)
Example #15
def test_ir_reduce_to_vector(
    input_type_template: str,
    reduce_rows_output_type_template: str,
    reduce_columns_output_type_template: str,
    mlir_type: str,
    engine: MlirJitEngine,
    aliases: AliasMap,
):
    input_type = input_type_template.format(scalar_type=mlir_type)
    reduce_rows_output_type = reduce_rows_output_type_template.format(
        scalar_type=mlir_type)
    reduce_columns_output_type = reduce_columns_output_type_template.format(
        scalar_type=mlir_type)
    np_type = MLIR_TYPE_TO_NP_TYPE[mlir_type]

    # Build Functions
    ir_builder = MLIRFunctionBuilder(
        f"reduce_func_{mlir_type}",
        input_types=[input_type],
        return_types=[
            reduce_rows_output_type,
            "tensor<?xi64, #CV64>",
            reduce_rows_output_type,
            "tensor<?xi64, #CV64>",
            "tensor<?xi64, #CV64>",
            "tensor<?xi64, #CV64>",
        ],
        aliases=aliases,
    )
    (matrix, ) = ir_builder.inputs

    reduced_rows = ir_builder.graphblas.reduce_to_vector(matrix, "plus", 1)
    reduced_columns = ir_builder.graphblas.reduce_to_vector(matrix, "count", 0)

    zero_scalar = ir_builder.arith.constant(0, mlir_type)
    reduced_rows_clamped = ir_builder.graphblas.apply(reduced_rows,
                                                      "min",
                                                      right=zero_scalar)
    reduced_rows_clamped = ir_builder.graphblas.apply(reduced_rows_clamped,
                                                      "identity")

    reduced_columns_abs = ir_builder.graphblas.apply(reduced_columns, "abs")
    reduced_columns_abs = ir_builder.graphblas.apply(reduced_columns_abs,
                                                     "identity")
    reduced_columns_negative_abs = ir_builder.graphblas.apply(
        reduced_columns, "ainv")
    reduced_columns_negative_abs = ir_builder.graphblas.apply(
        reduced_columns_negative_abs, "identity")

    reduced_rows_argmin = ir_builder.graphblas.reduce_to_vector(
        matrix, "argmin", 1)
    reduced_columns_argmax = ir_builder.graphblas.reduce_to_vector(
        matrix, "argmax", 0)

    ir_builder.return_vars(
        reduced_rows,
        reduced_columns,
        reduced_rows_clamped,
        reduced_columns_negative_abs,
        reduced_rows_argmin,
        reduced_columns_argmax,
    )
    reduce_func = ir_builder.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test Results
    dense_input_tensor = np.array(
        [
            [1, 0, 0, 0],
            [-2, 0, 3, -4],
            [0, 0, 0, 0],
            [0, 0, 5, -6],
            [0, -7, 0, 8],
        ],
        dtype=np_type,
    )
    input_tensor = sparsify_array(dense_input_tensor, [False, True])
    input_type_is_csc = [1, 0] == SparseTensorType.parse(
        input_type, aliases).encoding.ordering
    if input_type_is_csc:
        input_tensor = engine.csr_to_csc(input_tensor)

    (
        reduced_rows,
        reduced_columns,
        reduced_rows_clamped,
        reduced_columns_negative_abs,
        reduced_rows_argmin,
        reduced_columns_argmax,
    ) = reduce_func(input_tensor)

    assert reduced_rows.verify()
    assert reduced_columns.verify()
    assert reduced_rows_clamped.verify()
    assert reduced_columns_negative_abs.verify()
    assert reduced_rows_argmin.verify()
    assert reduced_columns_argmax.verify()

    reduced_rows = reduced_rows.toarray()
    reduced_columns = reduced_columns.toarray()
    reduced_rows_clamped = reduced_rows_clamped.toarray()
    reduced_columns_negative_abs = reduced_columns_negative_abs.toarray()
    reduced_rows_argmin = reduced_rows_argmin.toarray()
    reduced_columns_argmax = reduced_columns_argmax.toarray()

    expected_reduced_rows = dense_input_tensor.sum(axis=1)
    expected_reduced_columns = (
        dense_input_tensor.astype(bool).sum(axis=0).astype(np_type))

    expected_reduced_rows_clamped = np.copy(expected_reduced_rows)
    expected_reduced_rows_clamped[expected_reduced_rows_clamped > 0] = 0

    expected_reduced_columns_negative_abs = -np.abs(expected_reduced_columns)

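    # The argmin/argmax reductions only consider stored values; mask out the
    # implicit zeros with values that can never win before using np.argmin/np.argmax.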
    M = dense_input_tensor.copy()
    M[dense_input_tensor == 0] = dense_input_tensor.max() + 1
    expected_reduced_rows_argmin = np.argmin(M, axis=1)

    M = dense_input_tensor.copy()
    M[dense_input_tensor == 0] = dense_input_tensor.min() - 1
    expected_reduced_columns_argmax = np.argmax(M, axis=0)

    assert np.all(reduced_rows == expected_reduced_rows)
    assert np.all(reduced_columns == expected_reduced_columns)
    assert np.all(reduced_rows_clamped == expected_reduced_rows_clamped)
    assert np.all(
        reduced_columns_negative_abs == expected_reduced_columns_negative_abs)
    assert np.all(reduced_rows_argmin == expected_reduced_rows_argmin)
    assert np.all(reduced_columns_argmax == expected_reduced_columns_argmax)
Example #16
def test_ir_builder_for_loop_user_specified_vars(engine: MlirJitEngine):
    # Build Function

    lower_index = 3
    upper_index = 9
    delta_index = 2
    lower_i64 = 5
    delta_i64 = 7

    # this expected_sum calculation mirrors the loop the IR builder generates below
    expected_sum = 7
    index_iterator = range(lower_index, upper_index, delta_index)
    i64_iterator = itertools.count(lower_i64, delta_i64)
    for iter_index, iter_i64 in zip(index_iterator, i64_iterator):
        expected_sum += lower_index * upper_index * delta_index
        expected_sum += lower_i64 * delta_i64
        expected_sum += iter_index * iter_i64
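    # With these constants the loop runs for indices 3, 5, 7 paired with i64
    # values 5, 12, 19, so expected_sum = 7 + 3 * (54 + 35) + (15 + 60 + 133) = 482.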

    # Build IR
    ir_builder = MLIRFunctionBuilder(
        "add_user_specified_vars",
        input_types=["i64"],
        return_types=["i64"],
    )
    (input_var, ) = ir_builder.inputs
    total = ir_builder.new_var("i64")

    lower_index_var = ir_builder.arith.constant(lower_index, "index")
    upper_index_var = ir_builder.arith.constant(upper_index, "index")
    delta_index_var = ir_builder.arith.constant(delta_index, "index")
    lower_i64_var = ir_builder.arith.constant(lower_i64, "i64")
    delta_i64_var = ir_builder.arith.constant(delta_i64, "i64")
    iter_i64_var = ir_builder.new_var("i64")

    with ir_builder.for_loop(
            lower_index_var,
            upper_index_var,
            delta_index_var,
            iter_vars=[(iter_i64_var, lower_i64_var), (total, input_var)],
    ) as for_vars:
        assert lower_index_var == for_vars.lower_var_index
        assert upper_index_var == for_vars.upper_var_index
        assert delta_index_var == for_vars.step_var_index
        assert [iter_i64_var, total] == for_vars.iter_vars
        prod_of_index_vars_0 = ir_builder.arith.muli(for_vars.lower_var_index,
                                                     for_vars.upper_var_index)
        prod_of_index_vars_1 = ir_builder.arith.muli(prod_of_index_vars_0,
                                                     for_vars.step_var_index)
        prod_of_index_vars = ir_builder.arith.index_cast(
            prod_of_index_vars_1, "i64")
        prod_of_i64_vars = ir_builder.arith.muli(lower_i64_var, delta_i64_var)
        iter_index_i64 = ir_builder.arith.index_cast(for_vars.iter_var_index,
                                                     "i64")
        prod_of_iter_vars = ir_builder.arith.muli(iter_index_i64, iter_i64_var)
        updated_sum_0 = ir_builder.arith.addi(total, prod_of_index_vars)
        updated_sum_1 = ir_builder.arith.addi(updated_sum_0, prod_of_i64_vars)
        updated_sum = ir_builder.arith.addi(updated_sum_1, prod_of_iter_vars)

        incremented_iter_i64_var = ir_builder.arith.addi(
            iter_i64_var, delta_i64_var)
        for_vars.yield_vars(incremented_iter_i64_var, updated_sum)

    result_var = for_vars.returned_variable[1]
    ir_builder.return_vars(result_var)

    assert (
        ir_builder.get_mlir()
    )  # this generated MLIR is easier to read than the above IR builder calls.

    # Test Compiled Function
    func = ir_builder.compile(engine=engine)

    calculated_sum = func(7)
    assert np.isclose(calculated_sum, expected_sum)