Пример #1
0
def test_mssp(special_passes):
    """Multi-source shortest path over a 7-node weighted digraph.

    Builds a CSR adjacency matrix, a 2x7 sparse "frontier" matrix selecting
    start nodes #1 and #3, and checks the per-source distance vectors.
    """
    # This must be in sorted-for-CSR format. Random order breaks the constructor in strange ways.
    # fmt: off
    indices = np.array(
        [[0, 1], [0, 3], [1, 4], [1, 6], [2, 5], [3, 0], [3, 2], [4, 5],
         [5, 1], [6, 2], [6, 3], [6, 4]],
        dtype=np.uint64,
    )
    # fmt: on
    values = np.array([2, 3, 8, 4, 1, 3, 3, 7, 1, 5, 7, 3], dtype=np.float64)
    sizes = np.array([7, 7], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    m = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert m.verify()

    # Row 0 seeds node #1, row 1 seeds node #3 (distance 0 at the source)
    indices = np.array([[0, 1], [1, 3]], dtype=np.uint64)
    values = np.array([0, 0], dtype=np.float64)
    sizes = np.array([2, 7], dtype=np.uint64)
    sparsity = np.array([False, True], dtype=np.bool_)
    v = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert v.verify()

    # Compute MSSP
    # correct answer from node #1 -- [14, 0, 9, 11, 7, 10, 4]
    # correct answer from node #3 -- [3,  5,  3,  0, 12,  4,  9]
    w = mlalgo.mssp(m, v, compile_with_passes=special_passes)

    assert (w.indices[1] == [0, 1, 2, 3, 4, 5, 6, 0, 1, 2, 3, 4, 5, 6]).all()
    assert (w.values == [14, 0, 9, 11, 7, 10, 4, 3, 5, 3, 0, 12, 4, 9]).all()
Пример #2
0
def test_vertex_nomination(special_passes):
    """Vertex nomination on a 7-node weighted digraph.

    Checks the nominated vertex for a single seed node and for a
    multi-node seed set.
    """
    # fmt: off
    indices = np.array(
        [[0, 1], [0, 3], [1, 4], [1, 6], [2, 5], [3, 0], [3, 2], [4, 5],
         [5, 1], [6, 2], [6, 3], [6, 4]],
        dtype=np.uint64,
    )
    # fmt: on
    values = np.array([2, 3, 8, 4, 1, 3, 3, 7, 1, 5, 7, 3], dtype=np.float64)
    sizes = np.array([7, 7], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    m = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert m.verify()

    # Seed vector selecting node #6 only
    indices = np.array([[6]], dtype=np.uint64)
    values = np.array([0], dtype=np.float64)
    sizes = np.array([7], dtype=np.uint64)
    sparsity = np.array([True], dtype=np.bool_)
    v = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert v.verify()

    # Compute Vertex Nomination
    # correct answer for node #6 is node #4
    w = mlalgo.vertex_nomination(m, v, compile_with_passes=special_passes)
    assert w == 4

    # correct answer for nodes #0,1,5 is node #3
    indices = np.array([[0], [1], [5]], dtype=np.uint64)
    values = np.array([0, 0, 0], dtype=np.float64)
    v2 = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert v2.verify()
    w2 = mlalgo.vertex_nomination(m, v2, compile_with_passes=special_passes)
    assert w2 == 3
Пример #3
0
def test_bfs(special_passes):
    """BFS from node 0 on an 8-node graph; checks parent and level vectors."""
    # 0 - 1    5 - 6
    # | X |    | /
    # 3 - 4 -- 2 - 7
    # fmt: off
    indices = np.array(
        [[0, 1], [0, 3], [0, 4], [1, 0], [1, 3], [1, 4], [2, 4], [2, 5],
         [2, 6], [2, 7], [3, 0], [3, 1], [3, 4], [4, 0], [4, 1], [4, 2],
         [4, 3], [5, 2], [5, 6], [6, 2], [6, 5], [7, 2]],
        dtype=np.uint64,
    )
    values = np.array(
        [
            100, 200, 300, 100, 400, 500, 99, 50, 55, 75, 200, 400, 600, 300,
            500, 99, 600, 50, 60, 55, 60, 75
        ],
        dtype=np.float64,
    )
    # fmt: on
    sizes = np.array([8, 8], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    a = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert a.verify()

    parents, levels = mlalgo.bfs(0, a, compile_with_passes=special_passes)
    expected_parents = np.array([0, 0, 4, 0, 0, 2, 2, 2])
    expected_levels = np.array([0, 1, 2, 1, 1, 3, 3, 3])

    assert np.all(parents.toarray() == expected_parents)
    assert np.all(levels.toarray() == expected_levels)
Пример #4
0
def test_random_walk():
    """Random walks on a small digraph.

    The walk is stochastic, so instead of fixed expected paths we verify
    that every consecutive pair of nodes in each emitted path is a real
    edge of the graph.
    """
    # fmt: off
    indices = np.array(
        [[0, 1], [0, 2], [1, 0], [1, 3], [2, 0], [2, 4], [3, 2]],
        dtype=np.uint64,
    )
    values = np.array([100, 200, 300, 400, 175, 222, 333], dtype=np.float64)
    # fmt: on
    sizes = np.array([5, 5], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    graph = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert graph.verify()

    paths = mlalgo.random_walk(graph, 20, [0, 1, 2, 3, 4, 4, 3, 2, 1, 0])

    # Perform validation for each row to verify correctness
    valid_steps = {}
    for start, end in indices:
        valid_steps.setdefault(start, set()).add(end)
    pointers = paths.pointers[-1]
    nodes = paths.values
    for irow, (istart, iend) in enumerate(zip(pointers[:-1], pointers[1:])):
        assert istart != iend, f"Initial node missing for row {irow}"
        # zip the path against itself shifted by one to get (from, to) steps
        for jstart, jend in zip(nodes[istart:int(iend - 1)],
                                nodes[int(istart + 1):iend]):
            assert jstart in valid_steps, f"Row [{irow}] {jstart} is a terminator"
            assert (jend in valid_steps[jstart]
                    ), f"Row [{irow}] {jstart}->{jend} is not a valid step"
Пример #5
0
def test_pagerank(special_passes):
    """PageRank: verifies convergence to known values and the maxiter cutoff."""
    # fmt: off
    indices = np.array(
        [[0, 1], [0, 2], [1, 3], [2, 3], [2, 4], [3, 4], [4, 0]],
        dtype=np.uint64,
    )
    # fmt: on
    values = np.array([1.1, 9.8, 4.2, 7.1, 0.2, 6.9, 2.2], dtype=np.float64)
    sizes = np.array([5, 5], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    m = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert m.verify()

    expected = np.array(
        [0.2541917746, 0.1380315018, 0.1380315018, 0.2059901768, 0.2637550447])

    # Test success
    pr, niters = mlalgo.pagerank(m, tol=1e-7)
    assert np.abs(pr.values - expected).sum() < 1e-5, pr.values

    # Test maxiter reached, failed to converge
    pr, niters = mlalgo.pagerank(m,
                                 tol=1e-7,
                                 maxiter=6,
                                 compile_with_passes=special_passes)
    assert niters == 6
    assert (np.abs(pr.values - expected).sum() >
            1e-5), "Unexpectedly converged in 6 iterations"
Пример #6
0
def test_triangle_count(special_passes):
    """Triangle count on an 8-node undirected graph (stored symmetrically)."""
    # 0 - 1    5 - 6
    # | X |    | /
    # 3 - 4 -- 2 - 7
    # fmt: off
    indices = np.array(
        [[0, 1], [0, 3], [0, 4], [1, 0], [1, 3], [1, 4], [2, 4], [2, 5],
         [2, 6], [2, 7], [3, 0], [3, 1], [3, 4], [4, 0], [4, 1], [4, 2],
         [4, 3], [5, 2], [5, 6], [6, 2], [6, 5], [7, 2]],
        dtype=np.uint64,
    )
    values = np.array(
        [
            100, 200, 300, 100, 400, 500, 99, 50, 55, 75, 200, 400, 600, 300,
            500, 99, 600, 50, 60, 55, 60, 75
        ],
        dtype=np.float64,
    )
    # fmt: on
    sizes = np.array([8, 8], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    a = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert a.verify()

    num_triangles = mlalgo.triangle_count(a,
                                          compile_with_passes=special_passes)
    assert num_triangles == 5, num_triangles
Пример #7
0
def test_matrix_to_from_coo(engine: MlirJitEngine, aliases: AliasMap):
    """Round-trip a CSR matrix through COO form via graphblas to_coo/from_coo."""
    irb = MLIRFunctionBuilder(
        "matrix_to_coo",
        input_types=["tensor<?x?xf64, #CSR64>"],
        return_types=["tensor<?x?xindex>", "tensor<?xf64>"],
        aliases=aliases,
    )
    (tensor, ) = irb.inputs
    indices, values = irb.graphblas.to_coo(tensor)
    irb.return_vars(indices, values)
    matrix_to_coo = irb.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test results
    input_indices = np.array(
        [[0, 1], [0, 2], [1, 0], [1, 3], [2, 0], [2, 4], [6, 2]],
        dtype=np.uint64)
    input_values = np.array([-100, 200, 300, 400, 175, 222, 333.333],
                            dtype=np.float64)
    sizes = np.array([7, 5], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    graph = MLIRSparseTensor(input_indices, input_values, sizes, sparsity)
    # assert the result (other tests do) rather than silently discarding it
    assert graph.verify()

    new_indices, new_values = matrix_to_coo(graph)

    np.testing.assert_equal(input_indices, new_indices)
    np.testing.assert_allclose(input_values, new_values)

    irb = MLIRFunctionBuilder(
        "matrix_from_coo",
        input_types=["tensor<?x?xindex>", "tensor<?xf64>", "index", "index"],
        return_types=["tensor<?x?xf64, #CSR64>"],
        aliases=aliases,
    )
    (indices, values, nrows, ncols) = irb.inputs
    tensor = irb.graphblas.from_coo(indices, values, (nrows, ncols))
    irb.return_vars(tensor)
    matrix_from_coo = irb.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test results
    g2 = matrix_from_coo(input_indices, input_values, 7, 5)
    assert g2.verify()

    g2_indices, g2_values = matrix_to_coo(g2)

    np.testing.assert_equal(input_indices, g2_indices)
    np.testing.assert_allclose(input_values, g2_values)
Пример #8
0
def test_vector_to_from_coo(engine: MlirJitEngine, aliases: AliasMap):
    """Round-trip a sparse vector through COO form via graphblas to_coo/from_coo."""
    irb = MLIRFunctionBuilder(
        "vector_to_coo",
        input_types=["tensor<?xf64, #CV64>"],
        return_types=["tensor<?x?xindex>", "tensor<?xf64>"],
        aliases=aliases,
    )
    (tensor, ) = irb.inputs
    indices, values = irb.graphblas.to_coo(tensor)
    irb.return_vars(indices, values)
    vector_to_coo = irb.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test results
    input_indices = np.array([[1], [6], [8]], dtype=np.uint64)
    input_values = np.array([19.234, 1.1, 2.2], dtype=np.float64)
    sizes = np.array([20], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([True], dtype=np.bool_)
    vec = MLIRSparseTensor(input_indices, input_values, sizes, sparsity)
    # assert the result (other tests do) rather than silently discarding it
    assert vec.verify()

    new_indices, new_values = vector_to_coo(vec)

    np.testing.assert_equal(input_indices, new_indices)
    np.testing.assert_allclose(input_values, new_values)

    irb = MLIRFunctionBuilder(
        "vector_from_coo",
        input_types=["tensor<?x?xindex>", "tensor<?xf64>", "index"],
        return_types=["tensor<?xf64, #CV64>"],
        aliases=aliases,
    )
    (indices, values, size) = irb.inputs
    tensor = irb.graphblas.from_coo(indices, values, (size, ))
    irb.return_vars(tensor)
    vector_from_coo = irb.compile(engine=engine, passes=GRAPHBLAS_PASSES)

    # Test results
    v2 = vector_from_coo(input_indices, input_values, 20)
    assert v2.verify()

    v2_indices, v2_values = vector_to_coo(v2)

    np.testing.assert_equal(input_indices, v2_indices)
    np.testing.assert_allclose(input_values, v2_values)
Пример #9
0
def test_jit_engine_sparse_tensor(engine, mlir_type):
    mlir_template = r"""

#trait_sum_reduction = {{
  indexing_maps = [
    affine_map<(i,j,k) -> (i,j,k)>,  // A
    affine_map<(i,j,k) -> ()>        // x (scalar out)
  ],
  iterator_types = ["reduction", "reduction", "reduction"],
  doc = "x += SUM_ijk A(i,j,k)"
}}

#sparseTensor = #sparse_tensor.encoding<{{
  dimLevelType = [ "compressed", "compressed", "compressed" ],
  dimOrdering = affine_map<(i,j,k) -> (i,j,k)>,
  pointerBitWidth = 64,
  indexBitWidth = 64
}}>

func @{func_name}(%argA: tensor<10x20x30x{mlir_type}, #sparseTensor>) -> {mlir_type} {{
  %out_tensor = arith.constant dense<0.0> : tensor<{mlir_type}>
  %reduction = linalg.generic #trait_sum_reduction
     ins(%argA: tensor<10x20x30x{mlir_type}, #sparseTensor>)
    outs(%out_tensor: tensor<{mlir_type}>) {{
      ^bb(%a: {mlir_type}, %x: {mlir_type}):
        %0 = arith.addf %x, %a : {mlir_type}
        linalg.yield %0 : {mlir_type}
  }} -> tensor<{mlir_type}>
  %answer = tensor.extract %reduction[] : tensor<{mlir_type}>
  return %answer : {mlir_type}
}}

"""

    np_type = MLIR_TYPE_TO_NP_TYPE[mlir_type]

    func_name = f"func_{mlir_type}"

    mlir_text = mlir_template.format(func_name=func_name, mlir_type=mlir_type)

    indices = np.array([[0, 0, 0], [1, 1, 1]], dtype=np.uint64)
    values = np.array([1.2, 3.4], dtype=np_type)
    sizes = np.array([10, 20, 30], dtype=np.uint64)
    sparsity = np.array([True, True, True], dtype=np.bool8)

    a = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert a.verify()

    assert engine.add(mlir_text, GRAPHBLAS_PASSES) == [func_name]

    result = engine[func_name](a)
    expected_result = 4.6
    assert (
        abs(result - expected_result) < 1e-6
    ), f"""
Пример #10
0
def test_graph_search():
    """graph_search in its four modes: random, random_weighted, argmin, argmax.

    The unseeded "random" mode is checked against the full set of valid
    outcomes; the seeded/deterministic modes against exact results.
    """
    # fmt: off
    indices = np.array(
        [[0, 1], [0, 2], [1, 0], [1, 3], [2, 0], [2, 4], [3, 2], [4, 4]],
        dtype=np.uint64,
    )
    values = np.array([100, 200, 300, 400, 175, 222, 333, 200],
                      dtype=np.float64)
    # fmt: on
    sizes = np.array([5, 5], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    graph = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert graph.verify()

    # Random Uniform (no seed, so truly random)
    count = mlalgo.graph_search(graph, 3, [2, 4], "random")

    # Check for one of the possible solutions:
    # [0, 1, 4] or [0, 1, 3, 4] or [0, 2, 4] or [0, 2, 4] or [4]
    # [2, 1, 3]    [1, 1, 1, 3]    [2, 1, 3]    [1, 1, 4]    [6]
    for idx, vals in [
        ([0, 1, 4], [2, 1, 3]),
        ([0, 1, 3, 4], [1, 1, 1, 3]),
        ([0, 2, 4], [2, 1, 3]),
        ([0, 2, 4], [1, 1, 4]),
        ([4], [6]),
    ]:
        if len(count.indices[0]) == len(idx):
            if (count.indices[0] == idx).all() and (count.values
                                                    == vals).all():
                break
    else:
        assert False, f"Invalid solution: idx={count.indices[0]}, vals={count.values}"

    # Random weighted
    count = mlalgo.graph_search(graph,
                                5, [0, 2],
                                "random_weighted",
                                rand_seed=14)
    assert (count.indices[0] == [0, 2, 4]).all()
    assert (count.values == [2, 3, 5]).all()

    # argmin
    count = mlalgo.graph_search(graph, 3, [0, 3], "argmin")
    assert (count.indices[0] == [0, 1, 2]).all()
    assert (count.values == [2, 3, 1]).all()

    # argmax
    count = mlalgo.graph_search(graph, 3, [0, 1], "argmax")
    assert (count.indices[0] == [2, 3, 4]).all()
    assert (count.values == [2, 1, 3]).all()
Пример #11
0
def test_ir_builder_triangle_count():
    """IR-builder triangle count on the same 8-node graph used elsewhere."""
    # 0 - 1    5 - 6
    # | X |    | /
    # 3 - 4 -- 2 - 7
    # fmt: off
    indices = np.array(
        [
            [0, 1],
            [0, 3],
            [0, 4],
            [1, 0],
            [1, 3],
            [1, 4],
            [2, 4],
            [2, 5],
            [2, 6],
            [2, 7],
            [3, 0],
            [3, 1],
            [3, 4],
            [4, 0],
            [4, 1],
            [4, 2],
            [4, 3],
            [5, 2],
            [5, 6],
            [6, 2],
            [6, 5],
            [7, 2],
        ],
        dtype=np.uint64,
    )
    values = np.array(
        [
            100, 200, 300, 100, 400, 500, 99, 50, 55, 75, 200, 400, 600, 300,
            500, 99, 600, 50, 60, 55, 60, 75
        ],
        dtype=np.float64,
    )
    # fmt: on
    sizes = np.array([8, 8], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    input_tensor = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert input_tensor.verify()

    assert 5 == triangle_count(input_tensor)
Пример #12
0
def test_ties():
    """Totally-induced edge sampling (TIES) with a fixed seed; checks the
    subgraph's CSR pointers and column indices."""
    # fmt: off
    indices = np.array(
        [[0, 1], [0, 2], [1, 0], [1, 3], [2, 0], [2, 4], [3, 2], [4, 4]],
        dtype=np.uint64,
    )
    values = np.array([100, 200, 300, 400, 175, 222, 333, 200],
                      dtype=np.float64)
    # fmt: on
    sizes = np.array([5, 5], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)
    graph = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert graph.verify()

    subgraph = mlalgo.totally_induced_edge_sampling(graph,
                                                    0.25,
                                                    rand_seed=2021)
    assert np.all(subgraph.pointers[1] == [0, 1, 1, 3, 4, 5])
    assert np.all(subgraph.indices[1] == [2, 0, 4, 2, 4])
Пример #13
0
def test_ir_builder_convert_layout_wrapper(engine: MlirJitEngine,
                                           aliases: AliasMap):
    """Build, compile, and run a CSR->CSC convert_layout wrapper; the dense
    contents must be unchanged by the layout conversion."""
    ir_builder = MLIRFunctionBuilder(
        "convert_layout_wrapper",
        input_types=["tensor<?x?xf64, #CSR64>"],
        return_types=("tensor<?x?xf64, #CSC64>", ),
        aliases=aliases,
    )
    (input_var, ) = ir_builder.inputs
    convert_layout_result = ir_builder.graphblas.convert_layout(
        input_var, "tensor<?x?xf64, #CSC64>")
    ir_builder.return_vars(convert_layout_result)

    assert ir_builder.get_mlir()

    # Test Compiled Function
    convert_layout_wrapper_callable = ir_builder.compile(
        engine=engine, passes=GRAPHBLAS_PASSES)

    indices = np.array(
        [
            [1, 2],
            [4, 3],
        ],
        dtype=np.uint64,
    )
    values = np.array([1.2, 4.3], dtype=np.float64)
    sizes = np.array([8, 8], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)

    input_tensor = MLIRSparseTensor(indices, values, sizes, sparsity)
    assert input_tensor.verify()

    dense_input_tensor = np.zeros([8, 8], dtype=np.float64)
    dense_input_tensor[1, 2] = 1.2
    dense_input_tensor[4, 3] = 4.3
    assert np.isclose(dense_input_tensor, input_tensor.toarray()).all()
    output_tensor = convert_layout_wrapper_callable(input_tensor)
    assert output_tensor.verify()

    assert np.isclose(dense_input_tensor, output_tensor.toarray()).all()
Пример #14
0
def test_jit_engine_zero_values(engine):
    """Compile a hand-written MLIR CSR transpose and run it on a 2x2 matrix
    whose second column is entirely implicit zeros, verifying both tensors
    survive and the transpose is correct."""

    mlir_text = """
    module  {
      func private @_mlir_ciface_sparseValuesF64(!llvm.ptr<i8>) -> memref<?xf64>
      func private @_mlir_ciface_sparseIndices64(!llvm.ptr<i8>, index) -> memref<?xindex>
      func private @_mlir_ciface_sparsePointers64(!llvm.ptr<i8>, index) -> memref<?xindex>
      func private @sparseDimSize(!llvm.ptr<i8>, index) -> index

      func @transpose(%output: !llvm.ptr<i8>, %input: !llvm.ptr<i8>) {
        %c0 = arith.constant 0 : index
        %c1 = arith.constant 1 : index


        %n_row = call @sparseDimSize(%input, %c0) : (!llvm.ptr<i8>, index) -> index
        %n_col = call @sparseDimSize(%input, %c1) : (!llvm.ptr<i8>, index) -> index
        %Ap = call @_mlir_ciface_sparsePointers64(%input, %c1) : (!llvm.ptr<i8>, index) -> memref<?xindex>
        %Aj = call @_mlir_ciface_sparseIndices64(%input, %c1) : (!llvm.ptr<i8>, index) -> memref<?xindex>
        %Ax = call @_mlir_ciface_sparseValuesF64(%input) : (!llvm.ptr<i8>) -> memref<?xf64>
        %Bp = call @_mlir_ciface_sparsePointers64(%output, %c1) : (!llvm.ptr<i8>, index) -> memref<?xindex>
        %Bi = call @_mlir_ciface_sparseIndices64(%output, %c1) : (!llvm.ptr<i8>, index) -> memref<?xindex>
        %Bx = call @_mlir_ciface_sparseValuesF64(%output) : (!llvm.ptr<i8>) -> memref<?xf64>

        %nnz = memref.load %Ap[%n_row] : memref<?xindex>

        // compute number of non-zero entries per column of A
        scf.for %arg2 = %c0 to %n_col step %c1 {
          memref.store %c0, %Bp[%arg2] : memref<?xindex>
        }
        scf.for %n = %c0 to %nnz step %c1 {
          %colA = memref.load %Aj[%n] : memref<?xindex>
          %colB = memref.load %Bp[%colA] : memref<?xindex>
          %colB1 = arith.addi %colB, %c1 : index
          memref.store %colB1, %Bp[%colA] : memref<?xindex>
        }

        // cumsum the nnz per column to get Bp
        memref.store %c0, %Bp[%n_col] : memref<?xindex>
        scf.for %col = %c0 to %n_col step %c1 {
          %temp = memref.load %Bp[%col] : memref<?xindex>
          %cumsum = memref.load %Bp[%n_col] : memref<?xindex>
          memref.store %cumsum, %Bp[%col] : memref<?xindex>
          %cumsum2 = arith.addi %cumsum, %temp : index
          memref.store %cumsum2, %Bp[%n_col] : memref<?xindex>
        }

        scf.for %row = %c0 to %n_row step %c1 {
          %j_start = memref.load %Ap[%row] : memref<?xindex>
          %row_plus1 = arith.addi %row, %c1 : index
          %j_end = memref.load %Ap[%row_plus1] : memref<?xindex>
          scf.for %jj = %j_start to %j_end step %c1 {
            %col = memref.load %Aj[%jj] : memref<?xindex>
            %dest = memref.load %Bp[%col] : memref<?xindex>

            memref.store %row, %Bi[%dest] : memref<?xindex>
            %axjj = memref.load %Ax[%jj] : memref<?xf64>
            memref.store %axjj, %Bx[%dest] : memref<?xf64>

            // Bp[col]++
            %bp_inc = memref.load %Bp[%col] : memref<?xindex>
            %bp_inc1 = arith.addi %bp_inc, %c1 : index
            memref.store %bp_inc1, %Bp[%col]: memref<?xindex>
          }
        }

        %last_last = memref.load %Bp[%n_col] : memref<?xindex>
        memref.store %c0, %Bp[%n_col] : memref<?xindex>
        scf.for %col = %c0 to %n_col step %c1 {
          %temp = memref.load %Bp[%col] : memref<?xindex>
          %last = memref.load %Bp[%n_col] : memref<?xindex>
          memref.store %last, %Bp[%col] : memref<?xindex>
          memref.store %temp, %Bp[%n_col] : memref<?xindex>
        }
        memref.store %last_last, %Bp[%n_col] : memref<?xindex>

        return
      }
    }
    """

    # 2x2 matrix with both values in column 0 -> column 1 is implicit zeros
    indices = np.array(
        [
            [0, 0],
            [1, 0],
        ],
        dtype=np.uint64,
    )
    values = np.array([8, 9], dtype=np.float64)
    sizes = np.array([2, 2], dtype=np.uint64)
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    sparsity = np.array([False, True], dtype=np.bool_)

    input_tensor = MLIRSparseTensor(indices, values, sizes, sparsity)
    output_tensor = MLIRSparseTensor(indices, values, sizes, sparsity)

    assert input_tensor.verify()
    assert output_tensor.verify()
    assert engine.add(mlir_text, GRAPHBLAS_PASSES) == ["transpose"]
    assert engine.transpose(output_tensor, input_tensor) is None
    # input must be untouched; output holds the transpose
    assert input_tensor.verify()
    assert np.isclose(input_tensor.toarray(), np.array([[8, 0], [9, 0]])).all()
    assert output_tensor.verify()
    assert np.isclose(output_tensor.toarray(), np.array([[8, 9], [0, 0]])).all()
Пример #15
0
def test_jit_engine_sequence_of_sparse_tensors_input(engine):
    """Pass a Python list of sparse tensors to a compiled MLIR function that
    sums all their elements, and compare against the NumPy sum."""
    mlir_text = """
#trait_sum_reduction = {
  indexing_maps = [
    affine_map<(i,j) -> (i,j)>,
    affine_map<(i,j) -> ()>
  ],
  iterator_types = ["reduction", "reduction"],
  doc = "Sparse Tensor Sum"
}

#sparseTensor = #sparse_tensor.encoding<{
  dimLevelType = [ "compressed", "compressed" ],
  pointerBitWidth = 64,
  indexBitWidth = 64
}>

func private @ptr8_to_matrix_csr_f64_p64i64(!llvm.ptr<i8>) -> tensor<2x3xf64, #sparseTensor>

func @sparse_tensors_summation(%sequence: !llvm.ptr<!llvm.ptr<i8>>, %sequence_length: index) -> f64 {
  // Take an array of sparse 2x3 matrices

  %output_storage = arith.constant dense<0.0> : tensor<f64>

  %c0 = arith.constant 0 : index
  %c1 = arith.constant 1 : index
  %ci0 = arith.constant 0 : i64
  %ci1 = arith.constant 1 : i64
  %cf0 = arith.constant 0.0 : f64

  %sum_memref = memref.alloc() : memref<f64>
  memref.store %cf0, %sum_memref[] : memref<f64>

  scf.for %i = %c0 to %sequence_length step %c1 iter_args(%iter=%ci0) -> (i64) {

    // llvm.getelementptr just does pointer arithmetic
    %sparse_tensor_ptr_ptr = llvm.getelementptr %sequence[%iter] : (!llvm.ptr<!llvm.ptr<i8>>, i64) -> !llvm.ptr<!llvm.ptr<i8>>

    // dereference %sparse_tensor_ptr_ptr to get an !llvm.ptr<i8>
    %sparse_tensor_ptr = llvm.load %sparse_tensor_ptr_ptr : !llvm.ptr<!llvm.ptr<i8>>

    %sparse_tensor = call @ptr8_to_matrix_csr_f64_p64i64(%sparse_tensor_ptr) : (!llvm.ptr<i8>) -> tensor<2x3xf64, #sparseTensor>

    %reduction = linalg.generic #trait_sum_reduction
        ins(%sparse_tensor: tensor<2x3xf64, #sparseTensor>)
        outs(%output_storage: tensor<f64>) {
          ^bb(%a: f64, %x: f64):
            %0 = arith.addf %x, %a : f64
            linalg.yield %0 : f64
      } -> tensor<f64>
    %reduction_value = tensor.extract %reduction[] : tensor<f64>

    %current_sum = memref.load %sum_memref[] : memref<f64>
    %updated_sum = arith.addf %reduction_value, %current_sum : f64
    memref.store %updated_sum, %sum_memref[] : memref<f64>

    %plus_one = arith.addi %iter, %ci1 : i64
    scf.yield %plus_one : i64
  }

  %sum = memref.load %sum_memref[] : memref<f64>


  return %sum : f64
}
"""
    assert engine.add(mlir_text, GRAPHBLAS_PASSES) == ["sparse_tensors_summation"]

    num_sparse_tensors = 10

    # generate values: two values per tensor, drawn in order from sqrt_values
    sqrt_values = np.sqrt(np.arange(1, num_sparse_tensors + 1, 0.5))
    value_iter = iter(sqrt_values)

    def next_values():
        # named functions instead of lambda assignments (PEP 8 E731)
        return np.array([next(value_iter), next(value_iter)], dtype=np.float64)

    # generate coordinates: two distinct, sorted (row, col) pairs per tensor
    coordinate_iter = itertools.count()

    def next_indices():
        return np.array(
            sorted(
                [
                    (next(coordinate_iter) % 2, next(coordinate_iter) % 3),
                    (next(coordinate_iter) % 2, next(coordinate_iter) % 3),
                ]
            ),
            dtype=np.uint64,
        )

    sparse_tensors = []
    for _ in range(num_sparse_tensors):
        indices = next_indices()
        values = next_values()
        sizes = np.array([2, 3], dtype=np.uint64)
        # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
        sparsity = np.array([True, True], dtype=np.bool_)
        sparse_tensor = MLIRSparseTensor(indices, values, sizes, sparsity)
        assert sparse_tensor.verify()
        sparse_tensors.append(sparse_tensor)

    expected_sum = sqrt_values.sum()
    assert np.isclose(
        expected_sum,
        engine.sparse_tensors_summation(sparse_tensors, num_sparse_tensors),
    )
Пример #16
0
def test_verify(nrows, ncols):
    """Exhaustively verify MLIRSparseTensor against graphblas exports.

    For every possible sparsity pattern of an nrows x ncols matrix, build the
    tensor in CSR / HyperCSR / CSC / HyperCSC layouts (and FullR / FullC when
    dense) and compare pointers, indices, and values with the corresponding
    graphblas `ss.export` format.
    """
    # np.bool8 was removed in NumPy 2.0; np.bool_ is the supported alias
    for indices in itertools.chain.from_iterable(
            itertools.combinations(list(range(nrows * ncols)), n)
            for n in range(nrows * ncols + 1)):
        rows = [x // ncols for x in indices]
        cols = [x % ncols for x in indices]
        vals = np.arange(len(indices))
        M = gb.Matrix.from_values(rows, cols, vals, nrows=nrows, ncols=ncols)
        M.wait()

        # CSR
        d = M.ss.export("csr", sort=True)
        r, c, v = M.to_values()
        mt = MLIRSparseTensor(
            np.stack([r, c]).T.copy(),
            v,
            np.array(M.shape, dtype=np.uint64),
            np.array([False, True], dtype=np.bool_),
        )
        verify(mt)
        assert mt.verify()
        np.testing.assert_array_equal(mt.get_pointers(0), [])
        np.testing.assert_array_equal(mt.get_pointers(1), d["indptr"])
        np.testing.assert_array_equal(mt.get_indices(0), [])
        np.testing.assert_array_equal(mt.get_indices(1), d["col_indices"])
        np.testing.assert_array_equal(mt.values, d["values"])
        assert mt.shape == M.shape
        assert mt.sizes == M.shape

        # HyperCSR
        d = M.ss.export("hypercsr", sort=True)
        r, c, v = M.to_values()
        mt = MLIRSparseTensor(
            np.stack([r, c]).T.copy(),
            v,
            np.array(M.shape, dtype=np.uint64),
            np.array([True, True], dtype=np.bool_),
        )
        verify(mt)
        assert mt.verify()
        np.testing.assert_array_equal(mt.get_pointers(0), [0, len(d["rows"])])
        np.testing.assert_array_equal(mt.get_pointers(1), d["indptr"])
        np.testing.assert_array_equal(mt.get_indices(0), d["rows"])
        np.testing.assert_array_equal(mt.get_indices(1), d["col_indices"])
        np.testing.assert_array_equal(mt.values, d["values"])
        assert mt.shape == M.shape
        assert mt.sizes == M.shape

        # CSC: stored via the transpose with a [1, 0] dim ordering
        d = M.ss.export("csc", sort=True)
        M2 = M.T.new()
        M2.wait()
        c, r, v = M2.to_values()
        mt = MLIRSparseTensor(
            np.stack([c, r]).T.copy(),
            v,
            np.array(M.shape[::-1], dtype=np.uint64),
            np.array([False, True], dtype=np.bool_),
            np.array([1, 0], dtype=np.uint64),
        )
        verify(mt)
        assert mt.verify()
        np.testing.assert_array_equal(mt.get_pointers(0), [])
        np.testing.assert_array_equal(mt.get_pointers(1), d["indptr"])
        np.testing.assert_array_equal(mt.get_indices(0), [])
        np.testing.assert_array_equal(mt.get_indices(1), d["row_indices"])
        np.testing.assert_array_equal(mt.values, d["values"])
        assert mt.shape == M.shape
        assert mt.sizes[::-1] == M.shape

        # HyperCSC
        d = M.ss.export("hypercsc", sort=True)
        M2 = M.T.new()
        M2.wait()
        c, r, v = M2.to_values()
        mt = MLIRSparseTensor(
            np.stack([c, r]).T.copy(),
            v,
            np.array(M.shape[::-1], dtype=np.uint64),
            np.array([True, True], dtype=np.bool_),
            np.array([1, 0], dtype=np.uint64),
        )
        verify(mt)
        assert mt.verify()
        np.testing.assert_array_equal(mt.get_pointers(0), [0, len(d["cols"])])
        np.testing.assert_array_equal(mt.get_pointers(1), d["indptr"])
        np.testing.assert_array_equal(mt.get_indices(0), d["cols"])
        np.testing.assert_array_equal(mt.get_indices(1), d["row_indices"])
        np.testing.assert_array_equal(mt.values, d["values"])
        assert mt.shape == M.shape
        assert mt.sizes[::-1] == M.shape

        if vals.size == nrows * ncols:
            # Dense layouts only exist when every element is present
            # FullR
            d = M.ss.export("fullr", sort=True)
            r, c, v = M.to_values()
            mt = MLIRSparseTensor(
                np.stack([r, c]).T.copy(),
                v,
                np.array(M.shape, dtype=np.uint64),
                np.array([False, False], dtype=np.bool_),
            )
            verify(mt)
            assert mt.verify()
            np.testing.assert_array_equal(mt.values, d["values"].ravel())
            assert mt.shape == M.shape
            assert mt.sizes == M.shape

            # FullC
            d = M.ss.export("fullc", sort=True)
            M2 = M.T.new()
            M2.wait()
            c, r, v = M2.to_values()
            mt = MLIRSparseTensor(
                np.stack([c, r]).T.copy(),
                v,
                np.array(M.shape[::-1], dtype=np.uint64),
                np.array([False, False], dtype=np.bool_),
                np.array([1, 0], dtype=np.uint64),
            )
            verify(mt)
            assert mt.verify()
            np.testing.assert_array_equal(mt.values, d["values"].T.ravel())
            assert mt.shape == M.shape
            assert mt.sizes[::-1] == M.shape