Example #1
def compile_and_build_engine(
    module: ir.Module) -> execution_engine.ExecutionEngine:
  """Compiles an MLIR module and builds a JIT execution engine.

  Args:
    module: The MLIR module.

  Returns:
    A JIT execution engine for the MLIR module.
  """
  pipeline = (
      "sparsification,"
      "sparse-tensor-conversion,"
      "builtin.func(linalg-bufferize,convert-linalg-to-loops,convert-vector-to-scf),"
      "convert-scf-to-cf,"
      "func-bufferize,"
      "arith-bufferize,"
      "builtin.func(tensor-bufferize,finalizing-bufferize),"
      "convert-vector-to-llvm{reassociate-fp-reductions=1 enable-index-optimizations=1},"
      "lower-affine,"
      "convert-memref-to-llvm,"
      "convert-std-to-llvm,"
      "reconcile-unrealized-casts")
  PassManager.parse(pipeline).run(module)
  return execution_engine.ExecutionEngine(
      module, opt_level=_OPT_LEVEL, shared_libs=[_get_support_lib_name()])
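All of these examples assume the upstream MLIR Python bindings. A minimal usage sketch for compile_and_build_engine follows; the imports are the standard binding modules, while MODULE_ASM, _OPT_LEVEL, and _get_support_lib_name are placeholders for names the excerpt defines elsewhere.

import os

from mlir import execution_engine, ir
from mlir.passmanager import PassManager

_OPT_LEVEL = 2  # assumption; the original file defines this elsewhere

def _get_support_lib_name() -> str:
  # Assumption: tests like these usually locate the sparse-tensor runtime
  # support library through an environment variable.
  return os.getenv('SUPPORT_LIB', 'libmlir_c_runner_utils.so')

with ir.Context(), ir.Location.unknown():
  # MODULE_ASM stands in for MLIR assembly whose entry point carries the
  # llvm.emit_c_interface attribute, which invoke() requires.
  module = ir.Module.parse(MODULE_ASM)
  engine = compile_and_build_engine(module)
  engine.invoke('main')  # entry-point name assumed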
Example #2
def build_compile_and_run_SpMM(attr: st.EncodingAttr, support_lib: str,
                               compiler):
    # Build.
    module = build_SpMM(attr)
    func = str(module.operation.regions[0].blocks[0].operations[0].operation)
    module = ir.Module.parse(func + boilerplate(attr))
    # Compile.
    compiler(module)
    engine = execution_engine.ExecutionEngine(module,
                                              opt_level=0,
                                              shared_libs=[support_lib])
    # Set up numpy input, invoke the kernel, and get numpy output.
    # Built-in bufferization uses in-out buffers.
    # TODO: replace with inplace comprehensive bufferization.
    Cin = np.zeros((3, 2), np.double)
    Cout = np.zeros((3, 2), np.double)
    Cin_memref_ptr = ctypes.pointer(
        ctypes.pointer(rt.get_ranked_memref_descriptor(Cin)))
    Cout_memref_ptr = ctypes.pointer(
        ctypes.pointer(rt.get_ranked_memref_descriptor(Cout)))
    engine.invoke('main', Cout_memref_ptr, Cin_memref_ptr)
    Cresult = rt.ranked_memref_to_numpy(Cout_memref_ptr[0])

    # Sanity check on computed result.
    expected = [[12.3, 12.0], [0.0, 0.0], [16.5, 19.8]]
    if not np.allclose(Cresult, expected):
        quit('FAILURE')
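The double ctypes.pointer wrapping above follows the memref calling convention that ExecutionEngine.invoke expects. A self-contained sketch of the numpy-to-memref-descriptor round trip, with no engine involved:

import ctypes

import numpy as np
from mlir import runtime as rt

a = np.arange(6, dtype=np.float64).reshape(3, 2)
desc = rt.get_ranked_memref_descriptor(a)   # numpy array -> memref descriptor
ptr = ctypes.pointer(ctypes.pointer(desc))  # the **descriptor shape invoke() takes
b = rt.ranked_memref_to_numpy(ptr[0])       # descriptor -> numpy view
assert np.array_equal(a, b)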
Example #3
def build_compile_and_run_SDDMMM(attr: st.EncodingAttr, opt: str,
                                 support_lib: str, compiler):
    # Build.
    module = build_SDDMM(attr)
    func = str(module.operation.regions[0].blocks[0].operations[0].operation)
    module = ir.Module.parse(func + boilerplate(attr))

    # Compile.
    compiler(module)
    engine = execution_engine.ExecutionEngine(module,
                                              opt_level=0,
                                              shared_libs=[support_lib])

    # Set up numpy input and buffer for output.
    a = np.array([[1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 8.1],
                  [1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 8.2],
                  [1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3, 8.3],
                  [1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 7.4, 8.4],
                  [1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5],
                  [1.6, 2.6, 3.6, 4.6, 5.6, 6.6, 7.6, 8.6],
                  [1.7, 2.7, 3.7, 4.7, 5.7, 6.7, 7.7, 8.7],
                  [1.8, 2.8, 3.8, 4.8, 5.8, 6.8, 7.8, 8.8]], np.float64)
    b = np.ones((8, 8), np.float64)
    c = np.zeros((8, 8), np.float64)

    mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
    mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))
    mem_c = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(c)))

    # Allocate a MemRefDescriptor to receive the output tensor.
    # The buffer itself is allocated inside the MLIR code generation.
    ref_out = rt.make_nd_memref_descriptor(2, ctypes.c_double)()
    mem_out = ctypes.pointer(ctypes.pointer(ref_out))

    # Invoke the kernel and get numpy output.
    # Built-in bufferization uses in-out buffers.
    # TODO: replace with inplace comprehensive bufferization.
    engine.invoke('main', mem_out, mem_a, mem_b, mem_c)

    # Sanity check on computed result. Only a few elements
    # are sampled from the full dense matrix multiplication.
    full_matmul = np.matmul(a, b)
    expected = np.zeros((8, 8), np.float64)
    expected[0, 0] = 1.0 * full_matmul[0, 0]
    expected[0, 2] = 2.0 * full_matmul[0, 2]
    expected[4, 1] = 3.0 * full_matmul[4, 1]
    c = rt.ranked_memref_to_numpy(mem_out[0])
    if not np.allclose(c, expected):
        quit('FAILURE')
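The three sampled checks encode SDDMM semantics, C = S * (A @ B) for a sparse sampling matrix S. A hedged numpy sketch that reproduces the full expected matrix, with S inferred from the checks above and a, b as defined in Example #3:

import numpy as np

S = np.zeros((8, 8), np.float64)   # sampling matrix inferred from the checks
S[0, 0], S[0, 2], S[4, 1] = 1.0, 2.0, 3.0
expected = S * np.matmul(a, b)     # elementwise sample of the dense matmul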
Example #4
def compile_and_build_engine(
        module: ir.Module) -> execution_engine.ExecutionEngine:
    """Compiles an MLIR module and builds a JIT execution engine.

  Args:
    module: The MLIR module.

  Returns:
    A JIT execution engine for the MLIR module.

  """
    pipeline = f"sparse-compiler"
    PassManager.parse(pipeline).run(module)
    return execution_engine.ExecutionEngine(
        module, opt_level=_OPT_LEVEL, shared_libs=[_get_support_lib_name()])
Example #5
def build_compile_and_run_output(attr: st.EncodingAttr, support_lib: str,
                                 compiler):
    # Build and Compile.
    module = ir.Module.parse(boilerplate(attr))
    compiler(module)
    engine = execution_engine.ExecutionEngine(module,
                                              opt_level=0,
                                              shared_libs=[support_lib])

    # Invoke the kernel and compare output.
    with tempfile.TemporaryDirectory() as test_dir:
        out = os.path.join(test_dir, 'out.tns')
        buf = out.encode('utf-8')
        mem_a = ctypes.pointer(ctypes.pointer(
            ctypes.create_string_buffer(buf)))
        engine.invoke('main', mem_a)

        with open(out) as f:
            actual = f.read()
        if actual != expected():
            quit('FAILURE')
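The output path in Example #5 is handed to the kernel as a char**. A minimal sketch of that marshalling in isolation (the path itself is hypothetical):

import ctypes

path = '/tmp/out.tns'  # hypothetical output path
buf = ctypes.create_string_buffer(path.encode('utf-8'))
mem = ctypes.pointer(ctypes.pointer(buf))  # char** as passed to engine.invoke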
Example #6
def build_compile_and_run_SpMM(attr: st.EncodingAttr, support_lib: str,
                               compiler):
    # Build.
    module = build_SpMM(attr)
    func = str(module.operation.regions[0].blocks[0].operations[0].operation)
    module = ir.Module.parse(func + boilerplate(attr))

    # Compile.
    compiler(module)
    engine = execution_engine.ExecutionEngine(module,
                                              opt_level=0,
                                              shared_libs=[support_lib])

    # Set up numpy input and buffer for output.
    a = np.array(
        [[1.1, 0.0, 0.0, 1.4], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 3.3, 0.0]],
        np.float64)
    b = np.array([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0]], np.float64)
    c = np.zeros((3, 2), np.float64)

    mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
    mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))
    mem_c = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(c)))
    # Allocate a MemRefDescriptor to receive the output tensor.
    # The buffer itself is allocated inside the MLIR code generation.
    ref_out = rt.make_nd_memref_descriptor(2, ctypes.c_double)()
    mem_out = ctypes.pointer(ctypes.pointer(ref_out))

    # Invoke the kernel and get numpy output.
    # Built-in bufferization uses in-out buffers.
    # TODO: replace with inplace comprehensive bufferization.
    engine.invoke('main', mem_out, mem_a, mem_b, mem_c)

    # Sanity check on computed result.
    expected = np.matmul(a, b)
    c = rt.ranked_memref_to_numpy(mem_out[0])
    if not np.allclose(c, expected):
        quit('FAILURE')
def _run_test(support_lib, kernel):
    """Compiles, runs and checks results."""
    module = ir.Module.parse(kernel)
    _SparseCompiler()(module)
    engine = execution_engine.ExecutionEngine(module,
                                              opt_level=0,
                                              shared_libs=[support_lib])

    # Set up numpy inputs and buffer for output.
    a = np.array(
        [[1.1, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 6.6, 0.0]],
        np.float64)
    b = np.array(
        [[1.1, 0.0, 0.0, 2.8], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]],
        np.float64)

    mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
    mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))

    # The sparse tensor output is a pointer to pointer of char.
    out = ctypes.c_char(0)
    mem_out = ctypes.pointer(ctypes.pointer(out))

    # Invoke the kernel.
    engine.invoke('main', mem_a, mem_b, mem_out)

    # Retrieve and check the result.
    rank, nse, shape, values, indices = test_tools.sparse_tensor_to_coo_tensor(
        support_lib, mem_out[0], np.float64)

    # CHECK: PASSED
    if np.allclose(values, [2.2, 2.8, 6.6]) and np.allclose(
            indices, [[0, 0], [0, 3], [2, 2]]):
        print('PASSED')
    else:
        quit('FAILURE')
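Judging from the inputs and the expected triples, the kernel in _run_test computes an elementwise sum of the two sparse operands. A numpy sketch that re-derives the expected COO result from a and b above:

import numpy as np

dense = a + b                             # a, b as defined in _run_test
rows, cols = np.nonzero(dense)            # nonzeros in row-major order
values = dense[rows, cols]                # -> [2.2, 2.8, 6.6]
indices = np.stack([rows, cols], axis=1)  # -> [[0, 0], [0, 3], [2, 2]]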
    def jit(self, module: ir.Module) -> execution_engine.ExecutionEngine:
        """Wraps the module in a JIT execution engine."""
        return execution_engine.ExecutionEngine(module,
                                                opt_level=self.opt_level,
                                                shared_libs=self.shared_libs)
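The jit method above reads self.opt_level and self.shared_libs, and _run_test invokes _SparseCompiler() as a callable. A minimal sketch of a compiler class consistent with both, where the constructor defaults and the pipeline string are assumptions:

class _SparseCompiler:
    """Sketch: a compiler wrapper consistent with the snippets above."""

    def __init__(self, opt_level: int = 0, shared_libs=()):
        self.opt_level = opt_level            # read by jit() above
        self.shared_libs = list(shared_libs)  # read by jit() above

    def __call__(self, module: ir.Module):
        # Assumption: runs the fused pipeline from Example #4.
        PassManager.parse('sparse-compiler').run(module)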