Example #1
import ctypes

import numpy as np

from mlir import execution_engine
from mlir import ir
from mlir import runtime as rt
from mlir.dialects import sparse_tensor as st


def build_compile_and_run_SpMM(attr: st.EncodingAttr, support_lib: str,
                               compiler):
    # Build.
    module = build_SpMM(attr)
    func = str(module.operation.regions[0].blocks[0].operations[0].operation)
    module = ir.Module.parse(func + boilerplate(attr))
    # Compile.
    compiler(module)
    engine = execution_engine.ExecutionEngine(module,
                                              opt_level=0,
                                              shared_libs=[support_lib])
    # Set up numpy input, invoke the kernel, and get numpy output.
    # Built-in bufferization uses in-out buffers.
    # TODO: replace with inplace comprehensive bufferization.
    Cin = np.zeros((3, 2), np.double)
    Cout = np.zeros((3, 2), np.double)
    Cin_memref_ptr = ctypes.pointer(
        ctypes.pointer(rt.get_ranked_memref_descriptor(Cin)))
    Cout_memref_ptr = ctypes.pointer(
        ctypes.pointer(rt.get_ranked_memref_descriptor(Cout)))
    engine.invoke('main', Cout_memref_ptr, Cin_memref_ptr)
    Cresult = rt.ranked_memref_to_numpy(Cout_memref_ptr[0])

    # Sanity check on computed result.
    expected = [[12.3, 12.0], [0.0, 0.0], [16.5, 19.8]]
    if not np.allclose(Cresult, expected):
        quit('FAILURE')
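
The ctypes.pointer(ctypes.pointer(...)) wrapping above is the calling
convention the ExecutionEngine expects: each memref argument is passed as a
pointer to a pointer to its descriptor. A minimal round-trip sketch of that
pattern, using only the mlir.runtime helpers already imported above:

import ctypes
import numpy as np
from mlir import runtime as rt

a = np.arange(6, dtype=np.float64).reshape(3, 2)
# Wrap the numpy buffer in a ranked memref descriptor, then take the
# double pointer that engine.invoke() expects for memref arguments.
desc_ptr = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
# Convert the descriptor back into a numpy array.
b = rt.ranked_memref_to_numpy(desc_ptr[0])
assert np.array_equal(a, b)
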
def build_compile_and_run_SDDMM(attr: st.EncodingAttr, opt: str,
                                support_lib: str, compiler):
    # Build.
    module = build_SDDMM(attr)
    func = str(module.operation.regions[0].blocks[0].operations[0].operation)
    module = ir.Module.parse(func + boilerplate(attr))

    # Compile.
    compiler(module)
    engine = execution_engine.ExecutionEngine(module,
                                              opt_level=0,
                                              shared_libs=[support_lib])

    # Set up numpy input and buffer for output.
    a = np.array([[1.1, 2.1, 3.1, 4.1, 5.1, 6.1, 7.1, 8.1],
                  [1.2, 2.2, 3.2, 4.2, 5.2, 6.2, 7.2, 8.2],
                  [1.3, 2.3, 3.3, 4.3, 5.3, 6.3, 7.3, 8.3],
                  [1.4, 2.4, 3.4, 4.4, 5.4, 6.4, 7.4, 8.4],
                  [1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5],
                  [1.6, 2.6, 3.6, 4.6, 5.6, 6.6, 7.6, 8.6],
                  [1.7, 2.7, 3.7, 4.7, 5.7, 6.7, 7.7, 8.7],
                  [1.8, 2.8, 3.8, 4.8, 5.8, 6.8, 7.8, 8.8]], np.float64)
    b = np.ones((8, 8), np.float64)
    c = np.zeros((8, 8), np.float64)

    mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
    mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))
    mem_c = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(c)))

    # Allocate a MemRefDescriptor to receive the output tensor.
    # The buffer itself is allocated inside the MLIR code generation.
    ref_out = rt.make_nd_memref_descriptor(2, ctypes.c_double)()
    mem_out = ctypes.pointer(ctypes.pointer(ref_out))

    # Invoke the kernel and get numpy output.
    # Built-in bufferization uses in-out buffers.
    # TODO: replace with inplace comprehensive bufferization.
    engine.invoke('main', mem_out, mem_a, mem_b, mem_c)

    # Sanity check on computed result. Only a few elements
    # are sampled from the full dense matrix multiplication.
    full_matmul = np.matmul(a, b)
    expected = np.zeros((8, 8), np.float64)
    expected[0, 0] = 1.0 * full_matmul[0, 0]
    expected[0, 2] = 2.0 * full_matmul[0, 2]
    expected[4, 1] = 3.0 * full_matmul[4, 1]
    c = rt.ranked_memref_to_numpy(mem_out[0])
    if not np.allclose(c, expected):
        quit('FAILURE')
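
The three scaled samples in the check reveal the kernel's contract: SDDMM
(sampled dense-dense matrix multiplication) computes
C[i,j] = S[i,j] * (A@B)[i,j] at the nonzero positions of a sampling matrix S.
A pure-numpy reference sketch, where S is an assumption reconstructed from
the expected values above:

import numpy as np

# Assumed sampling matrix: only the three entries checked above are nonzero.
S = np.zeros((8, 8), np.float64)
S[0, 0], S[0, 2], S[4, 1] = 1.0, 2.0, 3.0

A = np.random.rand(8, 8)
B = np.random.rand(8, 8)
# SDDMM reference: elementwise sampling of the dense product.
C_ref = S * np.matmul(A, B)
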
Example #3
  def run(self, np_arg0: np.ndarray) -> np.ndarray:
    """Runs the test on the given numpy array, and returns the resulting
    numpy array."""
    assert self._engine is not None, \
        'StressTest: must call compile() before run()'
    self._assertEqualsRoundtripTp(
        self._tyconv.get_RankedTensorType_of_nparray(np_arg0))
    np_out = np.zeros(np_arg0.shape, dtype=np_arg0.dtype)
    self._assertEqualsRoundtripTp(
        self._tyconv.get_RankedTensorType_of_nparray(np_out))
    mem_arg0 = ctypes.pointer(
        ctypes.pointer(rt.get_ranked_memref_descriptor(np_arg0)))
    mem_out = ctypes.pointer(
        ctypes.pointer(rt.get_ranked_memref_descriptor(np_out)))
    self._engine.invoke('main', mem_out, mem_arg0)
    return rt.ranked_memref_to_numpy(mem_out[0])
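
A hypothetical call sequence for the class this method belongs to; the
constructor and compile() are not shown in this excerpt, so their signatures
here are assumptions:

import numpy as np

# Hypothetical driver: compile() must populate self._engine before run(),
# per the assert at the top of run().
test = StressTest(tyconv)   # hypothetical constructor argument
test.compile(module)        # hypothetical compile() signature
result = test.run(np.ones((4, 4), dtype=np.float64))
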
Example #4
import ctypes
from typing import Sequence, Tuple

import numpy as np

from mlir import execution_engine
from mlir import ir
from mlir import runtime
from mlir import runtime as rt
from mlir.dialects import sparse_tensor
from mlir.dialects import sparse_tensor as st


def build_compile_and_run_SpMM(attr: st.EncodingAttr, support_lib: str,
                               compiler):
    # Build.
    module = build_SpMM(attr)
    func = str(module.operation.regions[0].blocks[0].operations[0].operation)
    module = ir.Module.parse(func + boilerplate(attr))

    # Compile.
    compiler(module)
    engine = execution_engine.ExecutionEngine(module,
                                              opt_level=0,
                                              shared_libs=[support_lib])

    # Set up numpy input and buffer for output.
    a = np.array(
        [[1.1, 0.0, 0.0, 1.4], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 3.3, 0.0]],
        np.float64)
    b = np.array([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0]], np.float64)
    c = np.zeros((3, 2), np.float64)

    mem_a = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(a)))
    mem_b = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(b)))
    mem_c = ctypes.pointer(ctypes.pointer(rt.get_ranked_memref_descriptor(c)))
    # Allocate a MemRefDescriptor to receive the output tensor.
    # The buffer itself is allocated inside the MLIR code generation.
    ref_out = rt.make_nd_memref_descriptor(2, ctypes.c_double)()
    mem_out = ctypes.pointer(ctypes.pointer(ref_out))

    # Invoke the kernel and get numpy output.
    # Built-in bufferization uses in-out buffers.
    # TODO: replace with inplace comprehensive bufferization.
    engine.invoke('main', mem_out, mem_a, mem_b, mem_c)

    # Sanity check on computed result.
    expected = np.matmul(a, b)
    c = rt.ranked_memref_to_numpy(mem_out[0])
    if not np.allclose(c, expected):
        quit('FAILURE')
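
For these inputs the dense reference works out to exactly the values that
Example #1 hardcodes in its sanity check:

import numpy as np

a = np.array(
    [[1.1, 0.0, 0.0, 1.4], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 3.3, 0.0]],
    np.float64)
b = np.array([[1.0, 2.0], [4.0, 3.0], [5.0, 6.0], [8.0, 7.0]], np.float64)
c_ref = np.matmul(a, b)
# c_ref is approximately [[12.3, 12.0], [0.0, 0.0], [16.5, 19.8]].
assert np.allclose(c_ref, [[12.3, 12.0], [0.0, 0.0], [16.5, 19.8]])
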
def create_sparse_tensor(
    filename: str, sparsity: Sequence[sparse_tensor.DimLevelType]
) -> Tuple[ctypes.c_void_p, np.ndarray]:
    """Creates an MLIR sparse tensor from the input file.

    Args:
      filename: A string for the name of the file that contains the tensor
        data in a COO-flavored format.
      sparsity: A sequence of DimLevelType values, one for each dimension of
        the tensor.

    Returns:
      A tuple containing the following values:
      storage: A ctypes.c_void_p for the MLIR sparse tensor storage.
      shape: A 1D numpy array of integers, for the shape of the tensor.

    Raises:
      OSError: If there is any problem in loading the supporting C shared
        library.
      ValueError: If the shared library doesn't contain the needed routine.
    """
    with ir.Context() as ctx, ir.Location.unknown():
        module = _get_create_sparse_tensor_kernel(sparsity)
        module = ir.Module.parse(module)
        engine = compile_and_build_engine(module)

    # A sparse tensor descriptor to receive the kernel result.
    c_tensor_desc = _SparseTensorDescriptor()
    # Convert the filename to a byte stream.
    c_filename = ctypes.c_char_p(bytes(filename, "utf-8"))

    arg_pointers = [
        ctypes.byref(ctypes.pointer(c_tensor_desc)),
        ctypes.byref(c_filename)
    ]

    # Invoke the execution engine to run the module and return the result.
    engine.invoke(_ENTRY_NAME, *arg_pointers)
    shape = runtime.ranked_memref_to_numpy(ctypes.pointer(c_tensor_desc.shape))
    return c_tensor_desc.storage, shape
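
A hypothetical invocation; the filename is a stand-in, and the assumption is
that DimLevelType exposes dense and compressed level types as in the MLIR
sparse_tensor bindings:

levels = [sparse_tensor.DimLevelType.dense,
          sparse_tensor.DimLevelType.compressed]
# "matrix.mtx" is a placeholder for a COO-flavored tensor file; storage is
# an opaque handle owned by the support library.
storage, shape = create_sparse_tensor("matrix.mtx", levels)
print(shape)  # 1D numpy array holding the tensor's dimensions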